| code (strings, 75–104k chars) | code_sememe (strings, 47–309k chars) | token_type (strings, 215–214k chars) | code_dependency (strings, 75–155k chars) |
|---|---|---|---|
def _summarize_inputs(samples, out_dir):
"""Summarize inputs for MultiQC reporting in display.
"""
logger.info("summarize target information")
if samples[0].get("analysis", "").lower() in ["variant", "variant2"]:
metrics_dir = utils.safe_makedir(os.path.join(out_dir, "report", "metrics"))
samples = _merge_target_information(samples, metrics_dir)
logger.info("summarize fastqc")
out_dir = utils.safe_makedir(os.path.join(out_dir, "report", "fastqc"))
with utils.chdir(out_dir):
_merge_fastqc(samples)
preseq_samples = [s for s in samples if tz.get_in(["config", "algorithm", "preseq"], s)]
if preseq_samples:
logger.info("summarize preseq")
out_dir = utils.safe_makedir(os.path.join(out_dir, "report", "preseq"))
with utils.chdir(out_dir):
_merge_preseq(preseq_samples)
return samples
|
def function[_summarize_inputs, parameter[samples, out_dir]]:
constant[Summarize inputs for MultiQC reporting and display.
]
call[name[logger].info, parameter[constant[summarize target information]]]
if compare[call[call[call[name[samples]][constant[0]].get, parameter[constant[analysis], constant[]]].lower, parameter[]] in list[[<ast.Constant object at 0x7da1b1832680>, <ast.Constant object at 0x7da1b1833bb0>]]] begin[:]
variable[metrics_dir] assign[=] call[name[utils].safe_makedir, parameter[call[name[os].path.join, parameter[name[out_dir], constant[report], constant[metrics]]]]]
variable[samples] assign[=] call[name[_merge_target_information], parameter[name[samples], name[metrics_dir]]]
call[name[logger].info, parameter[constant[summarize fastqc]]]
variable[out_dir] assign[=] call[name[utils].safe_makedir, parameter[call[name[os].path.join, parameter[name[out_dir], constant[report], constant[fastqc]]]]]
with call[name[utils].chdir, parameter[name[out_dir]]] begin[:]
call[name[_merge_fastqc], parameter[name[samples]]]
variable[preseq_samples] assign[=] <ast.ListComp object at 0x7da1b1844520>
if name[preseq_samples] begin[:]
call[name[logger].info, parameter[constant[summarize preseq]]]
variable[out_dir] assign[=] call[name[utils].safe_makedir, parameter[call[name[os].path.join, parameter[name[out_dir], constant[report], constant[preseq]]]]]
with call[name[utils].chdir, parameter[name[out_dir]]] begin[:]
call[name[_merge_preseq], parameter[name[preseq_samples]]]
return[name[samples]]
|
keyword[def] identifier[_summarize_inputs] ( identifier[samples] , identifier[out_dir] ):
literal[string]
identifier[logger] . identifier[info] ( literal[string] )
keyword[if] identifier[samples] [ literal[int] ]. identifier[get] ( literal[string] , literal[string] ). identifier[lower] () keyword[in] [ literal[string] , literal[string] ]:
identifier[metrics_dir] = identifier[utils] . identifier[safe_makedir] ( identifier[os] . identifier[path] . identifier[join] ( identifier[out_dir] , literal[string] , literal[string] ))
identifier[samples] = identifier[_merge_target_information] ( identifier[samples] , identifier[metrics_dir] )
identifier[logger] . identifier[info] ( literal[string] )
identifier[out_dir] = identifier[utils] . identifier[safe_makedir] ( identifier[os] . identifier[path] . identifier[join] ( identifier[out_dir] , literal[string] , literal[string] ))
keyword[with] identifier[utils] . identifier[chdir] ( identifier[out_dir] ):
identifier[_merge_fastqc] ( identifier[samples] )
identifier[preseq_samples] =[ identifier[s] keyword[for] identifier[s] keyword[in] identifier[samples] keyword[if] identifier[tz] . identifier[get_in] ([ literal[string] , literal[string] , literal[string] ], identifier[s] )]
keyword[if] identifier[preseq_samples] :
identifier[logger] . identifier[info] ( literal[string] )
identifier[out_dir] = identifier[utils] . identifier[safe_makedir] ( identifier[os] . identifier[path] . identifier[join] ( identifier[out_dir] , literal[string] , literal[string] ))
keyword[with] identifier[utils] . identifier[chdir] ( identifier[out_dir] ):
identifier[_merge_preseq] ( identifier[preseq_samples] )
keyword[return] identifier[samples]
|
def _summarize_inputs(samples, out_dir):
"""Summarize inputs for MultiQC reporting in display.
"""
logger.info('summarize target information')
if samples[0].get('analysis', '').lower() in ['variant', 'variant2']:
metrics_dir = utils.safe_makedir(os.path.join(out_dir, 'report', 'metrics'))
samples = _merge_target_information(samples, metrics_dir) # depends on [control=['if'], data=[]]
logger.info('summarize fastqc')
out_dir = utils.safe_makedir(os.path.join(out_dir, 'report', 'fastqc'))
with utils.chdir(out_dir):
_merge_fastqc(samples) # depends on [control=['with'], data=[]]
preseq_samples = [s for s in samples if tz.get_in(['config', 'algorithm', 'preseq'], s)]
if preseq_samples:
logger.info('summarize preseq')
out_dir = utils.safe_makedir(os.path.join(out_dir, 'report', 'preseq'))
with utils.chdir(out_dir):
_merge_preseq(preseq_samples) # depends on [control=['with'], data=[]] # depends on [control=['if'], data=[]]
return samples
|
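The list comprehension above filters samples with `tz.get_in(["config", "algorithm", "preseq"], s)`. A minimal sketch of that nested-lookup pattern, assuming `tz` is the `toolz` library:

```python
from toolz import get_in

samples = [
    {"config": {"algorithm": {"preseq": True}}},
    {"config": {"algorithm": {}}},
]
# get_in walks the key path and returns None (the default) when a key is
# missing, so samples without a preseq setting are filtered out.
preseq_samples = [s for s in samples if get_in(["config", "algorithm", "preseq"], s)]
print(len(preseq_samples))  # 1
```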
def save_state_with_reason(self, reason, progress, snapshot, state_file_path, pause_vm):
"""Internal method for triggering a VM save state with a specified reason
code. The reason code can be interpreted by device/drivers and thus it
might behave slightly differently than a normal VM save state.
This call is fully synchronous, and the caller is expected to have set
the machine state appropriately (and has to set the follow-up machine
state if this call failed).
:py:func:`IMachine.save_state`
in reason of type :class:`Reason`
Specify the best matching reason code please.
in progress of type :class:`IProgress`
Progress object to track the operation completion.
in snapshot of type :class:`ISnapshot`
Snapshot object for which this save state operation is executed.
in state_file_path of type str
File path the VM process must save the execution state to.
in pause_vm of type bool
The VM should be paused before saving state. It is automatically
unpaused on error in the "vanilla save state" case.
return left_paused of type bool
Returns if the VM was left in paused state, which is necessary
in many situations (snapshots, teleportation).
raises :class:`VBoxErrorInvalidVmState`
Virtual machine state is not one of the expected values.
raises :class:`VBoxErrorFileError`
Failed to create directory for saved state file.
"""
if not isinstance(reason, Reason):
raise TypeError("reason can only be an instance of type Reason")
if not isinstance(progress, IProgress):
raise TypeError("progress can only be an instance of type IProgress")
if not isinstance(snapshot, ISnapshot):
raise TypeError("snapshot can only be an instance of type ISnapshot")
if not isinstance(state_file_path, basestring):
raise TypeError("state_file_path can only be an instance of type basestring")
if not isinstance(pause_vm, bool):
raise TypeError("pause_vm can only be an instance of type bool")
left_paused = self._call("saveStateWithReason",
in_p=[reason, progress, snapshot, state_file_path, pause_vm])
return left_paused
|
def function[save_state_with_reason, parameter[self, reason, progress, snapshot, state_file_path, pause_vm]]:
constant[Internal method for triggering a VM save state with a specified reason
code. The reason code can be interpreted by device/drivers and thus it
might behave slightly differently than a normal VM save state.
This call is fully synchronous, and the caller is expected to have set
the machine state appropriately (and has to set the follow-up machine
state if this call failed).
:py:func:`IMachine.save_state`
in reason of type :class:`Reason`
Specify the best matching reason code please.
in progress of type :class:`IProgress`
Progress object to track the operation completion.
in snapshot of type :class:`ISnapshot`
Snapshot object for which this save state operation is executed.
in state_file_path of type str
File path the VM process must save the execution state to.
in pause_vm of type bool
The VM should be paused before saving state. It is automatically
unpaused on error in the "vanilla save state" case.
return left_paused of type bool
Returns if the VM was left in paused state, which is necessary
in many situations (snapshots, teleportation).
raises :class:`VBoxErrorInvalidVmState`
Virtual machine state is not one of the expected values.
raises :class:`VBoxErrorFileError`
Failed to create directory for saved state file.
]
if <ast.UnaryOp object at 0x7da2043473a0> begin[:]
<ast.Raise object at 0x7da204345ff0>
if <ast.UnaryOp object at 0x7da2043459c0> begin[:]
<ast.Raise object at 0x7da204345420>
if <ast.UnaryOp object at 0x7da2043469e0> begin[:]
<ast.Raise object at 0x7da2043441f0>
if <ast.UnaryOp object at 0x7da204346d10> begin[:]
<ast.Raise object at 0x7da204345060>
if <ast.UnaryOp object at 0x7da204346290> begin[:]
<ast.Raise object at 0x7da2043442b0>
variable[left_paused] assign[=] call[name[self]._call, parameter[constant[saveStateWithReason]]]
return[name[left_paused]]
|
keyword[def] identifier[save_state_with_reason] ( identifier[self] , identifier[reason] , identifier[progress] , identifier[snapshot] , identifier[state_file_path] , identifier[pause_vm] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[reason] , identifier[Reason] ):
keyword[raise] identifier[TypeError] ( literal[string] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[progress] , identifier[IProgress] ):
keyword[raise] identifier[TypeError] ( literal[string] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[snapshot] , identifier[ISnapshot] ):
keyword[raise] identifier[TypeError] ( literal[string] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[state_file_path] , identifier[basestring] ):
keyword[raise] identifier[TypeError] ( literal[string] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[pause_vm] , identifier[bool] ):
keyword[raise] identifier[TypeError] ( literal[string] )
identifier[left_paused] = identifier[self] . identifier[_call] ( literal[string] ,
identifier[in_p] =[ identifier[reason] , identifier[progress] , identifier[snapshot] , identifier[state_file_path] , identifier[pause_vm] ])
keyword[return] identifier[left_paused]
|
def save_state_with_reason(self, reason, progress, snapshot, state_file_path, pause_vm):
"""Internal method for triggering a VM save state with a specified reason
code. The reason code can be interpreted by device/drivers and thus it
might behave slightly differently than a normal VM save state.
This call is fully synchronous, and the caller is expected to have set
the machine state appropriately (and has to set the follow-up machine
state if this call failed).
:py:func:`IMachine.save_state`
in reason of type :class:`Reason`
Specify the best matching reason code please.
in progress of type :class:`IProgress`
Progress object to track the operation completion.
in snapshot of type :class:`ISnapshot`
Snapshot object for which this save state operation is executed.
in state_file_path of type str
File path the VM process must save the execution state to.
in pause_vm of type bool
The VM should be paused before saving state. It is automatically
unpaused on error in the "vanilla save state" case.
return left_paused of type bool
Returns if the VM was left in paused state, which is necessary
in many situations (snapshots, teleportation).
raises :class:`VBoxErrorInvalidVmState`
Virtual machine state is not one of the expected values.
raises :class:`VBoxErrorFileError`
Failed to create directory for saved state file.
"""
if not isinstance(reason, Reason):
raise TypeError('reason can only be an instance of type Reason') # depends on [control=['if'], data=[]]
if not isinstance(progress, IProgress):
raise TypeError('progress can only be an instance of type IProgress') # depends on [control=['if'], data=[]]
if not isinstance(snapshot, ISnapshot):
raise TypeError('snapshot can only be an instance of type ISnapshot') # depends on [control=['if'], data=[]]
if not isinstance(state_file_path, basestring):
raise TypeError('state_file_path can only be an instance of type basestring') # depends on [control=['if'], data=[]]
if not isinstance(pause_vm, bool):
raise TypeError('pause_vm can only be an instance of type bool') # depends on [control=['if'], data=[]]
left_paused = self._call('saveStateWithReason', in_p=[reason, progress, snapshot, state_file_path, pause_vm])
return left_paused
|
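The `basestring` check above only works on Python 2. A small compatibility shim (our own sketch, not part of the original module) keeps the same guard working on Python 3:

```python
# Hypothetical compatibility shim; not part of the original module.
try:
    string_types = basestring  # Python 2: covers both str and unicode
except NameError:
    string_types = str  # Python 3

# The state_file_path guard from save_state_with_reason, expressed portably:
state_file_path = "/vms/demo/state.sav"
if not isinstance(state_file_path, string_types):
    raise TypeError("state_file_path can only be an instance of type basestring")
```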
def create_on_task(self, task, params={}, **options):
"""Adds a comment to a task. The comment will be authored by the
currently authenticated user, and timestamped when the server receives
the request.
Returns the full record for the new story added to the task.
Parameters
----------
task : {Id} Globally unique identifier for the task.
[data] : {Object} Data for the request
- text : {String} The plain text of the comment to add.
"""
path = "/tasks/%s/stories" % (task)
return self.client.post(path, params, **options)
|
def function[create_on_task, parameter[self, task, params]]:
constant[Adds a comment to a task. The comment will be authored by the
currently authenticated user, and timestamped when the server receives
the request.
Returns the full record for the new story added to the task.
Parameters
----------
task : {Id} Globally unique identifier for the task.
[data] : {Object} Data for the request
- text : {String} The plain text of the comment to add.
]
variable[path] assign[=] binary_operation[constant[/tasks/%s/stories] <ast.Mod object at 0x7da2590d6920> name[task]]
return[call[name[self].client.post, parameter[name[path], name[params]]]]
|
keyword[def] identifier[create_on_task] ( identifier[self] , identifier[task] , identifier[params] ={},** identifier[options] ):
literal[string]
identifier[path] = literal[string] %( identifier[task] )
keyword[return] identifier[self] . identifier[client] . identifier[post] ( identifier[path] , identifier[params] ,** identifier[options] )
|
def create_on_task(self, task, params={}, **options):
"""Adds a comment to a task. The comment will be authored by the
currently authenticated user, and timestamped when the server receives
the request.
Returns the full record for the new story added to the task.
Parameters
----------
task : {Id} Globally unique identifier for the task.
[data] : {Object} Data for the request
- text : {String} The plain text of the comment to add.
"""
path = '/tasks/%s/stories' % task
return self.client.post(path, params, **options)
|
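As a usage sketch, assuming this method is generated onto the `stories` resource of the classic python-asana client; the access token and task ID are placeholders:

```python
import asana  # assumption: the pre-v4 python-asana client library

client = asana.Client.access_token("my_personal_access_token")
# POSTs to /tasks/<task>/stories with the comment text in the request body
story = client.stories.create_on_task("1204619611402340", {"text": "Ship it!"})
print(story)
```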
def _map_smtp_headers_to_api_parameters(self, email_message):
"""
Map the values passed in SMTP headers to API-ready
2-item tuples present in HEADERS_MAP
header values must be a single string or list or tuple of strings
:return: 2-item tuples of the form (api_name, api_values)
"""
api_data = []
for smtp_key, api_transformer in six.iteritems(self._headers_map):
data_to_transform = email_message.extra_headers.pop(smtp_key, None)
if data_to_transform is not None:
if isinstance(data_to_transform, (list, tuple)):
# map each value in the tuple/list
for data in data_to_transform:
api_data.append((api_transformer[0], api_transformer[1](data)))
elif isinstance(data_to_transform, dict):
for data in six.iteritems(data_to_transform):
api_data.append(api_transformer(data))
else:
# we only have one value
api_data.append((api_transformer[0], api_transformer[1](data_to_transform)))
return api_data
|
def function[_map_smtp_headers_to_api_parameters, parameter[self, email_message]]:
constant[
Map the values passed in SMTP headers to API-ready
2-item tuples present in HEADERS_MAP
header values must be a single string or list or tuple of strings
:return: 2-item tuples of the form (api_name, api_values)
]
variable[api_data] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b1a2b040>, <ast.Name object at 0x7da1b1a28f40>]]] in starred[call[name[six].iteritems, parameter[name[self]._headers_map]]] begin[:]
variable[data_to_transform] assign[=] call[name[email_message].extra_headers.pop, parameter[name[smtp_key], constant[None]]]
if compare[name[data_to_transform] is_not constant[None]] begin[:]
if call[name[isinstance], parameter[name[data_to_transform], tuple[[<ast.Name object at 0x7da1b1a299f0>, <ast.Name object at 0x7da1b1a288e0>]]]] begin[:]
for taget[name[data]] in starred[name[data_to_transform]] begin[:]
call[name[api_data].append, parameter[tuple[[<ast.Subscript object at 0x7da1b1a2a1d0>, <ast.Call object at 0x7da1b1a28730>]]]]
return[name[api_data]]
|
keyword[def] identifier[_map_smtp_headers_to_api_parameters] ( identifier[self] , identifier[email_message] ):
literal[string]
identifier[api_data] =[]
keyword[for] identifier[smtp_key] , identifier[api_transformer] keyword[in] identifier[six] . identifier[iteritems] ( identifier[self] . identifier[_headers_map] ):
identifier[data_to_transform] = identifier[email_message] . identifier[extra_headers] . identifier[pop] ( identifier[smtp_key] , keyword[None] )
keyword[if] identifier[data_to_transform] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[isinstance] ( identifier[data_to_transform] ,( identifier[list] , identifier[tuple] )):
keyword[for] identifier[data] keyword[in] identifier[data_to_transform] :
identifier[api_data] . identifier[append] (( identifier[api_transformer] [ literal[int] ], identifier[api_transformer] [ literal[int] ]( identifier[data] )))
keyword[elif] identifier[isinstance] ( identifier[data_to_transform] , identifier[dict] ):
keyword[for] identifier[data] keyword[in] identifier[six] . identifier[iteritems] ( identifier[data_to_transform] ):
identifier[api_data] . identifier[append] ( identifier[api_transformer] ( identifier[data] ))
keyword[else] :
identifier[api_data] . identifier[append] (( identifier[api_transformer] [ literal[int] ], identifier[api_transformer] [ literal[int] ]( identifier[data_to_transform] )))
keyword[return] identifier[api_data]
|
def _map_smtp_headers_to_api_parameters(self, email_message):
"""
Map the values passed in SMTP headers to API-ready
2-item tuples present in HEADERS_MAP
header values must be a single string or list or tuple of strings
:return: 2-item tuples of the form (api_name, api_values)
"""
api_data = []
for (smtp_key, api_transformer) in six.iteritems(self._headers_map):
data_to_transform = email_message.extra_headers.pop(smtp_key, None)
if data_to_transform is not None:
if isinstance(data_to_transform, (list, tuple)):
# map each value in the tuple/list
for data in data_to_transform:
api_data.append((api_transformer[0], api_transformer[1](data))) # depends on [control=['for'], data=['data']] # depends on [control=['if'], data=[]]
elif isinstance(data_to_transform, dict):
for data in six.iteritems(data_to_transform):
api_data.append(api_transformer(data)) # depends on [control=['for'], data=['data']] # depends on [control=['if'], data=[]]
else:
# we only have one value
api_data.append((api_transformer[0], api_transformer[1](data_to_transform))) # depends on [control=['if'], data=['data_to_transform']] # depends on [control=['for'], data=[]]
return api_data
|
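A self-contained sketch of the `HEADERS_MAP` contract described in the docstring; the map, message class, and header names below are illustrative, not taken from the original library:

```python
import six

class FakeMessage(object):
    """Stands in for an email message carrying extra SMTP headers."""
    def __init__(self, extra_headers):
        self.extra_headers = extra_headers

# SMTP header -> (api_name, transformer applied to each value)
headers_map = {"X-Tag": ("tags", lambda v: v.lower())}

msg = FakeMessage({"X-Tag": ["Alpha", "Beta"]})
api_data = []
for smtp_key, (api_name, transform) in six.iteritems(headers_map):
    values = msg.extra_headers.pop(smtp_key, None)
    if values is not None:
        # list/tuple values are mapped item by item, single values directly
        for value in (values if isinstance(values, (list, tuple)) else [values]):
            api_data.append((api_name, transform(value)))
print(api_data)  # [('tags', 'alpha'), ('tags', 'beta')]
```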
def promote_artifacts(self, promote_stage='latest'):
"""Promote artifact version to dest.
Args:
promote_stage (string): Stage that is being promoted
"""
if promote_stage.lower() == 'alpha':
self._sync_to_uri(self.s3_canary_uri)
elif promote_stage.lower() == 'canary':
self._sync_to_uri(self.s3_latest_uri)
else:
self._sync_to_uri(self.s3_latest_uri)
|
def function[promote_artifacts, parameter[self, promote_stage]]:
constant[Promote artifact version to dest.
Args:
promote_stage (string): Stage that is being promoted
]
if compare[call[name[promote_stage].lower, parameter[]] equal[==] constant[alpha]] begin[:]
call[name[self]._sync_to_uri, parameter[name[self].s3_canary_uri]]
|
keyword[def] identifier[promote_artifacts] ( identifier[self] , identifier[promote_stage] = literal[string] ):
literal[string]
keyword[if] identifier[promote_stage] . identifier[lower] ()== literal[string] :
identifier[self] . identifier[_sync_to_uri] ( identifier[self] . identifier[s3_canary_uri] )
keyword[elif] identifier[promote_stage] . identifier[lower] ()== literal[string] :
identifier[self] . identifier[_sync_to_uri] ( identifier[self] . identifier[s3_latest_uri] )
keyword[else] :
identifier[self] . identifier[_sync_to_uri] ( identifier[self] . identifier[s3_latest_uri] )
|
def promote_artifacts(self, promote_stage='latest'):
"""Promote artifact version to dest.
Args:
promote_stage (string): Stage that is being promoted
"""
if promote_stage.lower() == 'alpha':
self._sync_to_uri(self.s3_canary_uri) # depends on [control=['if'], data=[]]
elif promote_stage.lower() == 'canary':
self._sync_to_uri(self.s3_latest_uri) # depends on [control=['if'], data=[]]
else:
self._sync_to_uri(self.s3_latest_uri)
|
async def write(self, towrite: bytes, await_blocking=False):
"""
Appends towrite to the write queue
>>> await test.write(b"HELLO")
# Returns without wait time
>>> await test.write(b"HELLO", await_blocking = True)
# Returns when the buffer is flushed
:param towrite: Write buffer
:param await_blocking: wait for everything to be written
"""
await self._write(towrite)
# Wait for the output buffer to be flushed if requested
if await_blocking:
return await self.flush()
|
<ast.AsyncFunctionDef object at 0x7da20c6a93c0>
|
keyword[async] keyword[def] identifier[write] ( identifier[self] , identifier[towrite] : identifier[bytes] , identifier[await_blocking] = keyword[False] ):
literal[string]
keyword[await] identifier[self] . identifier[_write] ( identifier[towrite] )
keyword[if] identifier[await_blocking] :
keyword[return] keyword[await] identifier[self] . identifier[flush] ()
|
async def write(self, towrite: bytes, await_blocking=False):
"""
Appends towrite to the write queue
>>> await test.write(b"HELLO")
# Returns without wait time
>>> await test.write(b"HELLO", await_blocking = True)
# Returns when the buffer is flushed
:param towrite: Write buffer
:param await_blocking: wait for everything to be written
"""
await self._write(towrite)
# Wait for the output buffer to be flushed if requested
if await_blocking:
return await self.flush() # depends on [control=['if'], data=[]]
|
def get_keys(self, bucket, timeout=None):
"""
Lists all keys within a bucket.
"""
msg_code = riak.pb.messages.MSG_CODE_LIST_KEYS_REQ
codec = self._get_codec(msg_code)
stream = self.stream_keys(bucket, timeout=timeout)
return codec.decode_get_keys(stream)
|
def function[get_keys, parameter[self, bucket, timeout]]:
constant[
Lists all keys within a bucket.
]
variable[msg_code] assign[=] name[riak].pb.messages.MSG_CODE_LIST_KEYS_REQ
variable[codec] assign[=] call[name[self]._get_codec, parameter[name[msg_code]]]
variable[stream] assign[=] call[name[self].stream_keys, parameter[name[bucket]]]
return[call[name[codec].decode_get_keys, parameter[name[stream]]]]
|
keyword[def] identifier[get_keys] ( identifier[self] , identifier[bucket] , identifier[timeout] = keyword[None] ):
literal[string]
identifier[msg_code] = identifier[riak] . identifier[pb] . identifier[messages] . identifier[MSG_CODE_LIST_KEYS_REQ]
identifier[codec] = identifier[self] . identifier[_get_codec] ( identifier[msg_code] )
identifier[stream] = identifier[self] . identifier[stream_keys] ( identifier[bucket] , identifier[timeout] = identifier[timeout] )
keyword[return] identifier[codec] . identifier[decode_get_keys] ( identifier[stream] )
|
def get_keys(self, bucket, timeout=None):
"""
Lists all keys within a bucket.
"""
msg_code = riak.pb.messages.MSG_CODE_LIST_KEYS_REQ
codec = self._get_codec(msg_code)
stream = self.stream_keys(bucket, timeout=timeout)
return codec.decode_get_keys(stream)
|
def count_items(self):
"""Counts Items in full_soup and soup. For debugging"""
soup_items = self.soup.findAll('item')
full_soup_items = self.full_soup.findAll('item')
return len(soup_items), len(full_soup_items)
|
def function[count_items, parameter[self]]:
constant[Counts Items in full_soup and soup. For debugging]
variable[soup_items] assign[=] call[name[self].soup.findAll, parameter[constant[item]]]
variable[full_soup_items] assign[=] call[name[self].full_soup.findAll, parameter[constant[item]]]
return[tuple[[<ast.Call object at 0x7da18f813550>, <ast.Call object at 0x7da18f8121a0>]]]
|
keyword[def] identifier[count_items] ( identifier[self] ):
literal[string]
identifier[soup_items] = identifier[self] . identifier[soup] . identifier[findAll] ( literal[string] )
identifier[full_soup_items] = identifier[self] . identifier[full_soup] . identifier[findAll] ( literal[string] )
keyword[return] identifier[len] ( identifier[soup_items] ), identifier[len] ( identifier[full_soup_items] )
|
def count_items(self):
"""Counts Items in full_soup and soup. For debugging"""
soup_items = self.soup.findAll('item')
full_soup_items = self.full_soup.findAll('item')
return (len(soup_items), len(full_soup_items))
|
def delete_cash_on_delivery_payment_by_id(cls, cash_on_delivery_payment_id, **kwargs):
"""Delete CashOnDeliveryPayment
Delete an instance of CashOnDeliveryPayment by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_cash_on_delivery_payment_by_id(cash_on_delivery_payment_id, async=True)
>>> result = thread.get()
:param async bool
:param str cash_on_delivery_payment_id: ID of cashOnDeliveryPayment to delete. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._delete_cash_on_delivery_payment_by_id_with_http_info(cash_on_delivery_payment_id, **kwargs)
else:
(data) = cls._delete_cash_on_delivery_payment_by_id_with_http_info(cash_on_delivery_payment_id, **kwargs)
return data
|
def function[delete_cash_on_delivery_payment_by_id, parameter[cls, cash_on_delivery_payment_id]]:
constant[Delete CashOnDeliveryPayment
Delete an instance of CashOnDeliveryPayment by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_cash_on_delivery_payment_by_id(cash_on_delivery_payment_id, async=True)
>>> result = thread.get()
:param async bool
:param str cash_on_delivery_payment_id: ID of cashOnDeliveryPayment to delete. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
]
call[name[kwargs]][constant[_return_http_data_only]] assign[=] constant[True]
if call[name[kwargs].get, parameter[constant[async]]] begin[:]
return[call[name[cls]._delete_cash_on_delivery_payment_by_id_with_http_info, parameter[name[cash_on_delivery_payment_id]]]]
|
keyword[def] identifier[delete_cash_on_delivery_payment_by_id] ( identifier[cls] , identifier[cash_on_delivery_payment_id] ,** identifier[kwargs] ):
literal[string]
identifier[kwargs] [ literal[string] ]= keyword[True]
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ):
keyword[return] identifier[cls] . identifier[_delete_cash_on_delivery_payment_by_id_with_http_info] ( identifier[cash_on_delivery_payment_id] ,** identifier[kwargs] )
keyword[else] :
( identifier[data] )= identifier[cls] . identifier[_delete_cash_on_delivery_payment_by_id_with_http_info] ( identifier[cash_on_delivery_payment_id] ,** identifier[kwargs] )
keyword[return] identifier[data]
|
def delete_cash_on_delivery_payment_by_id(cls, cash_on_delivery_payment_id, **kwargs):
"""Delete CashOnDeliveryPayment
Delete an instance of CashOnDeliveryPayment by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_cash_on_delivery_payment_by_id(cash_on_delivery_payment_id, async=True)
>>> result = thread.get()
:param async bool
:param str cash_on_delivery_payment_id: ID of cashOnDeliveryPayment to delete. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._delete_cash_on_delivery_payment_by_id_with_http_info(cash_on_delivery_payment_id, **kwargs) # depends on [control=['if'], data=[]]
else:
data = cls._delete_cash_on_delivery_payment_by_id_with_http_info(cash_on_delivery_payment_id, **kwargs)
return data
|
def model_name(allele, num):
"""
Generate a model name
Parameters
----------
allele : string
num : int
Returns
-------
string
"""
random_string = hashlib.sha1(
str(time.time()).encode()).hexdigest()[:16]
return "%s-%d-%s" % (allele.upper(), num, random_string)
|
def function[model_name, parameter[allele, num]]:
constant[
Generate a model name
Parameters
----------
allele : string
num : int
Returns
-------
string
]
variable[random_string] assign[=] call[call[call[name[hashlib].sha1, parameter[call[call[name[str], parameter[call[name[time].time, parameter[]]]].encode, parameter[]]]].hexdigest, parameter[]]][<ast.Slice object at 0x7da1b12951b0>]
return[binary_operation[constant[%s-%d-%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da207f993f0>, <ast.Name object at 0x7da207f99270>, <ast.Name object at 0x7da207f9a4a0>]]]]
|
keyword[def] identifier[model_name] ( identifier[allele] , identifier[num] ):
literal[string]
identifier[random_string] = identifier[hashlib] . identifier[sha1] (
identifier[str] ( identifier[time] . identifier[time] ()). identifier[encode] ()). identifier[hexdigest] ()[: literal[int] ]
keyword[return] literal[string] %( identifier[allele] . identifier[upper] (), identifier[num] , identifier[random_string] )
|
def model_name(allele, num):
"""
Generate a model name
Parameters
----------
allele : string
num : int
Returns
-------
string
"""
random_string = hashlib.sha1(str(time.time()).encode()).hexdigest()[:16]
return '%s-%d-%s' % (allele.upper(), num, random_string)
|
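The function above is self-contained apart from its two imports, so it can be exercised directly; the hash suffix changes on every call because it is derived from the current time:

```python
import hashlib
import time

def model_name(allele, num):
    random_string = hashlib.sha1(str(time.time()).encode()).hexdigest()[:16]
    return "%s-%d-%s" % (allele.upper(), num, random_string)

print(model_name("hla-a*02:01", 3))  # e.g. HLA-A*02:01-3-4f2d9a1c0b7e8d35
```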
def _get_version_info():
"""
Returns the currently-installed awslimitchecker version, and a best-effort
attempt at finding the origin URL and commit/tag if installed from an
editable git clone.
:returns: awslimitchecker version
:rtype: str
"""
if os.environ.get('VERSIONCHECK_DEBUG', '') != 'true':
for lname in ['versionfinder', 'pip', 'git']:
l = logging.getLogger(lname)
l.setLevel(logging.CRITICAL)
l.propagate = True
try:
vinfo = find_version('awslimitchecker')
dirty = ''
if vinfo.git_is_dirty:
dirty = '*'
tag = vinfo.git_tag
if tag is not None:
tag += dirty
commit = vinfo.git_commit
if commit is not None:
if len(commit) > 7:
commit = commit[:8]
commit += dirty
return AWSLimitCheckerVersion(
vinfo.version,
vinfo.url,
tag=tag,
commit=commit
)
except Exception:
logger.exception("Error checking installed version; this installation "
"may not be in compliance with the AGPLv3 license:")
# fall back to returning just the hard-coded release information
return AWSLimitCheckerVersion(_VERSION, _PROJECT_URL)
|
def function[_get_version_info, parameter[]]:
constant[
Returns the currently-installed awslimitchecker version, and a best-effort
attempt at finding the origin URL and commit/tag if installed from an
editable git clone.
:returns: awslimitchecker version
:rtype: str
]
if compare[call[name[os].environ.get, parameter[constant[VERSIONCHECK_DEBUG], constant[]]] not_equal[!=] constant[true]] begin[:]
for taget[name[lname]] in starred[list[[<ast.Constant object at 0x7da20c6e57b0>, <ast.Constant object at 0x7da20c6e5a80>, <ast.Constant object at 0x7da20c6e4fa0>]]] begin[:]
variable[l] assign[=] call[name[logging].getLogger, parameter[name[lname]]]
call[name[l].setLevel, parameter[name[logging].CRITICAL]]
name[l].propagate assign[=] constant[True]
<ast.Try object at 0x7da20c6e55a0>
return[call[name[AWSLimitCheckerVersion], parameter[name[_VERSION], name[_PROJECT_URL]]]]
|
keyword[def] identifier[_get_version_info] ():
literal[string]
keyword[if] identifier[os] . identifier[environ] . identifier[get] ( literal[string] , literal[string] )!= literal[string] :
keyword[for] identifier[lname] keyword[in] [ literal[string] , literal[string] , literal[string] ]:
identifier[l] = identifier[logging] . identifier[getLogger] ( identifier[lname] )
identifier[l] . identifier[setLevel] ( identifier[logging] . identifier[CRITICAL] )
identifier[l] . identifier[propagate] = keyword[True]
keyword[try] :
identifier[vinfo] = identifier[find_version] ( literal[string] )
identifier[dirty] = literal[string]
keyword[if] identifier[vinfo] . identifier[git_is_dirty] :
identifier[dirty] = literal[string]
identifier[tag] = identifier[vinfo] . identifier[git_tag]
keyword[if] identifier[tag] keyword[is] keyword[not] keyword[None] :
identifier[tag] += identifier[dirty]
identifier[commit] = identifier[vinfo] . identifier[git_commit]
keyword[if] identifier[commit] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[len] ( identifier[commit] )> literal[int] :
identifier[commit] = identifier[commit] [: literal[int] ]
identifier[commit] += identifier[dirty]
keyword[return] identifier[AWSLimitCheckerVersion] (
identifier[vinfo] . identifier[version] ,
identifier[vinfo] . identifier[url] ,
identifier[tag] = identifier[tag] ,
identifier[commit] = identifier[commit]
)
keyword[except] identifier[Exception] :
identifier[logger] . identifier[exception] ( literal[string]
literal[string] )
keyword[return] identifier[AWSLimitCheckerVersion] ( identifier[_VERSION] , identifier[_PROJECT_URL] )
|
def _get_version_info():
"""
Returns the currently-installed awslimitchecker version, and a best-effort
attempt at finding the origin URL and commit/tag if installed from an
editable git clone.
:returns: awslimitchecker version
:rtype: str
"""
if os.environ.get('VERSIONCHECK_DEBUG', '') != 'true':
for lname in ['versionfinder', 'pip', 'git']:
l = logging.getLogger(lname)
l.setLevel(logging.CRITICAL)
l.propagate = True # depends on [control=['for'], data=['lname']] # depends on [control=['if'], data=[]]
try:
vinfo = find_version('awslimitchecker')
dirty = ''
if vinfo.git_is_dirty:
dirty = '*' # depends on [control=['if'], data=[]]
tag = vinfo.git_tag
if tag is not None:
tag += dirty # depends on [control=['if'], data=['tag']]
commit = vinfo.git_commit
if commit is not None:
if len(commit) > 7:
commit = commit[:8] # depends on [control=['if'], data=[]]
commit += dirty # depends on [control=['if'], data=['commit']]
return AWSLimitCheckerVersion(vinfo.version, vinfo.url, tag=tag, commit=commit) # depends on [control=['try'], data=[]]
except Exception:
logger.exception('Error checking installed version; this installation may not be in compliance with the AGPLv3 license:') # depends on [control=['except'], data=[]]
# fall back to returning just the hard-coded release information
return AWSLimitCheckerVersion(_VERSION, _PROJECT_URL)
|
def clean(self):
""" Check user has cookies enabled
"""
if self.request:
if not self.request.session.test_cookie_worked():
raise forms.ValidationError("Cookies must be enabled.")
return self.cleaned_data
|
def function[clean, parameter[self]]:
constant[ Check user has cookies enabled
]
if name[self].request begin[:]
if <ast.UnaryOp object at 0x7da18dc06890> begin[:]
<ast.Raise object at 0x7da18dc045b0>
return[name[self].cleaned_data]
|
keyword[def] identifier[clean] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[request] :
keyword[if] keyword[not] identifier[self] . identifier[request] . identifier[session] . identifier[test_cookie_worked] ():
keyword[raise] identifier[forms] . identifier[ValidationError] ( literal[string] )
keyword[return] identifier[self] . identifier[cleaned_data]
|
def clean(self):
""" Check user has cookies enabled
"""
if self.request:
if not self.request.session.test_cookie_worked():
raise forms.ValidationError('Cookies must be enabled.') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return self.cleaned_data
|
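For `test_cookie_worked()` to return `True`, an earlier request must have planted Django's test cookie. A sketch of the standard pairing (the view and form names here are illustrative):

```python
from django import forms

class CookieCheckForm(forms.Form):
    request = None  # attached by the view before validation

    def clean(self):
        if self.request and not self.request.session.test_cookie_worked():
            raise forms.ValidationError("Cookies must be enabled.")
        return self.cleaned_data

def login_view(request):
    if request.method == "POST":
        form = CookieCheckForm(request.POST)
        form.request = request
        if form.is_valid():
            request.session.delete_test_cookie()  # tidy up after a successful check
    else:
        request.session.set_test_cookie()  # plant the cookie before showing the form
        form = CookieCheckForm()
    # ... render the form as usual
```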
def get_form_kwargs(self):
"""
Initialize default values that won't be displayed.
:return:
"""
kwargs = super(UserServiceUpdateView, self).get_form_kwargs()
kwargs['initial']['user'] = self.request.user
kwargs['initial']['name'] = self.object.name
return kwargs
|
def function[get_form_kwargs, parameter[self]]:
constant[
Initialize default values that won't be displayed.
:return:
]
variable[kwargs] assign[=] call[call[name[super], parameter[name[UserServiceUpdateView], name[self]]].get_form_kwargs, parameter[]]
call[call[name[kwargs]][constant[initial]]][constant[user]] assign[=] name[self].request.user
call[call[name[kwargs]][constant[initial]]][constant[name]] assign[=] name[self].object.name
return[name[kwargs]]
|
keyword[def] identifier[get_form_kwargs] ( identifier[self] ):
literal[string]
identifier[kwargs] = identifier[super] ( identifier[UserServiceUpdateView] , identifier[self] ). identifier[get_form_kwargs] ()
identifier[kwargs] [ literal[string] ][ literal[string] ]= identifier[self] . identifier[request] . identifier[user]
identifier[kwargs] [ literal[string] ][ literal[string] ]= identifier[self] . identifier[object] . identifier[name]
keyword[return] identifier[kwargs]
|
def get_form_kwargs(self):
"""
Initialize default values that won't be displayed.
:return:
"""
kwargs = super(UserServiceUpdateView, self).get_form_kwargs()
kwargs['initial']['user'] = self.request.user
kwargs['initial']['name'] = self.object.name
return kwargs
|
async def download_file_by_id(self, file_id: base.String, destination=None,
timeout: base.Integer = 30, chunk_size: base.Integer = 65536,
seek: base.Boolean = True):
"""
Download file by file_id to destination.
If you want the destination (:class:`io.BytesIO`) to be created automatically,
use the default value of ``destination`` and handle the result of this method.
:param file_id: str
:param destination: filename or instance of :class:`io.IOBase`, e.g. :class:`io.BytesIO`
:param timeout: int
:param chunk_size: int
:param seek: bool - go to start of file when downloading is finished
:return: destination
"""
file = await self.get_file(file_id)
return await self.download_file(file_path=file.file_path, destination=destination,
timeout=timeout, chunk_size=chunk_size, seek=seek)
|
<ast.AsyncFunctionDef object at 0x7da1b18fb760>
|
keyword[async] keyword[def] identifier[download_file_by_id] ( identifier[self] , identifier[file_id] : identifier[base] . identifier[String] , identifier[destination] = keyword[None] ,
identifier[timeout] : identifier[base] . identifier[Integer] = literal[int] , identifier[chunk_size] : identifier[base] . identifier[Integer] = literal[int] ,
identifier[seek] : identifier[base] . identifier[Boolean] = keyword[True] ):
literal[string]
identifier[file] = keyword[await] identifier[self] . identifier[get_file] ( identifier[file_id] )
keyword[return] keyword[await] identifier[self] . identifier[download_file] ( identifier[file_path] = identifier[file] . identifier[file_path] , identifier[destination] = identifier[destination] ,
identifier[timeout] = identifier[timeout] , identifier[chunk_size] = identifier[chunk_size] , identifier[seek] = identifier[seek] )
|
async def download_file_by_id(self, file_id: base.String, destination=None, timeout: base.Integer=30, chunk_size: base.Integer=65536, seek: base.Boolean=True):
"""
Download file by file_id to destination.
If you want the destination (:class:`io.BytesIO`) to be created automatically,
use the default value of ``destination`` and handle the result of this method.
:param file_id: str
:param destination: filename or instance of :class:`io.IOBase`, e.g. :class:`io.BytesIO`
:param timeout: int
:param chunk_size: int
:param seek: bool - go to start of file when downloading is finished
:return: destination
"""
file = await self.get_file(file_id)
return await self.download_file(file_path=file.file_path, destination=destination, timeout=timeout, chunk_size=chunk_size, seek=seek)
|
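A hedged usage sketch, assuming `bot` is an aiogram-style Bot instance exposing the method above; the file ID and output filename are placeholders:

```python
import io

async def save_photo(bot, file_id: str):
    # With a BytesIO destination the file is downloaded into memory and,
    # because seek=True by default, the buffer is rewound to position 0.
    buf = await bot.download_file_by_id(file_id, destination=io.BytesIO())
    with open("photo.jpg", "wb") as fh:
        fh.write(buf.read())
```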
def get_currency_symbols(self) -> List[str]:
""" Returns the used currencies' symbols as an array """
result = []
currencies = self.currencies.get_book_currencies()
for cur in currencies:
result.append(cur.mnemonic)
return result
|
def function[get_currency_symbols, parameter[self]]:
constant[ Returns the used currencies' symbols as an array ]
variable[result] assign[=] list[[]]
variable[currencies] assign[=] call[name[self].currencies.get_book_currencies, parameter[]]
for taget[name[cur]] in starred[name[currencies]] begin[:]
call[name[result].append, parameter[name[cur].mnemonic]]
return[name[result]]
|
keyword[def] identifier[get_currency_symbols] ( identifier[self] )-> identifier[List] [ identifier[str] ]:
literal[string]
identifier[result] =[]
identifier[currencies] = identifier[self] . identifier[currencies] . identifier[get_book_currencies] ()
keyword[for] identifier[cur] keyword[in] identifier[currencies] :
identifier[result] . identifier[append] ( identifier[cur] . identifier[mnemonic] )
keyword[return] identifier[result]
|
def get_currency_symbols(self) -> List[str]:
""" Returns the used currencies' symbols as an array """
result = []
currencies = self.currencies.get_book_currencies()
for cur in currencies:
result.append(cur.mnemonic) # depends on [control=['for'], data=['cur']]
return result
|
def snow_partitioning_n(im, r_max=4, sigma=0.4, return_all=True,
mask=True, randomize=False, alias=None):
r"""
This function partitions an image containing an arbitrary number of phases
into regions using a marker-based watershed segmentation. It is an extension
of snow_partitioning function with all phases partitioned together.
Parameters
----------
im : ND-array
Image of porous material where each phase is represented by unique
integer starting from 1 (0's are ignored).
r_max : scalar
The radius of the spherical structuring element to use in the Maximum
filter stage that is used to find peaks. The default is 4.
sigma : scalar
The standard deviation of the Gaussian filter used. The default is
0.4. If 0 is given then the filter is not applied, which is useful if a
distance transform is supplied as the ``im`` argument that has already
been processed.
return_all : boolean (default is False)
If set to ``True`` a named tuple is returned containing the original
image, the combined distance transform, list of each phase max label,
and the final combined regions of all phases.
mask : boolean (default is True)
Apply a mask to the regions which are not under concern.
randomize : boolean
If ``True`` (default), then the region colors will be randomized before
returning. This is helpful for visualization; otherwise neighboring
regions have similar coloring and are hard to distinguish.
alias : dict (Optional)
A dictionary that assigns a unique image label to each specific phase. For
example, {1: 'Solid'} will show all structural properties associated
with label 1 as Solid phase properties. If ``None`` then default
labelling will be used, i.e. {1: 'Phase1', ...}.
Returns
-------
An image the same shape as ``im`` with all phases partitioned into
regions using a marker-based watershed with the peaks found by the
SNOW algorithm [1]. If ``return_all`` is ``True`` then a **named tuple**
is returned with the following attributes:
* ``im`` : The actual image of the porous material
* ``dt`` : The combined distance transform of the image
* ``phase_max_label`` : The list of the max label of each phase, used to
distinguish the phases from each other
* ``regions`` : The partitioned regions of n phases using a marker
based watershed with the peaks found by the SNOW algorithm
References
----------
[1] Gostick, J. "A versatile and efficient network extraction algorithm
using marker-based watershed segmentation". Physical Review E. (2017)
[2] Khan, ZA et al. "Dual network extraction algorithm to investigate
multiple transport processes in porous materials: Image-based modeling
of pore and grain-scale processes". Computers in Chemical Engineering.
(2019)
See Also
----------
snow_partitioning
Notes
-----
In principle it is possible to perform a distance transform on each
phase separately, merge these into a single image, then apply the
watershed only once. This, however, has been found to create edge artifacts
between regions arising from the way watershed handles plateaus in the
distance transform. To overcome this, this function applies the watershed
to each of the distance transforms separately, then merges the segmented
regions back into a single image.
"""
# Get alias if provided by user
al = _create_alias_map(im=im, alias=alias)
# Perform snow on each phase and merge all segmentation and dt together
phases_num = sp.unique(im * 1)
phases_num = sp.trim_zeros(phases_num)
combined_dt = 0
combined_region = 0
num = [0]
for i in phases_num:
print('_' * 60)
if alias is None:
print('Processing Phase {}'.format(i))
else:
print('Processing Phase {}'.format(al[i]))
phase_snow = snow_partitioning(im == i,
dt=None, r_max=r_max, sigma=sigma,
return_all=return_all, mask=mask,
randomize=randomize)
if len(phases_num) == 1 and phases_num == 1:
combined_dt = phase_snow.dt
combined_region = phase_snow.regions
else:
combined_dt += phase_snow.dt
phase_snow.regions *= phase_snow.im
phase_snow.regions += num[i - 1]
phase_ws = phase_snow.regions * phase_snow.im
phase_ws[phase_ws == num[i - 1]] = 0
combined_region += phase_ws
num.append(sp.amax(combined_region))
if return_all:
tup = namedtuple('results', field_names=['im', 'dt', 'phase_max_label',
'regions'])
tup.im = im
tup.dt = combined_dt
tup.phase_max_label = num[1:]
tup.regions = combined_region
return tup
else:
return combined_region
|
def function[snow_partitioning_n, parameter[im, r_max, sigma, return_all, mask, randomize, alias]]:
constant[
This function partitions an image containing an arbitrary number of phases
into regions using a marker-based watershed segmentation. It is an extension
of snow_partitioning function with all phases partitioned together.
Parameters
----------
im : ND-array
Image of porous material where each phase is represented by unique
integer starting from 1 (0's are ignored).
r_max : scalar
The radius of the spherical structuring element to use in the Maximum
filter stage that is used to find peaks. The default is 4.
sigma : scalar
The standard deviation of the Gaussian filter used. The default is
0.4. If 0 is given then the filter is not applied, which is useful if a
distance transform is supplied as the ``im`` argument that has already
been processed.
return_all : boolean (default is False)
If set to ``True`` a named tuple is returned containing the original
image, the combined distance transform, list of each phase max label,
and the final combined regions of all phases.
mask : boolean (default is True)
Apply a mask to the regions which are not under concern.
randomize : boolean
If ``True`` (default), then the region colors will be randomized before
returning. This is helpful for visualization; otherwise neighboring
regions have similar coloring and are hard to distinguish.
alias : dict (Optional)
A dictionary that assigns a unique image label to each specific phase. For
example, {1: 'Solid'} will show all structural properties associated
with label 1 as Solid phase properties. If ``None`` then default
labelling will be used, i.e. {1: 'Phase1', ...}.
Returns
-------
An image the same shape as ``im`` with all phases partitioned into
regions using a marker-based watershed with the peaks found by the
SNOW algorithm [1]. If ``return_all`` is ``True`` then a **named tuple**
is returned with the following attributes:
* ``im`` : The actual image of the porous material
* ``dt`` : The combined distance transform of the image
* ``phase_max_label`` : The list of the max label of each phase, used to
distinguish the phases from each other
* ``regions`` : The partitioned regions of n phases using a marker
based watershed with the peaks found by the SNOW algorithm
References
----------
[1] Gostick, J. "A versatile and efficient network extraction algorithm
using marker-based watershed segmentation". Physical Review E. (2017)
[2] Khan, ZA et al. "Dual network extraction algorithm to investigate
multiple transport processes in porous materials: Image-based modeling
of pore and grain-scale processes". Computers in Chemical Engineering.
(2019)
See Also
----------
snow_partitioning
Notes
-----
In principle it is possible to perform a distance transform on each
phase separately, merge these into a single image, then apply the
watershed only once. This, however, has been found to create edge artifacts
between regions arising from the way watershed handles plateaus in the
distance transform. To overcome this, this function applies the watershed
to each of the distance transforms separately, then merges the segmented
regions back into a single image.
]
variable[al] assign[=] call[name[_create_alias_map], parameter[]]
variable[phases_num] assign[=] call[name[sp].unique, parameter[binary_operation[name[im] * constant[1]]]]
variable[phases_num] assign[=] call[name[sp].trim_zeros, parameter[name[phases_num]]]
variable[combined_dt] assign[=] constant[0]
variable[combined_region] assign[=] constant[0]
variable[num] assign[=] list[[<ast.Constant object at 0x7da1b0563640>]]
for taget[name[i]] in starred[name[phases_num]] begin[:]
call[name[print], parameter[binary_operation[constant[_] * constant[60]]]]
if compare[name[alias] is constant[None]] begin[:]
call[name[print], parameter[call[constant[Processing Phase {}].format, parameter[name[i]]]]]
variable[phase_snow] assign[=] call[name[snow_partitioning], parameter[compare[name[im] equal[==] name[i]]]]
if <ast.BoolOp object at 0x7da1b0562bf0> begin[:]
variable[combined_dt] assign[=] name[phase_snow].dt
variable[combined_region] assign[=] name[phase_snow].regions
call[name[num].append, parameter[call[name[sp].amax, parameter[name[combined_region]]]]]
if name[return_all] begin[:]
variable[tup] assign[=] call[name[namedtuple], parameter[constant[results]]]
name[tup].im assign[=] name[im]
name[tup].dt assign[=] name[combined_dt]
name[tup].phase_max_label assign[=] call[name[num]][<ast.Slice object at 0x7da1b0544c40>]
name[tup].regions assign[=] name[combined_region]
return[name[tup]]
|
keyword[def] identifier[snow_partitioning_n] ( identifier[im] , identifier[r_max] = literal[int] , identifier[sigma] = literal[int] , identifier[return_all] = keyword[True] ,
identifier[mask] = keyword[True] , identifier[randomize] = keyword[False] , identifier[alias] = keyword[None] ):
literal[string]
identifier[al] = identifier[_create_alias_map] ( identifier[im] = identifier[im] , identifier[alias] = identifier[alias] )
identifier[phases_num] = identifier[sp] . identifier[unique] ( identifier[im] * literal[int] )
identifier[phases_num] = identifier[sp] . identifier[trim_zeros] ( identifier[phases_num] )
identifier[combined_dt] = literal[int]
identifier[combined_region] = literal[int]
identifier[num] =[ literal[int] ]
keyword[for] identifier[i] keyword[in] identifier[phases_num] :
identifier[print] ( literal[string] * literal[int] )
keyword[if] identifier[alias] keyword[is] keyword[None] :
identifier[print] ( literal[string] . identifier[format] ( identifier[i] ))
keyword[else] :
identifier[print] ( literal[string] . identifier[format] ( identifier[al] [ identifier[i] ]))
identifier[phase_snow] = identifier[snow_partitioning] ( identifier[im] == identifier[i] ,
identifier[dt] = keyword[None] , identifier[r_max] = identifier[r_max] , identifier[sigma] = identifier[sigma] ,
identifier[return_all] = identifier[return_all] , identifier[mask] = identifier[mask] ,
identifier[randomize] = identifier[randomize] )
keyword[if] identifier[len] ( identifier[phases_num] )== literal[int] keyword[and] identifier[phases_num] == literal[int] :
identifier[combined_dt] = identifier[phase_snow] . identifier[dt]
identifier[combined_region] = identifier[phase_snow] . identifier[regions]
keyword[else] :
identifier[combined_dt] += identifier[phase_snow] . identifier[dt]
identifier[phase_snow] . identifier[regions] *= identifier[phase_snow] . identifier[im]
identifier[phase_snow] . identifier[regions] += identifier[num] [ identifier[i] - literal[int] ]
identifier[phase_ws] = identifier[phase_snow] . identifier[regions] * identifier[phase_snow] . identifier[im]
identifier[phase_ws] [ identifier[phase_ws] == identifier[num] [ identifier[i] - literal[int] ]]= literal[int]
identifier[combined_region] += identifier[phase_ws]
identifier[num] . identifier[append] ( identifier[sp] . identifier[amax] ( identifier[combined_region] ))
keyword[if] identifier[return_all] :
identifier[tup] = identifier[namedtuple] ( literal[string] , identifier[field_names] =[ literal[string] , literal[string] , literal[string] ,
literal[string] ])
identifier[tup] . identifier[im] = identifier[im]
identifier[tup] . identifier[dt] = identifier[combined_dt]
identifier[tup] . identifier[phase_max_label] = identifier[num] [ literal[int] :]
identifier[tup] . identifier[regions] = identifier[combined_region]
keyword[return] identifier[tup]
keyword[else] :
keyword[return] identifier[combined_region]
|
def snow_partitioning_n(im, r_max=4, sigma=0.4, return_all=True, mask=True, randomize=False, alias=None):
"""
This function partitions an image containing an arbitrary number of phases
into regions using a marker-based watershed segmentation. It is an extension
of snow_partitioning function with all phases partitioned together.
Parameters
----------
im : ND-array
Image of porous material where each phase is represented by unique
integer starting from 1 (0's are ignored).
r_max : scalar
The radius of the spherical structuring element to use in the Maximum
filter stage that is used to find peaks. The default is 4.
sigma : scalar
The standard deviation of the Gaussian filter used. The default is
0.4. If 0 is given then the filter is not applied, which is useful if a
distance transform is supplied as the ``im`` argument that has already
been processed.
return_all : boolean (default is False)
If set to ``True`` a named tuple is returned containing the original
image, the combined distance transform, list of each phase max label,
and the final combined regions of all phases.
mask : boolean (default is True)
Apply a mask to the regions which are not under concern.
randomize : boolean
If ``True`` (default), then the region colors will be randomized before
returning. This is helpful for visualization; otherwise neighboring
regions have similar coloring and are hard to distinguish.
alias : dict (Optional)
A dictionary that assigns a unique image label to each specific phase. For
example, {1: 'Solid'} will show all structural properties associated
with label 1 as Solid phase properties. If ``None`` then default
labelling will be used, i.e. {1: 'Phase1', ...}.
Returns
-------
An image the same shape as ``im`` with all phases partitioned into
regions using a marker-based watershed with the peaks found by the
SNOW algorithm [1]. If ``return_all`` is ``True`` then a **named tuple**
is returned with the following attributes:
* ``im`` : The actual image of the porous material
* ``dt`` : The combined distance transform of the image
* ``phase_max_label`` : The list of the max label of each phase, used to
distinguish the phases from each other
* ``regions`` : The partitioned regions of n phases using a marker
based watershed with the peaks found by the SNOW algorithm
References
----------
[1] Gostick, J. "A versatile and efficient network extraction algorithm
using marker-based watershed segmentation". Physical Review E. (2017)
[2] Khan, ZA et al. "Dual network extraction algorithm to investigate
multiple transport processes in porous materials: Image-based modeling
of pore and grain-scale processes". Computers in Chemical Engineering.
(2019)
See Also
----------
snow_partitioning
Notes
-----
In principle it is possible to perform a distance transform on each
phase separately, merge these into a single image, then apply the
watershed only once. This, however, has been found to create edge artifacts
between regions arising from the way watershed handles plateaus in the
distance transform. To overcome this, this function applies the watershed
to each of the distance transforms separately, then merges the segmented
regions back into a single image.
"""
# Get alias if provided by user
al = _create_alias_map(im=im, alias=alias)
# Perform snow on each phase and merge all segmentation and dt together
phases_num = sp.unique(im * 1)
phases_num = sp.trim_zeros(phases_num)
combined_dt = 0
combined_region = 0
num = [0]
for i in phases_num:
print('_' * 60)
if alias is None:
print('Processing Phase {}'.format(i)) # depends on [control=['if'], data=[]]
else:
print('Processing Phase {}'.format(al[i]))
phase_snow = snow_partitioning(im == i, dt=None, r_max=r_max, sigma=sigma, return_all=return_all, mask=mask, randomize=randomize)
if len(phases_num) == 1 and phases_num == 1:
combined_dt = phase_snow.dt
combined_region = phase_snow.regions # depends on [control=['if'], data=[]]
else:
combined_dt += phase_snow.dt
phase_snow.regions *= phase_snow.im
phase_snow.regions += num[i - 1]
phase_ws = phase_snow.regions * phase_snow.im
phase_ws[phase_ws == num[i - 1]] = 0
combined_region += phase_ws
num.append(sp.amax(combined_region)) # depends on [control=['for'], data=['i']]
if return_all:
tup = namedtuple('results', field_names=['im', 'dt', 'phase_max_label', 'regions'])
tup.im = im
tup.dt = combined_dt
tup.phase_max_label = num[1:]
tup.regions = combined_region
return tup # depends on [control=['if'], data=[]]
else:
return combined_region
|
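A minimal usage sketch for the multi-phase SNOW partitioning above. The porespy-style entry points (ps.generators.blobs, ps.filters.snow_partitioning_n) are assumptions, not confirmed by this sample; return_all=True matters because the merging loop reads .dt and .regions off each per-phase result.
import numpy as np
import porespy as ps
# Two-phase label image: background becomes phase 1, blobs become phase 2.
im = ps.generators.blobs(shape=[200, 200], porosity=0.6).astype(int) + 1
out = ps.filters.snow_partitioning_n(im, r_max=4, sigma=0.4, return_all=True)
print(out.phase_max_label)          # last region label of each phase
print(np.unique(out.regions).size)  # total number of region labels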
def _set_auditpol_data(option, value):
'''
Helper function that updates the currently applied settings to match what
has just been set in the audit.csv files. We're doing it this way instead
of running `gpupdate`.
Args:
option (str): The name of the option to set
value (str): The value to set. ['None', '0', '1', '2', '3']
Returns:
bool: ``True`` if successful, otherwise ``False``
'''
auditpol_values = {'None': 'No Auditing',
'0': 'No Auditing',
'1': 'Success',
'2': 'Failure',
'3': 'Success and Failure'}
defaults = _get_audit_defaults(option)
return __utils__['auditpol.set_setting'](
name=defaults['Auditpol Name'],
value=auditpol_values[value])
|
def function[_set_auditpol_data, parameter[option, value]]:
constant[
Helper function that updates the currently applied settings to match what
has just been set in the audit.csv files. We're doing it this way instead
of running `gpupdate`.
Args:
option (str): The name of the option to set
value (str): The value to set. ['None', '0', '1', '2', '3']
Returns:
bool: ``True`` if successful, otherwise ``False``
]
variable[auditpol_values] assign[=] dictionary[[<ast.Constant object at 0x7da20c6e51e0>, <ast.Constant object at 0x7da20c6e7580>, <ast.Constant object at 0x7da20c6e7a60>, <ast.Constant object at 0x7da20c6e7e80>, <ast.Constant object at 0x7da20c6e4a00>], [<ast.Constant object at 0x7da20c6e5570>, <ast.Constant object at 0x7da20c6e6e00>, <ast.Constant object at 0x7da20c6e6890>, <ast.Constant object at 0x7da20c6e5840>, <ast.Constant object at 0x7da20c6e7d90>]]
variable[defaults] assign[=] call[name[_get_audit_defaults], parameter[name[option]]]
return[call[call[name[__utils__]][constant[auditpol.set_setting]], parameter[]]]
|
keyword[def] identifier[_set_auditpol_data] ( identifier[option] , identifier[value] ):
literal[string]
identifier[auditpol_values] ={ literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] }
identifier[defaults] = identifier[_get_audit_defaults] ( identifier[option] )
keyword[return] identifier[__utils__] [ literal[string] ](
identifier[name] = identifier[defaults] [ literal[string] ],
identifier[value] = identifier[auditpol_values] [ identifier[value] ])
|
def _set_auditpol_data(option, value):
"""
Helper function that updates the currently applied settings to match what
has just been set in the audit.csv files. We're doing it this way instead
of running `gpupdate`.
Args:
option (str): The name of the option to set
value (str): The value to set. ['None', '0', '1', '2', '3']
Returns:
bool: ``True`` if successful, otherwise ``False``
"""
auditpol_values = {'None': 'No Auditing', '0': 'No Auditing', '1': 'Success', '2': 'Failure', '3': 'Success and Failure'}
defaults = _get_audit_defaults(option)
return __utils__['auditpol.set_setting'](name=defaults['Auditpol Name'], value=auditpol_values[value])
|
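The _set_auditpol_data sample above cannot run standalone: __utils__ and _get_audit_defaults are injected by the salt loader at runtime. A hedged sketch of just the value translation it performs:
AUDITPOL_VALUES = {'None': 'No Auditing', '0': 'No Auditing',
                   '1': 'Success', '2': 'Failure',
                   '3': 'Success and Failure'}
def translate_audit_setting(value):
    # Map a GPO audit.csv value to the wording auditpol expects.
    return AUDITPOL_VALUES[value]
assert translate_audit_setting('3') == 'Success and Failure'
assert translate_audit_setting('None') == 'No Auditing'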
def read_touchstone(fname):
r"""
Read a `Touchstone <https://ibis.org/connector/touchstone_spec11.pdf>`_ file.
According to the specification, a data line can have values for at most
four complex parameters (plus, potentially, the frequency point); however,
this function is able to process malformed files as long as they have the
correct number of data points (:code:`points` x :code:`nports` x
:code:`nports`, where :code:`points` represents the number of frequency
points and :code:`nports` represents the number of ports in the file). Per
the Touchstone specification, noise data is only supported for two-port
files.
:param fname: Touchstone file name
:type fname: `FileNameExists <https://pexdoc.readthedocs.io/en/stable/
ptypes.html#filenameexists>`_
:rtype: dictionary with the following structure:
* **nports** (*integer*) -- number of ports
* **opts** (:ref:`TouchstoneOptions`) -- File options
* **data** (:ref:`TouchstoneData`) -- Parameter data
* **noise** (:ref:`TouchstoneNoiseData`) -- Noise data; per the Touchstone
specification, only supported in 2-port files
.. [[[cog cog.out(exobj.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for
.. peng.touchstone.read_touchstone
:raises:
* OSError (File *[fname]* could not be found)
* RuntimeError (Argument \`fname\` is not valid)
* RuntimeError (File *[fname]* does not have a valid extension)
* RuntimeError (File *[fname]* has no data)
* RuntimeError (First non-comment line is not the option line)
* RuntimeError (Frequency must increase)
* RuntimeError (Illegal data in line *[lineno]*)
* RuntimeError (Illegal option line)
* RuntimeError (Malformed data)
* RuntimeError (Malformed noise data)
* RuntimeError (Noise frequency must increase)
.. [[[end]]]
.. note:: The returned parameter(s) are complex numbers in real and
imaginary format regardless of the format used in the Touchstone file.
Similarly, the returned frequency vector unit is Hertz regardless of
the unit used in the Touchstone file
"""
# pylint: disable=R0912,R0915,W0702
# Exceptions definitions
exnports = pexdoc.exh.addex(
RuntimeError, "File *[fname]* does not have a valid extension"
)
exnoopt = pexdoc.exh.addex(
RuntimeError, "First non-comment line is not the option line"
)
exopt = pexdoc.exh.addex(RuntimeError, "Illegal option line")
exline = pexdoc.exh.addex(RuntimeError, "Illegal data in line *[lineno]*")
exnodata = pexdoc.exh.addex(RuntimeError, "File *[fname]* has no data")
exdata = pexdoc.exh.addex(RuntimeError, "Malformed data")
exndata = pexdoc.exh.addex(RuntimeError, "Malformed noise data")
exfreq = pexdoc.exh.addex(RuntimeError, "Frequency must increase")
exnfreq = pexdoc.exh.addex(RuntimeError, "Noise frequency must increase")
# Verify that file has correct extension format
_, ext = os.path.splitext(fname)
ext = ext.lower()
nports_regexp = re.compile(r"\.s(\d+)p")
match = nports_regexp.match(ext)
exnports(not match, edata={"field": "fname", "value": fname})
nports = int(match.groups()[0])
opt_line = False
units_dict = {"GHZ": "GHz", "MHZ": "MHz", "KHZ": "KHz", "HZ": "Hz"}
scale_dict = {"GHZ": 1e9, "MHZ": 1e6, "KHZ": 1e3, "HZ": 1.0}
units_opts = ["GHZ", "MHZ", "KHZ", "HZ"]
type_opts = ["S", "Y", "Z", "H", "G"]
format_opts = ["DB", "MA", "RI"]
opts = dict(units=None, ptype=None, pformat=None, z0=None)
data = []
with open(fname, "r") as fobj:
for num, line in enumerate(fobj):
line = line.strip().upper()
# Comment line
if line.startswith("!"):
continue
# Options line
if (not opt_line) and (not line.startswith("#")):
exnoopt(True)
if not opt_line:
# Each Touchstone data file must contain an option line
# (additional option lines after the first one will be ignored)
opt_line = True
tokens = line[1:].split() # Remove initial hash
if "R" in tokens:
idx = tokens.index("R")
add = 1
if len(tokens) > idx + 1:
try:
opts["z0"] = float(tokens[idx + 1])
add = 2
except:
pass
tokens = tokens[:idx] + tokens[idx + add :]
matches = 0
for token in tokens:
if (token in format_opts) and (not opts["pformat"]):
matches += 1
opts["pformat"] = token
elif (token in units_opts) and (not opts["units"]):
matches += 1
opts["units"] = units_dict[token]
elif (token in type_opts) and (not opts["ptype"]):
matches += 1
opts["ptype"] = token
exopt(matches != len(tokens))
if opt_line and line.startswith("#"):
continue
# Data lines
try:
if "!" in line:
idx = line.index("!")
line = line[:idx]
tokens = [float(item) for item in line.split()]
data.append(tokens)
except:
exline(True, edata={"field": "lineno", "value": num + 1})
data = np.concatenate(data)
exnodata(not data.size, edata={"field": "fname", "value": fname})
# Set option defaults
opts["units"] = opts["units"] or "GHz"
opts["ptype"] = opts["ptype"] or "S"
opts["pformat"] = opts["pformat"] or "MA"
opts["z0"] = opts["z0"] or 50
# Format data
data_dict = {}
nums_per_freq = 1 + (2 * (nports ** 2))
fslice = slice(0, data.size, nums_per_freq)
freq = data[fslice]
ndiff = np.diff(freq)
ndict = {}
if (nports == 2) and ndiff.size and (min(ndiff) <= 0):
# Extract noise data
npoints = np.where(ndiff <= 0)[0][0] + 1
freq = freq[:npoints]
ndata = data[9 * npoints :]
nfpoints = int(ndata.size / 5.0)
exndata(ndata.size % 5 != 0)
data = data[: 9 * npoints]
ndiff = 1
nfslice = slice(0, ndata.size, 5)
nfreq = ndata[nfslice]
ndiff = np.diff(nfreq)
exnfreq(bool(ndiff.size and (min(ndiff) <= 0)))
nfig_slice = slice(1, ndata.size, 5)
rlmag_slice = slice(2, ndata.size, 5)
rlphase_slice = slice(3, ndata.size, 5)
res_slice = slice(4, ndata.size, 5)
ndict["freq"] = scale_dict[opts["units"].upper()] * nfreq
ndict["nf"] = ndata[nfig_slice]
ndict["rc"] = ndata[rlmag_slice] * np.exp(1j * ndata[rlphase_slice])
ndict["res"] = ndata[res_slice]
ndict["points"] = nfpoints
exdata(data.size % nums_per_freq != 0)
npoints = int(data.size / nums_per_freq)
exfreq(bool(ndiff.size and (min(ndiff) <= 0)))
data_dict["freq"] = scale_dict[opts["units"].upper()] * freq
d1slice = slice(0, data.size, 2)
d2slice = slice(1, data.size, 2)
data = np.delete(data, fslice)
# For format that has angle information, the angle is given in degrees
if opts["pformat"] == "MA":
data = data[d1slice] * np.exp(1j * np.deg2rad(data[d2slice]))
elif opts["pformat"] == "RI":
data = data[d1slice] + (1j * data[d2slice])
else: # if opts['pformat'] == 'DB':
data = (10 ** (data[d1slice] / 20.0)) * np.exp(1j * np.deg2rad(data[d2slice]))
if nports > 1:
data_dict["pars"] = np.resize(data, (npoints, nports, nports))
else:
data_dict["pars"] = copy.copy(data)
del data
data_dict["points"] = npoints
if nports == 2:
# The order of data for a two-port file is N11, N21, N12, N22 but for
# m ports where m > 2, the order is N11, N12, N13, ..., N1m
data_dict["pars"] = np.transpose(data_dict["pars"], (0, 2, 1))
return dict(nports=nports, opts=opts, data=data_dict, noise=ndict)
|
def function[read_touchstone, parameter[fname]]:
constant[
Read a `Touchstone <https://ibis.org/connector/touchstone_spec11.pdf>`_ file.
According to the specification, a data line can have values for at most
four complex parameters (plus, potentially, the frequency point); however,
this function is able to process malformed files as long as they have the
correct number of data points (:code:`points` x :code:`nports` x
:code:`nports`, where :code:`points` represents the number of frequency
points and :code:`nports` represents the number of ports in the file). Per
the Touchstone specification, noise data is only supported for two-port
files.
:param fname: Touchstone file name
:type fname: `FileNameExists <https://pexdoc.readthedocs.io/en/stable/
ptypes.html#filenameexists>`_
:rtype: dictionary with the following structure:
* **nports** (*integer*) -- number of ports
* **opts** (:ref:`TouchstoneOptions`) -- File options
* **data** (:ref:`TouchstoneData`) -- Parameter data
* **noise** (:ref:`TouchstoneNoiseData`) -- Noise data; per the Touchstone
specification, only supported in 2-port files
.. [[[cog cog.out(exobj.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for
.. peng.touchstone.read_touchstone
:raises:
* OSError (File *[fname]* could not be found)
* RuntimeError (Argument \`fname\` is not valid)
* RuntimeError (File *[fname]* does not have a valid extension)
* RuntimeError (File *[fname]* has no data)
* RuntimeError (First non-comment line is not the option line)
* RuntimeError (Frequency must increase)
* RuntimeError (Illegal data in line *[lineno]*)
* RuntimeError (Illegal option line)
* RuntimeError (Malformed data)
* RuntimeError (Malformed noise data)
* RuntimeError (Noise frequency must increase)
.. [[[end]]]
.. note:: The returned parameter(s) are complex numbers in real and
imaginary format regardless of the format used in the Touchstone file.
Similarly, the returned frequency vector unit is Hertz regardless of
the unit used in the Touchstone file
]
variable[exnports] assign[=] call[name[pexdoc].exh.addex, parameter[name[RuntimeError], constant[File *[fname]* does not have a valid extension]]]
variable[exnoopt] assign[=] call[name[pexdoc].exh.addex, parameter[name[RuntimeError], constant[First non-comment line is not the option line]]]
variable[exopt] assign[=] call[name[pexdoc].exh.addex, parameter[name[RuntimeError], constant[Illegal option line]]]
variable[exline] assign[=] call[name[pexdoc].exh.addex, parameter[name[RuntimeError], constant[Illegal data in line *[lineno]*]]]
variable[exnodata] assign[=] call[name[pexdoc].exh.addex, parameter[name[RuntimeError], constant[File *[fname]* has no data]]]
variable[exdata] assign[=] call[name[pexdoc].exh.addex, parameter[name[RuntimeError], constant[Malformed data]]]
variable[exndata] assign[=] call[name[pexdoc].exh.addex, parameter[name[RuntimeError], constant[Malformed noise data]]]
variable[exfreq] assign[=] call[name[pexdoc].exh.addex, parameter[name[RuntimeError], constant[Frequency must increase]]]
variable[exnfreq] assign[=] call[name[pexdoc].exh.addex, parameter[name[RuntimeError], constant[Noise frequency must increase]]]
<ast.Tuple object at 0x7da1b026f0d0> assign[=] call[name[os].path.splitext, parameter[name[fname]]]
variable[ext] assign[=] call[name[ext].lower, parameter[]]
variable[nports_regexp] assign[=] call[name[re].compile, parameter[constant[\.s(\d+)p]]]
variable[match] assign[=] call[name[nports_regexp].match, parameter[name[ext]]]
call[name[exnports], parameter[<ast.UnaryOp object at 0x7da1b026eb90>]]
variable[nports] assign[=] call[name[int], parameter[call[call[name[match].groups, parameter[]]][constant[0]]]]
variable[opt_line] assign[=] constant[False]
variable[units_dict] assign[=] dictionary[[<ast.Constant object at 0x7da1b026e710>, <ast.Constant object at 0x7da1b026e6e0>, <ast.Constant object at 0x7da1b026e6b0>, <ast.Constant object at 0x7da1b026e680>], [<ast.Constant object at 0x7da1b026e650>, <ast.Constant object at 0x7da1b026e620>, <ast.Constant object at 0x7da1b026e5f0>, <ast.Constant object at 0x7da1b026e5c0>]]
variable[scale_dict] assign[=] dictionary[[<ast.Constant object at 0x7da1b026e500>, <ast.Constant object at 0x7da1b026e4d0>, <ast.Constant object at 0x7da1b026e4a0>, <ast.Constant object at 0x7da1b026e470>], [<ast.Constant object at 0x7da1b026e440>, <ast.Constant object at 0x7da1b026e410>, <ast.Constant object at 0x7da1b026e3e0>, <ast.Constant object at 0x7da1b026e3b0>]]
variable[units_opts] assign[=] list[[<ast.Constant object at 0x7da1b026e2f0>, <ast.Constant object at 0x7da1b026e2c0>, <ast.Constant object at 0x7da1b026e290>, <ast.Constant object at 0x7da1b026e260>]]
variable[type_opts] assign[=] list[[<ast.Constant object at 0x7da1b026e170>, <ast.Constant object at 0x7da1b026e140>, <ast.Constant object at 0x7da1b026e110>, <ast.Constant object at 0x7da1b026e0e0>, <ast.Constant object at 0x7da1b026e0b0>]]
variable[format_opts] assign[=] list[[<ast.Constant object at 0x7da1b026dff0>, <ast.Constant object at 0x7da1b026dfc0>, <ast.Constant object at 0x7da1b026df90>]]
variable[opts] assign[=] call[name[dict], parameter[]]
variable[data] assign[=] list[[]]
with call[name[open], parameter[name[fname], constant[r]]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b026dab0>, <ast.Name object at 0x7da1b026da80>]]] in starred[call[name[enumerate], parameter[name[fobj]]]] begin[:]
variable[line] assign[=] call[call[name[line].strip, parameter[]].upper, parameter[]]
if call[name[line].startswith, parameter[constant[!]]] begin[:]
continue
if <ast.BoolOp object at 0x7da1b026d6c0> begin[:]
call[name[exnoopt], parameter[constant[True]]]
if <ast.UnaryOp object at 0x7da1b026d420> begin[:]
variable[opt_line] assign[=] constant[True]
variable[tokens] assign[=] call[call[name[line]][<ast.Slice object at 0x7da1b0214f70>].split, parameter[]]
if compare[constant[R] in name[tokens]] begin[:]
variable[idx] assign[=] call[name[tokens].index, parameter[constant[R]]]
variable[add] assign[=] constant[1]
if compare[call[name[len], parameter[name[tokens]]] greater[>] binary_operation[name[idx] + constant[1]]] begin[:]
<ast.Try object at 0x7da1b0215de0>
variable[tokens] assign[=] binary_operation[call[name[tokens]][<ast.Slice object at 0x7da1b0216140>] + call[name[tokens]][<ast.Slice object at 0x7da1b0216260>]]
variable[matches] assign[=] constant[0]
for taget[name[token]] in starred[name[tokens]] begin[:]
if <ast.BoolOp object at 0x7da1b0216530> begin[:]
<ast.AugAssign object at 0x7da1b0216650>
call[name[opts]][constant[pformat]] assign[=] name[token]
call[name[exopt], parameter[compare[name[matches] not_equal[!=] call[name[len], parameter[name[tokens]]]]]]
if <ast.BoolOp object at 0x7da1b0217220> begin[:]
continue
<ast.Try object at 0x7da1b0217430>
variable[data] assign[=] call[name[np].concatenate, parameter[name[data]]]
call[name[exnodata], parameter[<ast.UnaryOp object at 0x7da1b02146d0>]]
call[name[opts]][constant[units]] assign[=] <ast.BoolOp object at 0x7da1b02147f0>
call[name[opts]][constant[ptype]] assign[=] <ast.BoolOp object at 0x7da1b0214040>
call[name[opts]][constant[pformat]] assign[=] <ast.BoolOp object at 0x7da1b02152a0>
call[name[opts]][constant[z0]] assign[=] <ast.BoolOp object at 0x7da1b0215780>
variable[data_dict] assign[=] dictionary[[], []]
variable[nums_per_freq] assign[=] binary_operation[constant[1] + binary_operation[constant[2] * binary_operation[name[nports] ** constant[2]]]]
variable[fslice] assign[=] call[name[slice], parameter[constant[0], name[data].size, name[nums_per_freq]]]
variable[freq] assign[=] call[name[data]][name[fslice]]
variable[ndiff] assign[=] call[name[np].diff, parameter[name[freq]]]
variable[ndict] assign[=] dictionary[[], []]
if <ast.BoolOp object at 0x7da1b0213f70> begin[:]
variable[npoints] assign[=] binary_operation[call[call[call[name[np].where, parameter[compare[name[ndiff] less_or_equal[<=] constant[0]]]]][constant[0]]][constant[0]] + constant[1]]
variable[freq] assign[=] call[name[freq]][<ast.Slice object at 0x7da1b02121d0>]
variable[ndata] assign[=] call[name[data]][<ast.Slice object at 0x7da1b0212140>]
variable[nfpoints] assign[=] call[name[int], parameter[binary_operation[name[ndata].size / constant[5.0]]]]
call[name[exndata], parameter[compare[binary_operation[name[ndata].size <ast.Mod object at 0x7da2590d6920> constant[5]] not_equal[!=] constant[0]]]]
variable[data] assign[=] call[name[data]][<ast.Slice object at 0x7da1b0211900>]
variable[ndiff] assign[=] constant[1]
variable[nfslice] assign[=] call[name[slice], parameter[constant[0], name[ndata].size, constant[5]]]
variable[nfreq] assign[=] call[name[ndata]][name[nfslice]]
variable[ndiff] assign[=] call[name[np].diff, parameter[name[nfreq]]]
call[name[exnfreq], parameter[call[name[bool], parameter[<ast.BoolOp object at 0x7da1b0213cd0>]]]]
variable[nfig_slice] assign[=] call[name[slice], parameter[constant[1], name[ndata].size, constant[5]]]
variable[rlmag_slice] assign[=] call[name[slice], parameter[constant[2], name[ndata].size, constant[5]]]
variable[rlphase_slice] assign[=] call[name[slice], parameter[constant[3], name[ndata].size, constant[5]]]
variable[res_slice] assign[=] call[name[slice], parameter[constant[4], name[ndata].size, constant[5]]]
call[name[ndict]][constant[freq]] assign[=] binary_operation[call[name[scale_dict]][call[call[name[opts]][constant[units]].upper, parameter[]]] * name[nfreq]]
call[name[ndict]][constant[nf]] assign[=] call[name[ndata]][name[nfig_slice]]
call[name[ndict]][constant[rc]] assign[=] binary_operation[call[name[ndata]][name[rlmag_slice]] * call[name[np].exp, parameter[binary_operation[constant[1j] * call[name[ndata]][name[rlphase_slice]]]]]]
call[name[ndict]][constant[res]] assign[=] call[name[ndata]][name[res_slice]]
call[name[ndict]][constant[points]] assign[=] name[nfpoints]
call[name[exdata], parameter[compare[binary_operation[name[data].size <ast.Mod object at 0x7da2590d6920> name[nums_per_freq]] not_equal[!=] constant[0]]]]
variable[npoints] assign[=] call[name[int], parameter[binary_operation[name[data].size / name[nums_per_freq]]]]
call[name[exfreq], parameter[call[name[bool], parameter[<ast.BoolOp object at 0x7da1b0213b50>]]]]
call[name[data_dict]][constant[freq]] assign[=] binary_operation[call[name[scale_dict]][call[call[name[opts]][constant[units]].upper, parameter[]]] * name[freq]]
variable[d1slice] assign[=] call[name[slice], parameter[constant[0], name[data].size, constant[2]]]
variable[d2slice] assign[=] call[name[slice], parameter[constant[1], name[data].size, constant[2]]]
variable[data] assign[=] call[name[np].delete, parameter[name[data], name[fslice]]]
if compare[call[name[opts]][constant[pformat]] equal[==] constant[MA]] begin[:]
variable[data] assign[=] binary_operation[call[name[data]][name[d1slice]] * call[name[np].exp, parameter[binary_operation[constant[1j] * call[name[np].deg2rad, parameter[call[name[data]][name[d2slice]]]]]]]]
if compare[name[nports] greater[>] constant[1]] begin[:]
call[name[data_dict]][constant[pars]] assign[=] call[name[np].resize, parameter[name[data], tuple[[<ast.Name object at 0x7da1b0209b10>, <ast.Name object at 0x7da1b02090c0>, <ast.Name object at 0x7da1b020a350>]]]]
<ast.Delete object at 0x7da1b0209bd0>
call[name[data_dict]][constant[points]] assign[=] name[npoints]
if compare[name[nports] equal[==] constant[2]] begin[:]
call[name[data_dict]][constant[pars]] assign[=] call[name[np].transpose, parameter[call[name[data_dict]][constant[pars]], tuple[[<ast.Constant object at 0x7da1b0209c00>, <ast.Constant object at 0x7da1b0209ab0>, <ast.Constant object at 0x7da1b0209090>]]]]
return[call[name[dict], parameter[]]]
|
keyword[def] identifier[read_touchstone] ( identifier[fname] ):
literal[string]
identifier[exnports] = identifier[pexdoc] . identifier[exh] . identifier[addex] (
identifier[RuntimeError] , literal[string]
)
identifier[exnoopt] = identifier[pexdoc] . identifier[exh] . identifier[addex] (
identifier[RuntimeError] , literal[string]
)
identifier[exopt] = identifier[pexdoc] . identifier[exh] . identifier[addex] ( identifier[RuntimeError] , literal[string] )
identifier[exline] = identifier[pexdoc] . identifier[exh] . identifier[addex] ( identifier[RuntimeError] , literal[string] )
identifier[exnodata] = identifier[pexdoc] . identifier[exh] . identifier[addex] ( identifier[RuntimeError] , literal[string] )
identifier[exdata] = identifier[pexdoc] . identifier[exh] . identifier[addex] ( identifier[RuntimeError] , literal[string] )
identifier[exndata] = identifier[pexdoc] . identifier[exh] . identifier[addex] ( identifier[RuntimeError] , literal[string] )
identifier[exfreq] = identifier[pexdoc] . identifier[exh] . identifier[addex] ( identifier[RuntimeError] , literal[string] )
identifier[exnfreq] = identifier[pexdoc] . identifier[exh] . identifier[addex] ( identifier[RuntimeError] , literal[string] )
identifier[_] , identifier[ext] = identifier[os] . identifier[path] . identifier[splitext] ( identifier[fname] )
identifier[ext] = identifier[ext] . identifier[lower] ()
identifier[nports_regexp] = identifier[re] . identifier[compile] ( literal[string] )
identifier[match] = identifier[nports_regexp] . identifier[match] ( identifier[ext] )
identifier[exnports] ( keyword[not] identifier[match] , identifier[edata] ={ literal[string] : literal[string] , literal[string] : identifier[fname] })
identifier[nports] = identifier[int] ( identifier[match] . identifier[groups] ()[ literal[int] ])
identifier[opt_line] = keyword[False]
identifier[units_dict] ={ literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] }
identifier[scale_dict] ={ literal[string] : literal[int] , literal[string] : literal[int] , literal[string] : literal[int] , literal[string] : literal[int] }
identifier[units_opts] =[ literal[string] , literal[string] , literal[string] , literal[string] ]
identifier[type_opts] =[ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]
identifier[format_opts] =[ literal[string] , literal[string] , literal[string] ]
identifier[opts] = identifier[dict] ( identifier[units] = keyword[None] , identifier[ptype] = keyword[None] , identifier[pformat] = keyword[None] , identifier[z0] = keyword[None] )
identifier[data] =[]
keyword[with] identifier[open] ( identifier[fname] , literal[string] ) keyword[as] identifier[fobj] :
keyword[for] identifier[num] , identifier[line] keyword[in] identifier[enumerate] ( identifier[fobj] ):
identifier[line] = identifier[line] . identifier[strip] (). identifier[upper] ()
keyword[if] identifier[line] . identifier[startswith] ( literal[string] ):
keyword[continue]
keyword[if] ( keyword[not] identifier[opt_line] ) keyword[and] ( keyword[not] identifier[line] . identifier[startswith] ( literal[string] )):
identifier[exnoopt] ( keyword[True] )
keyword[if] keyword[not] identifier[opt_line] :
identifier[opt_line] = keyword[True]
identifier[tokens] = identifier[line] [ literal[int] :]. identifier[split] ()
keyword[if] literal[string] keyword[in] identifier[tokens] :
identifier[idx] = identifier[tokens] . identifier[index] ( literal[string] )
identifier[add] = literal[int]
keyword[if] identifier[len] ( identifier[tokens] )> identifier[idx] + literal[int] :
keyword[try] :
identifier[opts] [ literal[string] ]= identifier[float] ( identifier[tokens] [ identifier[idx] + literal[int] ])
identifier[add] = literal[int]
keyword[except] :
keyword[pass]
identifier[tokens] = identifier[tokens] [: identifier[idx] ]+ identifier[tokens] [ identifier[idx] + identifier[add] :]
identifier[matches] = literal[int]
keyword[for] identifier[token] keyword[in] identifier[tokens] :
keyword[if] ( identifier[token] keyword[in] identifier[format_opts] ) keyword[and] ( keyword[not] identifier[opts] [ literal[string] ]):
identifier[matches] += literal[int]
identifier[opts] [ literal[string] ]= identifier[token]
keyword[elif] ( identifier[token] keyword[in] identifier[units_opts] ) keyword[and] ( keyword[not] identifier[opts] [ literal[string] ]):
identifier[matches] += literal[int]
identifier[opts] [ literal[string] ]= identifier[units_dict] [ identifier[token] ]
keyword[elif] ( identifier[token] keyword[in] identifier[type_opts] ) keyword[and] ( keyword[not] identifier[opts] [ literal[string] ]):
identifier[matches] += literal[int]
identifier[opts] [ literal[string] ]= identifier[token]
identifier[exopt] ( identifier[matches] != identifier[len] ( identifier[tokens] ))
keyword[if] identifier[opt_line] keyword[and] identifier[line] . identifier[startswith] ( literal[string] ):
keyword[continue]
keyword[try] :
keyword[if] literal[string] keyword[in] identifier[line] :
identifier[idx] = identifier[line] . identifier[index] ( literal[string] )
identifier[line] = identifier[line] [: identifier[idx] ]
identifier[tokens] =[ identifier[float] ( identifier[item] ) keyword[for] identifier[item] keyword[in] identifier[line] . identifier[split] ()]
identifier[data] . identifier[append] ( identifier[tokens] )
keyword[except] :
identifier[exline] ( keyword[True] , identifier[edata] ={ literal[string] : literal[string] , literal[string] : identifier[num] + literal[int] })
identifier[data] = identifier[np] . identifier[concatenate] ( identifier[data] )
identifier[exnodata] ( keyword[not] identifier[data] . identifier[size] , identifier[edata] ={ literal[string] : literal[string] , literal[string] : identifier[fname] })
identifier[opts] [ literal[string] ]= identifier[opts] [ literal[string] ] keyword[or] literal[string]
identifier[opts] [ literal[string] ]= identifier[opts] [ literal[string] ] keyword[or] literal[string]
identifier[opts] [ literal[string] ]= identifier[opts] [ literal[string] ] keyword[or] literal[string]
identifier[opts] [ literal[string] ]= identifier[opts] [ literal[string] ] keyword[or] literal[int]
identifier[data_dict] ={}
identifier[nums_per_freq] = literal[int] +( literal[int] *( identifier[nports] ** literal[int] ))
identifier[fslice] = identifier[slice] ( literal[int] , identifier[data] . identifier[size] , identifier[nums_per_freq] )
identifier[freq] = identifier[data] [ identifier[fslice] ]
identifier[ndiff] = identifier[np] . identifier[diff] ( identifier[freq] )
identifier[ndict] ={}
keyword[if] ( identifier[nports] == literal[int] ) keyword[and] identifier[ndiff] . identifier[size] keyword[and] ( identifier[min] ( identifier[ndiff] )<= literal[int] ):
identifier[npoints] = identifier[np] . identifier[where] ( identifier[ndiff] <= literal[int] )[ literal[int] ][ literal[int] ]+ literal[int]
identifier[freq] = identifier[freq] [: identifier[npoints] ]
identifier[ndata] = identifier[data] [ literal[int] * identifier[npoints] :]
identifier[nfpoints] = identifier[int] ( identifier[ndata] . identifier[size] / literal[int] )
identifier[exndata] ( identifier[ndata] . identifier[size] % literal[int] != literal[int] )
identifier[data] = identifier[data] [: literal[int] * identifier[npoints] ]
identifier[ndiff] = literal[int]
identifier[nfslice] = identifier[slice] ( literal[int] , identifier[ndata] . identifier[size] , literal[int] )
identifier[nfreq] = identifier[ndata] [ identifier[nfslice] ]
identifier[ndiff] = identifier[np] . identifier[diff] ( identifier[nfreq] )
identifier[exnfreq] ( identifier[bool] ( identifier[ndiff] . identifier[size] keyword[and] ( identifier[min] ( identifier[ndiff] )<= literal[int] )))
identifier[nfig_slice] = identifier[slice] ( literal[int] , identifier[ndata] . identifier[size] , literal[int] )
identifier[rlmag_slice] = identifier[slice] ( literal[int] , identifier[ndata] . identifier[size] , literal[int] )
identifier[rlphase_slice] = identifier[slice] ( literal[int] , identifier[ndata] . identifier[size] , literal[int] )
identifier[res_slice] = identifier[slice] ( literal[int] , identifier[ndata] . identifier[size] , literal[int] )
identifier[ndict] [ literal[string] ]= identifier[scale_dict] [ identifier[opts] [ literal[string] ]. identifier[upper] ()]* identifier[nfreq]
identifier[ndict] [ literal[string] ]= identifier[ndata] [ identifier[nfig_slice] ]
identifier[ndict] [ literal[string] ]= identifier[ndata] [ identifier[rlmag_slice] ]* identifier[np] . identifier[exp] ( literal[int] * identifier[ndata] [ identifier[rlphase_slice] ])
identifier[ndict] [ literal[string] ]= identifier[ndata] [ identifier[res_slice] ]
identifier[ndict] [ literal[string] ]= identifier[nfpoints]
identifier[exdata] ( identifier[data] . identifier[size] % identifier[nums_per_freq] != literal[int] )
identifier[npoints] = identifier[int] ( identifier[data] . identifier[size] / identifier[nums_per_freq] )
identifier[exfreq] ( identifier[bool] ( identifier[ndiff] . identifier[size] keyword[and] ( identifier[min] ( identifier[ndiff] )<= literal[int] )))
identifier[data_dict] [ literal[string] ]= identifier[scale_dict] [ identifier[opts] [ literal[string] ]. identifier[upper] ()]* identifier[freq]
identifier[d1slice] = identifier[slice] ( literal[int] , identifier[data] . identifier[size] , literal[int] )
identifier[d2slice] = identifier[slice] ( literal[int] , identifier[data] . identifier[size] , literal[int] )
identifier[data] = identifier[np] . identifier[delete] ( identifier[data] , identifier[fslice] )
keyword[if] identifier[opts] [ literal[string] ]== literal[string] :
identifier[data] = identifier[data] [ identifier[d1slice] ]* identifier[np] . identifier[exp] ( literal[int] * identifier[np] . identifier[deg2rad] ( identifier[data] [ identifier[d2slice] ]))
keyword[elif] identifier[opts] [ literal[string] ]== literal[string] :
identifier[data] = identifier[data] [ identifier[d1slice] ]+( literal[int] * identifier[data] [ identifier[d2slice] ])
keyword[else] :
identifier[data] =( literal[int] **( identifier[data] [ identifier[d1slice] ]/ literal[int] ))* identifier[np] . identifier[exp] ( literal[int] * identifier[np] . identifier[deg2rad] ( identifier[data] [ identifier[d2slice] ]))
keyword[if] identifier[nports] > literal[int] :
identifier[data_dict] [ literal[string] ]= identifier[np] . identifier[resize] ( identifier[data] ,( identifier[npoints] , identifier[nports] , identifier[nports] ))
keyword[else] :
identifier[data_dict] [ literal[string] ]= identifier[copy] . identifier[copy] ( identifier[data] )
keyword[del] identifier[data]
identifier[data_dict] [ literal[string] ]= identifier[npoints]
keyword[if] identifier[nports] == literal[int] :
identifier[data_dict] [ literal[string] ]= identifier[np] . identifier[transpose] ( identifier[data_dict] [ literal[string] ],( literal[int] , literal[int] , literal[int] ))
keyword[return] identifier[dict] ( identifier[nports] = identifier[nports] , identifier[opts] = identifier[opts] , identifier[data] = identifier[data_dict] , identifier[noise] = identifier[ndict] )
|
def read_touchstone(fname):
"""
Read a `Touchstone <https://ibis.org/connector/touchstone_spec11.pdf>`_ file.
According to the specification, a data line can have values for at most
four complex parameters (plus, potentially, the frequency point); however,
this function is able to process malformed files as long as they have the
correct number of data points (:code:`points` x :code:`nports` x
:code:`nports`, where :code:`points` represents the number of frequency
points and :code:`nports` represents the number of ports in the file). Per
the Touchstone specification, noise data is only supported for two-port
files.
:param fname: Touchstone file name
:type fname: `FileNameExists <https://pexdoc.readthedocs.io/en/stable/
ptypes.html#filenameexists>`_
:rtype: dictionary with the following structure:
* **nports** (*integer*) -- number of ports
* **opts** (:ref:`TouchstoneOptions`) -- File options
* **data** (:ref:`TouchstoneData`) -- Parameter data
* **noise** (:ref:`TouchstoneNoiseData`) -- Noise data; per the Touchstone
specification, only supported in 2-port files
.. [[[cog cog.out(exobj.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for
.. peng.touchstone.read_touchstone
:raises:
* OSError (File *[fname]* could not be found)
* RuntimeError (Argument \\`fname\\` is not valid)
* RuntimeError (File *[fname]* does not have a valid extension)
* RuntimeError (File *[fname]* has no data)
* RuntimeError (First non-comment line is not the option line)
* RuntimeError (Frequency must increase)
* RuntimeError (Illegal data in line *[lineno]*)
* RuntimeError (Illegal option line)
* RuntimeError (Malformed data)
* RuntimeError (Malformed noise data)
* RuntimeError (Noise frequency must increase)
.. [[[end]]]
.. note:: The returned parameter(s) are complex numbers in real and
imaginary format regardless of the format used in the Touchstone file.
Similarly, the returned frequency vector unit is Hertz regardless of
the unit used in the Touchstone file
"""
# pylint: disable=R0912,R0915,W0702
# Exceptions definitions
exnports = pexdoc.exh.addex(RuntimeError, 'File *[fname]* does not have a valid extension')
exnoopt = pexdoc.exh.addex(RuntimeError, 'First non-comment line is not the option line')
exopt = pexdoc.exh.addex(RuntimeError, 'Illegal option line')
exline = pexdoc.exh.addex(RuntimeError, 'Illegal data in line *[lineno]*')
exnodata = pexdoc.exh.addex(RuntimeError, 'File *[fname]* has no data')
exdata = pexdoc.exh.addex(RuntimeError, 'Malformed data')
exndata = pexdoc.exh.addex(RuntimeError, 'Malformed noise data')
exfreq = pexdoc.exh.addex(RuntimeError, 'Frequency must increase')
exnfreq = pexdoc.exh.addex(RuntimeError, 'Noise frequency must increase')
# Verify that file has correct extension format
(_, ext) = os.path.splitext(fname)
ext = ext.lower()
nports_regexp = re.compile('\\.s(\\d+)p')
match = nports_regexp.match(ext)
exnports(not match, edata={'field': 'fname', 'value': fname})
nports = int(match.groups()[0])
opt_line = False
units_dict = {'GHZ': 'GHz', 'MHZ': 'MHz', 'KHZ': 'KHz', 'HZ': 'Hz'}
scale_dict = {'GHZ': 1000000000.0, 'MHZ': 1000000.0, 'KHZ': 1000.0, 'HZ': 1.0}
units_opts = ['GHZ', 'MHZ', 'KHZ', 'HZ']
type_opts = ['S', 'Y', 'Z', 'H', 'G']
format_opts = ['DB', 'MA', 'RI']
opts = dict(units=None, ptype=None, pformat=None, z0=None)
data = []
with open(fname, 'r') as fobj:
for (num, line) in enumerate(fobj):
line = line.strip().upper()
# Comment line
if line.startswith('!'):
continue # depends on [control=['if'], data=[]]
# Options line
if not opt_line and (not line.startswith('#')):
exnoopt(True) # depends on [control=['if'], data=[]]
if not opt_line:
# Each Touchstone data file must contain an option line
# (additional option lines after the first one will be ignored)
opt_line = True
tokens = line[1:].split() # Remove initial hash
if 'R' in tokens:
idx = tokens.index('R')
add = 1
if len(tokens) > idx + 1:
try:
opts['z0'] = float(tokens[idx + 1])
add = 2 # depends on [control=['try'], data=[]]
except:
pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
tokens = tokens[:idx] + tokens[idx + add:] # depends on [control=['if'], data=['tokens']]
matches = 0
for token in tokens:
if token in format_opts and (not opts['pformat']):
matches += 1
opts['pformat'] = token # depends on [control=['if'], data=[]]
elif token in units_opts and (not opts['units']):
matches += 1
opts['units'] = units_dict[token] # depends on [control=['if'], data=[]]
elif token in type_opts and (not opts['ptype']):
matches += 1
opts['ptype'] = token # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['token']]
exopt(matches != len(tokens)) # depends on [control=['if'], data=[]]
if opt_line and line.startswith('#'):
continue # depends on [control=['if'], data=[]]
# Data lines
try:
if '!' in line:
idx = line.index('!')
line = line[:idx] # depends on [control=['if'], data=['line']]
tokens = [float(item) for item in line.split()]
data.append(tokens) # depends on [control=['try'], data=[]]
except:
exline(True, edata={'field': 'lineno', 'value': num + 1}) # depends on [control=['except'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['with'], data=['fobj']]
data = np.concatenate(data)
exnodata(not data.size, edata={'field': 'fname', 'value': fname})
# Set option defaults
opts['units'] = opts['units'] or 'GHz'
opts['ptype'] = opts['ptype'] or 'S'
opts['pformat'] = opts['pformat'] or 'MA'
opts['z0'] = opts['z0'] or 50
# Format data
data_dict = {}
nums_per_freq = 1 + 2 * nports ** 2
fslice = slice(0, data.size, nums_per_freq)
freq = data[fslice]
ndiff = np.diff(freq)
ndict = {}
if nports == 2 and ndiff.size and (min(ndiff) <= 0):
# Extract noise data
npoints = np.where(ndiff <= 0)[0][0] + 1
freq = freq[:npoints]
ndata = data[9 * npoints:]
nfpoints = int(ndata.size / 5.0)
exndata(ndata.size % 5 != 0)
data = data[:9 * npoints]
ndiff = 1
nfslice = slice(0, ndata.size, 5)
nfreq = ndata[nfslice]
ndiff = np.diff(nfreq)
exnfreq(bool(ndiff.size and min(ndiff) <= 0))
nfig_slice = slice(1, ndata.size, 5)
rlmag_slice = slice(2, ndata.size, 5)
rlphase_slice = slice(3, ndata.size, 5)
res_slice = slice(4, ndata.size, 5)
ndict['freq'] = scale_dict[opts['units'].upper()] * nfreq
ndict['nf'] = ndata[nfig_slice]
ndict['rc'] = ndata[rlmag_slice] * np.exp(1j * ndata[rlphase_slice])
ndict['res'] = ndata[res_slice]
ndict['points'] = nfpoints # depends on [control=['if'], data=[]]
exdata(data.size % nums_per_freq != 0)
npoints = int(data.size / nums_per_freq)
exfreq(bool(ndiff.size and min(ndiff) <= 0))
data_dict['freq'] = scale_dict[opts['units'].upper()] * freq
d1slice = slice(0, data.size, 2)
d2slice = slice(1, data.size, 2)
data = np.delete(data, fslice)
# For format that has angle information, the angle is given in degrees
if opts['pformat'] == 'MA':
data = data[d1slice] * np.exp(1j * np.deg2rad(data[d2slice])) # depends on [control=['if'], data=[]]
elif opts['pformat'] == 'RI':
data = data[d1slice] + 1j * data[d2slice] # depends on [control=['if'], data=[]]
else: # if opts['pformat'] == 'DB':
data = 10 ** (data[d1slice] / 20.0) * np.exp(1j * np.deg2rad(data[d2slice]))
if nports > 1:
data_dict['pars'] = np.resize(data, (npoints, nports, nports)) # depends on [control=['if'], data=['nports']]
else:
data_dict['pars'] = copy.copy(data)
del data
data_dict['points'] = npoints
if nports == 2:
# The order of data for a two-port file is N11, N21, N12, N22 but for
# m ports where m > 2, the order is N11, N12, N13, ..., N1m
data_dict['pars'] = np.transpose(data_dict['pars'], (0, 2, 1)) # depends on [control=['if'], data=[]]
return dict(nports=nports, opts=opts, data=data_dict, noise=ndict)
|
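End-to-end usage sketch for read_touchstone, writing a tiny one-port file and reading it back. The peng.touchstone import path follows the docstring; the file contents are invented for illustration.
from peng.touchstone import read_touchstone
with open('example.s1p', 'w') as fobj:
    fobj.write('! one-port example\n')
    fobj.write('# GHz S RI R 50\n')
    fobj.write('1.0 0.9 -0.1\n')
    fobj.write('2.0 0.8 -0.2\n')
ts = read_touchstone('example.s1p')
print(ts['nports'])        # 1
print(ts['data']['freq'])  # [1.e+09 2.e+09] -- always returned in Hz
print(ts['data']['pars'])  # [0.9-0.1j 0.8-0.2j] -- always real/imaginary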
def _repr_label_vector(self, label_vector):
"""
Return a human-readable representation of the Cannon label vector.
label_vector should be [[(1,2), (2,1)], [(1,3)]] etc.
"""
string = ["1"]
for cross_terms in label_vector:
sub_string = []
for index, order in cross_terms:
_ = self.grid_points.dtype.names[index]
if order > 1:
sub_string.append("{0}^{1}".format(_, order))
else:
sub_string.append(_)
string.append(" * ".join(sub_string))
return " + ".join(string)
|
def function[_repr_label_vector, parameter[self, label_vector]]:
constant[
Return a human-readable representation of the Cannon label vector.
label_vector should be [[(1,2), (2,1)], [(1,3)]] etc.
]
variable[string] assign[=] list[[<ast.Constant object at 0x7da18fe93c10>]]
for taget[name[cross_terms]] in starred[name[label_vector]] begin[:]
variable[sub_string] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da18fe92a10>, <ast.Name object at 0x7da18fe90ca0>]]] in starred[name[cross_terms]] begin[:]
variable[_] assign[=] call[name[self].grid_points.dtype.names][name[index]]
if compare[name[order] greater[>] constant[1]] begin[:]
call[name[sub_string].append, parameter[call[constant[{0}^{1}].format, parameter[name[_], name[order]]]]]
call[name[string].append, parameter[call[constant[ * ].join, parameter[name[sub_string]]]]]
return[call[constant[ + ].join, parameter[name[string]]]]
|
keyword[def] identifier[_repr_label_vector] ( identifier[self] , identifier[label_vector] ):
literal[string]
identifier[string] =[ literal[string] ]
keyword[for] identifier[cross_terms] keyword[in] identifier[label_vector] :
identifier[sub_string] =[]
keyword[for] identifier[index] , identifier[order] keyword[in] identifier[cross_terms] :
identifier[_] = identifier[self] . identifier[grid_points] . identifier[dtype] . identifier[names] [ identifier[index] ]
keyword[if] identifier[order] > literal[int] :
identifier[sub_string] . identifier[append] ( literal[string] . identifier[format] ( identifier[_] , identifier[order] ))
keyword[else] :
identifier[sub_string] . identifier[append] ( identifier[_] )
identifier[string] . identifier[append] ( literal[string] . identifier[join] ( identifier[sub_string] ))
keyword[return] literal[string] . identifier[join] ( identifier[string] )
|
def _repr_label_vector(self, label_vector):
"""
Return a human-readable representation of the Cannon label vector.
label_vector should be [[(1,2), (2,1)], [(1,3)]] etc.
"""
string = ['1']
for cross_terms in label_vector:
sub_string = []
for (index, order) in cross_terms:
_ = self.grid_points.dtype.names[index]
if order > 1:
sub_string.append('{0}^{1}'.format(_, order)) # depends on [control=['if'], data=['order']]
else:
sub_string.append(_) # depends on [control=['for'], data=[]]
string.append(' * '.join(sub_string)) # depends on [control=['for'], data=['cross_terms']]
return ' + '.join(string)
|
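A standalone restatement of _repr_label_vector with the grid-point names passed in directly, since self.grid_points is not shown in this sample:
def repr_label_vector(names, label_vector):
    # '1' is the constant term; each sub-list is a product of labels.
    string = ['1']
    for cross_terms in label_vector:
        sub = []
        for index, order in cross_terms:
            name = names[index]
            sub.append('{0}^{1}'.format(name, order) if order > 1 else name)
        string.append(' * '.join(sub))
    return ' + '.join(string)
print(repr_label_vector(('teff', 'logg', 'feh'), [[(1, 2), (2, 1)], [(1, 3)]]))
# -> 1 + logg^2 * feh + logg^3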
def add(name, **kwargs):
'''
Add a job to the schedule
CLI Example:
.. code-block:: bash
salt '*' schedule.add job1 function='test.ping' seconds=3600
# If the function takes arguments, use job_args
salt '*' schedule.add job2 function='cmd.run' job_args="['date >> /tmp/date.log']" seconds=60
'''
ret = {'comment': 'Failed to add job {0} to schedule.'.format(name),
'result': False}
if name in list_(show_all=True, return_yaml=False):
ret['comment'] = 'Job {0} already exists in schedule.'.format(name)
ret['result'] = False
return ret
if not name:
ret['comment'] = 'Job name is required.'
ret['result'] = False
time_conflict = False
for item in ['seconds', 'minutes', 'hours', 'days']:
if item in kwargs and 'when' in kwargs:
time_conflict = True
if item in kwargs and 'cron' in kwargs:
time_conflict = True
if time_conflict:
ret['comment'] = 'Error: Unable to use "seconds", "minutes", "hours", or "days" with "when" or "cron" options.'
return ret
if 'when' in kwargs and 'cron' in kwargs:
ret['comment'] = 'Unable to use "when" and "cron" options together. Ignoring.'
return ret
persist = True
if 'persist' in kwargs:
persist = kwargs['persist']
_new = build_schedule_item(name, **kwargs)
if 'result' in _new and not _new['result']:
return _new
schedule_data = {}
schedule_data[name] = _new
if 'test' in kwargs and kwargs['test']:
ret['comment'] = 'Job: {0} would be added to schedule.'.format(name)
ret['result'] = True
else:
try:
eventer = salt.utils.event.get_event('minion', opts=__opts__)
res = __salt__['event.fire']({'name': name,
'schedule': schedule_data,
'func': 'add',
'persist': persist}, 'manage_schedule')
if res:
event_ret = eventer.get_event(tag='/salt/minion/minion_schedule_add_complete', wait=30)
if event_ret and event_ret['complete']:
schedule = event_ret['schedule']
if name in schedule:
ret['result'] = True
ret['comment'] = 'Added job: {0} to schedule.'.format(name)
return ret
except KeyError:
# Effectively a no-op, since we can't really return without an event system
ret['comment'] = 'Event module not available. Schedule add failed.'
return ret
|
def function[add, parameter[name]]:
constant[
Add a job to the schedule
CLI Example:
.. code-block:: bash
salt '*' schedule.add job1 function='test.ping' seconds=3600
# If the function takes arguments, use job_args
salt '*' schedule.add job2 function='cmd.run' job_args="['date >> /tmp/date.log']" seconds=60
]
variable[ret] assign[=] dictionary[[<ast.Constant object at 0x7da1b1c98eb0>, <ast.Constant object at 0x7da1b1c99210>], [<ast.Call object at 0x7da1b1c9a950>, <ast.Constant object at 0x7da1b1c98310>]]
if compare[name[name] in call[name[list_], parameter[]]] begin[:]
call[name[ret]][constant[comment]] assign[=] call[constant[Job {0} already exists in schedule.].format, parameter[name[name]]]
call[name[ret]][constant[result]] assign[=] constant[False]
return[name[ret]]
if <ast.UnaryOp object at 0x7da1b1c98400> begin[:]
call[name[ret]][constant[comment]] assign[=] constant[Job name is required.]
call[name[ret]][constant[result]] assign[=] constant[False]
variable[time_conflict] assign[=] constant[False]
for taget[name[item]] in starred[list[[<ast.Constant object at 0x7da1b1c99510>, <ast.Constant object at 0x7da1b1c9bd30>, <ast.Constant object at 0x7da1b1c9b0d0>, <ast.Constant object at 0x7da1b1c9bca0>]]] begin[:]
if <ast.BoolOp object at 0x7da1b1c9b580> begin[:]
variable[time_conflict] assign[=] constant[True]
if <ast.BoolOp object at 0x7da1b1c98160> begin[:]
variable[time_conflict] assign[=] constant[True]
if name[time_conflict] begin[:]
call[name[ret]][constant[comment]] assign[=] constant[Error: Unable to use "seconds", "minutes", "hours", or "days" with "when" or "cron" options.]
return[name[ret]]
if <ast.BoolOp object at 0x7da1b1c9a410> begin[:]
call[name[ret]][constant[comment]] assign[=] constant[Unable to use "when" and "cron" options together. Ignoring.]
return[name[ret]]
variable[persist] assign[=] constant[True]
if compare[constant[persist] in name[kwargs]] begin[:]
variable[persist] assign[=] call[name[kwargs]][constant[persist]]
variable[_new] assign[=] call[name[build_schedule_item], parameter[name[name]]]
if <ast.BoolOp object at 0x7da1b1c995d0> begin[:]
return[name[_new]]
variable[schedule_data] assign[=] dictionary[[], []]
call[name[schedule_data]][name[name]] assign[=] name[_new]
if <ast.BoolOp object at 0x7da1b1c9acb0> begin[:]
call[name[ret]][constant[comment]] assign[=] call[constant[Job: {0} would be added to schedule.].format, parameter[name[name]]]
call[name[ret]][constant[result]] assign[=] constant[True]
return[name[ret]]
|
keyword[def] identifier[add] ( identifier[name] ,** identifier[kwargs] ):
literal[string]
identifier[ret] ={ literal[string] : literal[string] . identifier[format] ( identifier[name] ),
literal[string] : keyword[False] }
keyword[if] identifier[name] keyword[in] identifier[list_] ( identifier[show_all] = keyword[True] , identifier[return_yaml] = keyword[False] ):
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[name] )
identifier[ret] [ literal[string] ]= keyword[False]
keyword[return] identifier[ret]
keyword[if] keyword[not] identifier[name] :
identifier[ret] [ literal[string] ]= literal[string]
identifier[ret] [ literal[string] ]= keyword[False]
identifier[time_conflict] = keyword[False]
keyword[for] identifier[item] keyword[in] [ literal[string] , literal[string] , literal[string] , literal[string] ]:
keyword[if] identifier[item] keyword[in] identifier[kwargs] keyword[and] literal[string] keyword[in] identifier[kwargs] :
identifier[time_conflict] = keyword[True]
keyword[if] identifier[item] keyword[in] identifier[kwargs] keyword[and] literal[string] keyword[in] identifier[kwargs] :
identifier[time_conflict] = keyword[True]
keyword[if] identifier[time_conflict] :
identifier[ret] [ literal[string] ]= literal[string]
keyword[return] identifier[ret]
keyword[if] literal[string] keyword[in] identifier[kwargs] keyword[and] literal[string] keyword[in] identifier[kwargs] :
identifier[ret] [ literal[string] ]= literal[string]
keyword[return] identifier[ret]
identifier[persist] = keyword[True]
keyword[if] literal[string] keyword[in] identifier[kwargs] :
identifier[persist] = identifier[kwargs] [ literal[string] ]
identifier[_new] = identifier[build_schedule_item] ( identifier[name] ,** identifier[kwargs] )
keyword[if] literal[string] keyword[in] identifier[_new] keyword[and] keyword[not] identifier[_new] [ literal[string] ]:
keyword[return] identifier[_new]
identifier[schedule_data] ={}
identifier[schedule_data] [ identifier[name] ]= identifier[_new]
keyword[if] literal[string] keyword[in] identifier[kwargs] keyword[and] identifier[kwargs] [ literal[string] ]:
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[name] )
identifier[ret] [ literal[string] ]= keyword[True]
keyword[else] :
keyword[try] :
identifier[eventer] = identifier[salt] . identifier[utils] . identifier[event] . identifier[get_event] ( literal[string] , identifier[opts] = identifier[__opts__] )
identifier[res] = identifier[__salt__] [ literal[string] ]({ literal[string] : identifier[name] ,
literal[string] : identifier[schedule_data] ,
literal[string] : literal[string] ,
literal[string] : identifier[persist] }, literal[string] )
keyword[if] identifier[res] :
identifier[event_ret] = identifier[eventer] . identifier[get_event] ( identifier[tag] = literal[string] , identifier[wait] = literal[int] )
keyword[if] identifier[event_ret] keyword[and] identifier[event_ret] [ literal[string] ]:
identifier[schedule] = identifier[event_ret] [ literal[string] ]
keyword[if] identifier[name] keyword[in] identifier[schedule] :
identifier[ret] [ literal[string] ]= keyword[True]
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[name] )
keyword[return] identifier[ret]
keyword[except] identifier[KeyError] :
identifier[ret] [ literal[string] ]= literal[string]
keyword[return] identifier[ret]
|
def add(name, **kwargs):
"""
Add a job to the schedule
CLI Example:
.. code-block:: bash
salt '*' schedule.add job1 function='test.ping' seconds=3600
# If function have some arguments, use job_args
salt '*' schedule.add job2 function='cmd.run' job_args="['date >> /tmp/date.log']" seconds=60
"""
ret = {'comment': 'Failed to add job {0} to schedule.'.format(name), 'result': False}
if name in list_(show_all=True, return_yaml=False):
ret['comment'] = 'Job {0} already exists in schedule.'.format(name)
ret['result'] = False
return ret # depends on [control=['if'], data=['name']]
if not name:
ret['comment'] = 'Job name is required.'
ret['result'] = False # depends on [control=['if'], data=[]]
time_conflict = False
for item in ['seconds', 'minutes', 'hours', 'days']:
if item in kwargs and 'when' in kwargs:
time_conflict = True # depends on [control=['if'], data=[]]
if item in kwargs and 'cron' in kwargs:
time_conflict = True # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['item']]
if time_conflict:
ret['comment'] = 'Error: Unable to use "seconds", "minutes", "hours", or "days" with "when" or "cron" options.'
return ret # depends on [control=['if'], data=[]]
if 'when' in kwargs and 'cron' in kwargs:
ret['comment'] = 'Unable to use "when" and "cron" options together. Ignoring.'
return ret # depends on [control=['if'], data=[]]
persist = True
if 'persist' in kwargs:
persist = kwargs['persist'] # depends on [control=['if'], data=['kwargs']]
_new = build_schedule_item(name, **kwargs)
if 'result' in _new and (not _new['result']):
return _new # depends on [control=['if'], data=[]]
schedule_data = {}
schedule_data[name] = _new
if 'test' in kwargs and kwargs['test']:
ret['comment'] = 'Job: {0} would be added to schedule.'.format(name)
ret['result'] = True # depends on [control=['if'], data=[]]
else:
try:
eventer = salt.utils.event.get_event('minion', opts=__opts__)
res = __salt__['event.fire']({'name': name, 'schedule': schedule_data, 'func': 'add', 'persist': persist}, 'manage_schedule')
if res:
event_ret = eventer.get_event(tag='/salt/minion/minion_schedule_add_complete', wait=30)
if event_ret and event_ret['complete']:
schedule = event_ret['schedule']
if name in schedule:
ret['result'] = True
ret['comment'] = 'Added job: {0} to schedule.'.format(name)
return ret # depends on [control=['if'], data=['name']] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except KeyError:
# Effectively a no-op, since we can't really return without an event system
ret['comment'] = 'Event module not available. Schedule add failed.' # depends on [control=['except'], data=[]]
return ret
|
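The schedule.add sample above rejects interval options combined with when/cron. A standalone restatement of that conflict check (the real function additionally needs the salt event bus, which is not reproduced here):
def has_time_conflict(kwargs):
    interval_keys = {'seconds', 'minutes', 'hours', 'days'}
    # Interval scheduling cannot be combined with 'when' or 'cron'.
    return bool(interval_keys & set(kwargs)) and ('when' in kwargs or 'cron' in kwargs)
assert has_time_conflict({'seconds': 3600, 'when': '5:00pm'})
assert has_time_conflict({'hours': 1, 'cron': '0 * * * *'})
assert not has_time_conflict({'seconds': 3600})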
def append(self, key, value):
"""Adds a (name, value) pair, doesn't overwrite the value if it already
exists."""
self.__items.append((key, value))
try:
dict_getitem(self, key).append(value)
except KeyError:
dict_setitem(self, key, [value])
|
def function[append, parameter[self, key, value]]:
constant[Adds a (name, value) pair; if the key already exists, the value is
appended to its list instead of overwriting.]
call[name[self].__items.append, parameter[tuple[[<ast.Name object at 0x7da1b05ec7f0>, <ast.Name object at 0x7da1b05ef760>]]]]
<ast.Try object at 0x7da1b05ef1f0>
|
keyword[def] identifier[append] ( identifier[self] , identifier[key] , identifier[value] ):
literal[string]
identifier[self] . identifier[__items] . identifier[append] (( identifier[key] , identifier[value] ))
keyword[try] :
identifier[dict_getitem] ( identifier[self] , identifier[key] ). identifier[append] ( identifier[value] )
keyword[except] identifier[KeyError] :
identifier[dict_setitem] ( identifier[self] , identifier[key] ,[ identifier[value] ])
|
def append(self, key, value):
"""Adds a (name, value) pair, doesn't overwrite the value if it already
exists."""
self.__items.append((key, value))
try:
dict_getitem(self, key).append(value) # depends on [control=['try'], data=[]]
except KeyError:
dict_setitem(self, key, [value]) # depends on [control=['except'], data=[]]
|
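A self-contained sketch of the container pattern behind the append sample above: a dict subclass keeps an ordered (key, value) log while mapping each key to a list of values. The class name and attribute are illustrative stand-ins for the original, which is not shown:
class MultiDict(dict):
    def __init__(self):
        super().__init__()
        self._items = []  # ordered (key, value) log
    def append(self, key, value):
        self._items.append((key, value))
        try:
            dict.__getitem__(self, key).append(value)
        except KeyError:
            dict.__setitem__(self, key, [value])
md = MultiDict()
md.append('Set-Cookie', 'a=1')
md.append('Set-Cookie', 'b=2')
assert md['Set-Cookie'] == ['a=1', 'b=2']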
def update(self, d):
"""Update the dict with the dict tree in parameter d.
Parameters
----------
d : dict
New dict content
"""
# Call __setitem__ for all keys in d
for key in list(d.keys()):
self.__setitem__(key, d[key])
|
def function[update, parameter[self, d]]:
constant[Update the dict with the dict tree in parameter d.
Parameters
----------
d : dict
New dict content
]
for taget[name[key]] in starred[call[name[list], parameter[call[name[d].keys, parameter[]]]]] begin[:]
call[name[self].__setitem__, parameter[name[key], call[name[d]][name[key]]]]
|
keyword[def] identifier[update] ( identifier[self] , identifier[d] ):
literal[string]
keyword[for] identifier[key] keyword[in] identifier[list] ( identifier[d] . identifier[keys] ()):
identifier[self] . identifier[__setitem__] ( identifier[key] , identifier[d] [ identifier[key] ])
|
def update(self, d):
"""Update the dict with the dict tree in parameter d.
Parameters
----------
d : dict
New dict content
"""
# Call __setitem__ for all keys in d
for key in list(d.keys()):
self.__setitem__(key, d[key]) # depends on [control=['for'], data=['key']]
|
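The point of the update sample above is that every top-level key is routed through self.__setitem__, so a subclass hook on __setitem__ sees each assignment (a plain dict.update would bypass it). An illustrative subclass:
class LoggingDict(dict):
    def __setitem__(self, key, value):
        print('set', key)  # the per-key hook that update() relies on
        super().__setitem__(key, value)
    def update(self, d):
        # Call __setitem__ for all keys in d, as in the sample above.
        for key in list(d.keys()):
            self.__setitem__(key, d[key])
t = LoggingDict()
t.update({'a': 1, 'b': {'c': 2}})  # prints "set a" then "set b"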
def get(self, key):
"""
Transactional implementation of :func:`Map.get(key) <hazelcast.proxy.map.Map.get>`
:param key: (object), the specified key.
:return: (object), the value for the specified key.
"""
check_not_none(key, "key can't be none")
return self._encode_invoke(transactional_map_get_codec, key=self._to_data(key))
|
def function[get, parameter[self, key]]:
constant[
Transactional implementation of :func:`Map.get(key) <hazelcast.proxy.map.Map.get>`
:param key: (object), the specified key.
:return: (object), the value for the specified key.
]
call[name[check_not_none], parameter[name[key], constant[key can't be none]]]
return[call[name[self]._encode_invoke, parameter[name[transactional_map_get_codec]]]]
|
keyword[def] identifier[get] ( identifier[self] , identifier[key] ):
literal[string]
identifier[check_not_none] ( identifier[key] , literal[string] )
keyword[return] identifier[self] . identifier[_encode_invoke] ( identifier[transactional_map_get_codec] , identifier[key] = identifier[self] . identifier[_to_data] ( identifier[key] ))
|
def get(self, key):
"""
Transactional implementation of :func:`Map.get(key) <hazelcast.proxy.map.Map.get>`
:param key: (object), the specified key.
:return: (object), the value for the specified key.
"""
check_not_none(key, "key can't be none")
return self._encode_invoke(transactional_map_get_codec, key=self._to_data(key))
|
def toURLEncoded(self):
"""Generate an x-www-urlencoded string"""
args = sorted(self.toPostArgs().items())
return urllib.parse.urlencode(args)
|
def function[toURLEncoded, parameter[self]]:
constant[Generate an x-www-urlencoded string]
variable[args] assign[=] call[name[sorted], parameter[call[call[name[self].toPostArgs, parameter[]].items, parameter[]]]]
return[call[name[urllib].parse.urlencode, parameter[name[args]]]]
|
keyword[def] identifier[toURLEncoded] ( identifier[self] ):
literal[string]
identifier[args] = identifier[sorted] ( identifier[self] . identifier[toPostArgs] (). identifier[items] ())
keyword[return] identifier[urllib] . identifier[parse] . identifier[urlencode] ( identifier[args] )
|
def toURLEncoded(self):
"""Generate an x-www-urlencoded string"""
args = sorted(self.toPostArgs().items())
return urllib.parse.urlencode(args)
|
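Example — why `toURLEncoded` sorts before encoding: sorting makes the output deterministic regardless of dict ordering, which is useful whenever encoded messages are compared or signed. A stand-in dict is used here in place of `toPostArgs()`:

import urllib.parse

post_args = {'openid.ns': 'http://specs.openid.net/auth/2.0',
             'openid.mode': 'checkid_setup'}
encoded = urllib.parse.urlencode(sorted(post_args.items()))
print(encoded)
# openid.mode=checkid_setup&openid.ns=http%3A%2F%2Fspecs.openid.net%2Fauth%2F2.0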
def _merge_statement_lists(stmsA: List["HdlStatement"], stmsB: List["HdlStatement"])\
-> List["HdlStatement"]:
"""
Merge two lists of statements into one
:return: list of merged statements
"""
if stmsA is None and stmsB is None:
return None
tmp = []
a_it = iter(stmsA)
b_it = iter(stmsB)
a = None
b = None
a_empty = False
b_empty = False
while not a_empty and not b_empty:
while not a_empty:
a = next(a_it, None)
if a is None:
a_empty = True
break
elif a.rank == 0:
# simple statement does not require merging
tmp.append(a)
a = None
else:
break
while not b_empty:
b = next(b_it, None)
if b is None:
b_empty = True
break
elif b.rank == 0:
# simple statement does not require merging
tmp.append(b)
b = None
else:
break
if a is not None or b is not None:
a._merge_with_other_stm(b)
tmp.append(a)
a = None
b = None
return tmp
|
def function[_merge_statement_lists, parameter[stmsA, stmsB]]:
constant[
Merge two lists of statements into one
:return: list of merged statements
]
if <ast.BoolOp object at 0x7da1b032f2e0> begin[:]
return[constant[None]]
variable[tmp] assign[=] list[[]]
variable[a_it] assign[=] call[name[iter], parameter[name[stmsA]]]
variable[b_it] assign[=] call[name[iter], parameter[name[stmsB]]]
variable[a] assign[=] constant[None]
variable[b] assign[=] constant[None]
variable[a_empty] assign[=] constant[False]
variable[b_empty] assign[=] constant[False]
while <ast.BoolOp object at 0x7da1b03b8c70> begin[:]
while <ast.UnaryOp object at 0x7da1b03b8ee0> begin[:]
variable[a] assign[=] call[name[next], parameter[name[a_it], constant[None]]]
if compare[name[a] is constant[None]] begin[:]
variable[a_empty] assign[=] constant[True]
break
while <ast.UnaryOp object at 0x7da1b03bbbb0> begin[:]
variable[b] assign[=] call[name[next], parameter[name[b_it], constant[None]]]
if compare[name[b] is constant[None]] begin[:]
variable[b_empty] assign[=] constant[True]
break
if <ast.BoolOp object at 0x7da1b03e1ae0> begin[:]
call[name[a]._merge_with_other_stm, parameter[name[b]]]
call[name[tmp].append, parameter[name[a]]]
variable[a] assign[=] constant[None]
variable[b] assign[=] constant[None]
return[name[tmp]]
|
keyword[def] identifier[_merge_statement_lists] ( identifier[stmsA] : identifier[List] [ literal[string] ], identifier[stmsB] : identifier[List] [ literal[string] ])-> identifier[List] [ literal[string] ]:
literal[string]
keyword[if] identifier[stmsA] keyword[is] keyword[None] keyword[and] identifier[stmsB] keyword[is] keyword[None] :
keyword[return] keyword[None]
identifier[tmp] =[]
identifier[a_it] = identifier[iter] ( identifier[stmsA] )
identifier[b_it] = identifier[iter] ( identifier[stmsB] )
identifier[a] = keyword[None]
identifier[b] = keyword[None]
identifier[a_empty] = keyword[False]
identifier[b_empty] = keyword[False]
keyword[while] keyword[not] identifier[a_empty] keyword[and] keyword[not] identifier[b_empty] :
keyword[while] keyword[not] identifier[a_empty] :
identifier[a] = identifier[next] ( identifier[a_it] , keyword[None] )
keyword[if] identifier[a] keyword[is] keyword[None] :
identifier[a_empty] = keyword[True]
keyword[break]
keyword[elif] identifier[a] . identifier[rank] == literal[int] :
identifier[tmp] . identifier[append] ( identifier[a] )
identifier[a] = keyword[None]
keyword[else] :
keyword[break]
keyword[while] keyword[not] identifier[b_empty] :
identifier[b] = identifier[next] ( identifier[b_it] , keyword[None] )
keyword[if] identifier[b] keyword[is] keyword[None] :
identifier[b_empty] = keyword[True]
keyword[break]
keyword[elif] identifier[b] . identifier[rank] == literal[int] :
identifier[tmp] . identifier[append] ( identifier[b] )
identifier[b] = keyword[None]
keyword[else] :
keyword[break]
keyword[if] identifier[a] keyword[is] keyword[not] keyword[None] keyword[or] identifier[b] keyword[is] keyword[not] keyword[None] :
identifier[a] . identifier[_merge_with_other_stm] ( identifier[b] )
identifier[tmp] . identifier[append] ( identifier[a] )
identifier[a] = keyword[None]
identifier[b] = keyword[None]
keyword[return] identifier[tmp]
|
def _merge_statement_lists(stmsA: List['HdlStatement'], stmsB: List['HdlStatement']) -> List['HdlStatement']:
"""
Merge two lists of statements into one
:return: list of merged statements
"""
if stmsA is None and stmsB is None:
return None # depends on [control=['if'], data=[]]
tmp = []
a_it = iter(stmsA)
b_it = iter(stmsB)
a = None
b = None
a_empty = False
b_empty = False
while not a_empty and (not b_empty):
while not a_empty:
a = next(a_it, None)
if a is None:
a_empty = True
break # depends on [control=['if'], data=[]]
elif a.rank == 0:
# simple statement does not require merging
tmp.append(a)
a = None # depends on [control=['if'], data=[]]
else:
break # depends on [control=['while'], data=[]]
while not b_empty:
b = next(b_it, None)
if b is None:
b_empty = True
break # depends on [control=['if'], data=[]]
elif b.rank == 0:
# simple statement does not require merging
tmp.append(b)
b = None # depends on [control=['if'], data=[]]
else:
break # depends on [control=['while'], data=[]]
if a is not None or b is not None:
a._merge_with_other_stm(b)
tmp.append(a)
a = None
b = None # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]]
return tmp
|
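Example — a toy run of `_merge_statement_lists` with stub statement objects, to make the interleaving concrete. `Stub` models only the two members the function touches (`rank`, `_merge_with_other_stm`) and does not reflect the real HdlStatement API; `typing.List` is assumed imported for the annotations above.

class Stub:
    def __init__(self, name, rank):
        self.name, self.rank = name, rank
        self.merged_with = None

    def _merge_with_other_stm(self, other):
        self.merged_with = other  # real statements would merge their bodies here

out = _merge_statement_lists([Stub('a0', 0), Stub('a1', 1)],
                             [Stub('b0', 0), Stub('b1', 1)])
print([s.name for s in out])     # ['a0', 'b0', 'a1'] - rank-0 items pass straight through
print(out[-1].merged_with.name)  # 'b1' - the rank-1 pair was merged into a1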
def wait_for_event(self, emptybuffer=False):
"""
Waits until a joystick event becomes available. Returns the event, as
an `InputEvent` tuple.
If *emptybuffer* is `True` (it defaults to `False`), any pending
events will be thrown away first. This is most useful if you are only
interested in "pressed" events.
"""
if emptybuffer:
while self._wait(0):
self._read()
while self._wait():
event = self._read()
if event:
return event
|
def function[wait_for_event, parameter[self, emptybuffer]]:
constant[
Waits until a joystick event becomes available. Returns the event, as
an `InputEvent` tuple.
If *emptybuffer* is `True` (it defaults to `False`), any pending
events will be thrown away first. This is most useful if you are only
interested in "pressed" events.
]
if name[emptybuffer] begin[:]
while call[name[self]._wait, parameter[constant[0]]] begin[:]
call[name[self]._read, parameter[]]
while call[name[self]._wait, parameter[]] begin[:]
variable[event] assign[=] call[name[self]._read, parameter[]]
if name[event] begin[:]
return[name[event]]
|
keyword[def] identifier[wait_for_event] ( identifier[self] , identifier[emptybuffer] = keyword[False] ):
literal[string]
keyword[if] identifier[emptybuffer] :
keyword[while] identifier[self] . identifier[_wait] ( literal[int] ):
identifier[self] . identifier[_read] ()
keyword[while] identifier[self] . identifier[_wait] ():
identifier[event] = identifier[self] . identifier[_read] ()
keyword[if] identifier[event] :
keyword[return] identifier[event]
|
def wait_for_event(self, emptybuffer=False):
"""
Waits until a joystick event becomes available. Returns the event, as
an `InputEvent` tuple.
If *emptybuffer* is `True` (it defaults to `False`), any pending
events will be thrown away first. This is most useful if you are only
interested in "pressed" events.
"""
if emptybuffer:
while self._wait(0):
self._read() # depends on [control=['while'], data=[]] # depends on [control=['if'], data=[]]
while self._wait():
event = self._read()
if event:
return event # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]]
|
def compute_ratio(x):
"""
    Compute the proportion of each class in the data.
"""
sum_ = sum(x)
ratios = []
for i in x:
ratio = i / sum_
ratios.append(ratio)
return ratios
|
def function[compute_ratio, parameter[x]]:
constant[
    Compute the proportion of each class in the data.
]
variable[sum_] assign[=] call[name[sum], parameter[name[x]]]
variable[ratios] assign[=] list[[]]
for taget[name[i]] in starred[name[x]] begin[:]
variable[ratio] assign[=] binary_operation[name[i] / name[sum_]]
call[name[ratios].append, parameter[name[ratio]]]
return[name[ratios]]
|
keyword[def] identifier[compute_ratio] ( identifier[x] ):
literal[string]
identifier[sum_] = identifier[sum] ( identifier[x] )
identifier[ratios] =[]
keyword[for] identifier[i] keyword[in] identifier[x] :
identifier[ratio] = identifier[i] / identifier[sum_]
identifier[ratios] . identifier[append] ( identifier[ratio] )
keyword[return] identifier[ratios]
|
def compute_ratio(x):
"""
    Compute the proportion of each class in the data.
"""
sum_ = sum(x)
ratios = []
for i in x:
ratio = i / sum_
ratios.append(ratio) # depends on [control=['for'], data=['i']]
return ratios
|
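Example — `compute_ratio` simply normalizes a list of counts so they sum to 1.0; an equivalent list comprehension is shown for comparison:

counts = [2, 3, 5]
print(compute_ratio(counts))        # [0.2, 0.3, 0.5]
total = sum(counts)
print([c / total for c in counts])  # same result in one line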
def get_favicon(self, article):
"""\
Extract the favicon from a website
http://en.wikipedia.org/wiki/Favicon
<link rel="shortcut icon" type="image/png" href="favicon.png" />
<link rel="icon" type="image/png" href="favicon.png" />
"""
kwargs = {'tag': 'link', 'attr': 'rel', 'value': 'icon'}
meta = self.parser.getElementsByTag(article.doc, **kwargs)
if meta:
favicon = self.parser.getAttribute(meta[0], 'href')
return favicon
return ''
|
def function[get_favicon, parameter[self, article]]:
constant[ Extract the favicon from a website
http://en.wikipedia.org/wiki/Favicon
<link rel="shortcut icon" type="image/png" href="favicon.png" />
<link rel="icon" type="image/png" href="favicon.png" />
]
variable[kwargs] assign[=] dictionary[[<ast.Constant object at 0x7da204623c40>, <ast.Constant object at 0x7da2046208e0>, <ast.Constant object at 0x7da204623700>], [<ast.Constant object at 0x7da204623730>, <ast.Constant object at 0x7da204622fb0>, <ast.Constant object at 0x7da2046229e0>]]
variable[meta] assign[=] call[name[self].parser.getElementsByTag, parameter[name[article].doc]]
if name[meta] begin[:]
variable[favicon] assign[=] call[name[self].parser.getAttribute, parameter[call[name[meta]][constant[0]], constant[href]]]
return[name[favicon]]
return[constant[]]
|
keyword[def] identifier[get_favicon] ( identifier[self] , identifier[article] ):
literal[string]
identifier[kwargs] ={ literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] }
identifier[meta] = identifier[self] . identifier[parser] . identifier[getElementsByTag] ( identifier[article] . identifier[doc] ,** identifier[kwargs] )
keyword[if] identifier[meta] :
identifier[favicon] = identifier[self] . identifier[parser] . identifier[getAttribute] ( identifier[meta] [ literal[int] ], literal[string] )
keyword[return] identifier[favicon]
keyword[return] literal[string]
|
def get_favicon(self, article):
""" Extract the favicon from a website
http://en.wikipedia.org/wiki/Favicon
<link rel="shortcut icon" type="image/png" href="favicon.png" />
<link rel="icon" type="image/png" href="favicon.png" />
"""
kwargs = {'tag': 'link', 'attr': 'rel', 'value': 'icon'}
meta = self.parser.getElementsByTag(article.doc, **kwargs)
if meta:
favicon = self.parser.getAttribute(meta[0], 'href')
return favicon # depends on [control=['if'], data=[]]
return ''
|
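Example — a self-contained stdlib variant of the same favicon lookup, for when the goose `parser` object is not available. The substring match on rel covers both rel="icon" and rel="shortcut icon"; this is a simplified sketch, not the library's actual matching logic:

from html.parser import HTMLParser

class FaviconFinder(HTMLParser):
    def __init__(self):
        super().__init__()
        self.favicon = ''

    def handle_starttag(self, tag, attrs):
        d = dict(attrs)
        if tag == 'link' and 'icon' in (d.get('rel') or '') and not self.favicon:
            self.favicon = d.get('href', '')

finder = FaviconFinder()
finder.feed('<head><link rel="shortcut icon" href="/favicon.png"></head>')
print(finder.favicon)  # /favicon.png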
def check_type(param, datatype):
"""
Make sure that param is of type datatype and return it.
If param is None, return it.
If param is an instance of datatype, return it.
If param is not an instance of datatype and is not None, cast it as
datatype and return it.
"""
if param is None:
return param
if getattr(datatype, 'clean', None) and callable(datatype.clean):
try:
return datatype.clean(param)
except ValueError:
raise BadArgumentError(param)
elif isinstance(datatype, str):
# You've given it something like `'bool'` as a string.
# This is the legacy way of doing it.
datatype = {
'str': str,
'bool': bool,
'float': float,
'date': datetime.date,
'datetime': datetime.datetime,
'timedelta': datetime.timedelta,
'json': 'json', # exception
'int': int,
}[datatype]
if datatype is str and not isinstance(param, basestring):
try:
param = str(param)
except ValueError:
param = str()
elif datatype is int and not isinstance(param, int):
try:
param = int(param)
except ValueError:
param = int()
elif datatype is bool and not isinstance(param, bool):
param = str(param).lower() in ("true", "t", "1", "y", "yes")
elif (
datatype is datetime.datetime and
not isinstance(param, datetime.datetime)
):
try:
param = dtutil.string_to_datetime(param)
except ValueError:
param = None
elif datatype is datetime.date and not isinstance(param, datetime.date):
try:
param = dtutil.string_to_datetime(param).date()
except ValueError:
param = None
elif (
datatype is datetime.timedelta and
not isinstance(param, datetime.timedelta)
):
try:
param = dtutil.strHoursToTimeDelta(param)
except ValueError:
param = None
elif datatype == "json" and isinstance(param, basestring):
try:
param = json.loads(param)
except ValueError:
param = None
return param
|
def function[check_type, parameter[param, datatype]]:
constant[
Make sure that param is of type datatype and return it.
If param is None, return it.
If param is an instance of datatype, return it.
If param is not an instance of datatype and is not None, cast it as
datatype and return it.
]
if compare[name[param] is constant[None]] begin[:]
return[name[param]]
if <ast.BoolOp object at 0x7da1b0b38310> begin[:]
<ast.Try object at 0x7da1b0b38a30>
if <ast.BoolOp object at 0x7da1b0b39330> begin[:]
<ast.Try object at 0x7da1b0b3a710>
return[name[param]]
|
keyword[def] identifier[check_type] ( identifier[param] , identifier[datatype] ):
literal[string]
keyword[if] identifier[param] keyword[is] keyword[None] :
keyword[return] identifier[param]
keyword[if] identifier[getattr] ( identifier[datatype] , literal[string] , keyword[None] ) keyword[and] identifier[callable] ( identifier[datatype] . identifier[clean] ):
keyword[try] :
keyword[return] identifier[datatype] . identifier[clean] ( identifier[param] )
keyword[except] identifier[ValueError] :
keyword[raise] identifier[BadArgumentError] ( identifier[param] )
keyword[elif] identifier[isinstance] ( identifier[datatype] , identifier[str] ):
identifier[datatype] ={
literal[string] : identifier[str] ,
literal[string] : identifier[bool] ,
literal[string] : identifier[float] ,
literal[string] : identifier[datetime] . identifier[date] ,
literal[string] : identifier[datetime] . identifier[datetime] ,
literal[string] : identifier[datetime] . identifier[timedelta] ,
literal[string] : literal[string] ,
literal[string] : identifier[int] ,
}[ identifier[datatype] ]
keyword[if] identifier[datatype] keyword[is] identifier[str] keyword[and] keyword[not] identifier[isinstance] ( identifier[param] , identifier[basestring] ):
keyword[try] :
identifier[param] = identifier[str] ( identifier[param] )
keyword[except] identifier[ValueError] :
identifier[param] = identifier[str] ()
keyword[elif] identifier[datatype] keyword[is] identifier[int] keyword[and] keyword[not] identifier[isinstance] ( identifier[param] , identifier[int] ):
keyword[try] :
identifier[param] = identifier[int] ( identifier[param] )
keyword[except] identifier[ValueError] :
identifier[param] = identifier[int] ()
keyword[elif] identifier[datatype] keyword[is] identifier[bool] keyword[and] keyword[not] identifier[isinstance] ( identifier[param] , identifier[bool] ):
identifier[param] = identifier[str] ( identifier[param] ). identifier[lower] () keyword[in] ( literal[string] , literal[string] , literal[string] , literal[string] , literal[string] )
keyword[elif] (
identifier[datatype] keyword[is] identifier[datetime] . identifier[datetime] keyword[and]
keyword[not] identifier[isinstance] ( identifier[param] , identifier[datetime] . identifier[datetime] )
):
keyword[try] :
identifier[param] = identifier[dtutil] . identifier[string_to_datetime] ( identifier[param] )
keyword[except] identifier[ValueError] :
identifier[param] = keyword[None]
keyword[elif] identifier[datatype] keyword[is] identifier[datetime] . identifier[date] keyword[and] keyword[not] identifier[isinstance] ( identifier[param] , identifier[datetime] . identifier[date] ):
keyword[try] :
identifier[param] = identifier[dtutil] . identifier[string_to_datetime] ( identifier[param] ). identifier[date] ()
keyword[except] identifier[ValueError] :
identifier[param] = keyword[None]
keyword[elif] (
identifier[datatype] keyword[is] identifier[datetime] . identifier[timedelta] keyword[and]
keyword[not] identifier[isinstance] ( identifier[param] , identifier[datetime] . identifier[timedelta] )
):
keyword[try] :
identifier[param] = identifier[dtutil] . identifier[strHoursToTimeDelta] ( identifier[param] )
keyword[except] identifier[ValueError] :
identifier[param] = keyword[None]
keyword[elif] identifier[datatype] == literal[string] keyword[and] identifier[isinstance] ( identifier[param] , identifier[basestring] ):
keyword[try] :
identifier[param] = identifier[json] . identifier[loads] ( identifier[param] )
keyword[except] identifier[ValueError] :
identifier[param] = keyword[None]
keyword[return] identifier[param]
|
def check_type(param, datatype):
"""
Make sure that param is of type datatype and return it.
If param is None, return it.
If param is an instance of datatype, return it.
If param is not an instance of datatype and is not None, cast it as
datatype and return it.
"""
if param is None:
return param # depends on [control=['if'], data=['param']]
if getattr(datatype, 'clean', None) and callable(datatype.clean):
try:
return datatype.clean(param) # depends on [control=['try'], data=[]]
except ValueError:
raise BadArgumentError(param) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
elif isinstance(datatype, str):
# You've given it something like `'bool'` as a string.
# This is the legacy way of doing it.
# exception
datatype = {'str': str, 'bool': bool, 'float': float, 'date': datetime.date, 'datetime': datetime.datetime, 'timedelta': datetime.timedelta, 'json': 'json', 'int': int}[datatype] # depends on [control=['if'], data=[]]
if datatype is str and (not isinstance(param, basestring)):
try:
param = str(param) # depends on [control=['try'], data=[]]
except ValueError:
param = str() # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
elif datatype is int and (not isinstance(param, int)):
try:
param = int(param) # depends on [control=['try'], data=[]]
except ValueError:
param = int() # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
elif datatype is bool and (not isinstance(param, bool)):
param = str(param).lower() in ('true', 't', '1', 'y', 'yes') # depends on [control=['if'], data=[]]
elif datatype is datetime.datetime and (not isinstance(param, datetime.datetime)):
try:
param = dtutil.string_to_datetime(param) # depends on [control=['try'], data=[]]
except ValueError:
param = None # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
elif datatype is datetime.date and (not isinstance(param, datetime.date)):
try:
param = dtutil.string_to_datetime(param).date() # depends on [control=['try'], data=[]]
except ValueError:
param = None # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
elif datatype is datetime.timedelta and (not isinstance(param, datetime.timedelta)):
try:
param = dtutil.strHoursToTimeDelta(param) # depends on [control=['try'], data=[]]
except ValueError:
param = None # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
elif datatype == 'json' and isinstance(param, basestring):
try:
param = json.loads(param) # depends on [control=['try'], data=[]]
except ValueError:
param = None # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
return param
|
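Example — a few illustrative calls to `check_type`. Only the stdlib-coercible branches are exercised (the date/timedelta branches need the project's dtutil helper); `datetime`, `json` and `six` are assumed imported at module level, and on Python 3 a basestring shim is needed:

basestring = str  # shim for the legacy Python 2 name used above

print(check_type(None, int))           # None - passes through untouched
print(check_type('42', 'int'))         # 42 - legacy string type names are mapped
print(check_type('Yes', bool))         # True - 'true'/'t'/'1'/'y'/'yes' count as truthy
print(check_type('{"a": 1}', 'json'))  # {'a': 1}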
def f_lock_parameters(self):
"""Locks all non-empty parameters"""
for par in self._parameters.values():
if not par.f_is_empty():
par.f_lock()
|
def function[f_lock_parameters, parameter[self]]:
constant[Locks all non-empty parameters]
for taget[name[par]] in starred[call[name[self]._parameters.values, parameter[]]] begin[:]
if <ast.UnaryOp object at 0x7da1b038b820> begin[:]
call[name[par].f_lock, parameter[]]
|
keyword[def] identifier[f_lock_parameters] ( identifier[self] ):
literal[string]
keyword[for] identifier[par] keyword[in] identifier[self] . identifier[_parameters] . identifier[values] ():
keyword[if] keyword[not] identifier[par] . identifier[f_is_empty] ():
identifier[par] . identifier[f_lock] ()
|
def f_lock_parameters(self):
"""Locks all non-empty parameters"""
for par in self._parameters.values():
if not par.f_is_empty():
par.f_lock() # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['par']]
|
def get_static_dependencies(self, dependencies=None, include_beta=None):
"""Resolves the project -> dependencies section of cumulusci.yml
to convert dynamic github dependencies into static dependencies
by inspecting the referenced repositories
Keyword arguments:
:param dependencies: a list of dependencies to resolve
:param include_beta: when true, return the latest github release,
even if pre-release; else return the latest stable release
"""
if not dependencies:
dependencies = self.project__dependencies
if not dependencies:
return []
static_dependencies = []
for dependency in dependencies:
if "github" not in dependency:
static_dependencies.append(dependency)
else:
static = self.process_github_dependency(
dependency, include_beta=include_beta
)
static_dependencies.extend(static)
return static_dependencies
|
def function[get_static_dependencies, parameter[self, dependencies, include_beta]]:
constant[Resolves the project -> dependencies section of cumulusci.yml
to convert dynamic github dependencies into static dependencies
by inspecting the referenced repositories
Keyword arguments:
:param dependencies: a list of dependencies to resolve
:param include_beta: when true, return the latest github release,
even if pre-release; else return the latest stable release
]
if <ast.UnaryOp object at 0x7da1b1662e90> begin[:]
variable[dependencies] assign[=] name[self].project__dependencies
if <ast.UnaryOp object at 0x7da1b1663730> begin[:]
return[list[[]]]
variable[static_dependencies] assign[=] list[[]]
for taget[name[dependency]] in starred[name[dependencies]] begin[:]
if compare[constant[github] <ast.NotIn object at 0x7da2590d7190> name[dependency]] begin[:]
call[name[static_dependencies].append, parameter[name[dependency]]]
return[name[static_dependencies]]
|
keyword[def] identifier[get_static_dependencies] ( identifier[self] , identifier[dependencies] = keyword[None] , identifier[include_beta] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[dependencies] :
identifier[dependencies] = identifier[self] . identifier[project__dependencies]
keyword[if] keyword[not] identifier[dependencies] :
keyword[return] []
identifier[static_dependencies] =[]
keyword[for] identifier[dependency] keyword[in] identifier[dependencies] :
keyword[if] literal[string] keyword[not] keyword[in] identifier[dependency] :
identifier[static_dependencies] . identifier[append] ( identifier[dependency] )
keyword[else] :
identifier[static] = identifier[self] . identifier[process_github_dependency] (
identifier[dependency] , identifier[include_beta] = identifier[include_beta]
)
identifier[static_dependencies] . identifier[extend] ( identifier[static] )
keyword[return] identifier[static_dependencies]
|
def get_static_dependencies(self, dependencies=None, include_beta=None):
"""Resolves the project -> dependencies section of cumulusci.yml
to convert dynamic github dependencies into static dependencies
by inspecting the referenced repositories
Keyword arguments:
:param dependencies: a list of dependencies to resolve
:param include_beta: when true, return the latest github release,
even if pre-release; else return the latest stable release
"""
if not dependencies:
dependencies = self.project__dependencies # depends on [control=['if'], data=[]]
if not dependencies:
return [] # depends on [control=['if'], data=[]]
static_dependencies = []
for dependency in dependencies:
if 'github' not in dependency:
static_dependencies.append(dependency) # depends on [control=['if'], data=['dependency']]
else:
static = self.process_github_dependency(dependency, include_beta=include_beta)
static_dependencies.extend(static) # depends on [control=['for'], data=['dependency']]
return static_dependencies
|
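Example — a toy run with a stub subclass showing how github entries are expanded in place while plain entries pass through. `process_github_dependency` is faked and every package name below is made up:

class FakeConfig:
    project__dependencies = None

    def process_github_dependency(self, dep, include_beta=None):
        # pretend the referenced repo resolves to two pinned packages
        return [{'namespace': 'base', 'version': '1.0'},
                {'namespace': 'ext', 'version': '2.1'}]

    get_static_dependencies = get_static_dependencies  # reuse the method above

deps = [{'namespace': 'util', 'version': '0.3'},
        {'github': 'https://github.com/example/repo'}]
print(FakeConfig().get_static_dependencies(deps))
# [{'namespace': 'util', 'version': '0.3'},
#  {'namespace': 'base', 'version': '1.0'},
#  {'namespace': 'ext', 'version': '2.1'}]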
def express_route_cross_connection_peerings(self):
"""Instance depends on the API version:
* 2018-02-01: :class:`ExpressRouteCrossConnectionPeeringsOperations<azure.mgmt.network.v2018_02_01.operations.ExpressRouteCrossConnectionPeeringsOperations>`
* 2018-04-01: :class:`ExpressRouteCrossConnectionPeeringsOperations<azure.mgmt.network.v2018_04_01.operations.ExpressRouteCrossConnectionPeeringsOperations>`
* 2018-06-01: :class:`ExpressRouteCrossConnectionPeeringsOperations<azure.mgmt.network.v2018_06_01.operations.ExpressRouteCrossConnectionPeeringsOperations>`
* 2018-07-01: :class:`ExpressRouteCrossConnectionPeeringsOperations<azure.mgmt.network.v2018_07_01.operations.ExpressRouteCrossConnectionPeeringsOperations>`
* 2018-08-01: :class:`ExpressRouteCrossConnectionPeeringsOperations<azure.mgmt.network.v2018_08_01.operations.ExpressRouteCrossConnectionPeeringsOperations>`
"""
api_version = self._get_api_version('express_route_cross_connection_peerings')
if api_version == '2018-02-01':
from .v2018_02_01.operations import ExpressRouteCrossConnectionPeeringsOperations as OperationClass
elif api_version == '2018-04-01':
from .v2018_04_01.operations import ExpressRouteCrossConnectionPeeringsOperations as OperationClass
elif api_version == '2018-06-01':
from .v2018_06_01.operations import ExpressRouteCrossConnectionPeeringsOperations as OperationClass
elif api_version == '2018-07-01':
from .v2018_07_01.operations import ExpressRouteCrossConnectionPeeringsOperations as OperationClass
elif api_version == '2018-08-01':
from .v2018_08_01.operations import ExpressRouteCrossConnectionPeeringsOperations as OperationClass
else:
raise NotImplementedError("APIVersion {} is not available".format(api_version))
return OperationClass(self._client, self.config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
|
def function[express_route_cross_connection_peerings, parameter[self]]:
constant[Instance depends on the API version:
* 2018-02-01: :class:`ExpressRouteCrossConnectionPeeringsOperations<azure.mgmt.network.v2018_02_01.operations.ExpressRouteCrossConnectionPeeringsOperations>`
* 2018-04-01: :class:`ExpressRouteCrossConnectionPeeringsOperations<azure.mgmt.network.v2018_04_01.operations.ExpressRouteCrossConnectionPeeringsOperations>`
* 2018-06-01: :class:`ExpressRouteCrossConnectionPeeringsOperations<azure.mgmt.network.v2018_06_01.operations.ExpressRouteCrossConnectionPeeringsOperations>`
* 2018-07-01: :class:`ExpressRouteCrossConnectionPeeringsOperations<azure.mgmt.network.v2018_07_01.operations.ExpressRouteCrossConnectionPeeringsOperations>`
* 2018-08-01: :class:`ExpressRouteCrossConnectionPeeringsOperations<azure.mgmt.network.v2018_08_01.operations.ExpressRouteCrossConnectionPeeringsOperations>`
]
variable[api_version] assign[=] call[name[self]._get_api_version, parameter[constant[express_route_cross_connection_peerings]]]
if compare[name[api_version] equal[==] constant[2018-02-01]] begin[:]
from relative_module[v2018_02_01.operations] import module[ExpressRouteCrossConnectionPeeringsOperations]
return[call[name[OperationClass], parameter[name[self]._client, name[self].config, call[name[Serializer], parameter[call[name[self]._models_dict, parameter[name[api_version]]]]], call[name[Deserializer], parameter[call[name[self]._models_dict, parameter[name[api_version]]]]]]]]
|
keyword[def] identifier[express_route_cross_connection_peerings] ( identifier[self] ):
literal[string]
identifier[api_version] = identifier[self] . identifier[_get_api_version] ( literal[string] )
keyword[if] identifier[api_version] == literal[string] :
keyword[from] . identifier[v2018_02_01] . identifier[operations] keyword[import] identifier[ExpressRouteCrossConnectionPeeringsOperations] keyword[as] identifier[OperationClass]
keyword[elif] identifier[api_version] == literal[string] :
keyword[from] . identifier[v2018_04_01] . identifier[operations] keyword[import] identifier[ExpressRouteCrossConnectionPeeringsOperations] keyword[as] identifier[OperationClass]
keyword[elif] identifier[api_version] == literal[string] :
keyword[from] . identifier[v2018_06_01] . identifier[operations] keyword[import] identifier[ExpressRouteCrossConnectionPeeringsOperations] keyword[as] identifier[OperationClass]
keyword[elif] identifier[api_version] == literal[string] :
keyword[from] . identifier[v2018_07_01] . identifier[operations] keyword[import] identifier[ExpressRouteCrossConnectionPeeringsOperations] keyword[as] identifier[OperationClass]
keyword[elif] identifier[api_version] == literal[string] :
keyword[from] . identifier[v2018_08_01] . identifier[operations] keyword[import] identifier[ExpressRouteCrossConnectionPeeringsOperations] keyword[as] identifier[OperationClass]
keyword[else] :
keyword[raise] identifier[NotImplementedError] ( literal[string] . identifier[format] ( identifier[api_version] ))
keyword[return] identifier[OperationClass] ( identifier[self] . identifier[_client] , identifier[self] . identifier[config] , identifier[Serializer] ( identifier[self] . identifier[_models_dict] ( identifier[api_version] )), identifier[Deserializer] ( identifier[self] . identifier[_models_dict] ( identifier[api_version] )))
|
def express_route_cross_connection_peerings(self):
"""Instance depends on the API version:
* 2018-02-01: :class:`ExpressRouteCrossConnectionPeeringsOperations<azure.mgmt.network.v2018_02_01.operations.ExpressRouteCrossConnectionPeeringsOperations>`
* 2018-04-01: :class:`ExpressRouteCrossConnectionPeeringsOperations<azure.mgmt.network.v2018_04_01.operations.ExpressRouteCrossConnectionPeeringsOperations>`
* 2018-06-01: :class:`ExpressRouteCrossConnectionPeeringsOperations<azure.mgmt.network.v2018_06_01.operations.ExpressRouteCrossConnectionPeeringsOperations>`
* 2018-07-01: :class:`ExpressRouteCrossConnectionPeeringsOperations<azure.mgmt.network.v2018_07_01.operations.ExpressRouteCrossConnectionPeeringsOperations>`
* 2018-08-01: :class:`ExpressRouteCrossConnectionPeeringsOperations<azure.mgmt.network.v2018_08_01.operations.ExpressRouteCrossConnectionPeeringsOperations>`
"""
api_version = self._get_api_version('express_route_cross_connection_peerings')
if api_version == '2018-02-01':
from .v2018_02_01.operations import ExpressRouteCrossConnectionPeeringsOperations as OperationClass # depends on [control=['if'], data=[]]
elif api_version == '2018-04-01':
from .v2018_04_01.operations import ExpressRouteCrossConnectionPeeringsOperations as OperationClass # depends on [control=['if'], data=[]]
elif api_version == '2018-06-01':
from .v2018_06_01.operations import ExpressRouteCrossConnectionPeeringsOperations as OperationClass # depends on [control=['if'], data=[]]
elif api_version == '2018-07-01':
from .v2018_07_01.operations import ExpressRouteCrossConnectionPeeringsOperations as OperationClass # depends on [control=['if'], data=[]]
elif api_version == '2018-08-01':
from .v2018_08_01.operations import ExpressRouteCrossConnectionPeeringsOperations as OperationClass # depends on [control=['if'], data=[]]
else:
raise NotImplementedError('APIVersion {} is not available'.format(api_version))
return OperationClass(self._client, self.config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
|
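Example — the elif chain above is really a version-to-module table plus a dynamic import. A generic sketch of that pattern (the module paths and the package argument are assumptions, not the SDK's actual layout):

import importlib

_VERSION_MODULES = {
    '2018-07-01': '.v2018_07_01.operations',
    '2018-08-01': '.v2018_08_01.operations',
}

def load_operation_class(api_version, package='azure.mgmt.network'):
    try:
        path = _VERSION_MODULES[api_version]
    except KeyError:
        raise NotImplementedError(
            'APIVersion {} is not available'.format(api_version))
    module = importlib.import_module(path, package)  # relative import within the package
    return module.ExpressRouteCrossConnectionPeeringsOperations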
def loader():
"""Load image from URL, and preprocess for Resnet."""
url = request.args.get('url') # read image URL as a request URL param
response = requests.get(url) # make request to static image file
return response.content
|
def function[loader, parameter[]]:
constant[Load image from URL, and preprocess for Resnet.]
variable[url] assign[=] call[name[request].args.get, parameter[constant[url]]]
variable[response] assign[=] call[name[requests].get, parameter[name[url]]]
return[name[response].content]
|
keyword[def] identifier[loader] ():
literal[string]
identifier[url] = identifier[request] . identifier[args] . identifier[get] ( literal[string] )
identifier[response] = identifier[requests] . identifier[get] ( identifier[url] )
keyword[return] identifier[response] . identifier[content]
|
def loader():
"""Load image from URL, and preprocess for Resnet."""
url = request.args.get('url') # read image URL as a request URL param
response = requests.get(url) # make request to static image file
return response.content
|
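Example — the handler above only works inside a request context; a minimal Flask app wiring it up (the route path and port are assumptions):

from flask import Flask, request
import requests

app = Flask(__name__)

@app.route('/load')
def loader():
    """Load image from URL, and preprocess for Resnet."""
    url = request.args.get('url')  # read image URL as a request URL param
    response = requests.get(url)   # make request to static image file
    return response.content       # Flask accepts raw bytes as a response body

if __name__ == '__main__':
    app.run(port=8080)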
def get_all(self, name, default=None):
"""make cookie python 3 version use this instead of getheaders"""
if default is None:
default = []
return self._headers.get_list(name) or default
|
def function[get_all, parameter[self, name, default]]:
    constant[Cookie handling for Python 3: use this instead of getheaders().]
if compare[name[default] is constant[None]] begin[:]
variable[default] assign[=] list[[]]
return[<ast.BoolOp object at 0x7da1b21d5360>]
|
keyword[def] identifier[get_all] ( identifier[self] , identifier[name] , identifier[default] = keyword[None] ):
literal[string]
keyword[if] identifier[default] keyword[is] keyword[None] :
identifier[default] =[]
keyword[return] identifier[self] . identifier[_headers] . identifier[get_list] ( identifier[name] ) keyword[or] identifier[default]
|
def get_all(self, name, default=None):
"""make cookie python 3 version use this instead of getheaders"""
if default is None:
default = [] # depends on [control=['if'], data=['default']]
return self._headers.get_list(name) or default
|
def run_align(*data):
"""
Prepare data to run alignment step, only once for each project
"""
work_dir = dd.get_work_dir(data[0][0])
out_dir = op.join(work_dir, "seqcluster", "prepare")
seq_out = op.join(out_dir, "seqs.fastq")
bam_dir = op.join(work_dir, "align")
new_bam_file = op.join(bam_dir, "seqs.bam")
tools = dd.get_expression_caller(data[0][0])
if not file_exists(new_bam_file):
sample = process_alignment(data[0][0], [seq_out, None])
bam_file = dd.get_work_bam(sample[0][0])
shutil.move(bam_file, new_bam_file)
shutil.move(bam_file + ".bai", new_bam_file + ".bai")
shutil.rmtree(op.join(bam_dir, sample[0][0]["rgnames"]['sample']))
for sample in data:
# sample[0]["align_bam"] = sample[0]["clean_fastq"]
sample[0]["cluster_bam"] = new_bam_file
if "mirdeep2" in tools:
novel_db = mirdeep.run(data)
return data
|
def function[run_align, parameter[]]:
constant[
Prepare data to run alignment step, only once for each project
]
variable[work_dir] assign[=] call[name[dd].get_work_dir, parameter[call[call[name[data]][constant[0]]][constant[0]]]]
variable[out_dir] assign[=] call[name[op].join, parameter[name[work_dir], constant[seqcluster], constant[prepare]]]
variable[seq_out] assign[=] call[name[op].join, parameter[name[out_dir], constant[seqs.fastq]]]
variable[bam_dir] assign[=] call[name[op].join, parameter[name[work_dir], constant[align]]]
variable[new_bam_file] assign[=] call[name[op].join, parameter[name[bam_dir], constant[seqs.bam]]]
variable[tools] assign[=] call[name[dd].get_expression_caller, parameter[call[call[name[data]][constant[0]]][constant[0]]]]
if <ast.UnaryOp object at 0x7da1b18a85b0> begin[:]
variable[sample] assign[=] call[name[process_alignment], parameter[call[call[name[data]][constant[0]]][constant[0]], list[[<ast.Name object at 0x7da1b18a9480>, <ast.Constant object at 0x7da1b18a8100>]]]]
variable[bam_file] assign[=] call[name[dd].get_work_bam, parameter[call[call[name[sample]][constant[0]]][constant[0]]]]
call[name[shutil].move, parameter[name[bam_file], name[new_bam_file]]]
call[name[shutil].move, parameter[binary_operation[name[bam_file] + constant[.bai]], binary_operation[name[new_bam_file] + constant[.bai]]]]
call[name[shutil].rmtree, parameter[call[name[op].join, parameter[name[bam_dir], call[call[call[call[name[sample]][constant[0]]][constant[0]]][constant[rgnames]]][constant[sample]]]]]]
for taget[name[sample]] in starred[name[data]] begin[:]
call[call[name[sample]][constant[0]]][constant[cluster_bam]] assign[=] name[new_bam_file]
if compare[constant[mirdeep2] in name[tools]] begin[:]
variable[novel_db] assign[=] call[name[mirdeep].run, parameter[name[data]]]
return[name[data]]
|
keyword[def] identifier[run_align] (* identifier[data] ):
literal[string]
identifier[work_dir] = identifier[dd] . identifier[get_work_dir] ( identifier[data] [ literal[int] ][ literal[int] ])
identifier[out_dir] = identifier[op] . identifier[join] ( identifier[work_dir] , literal[string] , literal[string] )
identifier[seq_out] = identifier[op] . identifier[join] ( identifier[out_dir] , literal[string] )
identifier[bam_dir] = identifier[op] . identifier[join] ( identifier[work_dir] , literal[string] )
identifier[new_bam_file] = identifier[op] . identifier[join] ( identifier[bam_dir] , literal[string] )
identifier[tools] = identifier[dd] . identifier[get_expression_caller] ( identifier[data] [ literal[int] ][ literal[int] ])
keyword[if] keyword[not] identifier[file_exists] ( identifier[new_bam_file] ):
identifier[sample] = identifier[process_alignment] ( identifier[data] [ literal[int] ][ literal[int] ],[ identifier[seq_out] , keyword[None] ])
identifier[bam_file] = identifier[dd] . identifier[get_work_bam] ( identifier[sample] [ literal[int] ][ literal[int] ])
identifier[shutil] . identifier[move] ( identifier[bam_file] , identifier[new_bam_file] )
identifier[shutil] . identifier[move] ( identifier[bam_file] + literal[string] , identifier[new_bam_file] + literal[string] )
identifier[shutil] . identifier[rmtree] ( identifier[op] . identifier[join] ( identifier[bam_dir] , identifier[sample] [ literal[int] ][ literal[int] ][ literal[string] ][ literal[string] ]))
keyword[for] identifier[sample] keyword[in] identifier[data] :
identifier[sample] [ literal[int] ][ literal[string] ]= identifier[new_bam_file]
keyword[if] literal[string] keyword[in] identifier[tools] :
identifier[novel_db] = identifier[mirdeep] . identifier[run] ( identifier[data] )
keyword[return] identifier[data]
|
def run_align(*data):
"""
Prepare data to run alignment step, only once for each project
"""
work_dir = dd.get_work_dir(data[0][0])
out_dir = op.join(work_dir, 'seqcluster', 'prepare')
seq_out = op.join(out_dir, 'seqs.fastq')
bam_dir = op.join(work_dir, 'align')
new_bam_file = op.join(bam_dir, 'seqs.bam')
tools = dd.get_expression_caller(data[0][0])
if not file_exists(new_bam_file):
sample = process_alignment(data[0][0], [seq_out, None])
bam_file = dd.get_work_bam(sample[0][0])
shutil.move(bam_file, new_bam_file)
shutil.move(bam_file + '.bai', new_bam_file + '.bai')
shutil.rmtree(op.join(bam_dir, sample[0][0]['rgnames']['sample'])) # depends on [control=['if'], data=[]]
for sample in data:
# sample[0]["align_bam"] = sample[0]["clean_fastq"]
sample[0]['cluster_bam'] = new_bam_file # depends on [control=['for'], data=['sample']]
if 'mirdeep2' in tools:
novel_db = mirdeep.run(data) # depends on [control=['if'], data=[]]
return data
|
def _tab_newline_replace(self,fromlines,tolines):
"""Returns from/to line lists with tabs expanded and newlines removed.
Instead of tab characters being replaced by the number of spaces
needed to fill in to the next tab stop, this function will fill
the space with tab characters. This is done so that the difference
algorithms can identify changes in a file when tabs are replaced by
spaces and vice versa. At the end of the HTML generation, the tab
characters will be replaced with a nonbreakable space.
"""
def expand_tabs(line):
# hide real spaces
line = line.replace(' ','\0')
# expand tabs into spaces
line = line.expandtabs(self._tabsize)
# replace spaces from expanded tabs back into tab characters
# (we'll replace them with markup after we do differencing)
line = line.replace(' ','\t')
return line.replace('\0',' ').rstrip('\n')
fromlines = [expand_tabs(line) for line in fromlines]
tolines = [expand_tabs(line) for line in tolines]
return fromlines,tolines
|
def function[_tab_newline_replace, parameter[self, fromlines, tolines]]:
constant[Returns from/to line lists with tabs expanded and newlines removed.
Instead of tab characters being replaced by the number of spaces
needed to fill in to the next tab stop, this function will fill
the space with tab characters. This is done so that the difference
algorithms can identify changes in a file when tabs are replaced by
spaces and vice versa. At the end of the HTML generation, the tab
characters will be replaced with a nonbreakable space.
]
def function[expand_tabs, parameter[line]]:
variable[line] assign[=] call[name[line].replace, parameter[constant[ ], constant[ ]]]
variable[line] assign[=] call[name[line].expandtabs, parameter[name[self]._tabsize]]
variable[line] assign[=] call[name[line].replace, parameter[constant[ ], constant[ ]]]
return[call[call[name[line].replace, parameter[constant[ ], constant[ ]]].rstrip, parameter[constant[
]]]]
variable[fromlines] assign[=] <ast.ListComp object at 0x7da2041d8ca0>
variable[tolines] assign[=] <ast.ListComp object at 0x7da2041dbc10>
return[tuple[[<ast.Name object at 0x7da2041d9e40>, <ast.Name object at 0x7da2041dada0>]]]
|
keyword[def] identifier[_tab_newline_replace] ( identifier[self] , identifier[fromlines] , identifier[tolines] ):
literal[string]
keyword[def] identifier[expand_tabs] ( identifier[line] ):
identifier[line] = identifier[line] . identifier[replace] ( literal[string] , literal[string] )
identifier[line] = identifier[line] . identifier[expandtabs] ( identifier[self] . identifier[_tabsize] )
identifier[line] = identifier[line] . identifier[replace] ( literal[string] , literal[string] )
keyword[return] identifier[line] . identifier[replace] ( literal[string] , literal[string] ). identifier[rstrip] ( literal[string] )
identifier[fromlines] =[ identifier[expand_tabs] ( identifier[line] ) keyword[for] identifier[line] keyword[in] identifier[fromlines] ]
identifier[tolines] =[ identifier[expand_tabs] ( identifier[line] ) keyword[for] identifier[line] keyword[in] identifier[tolines] ]
keyword[return] identifier[fromlines] , identifier[tolines]
|
def _tab_newline_replace(self, fromlines, tolines):
"""Returns from/to line lists with tabs expanded and newlines removed.
Instead of tab characters being replaced by the number of spaces
needed to fill in to the next tab stop, this function will fill
the space with tab characters. This is done so that the difference
algorithms can identify changes in a file when tabs are replaced by
spaces and vice versa. At the end of the HTML generation, the tab
characters will be replaced with a nonbreakable space.
"""
def expand_tabs(line):
# hide real spaces
line = line.replace(' ', '\x00')
# expand tabs into spaces
line = line.expandtabs(self._tabsize)
# replace spaces from expanded tabs back into tab characters
# (we'll replace them with markup after we do differencing)
line = line.replace(' ', '\t')
return line.replace('\x00', ' ').rstrip('\n')
fromlines = [expand_tabs(line) for line in fromlines]
tolines = [expand_tabs(line) for line in tolines]
return (fromlines, tolines)
|
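Example — a quick demonstration of the space/tab swap. With `_tabsize = 8`, a real tab expands to spaces and each of those spaces is turned back into a tab character, while pre-existing spaces survive via the NUL placeholder; a stand-in class hosts the method:

class Differ:
    _tabsize = 8
    _tab_newline_replace = _tab_newline_replace  # reuse the function above as a method

fromlines, tolines = Differ()._tab_newline_replace(['a\tb\n', 'c  d\n'],
                                                   ['a\tb\n'])
print(fromlines[0])  # 'a' followed by 7 tabs then 'b' (tab stop at column 8)
print(fromlines[1])  # 'c  d' - the two real spaces are preserved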
def add_child(self, n, parent, **attrs):
'''
API: add_child(self, n, parent, **attrs)
Description:
        Adds child n to node parent and returns Node n.
Pre:
Node with name parent should exist.
Input:
n: Child node name.
parent: Parent node name.
attrs: Attributes of node being added.
Post:
Updates Graph related graph data attributes.
Return:
Returns n Node instance.
'''
attrs['level'] = self.get_node(parent).get_attr('level') + 1
attrs['parent'] = parent
self.add_node(n, **attrs)
self.add_edge(parent, n)
return self.get_node(n)
|
def function[add_child, parameter[self, n, parent]]:
constant[
API: add_child(self, n, parent, **attrs)
Description:
        Adds child n to node parent and returns Node n.
Pre:
Node with name parent should exist.
Input:
n: Child node name.
parent: Parent node name.
attrs: Attributes of node being added.
Post:
Updates Graph related graph data attributes.
Return:
Returns n Node instance.
]
call[name[attrs]][constant[level]] assign[=] binary_operation[call[call[name[self].get_node, parameter[name[parent]]].get_attr, parameter[constant[level]]] + constant[1]]
call[name[attrs]][constant[parent]] assign[=] name[parent]
call[name[self].add_node, parameter[name[n]]]
call[name[self].add_edge, parameter[name[parent], name[n]]]
return[call[name[self].get_node, parameter[name[n]]]]
|
keyword[def] identifier[add_child] ( identifier[self] , identifier[n] , identifier[parent] ,** identifier[attrs] ):
literal[string]
identifier[attrs] [ literal[string] ]= identifier[self] . identifier[get_node] ( identifier[parent] ). identifier[get_attr] ( literal[string] )+ literal[int]
identifier[attrs] [ literal[string] ]= identifier[parent]
identifier[self] . identifier[add_node] ( identifier[n] ,** identifier[attrs] )
identifier[self] . identifier[add_edge] ( identifier[parent] , identifier[n] )
keyword[return] identifier[self] . identifier[get_node] ( identifier[n] )
|
def add_child(self, n, parent, **attrs):
"""
API: add_child(self, n, parent, **attrs)
Description:
        Adds child n to node parent and returns Node n.
Pre:
Node with name parent should exist.
Input:
n: Child node name.
parent: Parent node name.
attrs: Attributes of node being added.
Post:
Updates Graph related graph data attributes.
Return:
Returns n Node instance.
"""
attrs['level'] = self.get_node(parent).get_attr('level') + 1
attrs['parent'] = parent
self.add_node(n, **attrs)
self.add_edge(parent, n)
return self.get_node(n)
|
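Example — typical use of `add_child` on a tiny stub graph (the Graph/Node classes below model only what the method touches and are not the real API):

class Node:
    def __init__(self, name, **attrs):
        self.name, self.attrs = name, attrs

    def get_attr(self, key):
        return self.attrs[key]

class Graph:
    def __init__(self):
        self.nodes, self.edges = {}, []

    def add_node(self, n, **attrs):
        self.nodes[n] = Node(n, **attrs)

    def add_edge(self, u, v):
        self.edges.append((u, v))

    def get_node(self, n):
        return self.nodes[n]

    add_child = add_child  # reuse the method above

g = Graph()
g.add_node('root', level=0)
leaf = g.add_child('leaf', 'root')
print(leaf.get_attr('level'), leaf.get_attr('parent'))  # 1 root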
def _handle_decl_list(self, node, scope, ctxt, stream):
"""Handle For nodes
:node: TODO
:scope: TODO
:ctxt: TODO
:stream: TODO
:returns: TODO
"""
self._dlog("handling decl list")
# just handle each declaration
for decl in node.decls:
self._handle_node(decl, scope, ctxt, stream)
|
def function[_handle_decl_list, parameter[self, node, scope, ctxt, stream]]:
constant[Handle For nodes
:node: TODO
:scope: TODO
:ctxt: TODO
:stream: TODO
:returns: TODO
]
call[name[self]._dlog, parameter[constant[handling decl list]]]
for taget[name[decl]] in starred[name[node].decls] begin[:]
call[name[self]._handle_node, parameter[name[decl], name[scope], name[ctxt], name[stream]]]
|
keyword[def] identifier[_handle_decl_list] ( identifier[self] , identifier[node] , identifier[scope] , identifier[ctxt] , identifier[stream] ):
literal[string]
identifier[self] . identifier[_dlog] ( literal[string] )
keyword[for] identifier[decl] keyword[in] identifier[node] . identifier[decls] :
identifier[self] . identifier[_handle_node] ( identifier[decl] , identifier[scope] , identifier[ctxt] , identifier[stream] )
|
def _handle_decl_list(self, node, scope, ctxt, stream):
"""Handle For nodes
:node: TODO
:scope: TODO
:ctxt: TODO
:stream: TODO
:returns: TODO
"""
self._dlog('handling decl list')
# just handle each declaration
for decl in node.decls:
self._handle_node(decl, scope, ctxt, stream) # depends on [control=['for'], data=['decl']]
|
def print_inheritance(doc, stream):
# type: (List[Dict[Text, Any]], IO) -> None
"""Write a Grapviz inheritance graph for the supplied document."""
stream.write("digraph {\n")
for entry in doc:
if entry["type"] == "record":
label = name = shortname(entry["name"])
fields = entry.get("fields", [])
if fields:
label += "\\n* %s\\l" % (
"\\l* ".join(shortname(field["name"])
for field in fields))
shape = "ellipse" if entry.get("abstract") else "box"
stream.write("\"%s\" [shape=%s label=\"%s\"];\n"
% (name, shape, label))
if "extends" in entry:
for target in aslist(entry["extends"]):
stream.write("\"%s\" -> \"%s\";\n"
% (shortname(target), name))
stream.write("}\n")
|
def function[print_inheritance, parameter[doc, stream]]:
    constant[Write a Graphviz inheritance graph for the supplied document.]
call[name[stream].write, parameter[constant[digraph {
]]]
for taget[name[entry]] in starred[name[doc]] begin[:]
if compare[call[name[entry]][constant[type]] equal[==] constant[record]] begin[:]
variable[label] assign[=] call[name[shortname], parameter[call[name[entry]][constant[name]]]]
variable[fields] assign[=] call[name[entry].get, parameter[constant[fields], list[[]]]]
if name[fields] begin[:]
<ast.AugAssign object at 0x7da1b0f0c9d0>
variable[shape] assign[=] <ast.IfExp object at 0x7da1b0f0fd60>
call[name[stream].write, parameter[binary_operation[constant["%s" [shape=%s label="%s"];
] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b0fc5570>, <ast.Name object at 0x7da1b0fc7d90>, <ast.Name object at 0x7da1b0fc4e50>]]]]]
if compare[constant[extends] in name[entry]] begin[:]
for taget[name[target]] in starred[call[name[aslist], parameter[call[name[entry]][constant[extends]]]]] begin[:]
call[name[stream].write, parameter[binary_operation[constant["%s" -> "%s";
] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da1b0fc4b20>, <ast.Name object at 0x7da1b0fc5540>]]]]]
call[name[stream].write, parameter[constant[}
]]]
|
keyword[def] identifier[print_inheritance] ( identifier[doc] , identifier[stream] ):
literal[string]
identifier[stream] . identifier[write] ( literal[string] )
keyword[for] identifier[entry] keyword[in] identifier[doc] :
keyword[if] identifier[entry] [ literal[string] ]== literal[string] :
identifier[label] = identifier[name] = identifier[shortname] ( identifier[entry] [ literal[string] ])
identifier[fields] = identifier[entry] . identifier[get] ( literal[string] ,[])
keyword[if] identifier[fields] :
identifier[label] += literal[string] %(
literal[string] . identifier[join] ( identifier[shortname] ( identifier[field] [ literal[string] ])
keyword[for] identifier[field] keyword[in] identifier[fields] ))
identifier[shape] = literal[string] keyword[if] identifier[entry] . identifier[get] ( literal[string] ) keyword[else] literal[string]
identifier[stream] . identifier[write] ( literal[string]
%( identifier[name] , identifier[shape] , identifier[label] ))
keyword[if] literal[string] keyword[in] identifier[entry] :
keyword[for] identifier[target] keyword[in] identifier[aslist] ( identifier[entry] [ literal[string] ]):
identifier[stream] . identifier[write] ( literal[string]
%( identifier[shortname] ( identifier[target] ), identifier[name] ))
identifier[stream] . identifier[write] ( literal[string] )
|
def print_inheritance(doc, stream):
# type: (List[Dict[Text, Any]], IO) -> None
    'Write a Graphviz inheritance graph for the supplied document.'
stream.write('digraph {\n')
for entry in doc:
if entry['type'] == 'record':
label = name = shortname(entry['name'])
fields = entry.get('fields', [])
if fields:
label += '\\n* %s\\l' % '\\l* '.join((shortname(field['name']) for field in fields)) # depends on [control=['if'], data=[]]
shape = 'ellipse' if entry.get('abstract') else 'box'
stream.write('"%s" [shape=%s label="%s"];\n' % (name, shape, label))
if 'extends' in entry:
for target in aslist(entry['extends']):
stream.write('"%s" -> "%s";\n' % (shortname(target), name)) # depends on [control=['for'], data=['target']] # depends on [control=['if'], data=['entry']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['entry']]
stream.write('}\n')
|
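Example — a tiny end-to-end run of `print_inheritance`. `shortname` and `aslist` are stubbed with minimal equivalents of the schema-salad helpers, so the DOT output here is only indicative:

import io

def shortname(name):  # stub: keep the fragment after the last '/' or '#'
    return name.split('/')[-1].split('#')[-1]

def aslist(value):    # stub: wrap a scalar in a list
    return value if isinstance(value, list) else [value]

doc = [
    {'type': 'record', 'name': 'Base', 'abstract': True},
    {'type': 'record', 'name': 'Child', 'extends': 'Base',
     'fields': [{'name': 'Child/id'}]},
]
out = io.StringIO()
print_inheritance(doc, out)
print(out.getvalue())
# digraph {
# "Base" [shape=ellipse label="Base"];
# "Child" [shape=box label="Child\n* id\l"];
# "Base" -> "Child";
# }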
def _deserialize_primitive(data, klass):
"""Deserializes to primitive type.
:param data: data to deserialize.
:param klass: class literal.
:return: int, long, float, str, bool.
:rtype: int | long | float | str | bool
"""
try:
value = klass(data)
except UnicodeEncodeError:
value = six.u(data)
except TypeError:
value = data
return value
|
def function[_deserialize_primitive, parameter[data, klass]]:
constant[Deserializes to primitive type.
:param data: data to deserialize.
:param klass: class literal.
:return: int, long, float, str, bool.
:rtype: int | long | float | str | bool
]
<ast.Try object at 0x7da207f01600>
return[name[value]]
|
keyword[def] identifier[_deserialize_primitive] ( identifier[data] , identifier[klass] ):
literal[string]
keyword[try] :
identifier[value] = identifier[klass] ( identifier[data] )
keyword[except] identifier[UnicodeEncodeError] :
identifier[value] = identifier[six] . identifier[u] ( identifier[data] )
keyword[except] identifier[TypeError] :
identifier[value] = identifier[data]
keyword[return] identifier[value]
|
def _deserialize_primitive(data, klass):
"""Deserializes to primitive type.
:param data: data to deserialize.
:param klass: class literal.
:return: int, long, float, str, bool.
:rtype: int | long | float | str | bool
"""
try:
value = klass(data) # depends on [control=['try'], data=[]]
except UnicodeEncodeError:
value = six.u(data) # depends on [control=['except'], data=[]]
except TypeError:
value = data # depends on [control=['except'], data=[]]
return value
|
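Example — illustrative calls. Note that only UnicodeEncodeError and TypeError are caught, so a ValueError such as int('abc') propagates to the caller; `six` is assumed imported:

print(_deserialize_primitive('7', int))   # 7
print(_deserialize_primitive(3.9, int))   # 3 - plain truncation by int()
print(_deserialize_primitive(None, int))  # None - int(None) raises TypeError, data returned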
def _tf_predict(model_dir, input_csvlines):
"""Prediction with a tf savedmodel.
Args:
model_dir: directory that contains a saved model
input_csvlines: list of csv strings
Returns:
Dict in the form tensor_name:prediction_list. Note that the value is always
a list, even if there was only 1 row in input_csvlines.
"""
with tf.Graph().as_default(), tf.Session() as sess:
input_alias_map, output_alias_map = _tf_load_model(sess, model_dir)
csv_tensor_name = list(input_alias_map.values())[0]
results = sess.run(fetches=output_alias_map,
feed_dict={csv_tensor_name: input_csvlines})
# convert any scalar values to a list. This may happen when there is one
# example in input_csvlines and the model uses tf.squeeze on the output
# tensor.
if len(input_csvlines) == 1:
for k, v in six.iteritems(results):
if not isinstance(v, (list, np.ndarray)):
results[k] = [v]
# Convert bytes to string. In python3 the results may be bytes.
for k, v in six.iteritems(results):
if any(isinstance(x, bytes) for x in v):
results[k] = [x.decode('utf-8') for x in v]
return results
|
def function[_tf_predict, parameter[model_dir, input_csvlines]]:
constant[Prediction with a tf savedmodel.
Args:
model_dir: directory that contains a saved model
input_csvlines: list of csv strings
Returns:
Dict in the form tensor_name:prediction_list. Note that the value is always
a list, even if there was only 1 row in input_csvlines.
]
with call[call[name[tf].Graph, parameter[]].as_default, parameter[]] begin[:]
<ast.Tuple object at 0x7da20c6aa740> assign[=] call[name[_tf_load_model], parameter[name[sess], name[model_dir]]]
variable[csv_tensor_name] assign[=] call[call[name[list], parameter[call[name[input_alias_map].values, parameter[]]]]][constant[0]]
variable[results] assign[=] call[name[sess].run, parameter[]]
if compare[call[name[len], parameter[name[input_csvlines]]] equal[==] constant[1]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da18ede6bc0>, <ast.Name object at 0x7da18ede5ba0>]]] in starred[call[name[six].iteritems, parameter[name[results]]]] begin[:]
if <ast.UnaryOp object at 0x7da18ede4e20> begin[:]
call[name[results]][name[k]] assign[=] list[[<ast.Name object at 0x7da18ede4040>]]
for taget[tuple[[<ast.Name object at 0x7da18ede5060>, <ast.Name object at 0x7da18ede7970>]]] in starred[call[name[six].iteritems, parameter[name[results]]]] begin[:]
if call[name[any], parameter[<ast.GeneratorExp object at 0x7da18ede42e0>]] begin[:]
call[name[results]][name[k]] assign[=] <ast.ListComp object at 0x7da18ede7e20>
return[name[results]]
|
keyword[def] identifier[_tf_predict] ( identifier[model_dir] , identifier[input_csvlines] ):
literal[string]
keyword[with] identifier[tf] . identifier[Graph] (). identifier[as_default] (), identifier[tf] . identifier[Session] () keyword[as] identifier[sess] :
identifier[input_alias_map] , identifier[output_alias_map] = identifier[_tf_load_model] ( identifier[sess] , identifier[model_dir] )
identifier[csv_tensor_name] = identifier[list] ( identifier[input_alias_map] . identifier[values] ())[ literal[int] ]
identifier[results] = identifier[sess] . identifier[run] ( identifier[fetches] = identifier[output_alias_map] ,
identifier[feed_dict] ={ identifier[csv_tensor_name] : identifier[input_csvlines] })
keyword[if] identifier[len] ( identifier[input_csvlines] )== literal[int] :
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[six] . identifier[iteritems] ( identifier[results] ):
keyword[if] keyword[not] identifier[isinstance] ( identifier[v] ,( identifier[list] , identifier[np] . identifier[ndarray] )):
identifier[results] [ identifier[k] ]=[ identifier[v] ]
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[six] . identifier[iteritems] ( identifier[results] ):
keyword[if] identifier[any] ( identifier[isinstance] ( identifier[x] , identifier[bytes] ) keyword[for] identifier[x] keyword[in] identifier[v] ):
identifier[results] [ identifier[k] ]=[ identifier[x] . identifier[decode] ( literal[string] ) keyword[for] identifier[x] keyword[in] identifier[v] ]
keyword[return] identifier[results]
|
def _tf_predict(model_dir, input_csvlines):
"""Prediction with a tf savedmodel.
Args:
model_dir: directory that contains a saved model
input_csvlines: list of csv strings
Returns:
Dict in the form tensor_name:prediction_list. Note that the value is always
a list, even if there was only 1 row in input_csvlines.
"""
with tf.Graph().as_default(), tf.Session() as sess:
(input_alias_map, output_alias_map) = _tf_load_model(sess, model_dir)
csv_tensor_name = list(input_alias_map.values())[0]
results = sess.run(fetches=output_alias_map, feed_dict={csv_tensor_name: input_csvlines}) # depends on [control=['with'], data=[]]
# convert any scalar values to a list. This may happen when there is one
# example in input_csvlines and the model uses tf.squeeze on the output
# tensor.
if len(input_csvlines) == 1:
for (k, v) in six.iteritems(results):
if not isinstance(v, (list, np.ndarray)):
results[k] = [v] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
# Convert bytes to string. In python3 the results may be bytes.
for (k, v) in six.iteritems(results):
if any((isinstance(x, bytes) for x in v)):
results[k] = [x.decode('utf-8') for x in v] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return results
|
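# Hedged usage sketch for _tf_predict. The SavedModel directory and CSV rows
# below are hypothetical; the function assumes TF 1.x (tf.Session) and a model
# whose single input tensor accepts raw CSV strings.
csv_rows = ['5.1,3.5,1.4,0.2', '6.2,2.9,4.3,1.3']          # invented feature rows
predictions = _tf_predict('/tmp/trained_model', csv_rows)  # hypothetical path
for tensor_name, values in predictions.items():
    print(tensor_name, values)  # every value is a list, one entry per input row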
def __industry_code(self):
''' import industry_code '''
csv_path = os.path.join(os.path.dirname(__file__),
self.industry_code_files)
with open(csv_path) as csv_file:
csv_data = csv.reader(csv_file)
result = {}
for i in csv_data:
result[i[0]] = i[1].decode('utf-8')
return result
|
def function[__industry_code, parameter[self]]:
constant[ import industry_code ]
variable[csv_path] assign[=] call[name[os].path.join, parameter[call[name[os].path.dirname, parameter[name[__file__]]], name[self].industry_code_files]]
with call[name[open], parameter[name[csv_path]]] begin[:]
variable[csv_data] assign[=] call[name[csv].reader, parameter[name[csv_file]]]
variable[result] assign[=] dictionary[[], []]
for taget[name[i]] in starred[name[csv_data]] begin[:]
call[name[result]][call[name[i]][constant[0]]] assign[=] call[call[name[i]][constant[1]].decode, parameter[constant[utf-8]]]
return[name[result]]
|
keyword[def] identifier[__industry_code] ( identifier[self] ):
literal[string]
identifier[csv_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[os] . identifier[path] . identifier[dirname] ( identifier[__file__] ),
identifier[self] . identifier[industry_code_files] )
keyword[with] identifier[open] ( identifier[csv_path] ) keyword[as] identifier[csv_file] :
identifier[csv_data] = identifier[csv] . identifier[reader] ( identifier[csv_file] )
identifier[result] ={}
keyword[for] identifier[i] keyword[in] identifier[csv_data] :
identifier[result] [ identifier[i] [ literal[int] ]]= identifier[i] [ literal[int] ]. identifier[decode] ( literal[string] )
keyword[return] identifier[result]
|
def __industry_code(self):
""" import industry_code """
csv_path = os.path.join(os.path.dirname(__file__), self.industry_code_files)
with open(csv_path) as csv_file:
csv_data = csv.reader(csv_file)
result = {}
for i in csv_data:
result[i[0]] = i[1].decode('utf-8') # depends on [control=['for'], data=['i']]
return result # depends on [control=['with'], data=['csv_file']]
|
def lock(self, lock_name, timeout=900):
"""
Attempt to use lock and unlock, which will work if the Cache is Redis,
but fall back to a memcached-compliant add/delete approach.
If the Jobtastic Cache isn't Redis or Memcache, or another product
with a compatible lock or add/delete API, then a custom locking function
will be required. However, Redis and Memcache are expected to account for
the vast majority of installations.
See:
- http://loose-bits.com/2010/10/distributed-task-locking-in-celery.html
- http://celery.readthedocs.org/en/latest/tutorials/task-cookbook.html#ensuring-a-task-is-only-executed-one-at-a-time # NOQA
"""
# Try Redis first
try:
try:
lock = self.cache.lock
except AttributeError:
try:
# Possibly using old Django-Redis
lock = self.cache.client.lock
except AttributeError:
# Possibly using Werkzeug + Redis
lock = self.cache._client.lock
have_lock = False
lock = lock(lock_name, timeout=timeout)
try:
have_lock = lock.acquire(blocking=True)
if have_lock:
yield
finally:
if have_lock:
lock.release()
except AttributeError:
# No lock method on the cache, so fall back to add
have_lock = False
try:
while not have_lock:
have_lock = self.cache.add(lock_name, 'locked', timeout)
if have_lock:
yield
finally:
if have_lock:
self.cache.delete(lock_name)
|
def function[lock, parameter[self, lock_name, timeout]]:
constant[
Attempt to use lock and unlock, which will work if the Cache is Redis,
but fall back to a memcached-compliant add/delete approach.
If the Jobtastic Cache isn't Redis or Memcache, or another product
with a compatible lock or add/delete API, then a custom locking function
will be required. However, Redis and Memcache are expected to account for
the vast majority of installations.
See:
- http://loose-bits.com/2010/10/distributed-task-locking-in-celery.html
- http://celery.readthedocs.org/en/latest/tutorials/task-cookbook.html#ensuring-a-task-is-only-executed-one-at-a-time # NOQA
]
<ast.Try object at 0x7da20c7cb4c0>
|
keyword[def] identifier[lock] ( identifier[self] , identifier[lock_name] , identifier[timeout] = literal[int] ):
literal[string]
keyword[try] :
keyword[try] :
identifier[lock] = identifier[self] . identifier[cache] . identifier[lock]
keyword[except] identifier[AttributeError] :
keyword[try] :
identifier[lock] = identifier[self] . identifier[cache] . identifier[client] . identifier[lock]
keyword[except] identifier[AttributeError] :
identifier[lock] = identifier[self] . identifier[cache] . identifier[_client] . identifier[lock]
identifier[have_lock] = keyword[False]
identifier[lock] = identifier[lock] ( identifier[lock_name] , identifier[timeout] = identifier[timeout] )
keyword[try] :
identifier[have_lock] = identifier[lock] . identifier[acquire] ( identifier[blocking] = keyword[True] )
keyword[if] identifier[have_lock] :
keyword[yield]
keyword[finally] :
keyword[if] identifier[have_lock] :
identifier[lock] . identifier[release] ()
keyword[except] identifier[AttributeError] :
identifier[have_lock] = keyword[False]
keyword[try] :
keyword[while] keyword[not] identifier[have_lock] :
identifier[have_lock] = identifier[self] . identifier[cache] . identifier[add] ( identifier[lock_name] , literal[string] , identifier[timeout] )
keyword[if] identifier[have_lock] :
keyword[yield]
keyword[finally] :
keyword[if] identifier[have_lock] :
identifier[self] . identifier[cache] . identifier[delete] ( identifier[lock_name] )
|
def lock(self, lock_name, timeout=900):
"""
Attempt to use lock and unlock, which will work if the Cache is Redis,
but fall back to a memcached-compliant add/delete approach.
If the Jobtastic Cache isn't Redis or Memcache, or another product
with a compatible lock or add/delete API, then a custom locking function
will be required. However, Redis and Memcache are expected to account for
the vast majority of installations.
See:
- http://loose-bits.com/2010/10/distributed-task-locking-in-celery.html
- http://celery.readthedocs.org/en/latest/tutorials/task-cookbook.html#ensuring-a-task-is-only-executed-one-at-a-time # NOQA
"""
# Try Redis first
try:
try:
lock = self.cache.lock # depends on [control=['try'], data=[]]
except AttributeError:
try:
# Possibly using old Django-Redis
lock = self.cache.client.lock # depends on [control=['try'], data=[]]
except AttributeError:
# Possibly using Werkzeug + Redis
lock = self.cache._client.lock # depends on [control=['except'], data=[]] # depends on [control=['except'], data=[]]
have_lock = False
lock = lock(lock_name, timeout=timeout)
try:
have_lock = lock.acquire(blocking=True)
if have_lock:
yield # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
finally:
if have_lock:
lock.release() # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except AttributeError:
# No lock method on the cache, so fall back to add
have_lock = False
try:
while not have_lock:
have_lock = self.cache.add(lock_name, 'locked', timeout) # depends on [control=['while'], data=[]]
if have_lock:
yield # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
finally:
if have_lock:
self.cache.delete(lock_name) # depends on [control=['if'], data=[]] # depends on [control=['except'], data=[]]
|
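# Hedged usage sketch for `lock`. It is written as a generator, so this sketch
# assumes it is meant to be wrapped with contextlib.contextmanager (that
# decorator is not visible in this excerpt). The toy cache below offers only
# the memcached-style add/delete API, which exercises the fallback branch.
from contextlib import contextmanager

class ToyCache(object):
    """add() succeeds only for missing keys; delete() releases them."""
    def __init__(self):
        self._data = {}
    def add(self, key, value, timeout):
        if key in self._data:
            return False
        self._data[key] = value
        return True
    def delete(self, key):
        self._data.pop(key, None)

class ToyHolder(object):
    lock = contextmanager(lock)  # reuse the generator defined above
    def __init__(self):
        self.cache = ToyCache()

holder = ToyHolder()
with holder.lock('refresh-report', timeout=60):
    pass  # only one holder of 'refresh-report' may run this block at a time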
def set(self, dct) :
"""Set the store using a dictionary"""
# if not self.mustValidate :
# self.store = dct
# self.patchStore = dct
# return
for field, value in dct.items() :
if field not in self.collection.arangoPrivates :
if isinstance(value, dict) :
if field in self.validators and isinstance(self.validators[field], dict):
vals = self.validators[field]
else :
vals = {}
self[field] = DocumentStore(self.collection, validators = vals, initDct = value, patch = self.patching, subStore=True, validateInit=self.validateInit)
self.subStores[field] = self.store[field]
else :
self[field] = value
|
def function[set, parameter[self, dct]]:
constant[Set the store using a dictionary]
for taget[tuple[[<ast.Name object at 0x7da1b0de3580>, <ast.Name object at 0x7da1b0de3250>]]] in starred[call[name[dct].items, parameter[]]] begin[:]
if compare[name[field] <ast.NotIn object at 0x7da2590d7190> name[self].collection.arangoPrivates] begin[:]
if call[name[isinstance], parameter[name[value], name[dict]]] begin[:]
if <ast.BoolOp object at 0x7da1b0de31c0> begin[:]
variable[vals] assign[=] call[name[self].validators][name[field]]
call[name[self]][name[field]] assign[=] call[name[DocumentStore], parameter[name[self].collection]]
call[name[self].subStores][name[field]] assign[=] call[name[self].store][name[field]]
|
keyword[def] identifier[set] ( identifier[self] , identifier[dct] ):
literal[string]
keyword[for] identifier[field] , identifier[value] keyword[in] identifier[dct] . identifier[items] ():
keyword[if] identifier[field] keyword[not] keyword[in] identifier[self] . identifier[collection] . identifier[arangoPrivates] :
keyword[if] identifier[isinstance] ( identifier[value] , identifier[dict] ):
keyword[if] identifier[field] keyword[in] identifier[self] . identifier[validators] keyword[and] identifier[isinstance] ( identifier[self] . identifier[validators] [ identifier[field] ], identifier[dict] ):
identifier[vals] = identifier[self] . identifier[validators] [ identifier[field] ]
keyword[else] :
identifier[vals] ={}
identifier[self] [ identifier[field] ]= identifier[DocumentStore] ( identifier[self] . identifier[collection] , identifier[validators] = identifier[vals] , identifier[initDct] = identifier[value] , identifier[patch] = identifier[self] . identifier[patching] , identifier[subStore] = keyword[True] , identifier[validateInit] = identifier[self] . identifier[validateInit] )
identifier[self] . identifier[subStores] [ identifier[field] ]= identifier[self] . identifier[store] [ identifier[field] ]
keyword[else] :
identifier[self] [ identifier[field] ]= identifier[value]
|
def set(self, dct):
"""Set the store using a dictionary"""
# if not self.mustValidate :
# self.store = dct
# self.patchStore = dct
# return
for (field, value) in dct.items():
if field not in self.collection.arangoPrivates:
if isinstance(value, dict):
if field in self.validators and isinstance(self.validators[field], dict):
vals = self.validators[field] # depends on [control=['if'], data=[]]
else:
vals = {}
self[field] = DocumentStore(self.collection, validators=vals, initDct=value, patch=self.patching, subStore=True, validateInit=self.validateInit)
self.subStores[field] = self.store[field] # depends on [control=['if'], data=[]]
else:
self[field] = value # depends on [control=['if'], data=['field']] # depends on [control=['for'], data=[]]
|
def on_audio_adapter_change(self, audio_adapter):
"""Triggerd when settings of the audio adapter of the
associated virtual machine have changed.
in audio_adapter of type :class:`IAudioAdapter`
raises :class:`VBoxErrorInvalidVmState`
Session state prevents operation.
raises :class:`VBoxErrorInvalidObjectState`
Session type prevents operation.
"""
if not isinstance(audio_adapter, IAudioAdapter):
raise TypeError("audio_adapter can only be an instance of type IAudioAdapter")
self._call("onAudioAdapterChange",
in_p=[audio_adapter])
|
def function[on_audio_adapter_change, parameter[self, audio_adapter]]:
    constant[Triggered when settings of the audio adapter of the
associated virtual machine have changed.
in audio_adapter of type :class:`IAudioAdapter`
raises :class:`VBoxErrorInvalidVmState`
Session state prevents operation.
raises :class:`VBoxErrorInvalidObjectState`
Session type prevents operation.
]
if <ast.UnaryOp object at 0x7da20e9b0e50> begin[:]
<ast.Raise object at 0x7da20e9b1d80>
call[name[self]._call, parameter[constant[onAudioAdapterChange]]]
|
keyword[def] identifier[on_audio_adapter_change] ( identifier[self] , identifier[audio_adapter] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[audio_adapter] , identifier[IAudioAdapter] ):
keyword[raise] identifier[TypeError] ( literal[string] )
identifier[self] . identifier[_call] ( literal[string] ,
identifier[in_p] =[ identifier[audio_adapter] ])
|
def on_audio_adapter_change(self, audio_adapter):
"""Triggerd when settings of the audio adapter of the
associated virtual machine have changed.
in audio_adapter of type :class:`IAudioAdapter`
raises :class:`VBoxErrorInvalidVmState`
Session state prevents operation.
raises :class:`VBoxErrorInvalidObjectState`
Session type prevents operation.
"""
if not isinstance(audio_adapter, IAudioAdapter):
raise TypeError('audio_adapter can only be an instance of type IAudioAdapter') # depends on [control=['if'], data=[]]
self._call('onAudioAdapterChange', in_p=[audio_adapter])
|
def parse(string, language=None):
"""
Return a solution to the equation in the input string.
"""
if language:
string = replace_word_tokens(string, language)
tokens = tokenize(string)
postfix = to_postfix(tokens)
return evaluate_postfix(postfix)
|
def function[parse, parameter[string, language]]:
constant[
Return a solution to the equation in the input string.
]
if name[language] begin[:]
variable[string] assign[=] call[name[replace_word_tokens], parameter[name[string], name[language]]]
variable[tokens] assign[=] call[name[tokenize], parameter[name[string]]]
variable[postfix] assign[=] call[name[to_postfix], parameter[name[tokens]]]
return[call[name[evaluate_postfix], parameter[name[postfix]]]]
|
keyword[def] identifier[parse] ( identifier[string] , identifier[language] = keyword[None] ):
literal[string]
keyword[if] identifier[language] :
identifier[string] = identifier[replace_word_tokens] ( identifier[string] , identifier[language] )
identifier[tokens] = identifier[tokenize] ( identifier[string] )
identifier[postfix] = identifier[to_postfix] ( identifier[tokens] )
keyword[return] identifier[evaluate_postfix] ( identifier[postfix] )
|
def parse(string, language=None):
"""
Return a solution to the equation in the input string.
"""
if language:
string = replace_word_tokens(string, language) # depends on [control=['if'], data=[]]
tokens = tokenize(string)
postfix = to_postfix(tokens)
return evaluate_postfix(postfix)
|
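# Hedged usage sketch for parse. The token grammar lives in tokenize /
# to_postfix / evaluate_postfix (defined elsewhere in this module), and the
# language code below is hypothetical.
print(parse('3 + 4 * 2'))                        # -> 11, assuming usual operator precedence
print(parse('three plus four', language='ENG'))  # word tokens replaced first (hypothetical code)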
def tryCComment(self, block):
"""C comment checking. If the previous line begins with a "/*" or a "* ", then
return its leading white spaces + ' *' + the white spaces after the *
        return: filler string or None, if not in a C comment
"""
indentation = None
prevNonEmptyBlock = self._prevNonEmptyBlock(block)
if not prevNonEmptyBlock.isValid():
return None
prevNonEmptyBlockText = prevNonEmptyBlock.text()
if prevNonEmptyBlockText.endswith('*/'):
try:
foundBlock, notUsedColumn = self.findTextBackward(prevNonEmptyBlock, prevNonEmptyBlock.length(), '/*')
except ValueError:
foundBlock = None
if foundBlock is not None:
dbg("tryCComment: success (1) in line %d" % foundBlock.blockNumber())
return self._lineIndent(foundBlock.text())
if prevNonEmptyBlock != block.previous():
            # an empty line was in between, so do not copy the "*" character
return None
blockTextStripped = block.text().strip()
prevBlockTextStripped = prevNonEmptyBlockText.strip()
if prevBlockTextStripped.startswith('/*') and not '*/' in prevBlockTextStripped:
indentation = self._blockIndent(prevNonEmptyBlock)
if CFG_AUTO_INSERT_STAR:
# only add '*', if there is none yet.
indentation += ' '
if not blockTextStripped.endswith('*'):
indentation += '*'
secondCharIsSpace = len(blockTextStripped) > 1 and blockTextStripped[1].isspace()
if not secondCharIsSpace and \
not blockTextStripped.endswith("*/"):
indentation += ' '
dbg("tryCComment: success (2) in line %d" % block.blockNumber())
return indentation
elif prevBlockTextStripped.startswith('*') and \
(len(prevBlockTextStripped) == 1 or prevBlockTextStripped[1].isspace()):
# in theory, we could search for opening /*, and use its indentation
# and then one alignment character. Let's not do this for now, though.
indentation = self._lineIndent(prevNonEmptyBlockText)
# only add '*', if there is none yet.
if CFG_AUTO_INSERT_STAR and not blockTextStripped.startswith('*'):
indentation += '*'
if len(blockTextStripped) < 2 or not blockTextStripped[1].isspace():
indentation += ' '
dbg("tryCComment: success (2) in line %d" % block.blockNumber())
return indentation
return None
|
def function[tryCComment, parameter[self, block]]:
constant[C comment checking. If the previous line begins with a "/*" or a "* ", then
return its leading white spaces + ' *' + the white spaces after the *
    return: filler string or None, if not in a C comment
]
variable[indentation] assign[=] constant[None]
variable[prevNonEmptyBlock] assign[=] call[name[self]._prevNonEmptyBlock, parameter[name[block]]]
if <ast.UnaryOp object at 0x7da20e9b02e0> begin[:]
return[constant[None]]
variable[prevNonEmptyBlockText] assign[=] call[name[prevNonEmptyBlock].text, parameter[]]
if call[name[prevNonEmptyBlockText].endswith, parameter[constant[*/]]] begin[:]
<ast.Try object at 0x7da20e9b0280>
if compare[name[foundBlock] is_not constant[None]] begin[:]
call[name[dbg], parameter[binary_operation[constant[tryCComment: success (1) in line %d] <ast.Mod object at 0x7da2590d6920> call[name[foundBlock].blockNumber, parameter[]]]]]
return[call[name[self]._lineIndent, parameter[call[name[foundBlock].text, parameter[]]]]]
if compare[name[prevNonEmptyBlock] not_equal[!=] call[name[block].previous, parameter[]]] begin[:]
return[constant[None]]
variable[blockTextStripped] assign[=] call[call[name[block].text, parameter[]].strip, parameter[]]
variable[prevBlockTextStripped] assign[=] call[name[prevNonEmptyBlockText].strip, parameter[]]
if <ast.BoolOp object at 0x7da18eb57730> begin[:]
variable[indentation] assign[=] call[name[self]._blockIndent, parameter[name[prevNonEmptyBlock]]]
if name[CFG_AUTO_INSERT_STAR] begin[:]
<ast.AugAssign object at 0x7da18eb56110>
if <ast.UnaryOp object at 0x7da18eb54460> begin[:]
<ast.AugAssign object at 0x7da1b057abf0>
variable[secondCharIsSpace] assign[=] <ast.BoolOp object at 0x7da1b057a3b0>
if <ast.BoolOp object at 0x7da1b057a590> begin[:]
<ast.AugAssign object at 0x7da1b057a350>
call[name[dbg], parameter[binary_operation[constant[tryCComment: success (2) in line %d] <ast.Mod object at 0x7da2590d6920> call[name[block].blockNumber, parameter[]]]]]
return[name[indentation]]
return[constant[None]]
|
keyword[def] identifier[tryCComment] ( identifier[self] , identifier[block] ):
literal[string]
identifier[indentation] = keyword[None]
identifier[prevNonEmptyBlock] = identifier[self] . identifier[_prevNonEmptyBlock] ( identifier[block] )
keyword[if] keyword[not] identifier[prevNonEmptyBlock] . identifier[isValid] ():
keyword[return] keyword[None]
identifier[prevNonEmptyBlockText] = identifier[prevNonEmptyBlock] . identifier[text] ()
keyword[if] identifier[prevNonEmptyBlockText] . identifier[endswith] ( literal[string] ):
keyword[try] :
identifier[foundBlock] , identifier[notUsedColumn] = identifier[self] . identifier[findTextBackward] ( identifier[prevNonEmptyBlock] , identifier[prevNonEmptyBlock] . identifier[length] (), literal[string] )
keyword[except] identifier[ValueError] :
identifier[foundBlock] = keyword[None]
keyword[if] identifier[foundBlock] keyword[is] keyword[not] keyword[None] :
identifier[dbg] ( literal[string] % identifier[foundBlock] . identifier[blockNumber] ())
keyword[return] identifier[self] . identifier[_lineIndent] ( identifier[foundBlock] . identifier[text] ())
keyword[if] identifier[prevNonEmptyBlock] != identifier[block] . identifier[previous] ():
keyword[return] keyword[None]
identifier[blockTextStripped] = identifier[block] . identifier[text] (). identifier[strip] ()
identifier[prevBlockTextStripped] = identifier[prevNonEmptyBlockText] . identifier[strip] ()
keyword[if] identifier[prevBlockTextStripped] . identifier[startswith] ( literal[string] ) keyword[and] keyword[not] literal[string] keyword[in] identifier[prevBlockTextStripped] :
identifier[indentation] = identifier[self] . identifier[_blockIndent] ( identifier[prevNonEmptyBlock] )
keyword[if] identifier[CFG_AUTO_INSERT_STAR] :
identifier[indentation] += literal[string]
keyword[if] keyword[not] identifier[blockTextStripped] . identifier[endswith] ( literal[string] ):
identifier[indentation] += literal[string]
identifier[secondCharIsSpace] = identifier[len] ( identifier[blockTextStripped] )> literal[int] keyword[and] identifier[blockTextStripped] [ literal[int] ]. identifier[isspace] ()
keyword[if] keyword[not] identifier[secondCharIsSpace] keyword[and] keyword[not] identifier[blockTextStripped] . identifier[endswith] ( literal[string] ):
identifier[indentation] += literal[string]
identifier[dbg] ( literal[string] % identifier[block] . identifier[blockNumber] ())
keyword[return] identifier[indentation]
keyword[elif] identifier[prevBlockTextStripped] . identifier[startswith] ( literal[string] ) keyword[and] ( identifier[len] ( identifier[prevBlockTextStripped] )== literal[int] keyword[or] identifier[prevBlockTextStripped] [ literal[int] ]. identifier[isspace] ()):
identifier[indentation] = identifier[self] . identifier[_lineIndent] ( identifier[prevNonEmptyBlockText] )
keyword[if] identifier[CFG_AUTO_INSERT_STAR] keyword[and] keyword[not] identifier[blockTextStripped] . identifier[startswith] ( literal[string] ):
identifier[indentation] += literal[string]
keyword[if] identifier[len] ( identifier[blockTextStripped] )< literal[int] keyword[or] keyword[not] identifier[blockTextStripped] [ literal[int] ]. identifier[isspace] ():
identifier[indentation] += literal[string]
identifier[dbg] ( literal[string] % identifier[block] . identifier[blockNumber] ())
keyword[return] identifier[indentation]
keyword[return] keyword[None]
|
def tryCComment(self, block):
"""C comment checking. If the previous line begins with a "/*" or a "* ", then
return its leading white spaces + ' *' + the white spaces after the *
    return: filler string or None, if not in a C comment
"""
indentation = None
prevNonEmptyBlock = self._prevNonEmptyBlock(block)
if not prevNonEmptyBlock.isValid():
return None # depends on [control=['if'], data=[]]
prevNonEmptyBlockText = prevNonEmptyBlock.text()
if prevNonEmptyBlockText.endswith('*/'):
try:
(foundBlock, notUsedColumn) = self.findTextBackward(prevNonEmptyBlock, prevNonEmptyBlock.length(), '/*') # depends on [control=['try'], data=[]]
except ValueError:
foundBlock = None # depends on [control=['except'], data=[]]
if foundBlock is not None:
dbg('tryCComment: success (1) in line %d' % foundBlock.blockNumber())
return self._lineIndent(foundBlock.text()) # depends on [control=['if'], data=['foundBlock']] # depends on [control=['if'], data=[]]
if prevNonEmptyBlock != block.previous():
            # an empty line was in between, so do not copy the "*" character
return None # depends on [control=['if'], data=[]]
blockTextStripped = block.text().strip()
prevBlockTextStripped = prevNonEmptyBlockText.strip()
if prevBlockTextStripped.startswith('/*') and (not '*/' in prevBlockTextStripped):
indentation = self._blockIndent(prevNonEmptyBlock)
if CFG_AUTO_INSERT_STAR:
# only add '*', if there is none yet.
indentation += ' '
if not blockTextStripped.endswith('*'):
indentation += '*' # depends on [control=['if'], data=[]]
secondCharIsSpace = len(blockTextStripped) > 1 and blockTextStripped[1].isspace()
if not secondCharIsSpace and (not blockTextStripped.endswith('*/')):
indentation += ' ' # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
dbg('tryCComment: success (2) in line %d' % block.blockNumber())
return indentation # depends on [control=['if'], data=[]]
elif prevBlockTextStripped.startswith('*') and (len(prevBlockTextStripped) == 1 or prevBlockTextStripped[1].isspace()):
# in theory, we could search for opening /*, and use its indentation
# and then one alignment character. Let's not do this for now, though.
indentation = self._lineIndent(prevNonEmptyBlockText)
# only add '*', if there is none yet.
if CFG_AUTO_INSERT_STAR and (not blockTextStripped.startswith('*')):
indentation += '*'
if len(blockTextStripped) < 2 or not blockTextStripped[1].isspace():
indentation += ' ' # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
dbg('tryCComment: success (2) in line %d' % block.blockNumber())
return indentation # depends on [control=['if'], data=[]]
return None
|
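# Standalone sketch (a simplification, not part of the class above) of the
# continuation rule tryCComment implements: after a line opening with "/*" or a
# lone "* ", the next line keeps the indentation and continues the star column.
def c_comment_continuation(prev_line):
    stripped = prev_line.strip()
    indent = prev_line[:len(prev_line) - len(prev_line.lstrip())]
    if stripped.startswith('/*') and '*/' not in stripped:
        return indent + ' * '  # align the '*' under the opening '/*'
    if stripped.startswith('*') and (len(stripped) == 1 or stripped[1].isspace()):
        return indent + '* '
    return None

assert c_comment_continuation('    /* docs') == '     * '
assert c_comment_continuation('     * more docs') == '     * '
assert c_comment_continuation('    code();') is None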
def combine(path1, path2):
# type: (Text, Text) -> Text
"""Join two paths together.
This is faster than :func:`~fs.path.join`, but only works when the
second path is relative, and there are no back references in either
path.
Arguments:
        path1 (str): A PyFilesystem path.
        path2 (str): A PyFilesystem path.
Returns:
str: The joint path.
Example:
>>> combine("foo/bar", "baz")
'foo/bar/baz'
"""
if not path1:
return path2.lstrip()
return "{}/{}".format(path1.rstrip("/"), path2.lstrip("/"))
|
def function[combine, parameter[path1, path2]]:
constant[Join two paths together.
This is faster than :func:`~fs.path.join`, but only works when the
second path is relative, and there are no back references in either
path.
Arguments:
        path1 (str): A PyFilesystem path.
        path2 (str): A PyFilesystem path.
Returns:
str: The joint path.
Example:
>>> combine("foo/bar", "baz")
'foo/bar/baz'
]
if <ast.UnaryOp object at 0x7da1b16bdc00> begin[:]
return[call[name[path2].lstrip, parameter[]]]
return[call[constant[{}/{}].format, parameter[call[name[path1].rstrip, parameter[constant[/]]], call[name[path2].lstrip, parameter[constant[/]]]]]]
|
keyword[def] identifier[combine] ( identifier[path1] , identifier[path2] ):
literal[string]
keyword[if] keyword[not] identifier[path1] :
keyword[return] identifier[path2] . identifier[lstrip] ()
keyword[return] literal[string] . identifier[format] ( identifier[path1] . identifier[rstrip] ( literal[string] ), identifier[path2] . identifier[lstrip] ( literal[string] ))
|
def combine(path1, path2):
# type: (Text, Text) -> Text
    'Join two paths together.\n\n    This is faster than :func:`~fs.path.join`, but only works when the\n    second path is relative, and there are no back references in either\n    path.\n\n    Arguments:\n        path1 (str): A PyFilesystem path.\n        path2 (str): A PyFilesystem path.\n\n    Returns:\n        str: The joint path.\n\n    Example:\n        >>> combine("foo/bar", "baz")\n        \'foo/bar/baz\'\n\n    '
if not path1:
return path2.lstrip() # depends on [control=['if'], data=[]]
return '{}/{}'.format(path1.rstrip('/'), path2.lstrip('/'))
|
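# Sanity checks that follow directly from combine above:
assert combine('foo/bar', 'baz') == 'foo/bar/baz'
assert combine('foo/', '/baz') == 'foo/baz'  # redundant separators are stripped
assert combine('', 'baz') == 'baz'           # empty first path: second path returned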
def create_assignment_group(self, course_id, group_weight=None, integration_data=None, name=None, position=None, rules=None, sis_source_id=None):
"""
Create an Assignment Group.
Create a new assignment group for this course.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# OPTIONAL - name
"""The assignment group's name"""
if name is not None:
data["name"] = name
# OPTIONAL - position
"""The position of this assignment group in relation to the other assignment groups"""
if position is not None:
data["position"] = position
# OPTIONAL - group_weight
"""The percent of the total grade that this assignment group represents"""
if group_weight is not None:
data["group_weight"] = group_weight
# OPTIONAL - sis_source_id
"""The sis source id of the Assignment Group"""
if sis_source_id is not None:
data["sis_source_id"] = sis_source_id
# OPTIONAL - integration_data
"""The integration data of the Assignment Group"""
if integration_data is not None:
data["integration_data"] = integration_data
# OPTIONAL - rules
"""The grading rules that are applied within this assignment group
See the Assignment Group object definition for format"""
if rules is not None:
data["rules"] = rules
self.logger.debug("POST /api/v1/courses/{course_id}/assignment_groups with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("POST", "/api/v1/courses/{course_id}/assignment_groups".format(**path), data=data, params=params, single_item=True)
|
def function[create_assignment_group, parameter[self, course_id, group_weight, integration_data, name, position, rules, sis_source_id]]:
constant[
Create an Assignment Group.
Create a new assignment group for this course.
]
variable[path] assign[=] dictionary[[], []]
variable[data] assign[=] dictionary[[], []]
variable[params] assign[=] dictionary[[], []]
constant[ID]
call[name[path]][constant[course_id]] assign[=] name[course_id]
constant[The assignment group's name]
if compare[name[name] is_not constant[None]] begin[:]
call[name[data]][constant[name]] assign[=] name[name]
constant[The position of this assignment group in relation to the other assignment groups]
if compare[name[position] is_not constant[None]] begin[:]
call[name[data]][constant[position]] assign[=] name[position]
constant[The percent of the total grade that this assignment group represents]
if compare[name[group_weight] is_not constant[None]] begin[:]
call[name[data]][constant[group_weight]] assign[=] name[group_weight]
constant[The sis source id of the Assignment Group]
if compare[name[sis_source_id] is_not constant[None]] begin[:]
call[name[data]][constant[sis_source_id]] assign[=] name[sis_source_id]
constant[The integration data of the Assignment Group]
if compare[name[integration_data] is_not constant[None]] begin[:]
call[name[data]][constant[integration_data]] assign[=] name[integration_data]
constant[The grading rules that are applied within this assignment group
See the Assignment Group object definition for format]
if compare[name[rules] is_not constant[None]] begin[:]
call[name[data]][constant[rules]] assign[=] name[rules]
call[name[self].logger.debug, parameter[call[constant[POST /api/v1/courses/{course_id}/assignment_groups with query params: {params} and form data: {data}].format, parameter[]]]]
return[call[name[self].generic_request, parameter[constant[POST], call[constant[/api/v1/courses/{course_id}/assignment_groups].format, parameter[]]]]]
|
keyword[def] identifier[create_assignment_group] ( identifier[self] , identifier[course_id] , identifier[group_weight] = keyword[None] , identifier[integration_data] = keyword[None] , identifier[name] = keyword[None] , identifier[position] = keyword[None] , identifier[rules] = keyword[None] , identifier[sis_source_id] = keyword[None] ):
literal[string]
identifier[path] ={}
identifier[data] ={}
identifier[params] ={}
literal[string]
identifier[path] [ literal[string] ]= identifier[course_id]
literal[string]
keyword[if] identifier[name] keyword[is] keyword[not] keyword[None] :
identifier[data] [ literal[string] ]= identifier[name]
literal[string]
keyword[if] identifier[position] keyword[is] keyword[not] keyword[None] :
identifier[data] [ literal[string] ]= identifier[position]
literal[string]
keyword[if] identifier[group_weight] keyword[is] keyword[not] keyword[None] :
identifier[data] [ literal[string] ]= identifier[group_weight]
literal[string]
keyword[if] identifier[sis_source_id] keyword[is] keyword[not] keyword[None] :
identifier[data] [ literal[string] ]= identifier[sis_source_id]
literal[string]
keyword[if] identifier[integration_data] keyword[is] keyword[not] keyword[None] :
identifier[data] [ literal[string] ]= identifier[integration_data]
literal[string]
keyword[if] identifier[rules] keyword[is] keyword[not] keyword[None] :
identifier[data] [ literal[string] ]= identifier[rules]
identifier[self] . identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[params] = identifier[params] , identifier[data] = identifier[data] ,** identifier[path] ))
keyword[return] identifier[self] . identifier[generic_request] ( literal[string] , literal[string] . identifier[format] (** identifier[path] ), identifier[data] = identifier[data] , identifier[params] = identifier[params] , identifier[single_item] = keyword[True] )
|
def create_assignment_group(self, course_id, group_weight=None, integration_data=None, name=None, position=None, rules=None, sis_source_id=None):
"""
Create an Assignment Group.
Create a new assignment group for this course.
"""
path = {}
data = {}
params = {} # REQUIRED - PATH - course_id
'ID'
path['course_id'] = course_id # OPTIONAL - name
"The assignment group's name"
if name is not None:
data['name'] = name # depends on [control=['if'], data=['name']] # OPTIONAL - position
'The position of this assignment group in relation to the other assignment groups'
if position is not None:
data['position'] = position # depends on [control=['if'], data=['position']] # OPTIONAL - group_weight
'The percent of the total grade that this assignment group represents'
if group_weight is not None:
data['group_weight'] = group_weight # depends on [control=['if'], data=['group_weight']] # OPTIONAL - sis_source_id
'The sis source id of the Assignment Group'
if sis_source_id is not None:
data['sis_source_id'] = sis_source_id # depends on [control=['if'], data=['sis_source_id']] # OPTIONAL - integration_data
'The integration data of the Assignment Group'
if integration_data is not None:
data['integration_data'] = integration_data # depends on [control=['if'], data=['integration_data']] # OPTIONAL - rules
'The grading rules that are applied within this assignment group\n See the Assignment Group object definition for format'
if rules is not None:
data['rules'] = rules # depends on [control=['if'], data=['rules']]
self.logger.debug('POST /api/v1/courses/{course_id}/assignment_groups with query params: {params} and form data: {data}'.format(params=params, data=data, **path))
return self.generic_request('POST', '/api/v1/courses/{course_id}/assignment_groups'.format(**path), data=data, params=params, single_item=True)
|
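# Hedged usage sketch: `canvas` stands for an instance of the API client class
# this method belongs to; the course id and field values are invented.
group = canvas.create_assignment_group(
    course_id=1234,
    name='Problem Sets',
    position=1,
    group_weight=40,           # percent of the final grade
    rules={'drop_lowest': 1},  # format per the Assignment Group object
)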
def generate_query(command):
"""Add header, checksum and footer to command data."""
data = bytearray(command)
c = checksum(data)
data.append(c >> 8)
data.append(c & 0xFF)
    data = data.replace(b'\xFE', b'\xFE\xF0')  # replace() returns a new bytearray; assign it back
data = bytearray.fromhex("FEFE") + data + bytearray.fromhex("FE0D")
return data
|
def function[generate_query, parameter[command]]:
constant[Add header, checksum and footer to command data.]
variable[data] assign[=] call[name[bytearray], parameter[name[command]]]
variable[c] assign[=] call[name[checksum], parameter[name[data]]]
call[name[data].append, parameter[binary_operation[name[c] <ast.RShift object at 0x7da2590d6a40> constant[8]]]]
call[name[data].append, parameter[binary_operation[name[c] <ast.BitAnd object at 0x7da2590d6b60> constant[255]]]]
    variable[data] assign[=] call[name[data].replace, parameter[constant[b'\xfe'], constant[b'\xfe\xf0']]]
variable[data] assign[=] binary_operation[binary_operation[call[name[bytearray].fromhex, parameter[constant[FEFE]]] + name[data]] + call[name[bytearray].fromhex, parameter[constant[FE0D]]]]
return[name[data]]
|
keyword[def] identifier[generate_query] ( identifier[command] ):
literal[string]
identifier[data] = identifier[bytearray] ( identifier[command] )
identifier[c] = identifier[checksum] ( identifier[data] )
identifier[data] . identifier[append] ( identifier[c] >> literal[int] )
identifier[data] . identifier[append] ( identifier[c] & literal[int] )
    identifier[data] = identifier[data] . identifier[replace] ( literal[string] , literal[string] )
identifier[data] = identifier[bytearray] . identifier[fromhex] ( literal[string] )+ identifier[data] + identifier[bytearray] . identifier[fromhex] ( literal[string] )
keyword[return] identifier[data]
|
def generate_query(command):
"""Add header, checksum and footer to command data."""
data = bytearray(command)
c = checksum(data)
data.append(c >> 8)
data.append(c & 255)
    data = data.replace(b'\xfe', b'\xfe\xf0')  # replace() returns a new bytearray; assign it back
data = bytearray.fromhex('FEFE') + data + bytearray.fromhex('FE0D')
return data
|
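# Hedged framing sketch. `checksum` is defined elsewhere in this module; the
# stub below stands in for it only so the byte layout is visible end to end.
def checksum(data):  # illustration-only stub, not the real algorithm
    return sum(data) & 0xFFFF

frame = generate_query(b'\x01\x02')
# FE FE header + payload + 16-bit checksum (0xFE bytes escaped to FE F0) + FE 0D footer
print(' '.join('%02X' % b for b in frame))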
def approvecommittee(self, committees, account=None, **kwargs):
""" Approve a committee
:param list committees: list of committee member name or id
        :param str account: (optional) the account that approves the
            committee members (defaults to ``default_account``)
"""
if not account:
if "default_account" in self.config:
account = self.config["default_account"]
if not account:
raise ValueError("You need to provide an account")
account = Account(account, blockchain_instance=self)
options = account["options"]
if not isinstance(committees, (list, set, tuple)):
committees = {committees}
for committee in committees:
committee = Committee(committee, blockchain_instance=self)
options["votes"].append(committee["vote_id"])
options["votes"] = list(set(options["votes"]))
options["num_committee"] = len(
list(filter(lambda x: float(x.split(":")[0]) == 0, options["votes"]))
)
op = operations.Account_update(
**{
"fee": {"amount": 0, "asset_id": "1.3.0"},
"account": account["id"],
"new_options": options,
"extensions": {},
"prefix": self.prefix,
}
)
return self.finalizeOp(op, account["name"], "active", **kwargs)
|
def function[approvecommittee, parameter[self, committees, account]]:
constant[ Approve a committee
:param list committees: list of committee member name or id
        :param str account: (optional) the account that approves the
            committee members (defaults to ``default_account``)
]
if <ast.UnaryOp object at 0x7da1b103b850> begin[:]
if compare[constant[default_account] in name[self].config] begin[:]
variable[account] assign[=] call[name[self].config][constant[default_account]]
if <ast.UnaryOp object at 0x7da1b1038340> begin[:]
<ast.Raise object at 0x7da1b10386a0>
variable[account] assign[=] call[name[Account], parameter[name[account]]]
variable[options] assign[=] call[name[account]][constant[options]]
if <ast.UnaryOp object at 0x7da1b103a7d0> begin[:]
variable[committees] assign[=] <ast.Set object at 0x7da1b1038d60>
for taget[name[committee]] in starred[name[committees]] begin[:]
variable[committee] assign[=] call[name[Committee], parameter[name[committee]]]
call[call[name[options]][constant[votes]].append, parameter[call[name[committee]][constant[vote_id]]]]
call[name[options]][constant[votes]] assign[=] call[name[list], parameter[call[name[set], parameter[call[name[options]][constant[votes]]]]]]
call[name[options]][constant[num_committee]] assign[=] call[name[len], parameter[call[name[list], parameter[call[name[filter], parameter[<ast.Lambda object at 0x7da1b103a1d0>, call[name[options]][constant[votes]]]]]]]]
variable[op] assign[=] call[name[operations].Account_update, parameter[]]
return[call[name[self].finalizeOp, parameter[name[op], call[name[account]][constant[name]], constant[active]]]]
|
keyword[def] identifier[approvecommittee] ( identifier[self] , identifier[committees] , identifier[account] = keyword[None] ,** identifier[kwargs] ):
literal[string]
keyword[if] keyword[not] identifier[account] :
keyword[if] literal[string] keyword[in] identifier[self] . identifier[config] :
identifier[account] = identifier[self] . identifier[config] [ literal[string] ]
keyword[if] keyword[not] identifier[account] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[account] = identifier[Account] ( identifier[account] , identifier[blockchain_instance] = identifier[self] )
identifier[options] = identifier[account] [ literal[string] ]
keyword[if] keyword[not] identifier[isinstance] ( identifier[committees] ,( identifier[list] , identifier[set] , identifier[tuple] )):
identifier[committees] ={ identifier[committees] }
keyword[for] identifier[committee] keyword[in] identifier[committees] :
identifier[committee] = identifier[Committee] ( identifier[committee] , identifier[blockchain_instance] = identifier[self] )
identifier[options] [ literal[string] ]. identifier[append] ( identifier[committee] [ literal[string] ])
identifier[options] [ literal[string] ]= identifier[list] ( identifier[set] ( identifier[options] [ literal[string] ]))
identifier[options] [ literal[string] ]= identifier[len] (
identifier[list] ( identifier[filter] ( keyword[lambda] identifier[x] : identifier[float] ( identifier[x] . identifier[split] ( literal[string] )[ literal[int] ])== literal[int] , identifier[options] [ literal[string] ]))
)
identifier[op] = identifier[operations] . identifier[Account_update] (
**{
literal[string] :{ literal[string] : literal[int] , literal[string] : literal[string] },
literal[string] : identifier[account] [ literal[string] ],
literal[string] : identifier[options] ,
literal[string] :{},
literal[string] : identifier[self] . identifier[prefix] ,
}
)
keyword[return] identifier[self] . identifier[finalizeOp] ( identifier[op] , identifier[account] [ literal[string] ], literal[string] ,** identifier[kwargs] )
|
def approvecommittee(self, committees, account=None, **kwargs):
""" Approve a committee
:param list committees: list of committee member name or id
        :param str account: (optional) the account that approves the
            committee members (defaults to ``default_account``)
"""
if not account:
if 'default_account' in self.config:
account = self.config['default_account'] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if not account:
raise ValueError('You need to provide an account') # depends on [control=['if'], data=[]]
account = Account(account, blockchain_instance=self)
options = account['options']
if not isinstance(committees, (list, set, tuple)):
committees = {committees} # depends on [control=['if'], data=[]]
for committee in committees:
committee = Committee(committee, blockchain_instance=self)
options['votes'].append(committee['vote_id']) # depends on [control=['for'], data=['committee']]
options['votes'] = list(set(options['votes']))
options['num_committee'] = len(list(filter(lambda x: float(x.split(':')[0]) == 0, options['votes'])))
op = operations.Account_update(**{'fee': {'amount': 0, 'asset_id': '1.3.0'}, 'account': account['id'], 'new_options': options, 'extensions': {}, 'prefix': self.prefix})
return self.finalizeOp(op, account['name'], 'active', **kwargs)
|
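# Hedged usage sketch: `chain` stands for the blockchain-instance object this
# method is bound to; the account and committee-member names are invented.
tx = chain.approvecommittee(['committee-member-x'], account='voter-account')
print(tx)  # the transaction returned by finalizeOp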
def xmllint_format(xml):
"""
Pretty-print XML like ``xmllint`` does.
Arguments:
xml (string): Serialized XML
"""
parser = ET.XMLParser(resolve_entities=False, strip_cdata=False, remove_blank_text=True)
document = ET.fromstring(xml, parser)
return ('%s\n%s' % ('<?xml version="1.0" encoding="UTF-8"?>', ET.tostring(document, pretty_print=True).decode('utf-8'))).encode('utf-8')
|
def function[xmllint_format, parameter[xml]]:
constant[
Pretty-print XML like ``xmllint`` does.
Arguments:
xml (string): Serialized XML
]
variable[parser] assign[=] call[name[ET].XMLParser, parameter[]]
variable[document] assign[=] call[name[ET].fromstring, parameter[name[xml], name[parser]]]
return[call[binary_operation[constant[%s
%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Constant object at 0x7da20c992530>, <ast.Call object at 0x7da20c991e10>]]].encode, parameter[constant[utf-8]]]]
|
keyword[def] identifier[xmllint_format] ( identifier[xml] ):
literal[string]
identifier[parser] = identifier[ET] . identifier[XMLParser] ( identifier[resolve_entities] = keyword[False] , identifier[strip_cdata] = keyword[False] , identifier[remove_blank_text] = keyword[True] )
identifier[document] = identifier[ET] . identifier[fromstring] ( identifier[xml] , identifier[parser] )
keyword[return] ( literal[string] %( literal[string] , identifier[ET] . identifier[tostring] ( identifier[document] , identifier[pretty_print] = keyword[True] ). identifier[decode] ( literal[string] ))). identifier[encode] ( literal[string] )
|
def xmllint_format(xml):
"""
Pretty-print XML like ``xmllint`` does.
Arguments:
xml (string): Serialized XML
"""
parser = ET.XMLParser(resolve_entities=False, strip_cdata=False, remove_blank_text=True)
document = ET.fromstring(xml, parser)
return ('%s\n%s' % ('<?xml version="1.0" encoding="UTF-8"?>', ET.tostring(document, pretty_print=True).decode('utf-8'))).encode('utf-8')
|
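# Usage sketch; it assumes ET is lxml.etree, which provides the
# resolve_entities / strip_cdata / pretty_print options used above.
from lxml import etree as ET
print(xmllint_format(b'<a><b>text</b></a>').decode('utf-8'))
# <?xml version="1.0" encoding="UTF-8"?>
# <a>
#   <b>text</b>
# </a>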
def find_nodes(self, query_dict=None, exact=False, verbose=False, **kwargs):
"""Query on node properties. See documentation for _OTIWrapper class."""
assert self.use_v1
return self._do_query('{p}/singlePropertySearchForTreeNodes'.format(p=self.query_prefix),
query_dict=query_dict,
exact=exact,
verbose=verbose,
valid_keys=self.node_search_term_set,
kwargs=kwargs)
|
def function[find_nodes, parameter[self, query_dict, exact, verbose]]:
constant[Query on node properties. See documentation for _OTIWrapper class.]
assert[name[self].use_v1]
return[call[name[self]._do_query, parameter[call[constant[{p}/singlePropertySearchForTreeNodes].format, parameter[]]]]]
|
keyword[def] identifier[find_nodes] ( identifier[self] , identifier[query_dict] = keyword[None] , identifier[exact] = keyword[False] , identifier[verbose] = keyword[False] ,** identifier[kwargs] ):
literal[string]
keyword[assert] identifier[self] . identifier[use_v1]
keyword[return] identifier[self] . identifier[_do_query] ( literal[string] . identifier[format] ( identifier[p] = identifier[self] . identifier[query_prefix] ),
identifier[query_dict] = identifier[query_dict] ,
identifier[exact] = identifier[exact] ,
identifier[verbose] = identifier[verbose] ,
identifier[valid_keys] = identifier[self] . identifier[node_search_term_set] ,
identifier[kwargs] = identifier[kwargs] )
|
def find_nodes(self, query_dict=None, exact=False, verbose=False, **kwargs):
"""Query on node properties. See documentation for _OTIWrapper class."""
assert self.use_v1
return self._do_query('{p}/singlePropertySearchForTreeNodes'.format(p=self.query_prefix), query_dict=query_dict, exact=exact, verbose=verbose, valid_keys=self.node_search_term_set, kwargs=kwargs)
|
def _get_individual_image(self, run, tag, index, sample):
"""
Returns the actual image bytes for a given image.
Args:
run: The name of the run the image belongs to.
tag: The name of the tag the images belongs to.
index: The index of the image in the current reservoir.
sample: The zero-indexed sample of the image to retrieve (for example,
setting `sample` to `2` will fetch the third image sample at `step`).
Returns:
A bytestring of the raw image bytes.
"""
if self._db_connection_provider:
db = self._db_connection_provider()
cursor = db.execute(
'''
SELECT data
FROM TensorStrings
WHERE
/* Skip first 2 elements which are width and height. */
idx = 2 + :sample
AND tensor_rowid = (
SELECT rowid
FROM Tensors
WHERE
series = (
SELECT tag_id
FROM Runs
CROSS JOIN Tags USING (run_id)
WHERE
Runs.run_name = :run
AND Tags.tag_name = :tag)
AND step IS NOT NULL
AND dtype = :dtype
/* Should be n-vector, n >= 3: [width, height, samples...] */
AND (NOT INSTR(shape, ',') AND CAST (shape AS INT) >= 3)
ORDER BY step
LIMIT 1
OFFSET :index)
''',
{'run': run,
'tag': tag,
'sample': sample,
'index': index,
'dtype': tf.string.as_datatype_enum})
(data,) = cursor.fetchone()
return six.binary_type(data)
events = self._filter_by_sample(self._multiplexer.Tensors(run, tag), sample)
images = events[index].tensor_proto.string_val[2:] # skip width, height
return images[sample]
|
def function[_get_individual_image, parameter[self, run, tag, index, sample]]:
constant[
Returns the actual image bytes for a given image.
Args:
run: The name of the run the image belongs to.
tag: The name of the tag the images belongs to.
index: The index of the image in the current reservoir.
sample: The zero-indexed sample of the image to retrieve (for example,
setting `sample` to `2` will fetch the third image sample at `step`).
Returns:
A bytestring of the raw image bytes.
]
if name[self]._db_connection_provider begin[:]
variable[db] assign[=] call[name[self]._db_connection_provider, parameter[]]
variable[cursor] assign[=] call[name[db].execute, parameter[constant[
SELECT data
FROM TensorStrings
WHERE
/* Skip first 2 elements which are width and height. */
idx = 2 + :sample
AND tensor_rowid = (
SELECT rowid
FROM Tensors
WHERE
series = (
SELECT tag_id
FROM Runs
CROSS JOIN Tags USING (run_id)
WHERE
Runs.run_name = :run
AND Tags.tag_name = :tag)
AND step IS NOT NULL
AND dtype = :dtype
/* Should be n-vector, n >= 3: [width, height, samples...] */
AND (NOT INSTR(shape, ',') AND CAST (shape AS INT) >= 3)
ORDER BY step
LIMIT 1
OFFSET :index)
], dictionary[[<ast.Constant object at 0x7da1b21a4f10>, <ast.Constant object at 0x7da1b21a60b0>, <ast.Constant object at 0x7da1b21a6dd0>, <ast.Constant object at 0x7da1b21a7340>, <ast.Constant object at 0x7da1b21a77f0>], [<ast.Name object at 0x7da1b21a5630>, <ast.Name object at 0x7da1b21a5ea0>, <ast.Name object at 0x7da1b21a4160>, <ast.Name object at 0x7da1b21a50f0>, <ast.Attribute object at 0x7da1b21a6a70>]]]]
<ast.Tuple object at 0x7da1b21a54e0> assign[=] call[name[cursor].fetchone, parameter[]]
return[call[name[six].binary_type, parameter[name[data]]]]
variable[events] assign[=] call[name[self]._filter_by_sample, parameter[call[name[self]._multiplexer.Tensors, parameter[name[run], name[tag]]], name[sample]]]
variable[images] assign[=] call[call[name[events]][name[index]].tensor_proto.string_val][<ast.Slice object at 0x7da1b21a4bb0>]
return[call[name[images]][name[sample]]]
|
keyword[def] identifier[_get_individual_image] ( identifier[self] , identifier[run] , identifier[tag] , identifier[index] , identifier[sample] ):
literal[string]
keyword[if] identifier[self] . identifier[_db_connection_provider] :
identifier[db] = identifier[self] . identifier[_db_connection_provider] ()
identifier[cursor] = identifier[db] . identifier[execute] (
literal[string] ,
{ literal[string] : identifier[run] ,
literal[string] : identifier[tag] ,
literal[string] : identifier[sample] ,
literal[string] : identifier[index] ,
literal[string] : identifier[tf] . identifier[string] . identifier[as_datatype_enum] })
( identifier[data] ,)= identifier[cursor] . identifier[fetchone] ()
keyword[return] identifier[six] . identifier[binary_type] ( identifier[data] )
identifier[events] = identifier[self] . identifier[_filter_by_sample] ( identifier[self] . identifier[_multiplexer] . identifier[Tensors] ( identifier[run] , identifier[tag] ), identifier[sample] )
identifier[images] = identifier[events] [ identifier[index] ]. identifier[tensor_proto] . identifier[string_val] [ literal[int] :]
keyword[return] identifier[images] [ identifier[sample] ]
|
def _get_individual_image(self, run, tag, index, sample):
"""
Returns the actual image bytes for a given image.
Args:
run: The name of the run the image belongs to.
tag: The name of the tag the images belongs to.
index: The index of the image in the current reservoir.
sample: The zero-indexed sample of the image to retrieve (for example,
setting `sample` to `2` will fetch the third image sample at `step`).
Returns:
A bytestring of the raw image bytes.
"""
if self._db_connection_provider:
db = self._db_connection_provider()
cursor = db.execute("\n SELECT data\n FROM TensorStrings\n WHERE\n /* Skip first 2 elements which are width and height. */\n idx = 2 + :sample\n AND tensor_rowid = (\n SELECT rowid\n FROM Tensors\n WHERE\n series = (\n SELECT tag_id\n FROM Runs\n CROSS JOIN Tags USING (run_id)\n WHERE\n Runs.run_name = :run\n AND Tags.tag_name = :tag)\n AND step IS NOT NULL\n AND dtype = :dtype\n /* Should be n-vector, n >= 3: [width, height, samples...] */\n AND (NOT INSTR(shape, ',') AND CAST (shape AS INT) >= 3)\n ORDER BY step\n LIMIT 1\n OFFSET :index)\n ", {'run': run, 'tag': tag, 'sample': sample, 'index': index, 'dtype': tf.string.as_datatype_enum})
(data,) = cursor.fetchone()
return six.binary_type(data) # depends on [control=['if'], data=[]]
events = self._filter_by_sample(self._multiplexer.Tensors(run, tag), sample)
images = events[index].tensor_proto.string_val[2:] # skip width, height
return images[sample]
|
def make_fakelc(lcfile,
outdir,
magrms=None,
randomizemags=True,
randomizecoords=False,
lcformat='hat-sql',
lcformatdir=None,
timecols=None,
magcols=None,
errcols=None):
    '''This preprocesses a real input LC and sets it up to be a fake LC.
Parameters
----------
lcfile : str
This is an input light curve file that will be used to copy over the
time-base. This will be used to generate the time-base for fake light
curves to provide a realistic simulation of the observing window
function.
outdir : str
        The output directory where the fake light curve will be written.
magrms : dict
This is a dict containing the SDSS r mag-RMS (SDSS rmag-MAD preferably)
relation based on all light curves that the input lcfile is from. This
will be used to generate the median mag and noise corresponding to the
magnitude chosen for this fake LC.
randomizemags : bool
If this is True, then a random mag between the first and last magbin in
magrms will be chosen as the median mag for this light curve. This
choice will be weighted by the mag bin probability obtained from the
magrms kwarg. Otherwise, the median mag will be taken from the input
lcfile's lcdict['objectinfo']['sdssr'] key or a transformed SDSS r mag
generated from the input lcfile's lcdict['objectinfo']['jmag'],
['hmag'], and ['kmag'] keys. The magrms relation for each magcol will be
used to generate Gaussian noise at the correct level for the magbin this
light curve's median mag falls into.
randomizecoords : bool
If this is True, will randomize the RA, DEC of the output fake object
and not copy over the RA/DEC from the real input object.
lcformat : str
This is the `formatkey` associated with your input real light curve
format, which you previously passed in to the `lcproc.register_lcformat`
function. This will be used to look up how to find and read the light
curve specified in `lcfile`.
lcformatdir : str or None
If this is provided, gives the path to a directory when you've stored
your lcformat description JSONs, other than the usual directories lcproc
knows to search for them in. Use this along with `lcformat` to specify
an LC format JSON file that's not currently registered with lcproc.
timecols : list of str or None
The timecol keys to use from the input lcdict in generating the fake
        light curve. Fake LCs will be generated for each
timecol/magcol/errcol combination in the input light curve.
magcols : list of str or None
The magcol keys to use from the input lcdict in generating the fake
        light curve. Fake LCs will be generated for each
timecol/magcol/errcol combination in the input light curve.
errcols : list of str or None
The errcol keys to use from the input lcdict in generating the fake
        light curve. Fake LCs will be generated for each
timecol/magcol/errcol combination in the input light curve.
Returns
-------
tuple
A tuple of the following form is returned::
(fakelc_fpath,
fakelc_lcdict['columns'],
fakelc_lcdict['objectinfo'],
fakelc_lcdict['moments'])
'''
try:
formatinfo = get_lcformat(lcformat,
use_lcformat_dir=lcformatdir)
if formatinfo:
(fileglob, readerfunc,
dtimecols, dmagcols, derrcols,
magsarefluxes, normfunc) = formatinfo
else:
LOGERROR("can't figure out the light curve format")
return None
except Exception as e:
LOGEXCEPTION("can't figure out the light curve format")
return None
# override the default timecols, magcols, and errcols
# using the ones provided to the function
if timecols is None:
timecols = dtimecols
if magcols is None:
magcols = dmagcols
if errcols is None:
errcols = derrcols
# read in the light curve
lcdict = readerfunc(lcfile)
if isinstance(lcdict, tuple) and isinstance(lcdict[0],dict):
lcdict = lcdict[0]
# set up the fakelcdict with a randomly assigned objectid
fakeobjectid = sha512(npr.bytes(12)).hexdigest()[-8:]
fakelcdict = {
'objectid':fakeobjectid,
'objectinfo':{'objectid':fakeobjectid},
'columns':[],
'moments':{},
'origformat':lcformat,
}
# now, get the actual mag of this object and other info and use that to
# populate the corresponding entries of the fakelcdict objectinfo
if ('objectinfo' in lcdict and
isinstance(lcdict['objectinfo'], dict)):
objectinfo = lcdict['objectinfo']
# get the RA
if (not randomizecoords and 'ra' in objectinfo and
objectinfo['ra'] is not None and
np.isfinite(objectinfo['ra'])):
fakelcdict['objectinfo']['ra'] = objectinfo['ra']
else:
# if there's no RA available, we'll assign a random one between 0
# and 360.0
LOGWARNING('%s: assigning a random right ascension' % lcfile)
fakelcdict['objectinfo']['ra'] = npr.random()*360.0
# get the DEC
if (not randomizecoords and 'decl' in objectinfo and
objectinfo['decl'] is not None and
np.isfinite(objectinfo['decl'])):
fakelcdict['objectinfo']['decl'] = objectinfo['decl']
else:
# if there's no DECL available, we'll assign a random one between
# -90.0 and +90.0
LOGWARNING(' %s: assigning a random declination' % lcfile)
fakelcdict['objectinfo']['decl'] = npr.random()*180.0 - 90.0
# get the SDSS r mag for this object
# this will be used for getting the eventual mag-RMS relation later
if ((not randomizemags) and 'sdssr' in objectinfo and
objectinfo['sdssr'] is not None and
np.isfinite(objectinfo['sdssr'])):
fakelcdict['objectinfo']['sdssr'] = objectinfo['sdssr']
# if the SDSS r is unavailable, but we have J, H, K: use those to get
# the SDSS r by using transformations
elif ((not randomizemags) and ('jmag' in objectinfo and
objectinfo['jmag'] is not None and
np.isfinite(objectinfo['jmag'])) and
('hmag' in objectinfo and
objectinfo['hmag'] is not None and
np.isfinite(objectinfo['hmag'])) and
('kmag' in objectinfo and
objectinfo['kmag'] is not None and
np.isfinite(objectinfo['kmag']))):
LOGWARNING('used JHK mags to generate an SDSS r mag for %s' %
lcfile)
fakelcdict['objectinfo']['sdssr'] = jhk_to_sdssr(
objectinfo['jmag'],
objectinfo['hmag'],
objectinfo['kmag']
)
        # if we're specifically told to randomize mags and we have a mag-RMS
        # relation, choose a random mag weighted by the mag bin probabilities
elif randomizemags and magrms:
LOGWARNING(' %s: assigning a random mag weighted by mag '
'bin probabilities' % lcfile)
magbins = magrms[magcols[0]]['binned_sdssr_median']
binprobs = magrms[magcols[0]]['magbin_probabilities']
# this is the center of the magbin chosen
magbincenter = npr.choice(magbins,size=1,p=binprobs)
# in this magbin, choose between center and -+ 0.25 mag
chosenmag = (
npr.random()*((magbincenter+0.25) - (magbincenter-0.25)) +
(magbincenter-0.25)
)
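            # note: np.asscalar is deprecated in newer NumPy (and removed in
            # 1.23); chosenmag.item() is the modern equivalent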
fakelcdict['objectinfo']['sdssr'] = np.asscalar(chosenmag)
# if there are no mags available at all, generate a random mag
# between 8 and 16.0
else:
LOGWARNING(' %s: assigning a random mag from '
'uniform distribution between 8.0 and 16.0' % lcfile)
fakelcdict['objectinfo']['sdssr'] = npr.random()*8.0 + 8.0
# if there's no info available, generate fake info
else:
LOGWARNING('no object information found in %s, '
'generating random ra, decl, sdssr' %
lcfile)
fakelcdict['objectinfo']['ra'] = npr.random()*360.0
fakelcdict['objectinfo']['decl'] = npr.random()*180.0 - 90.0
fakelcdict['objectinfo']['sdssr'] = npr.random()*8.0 + 8.0
#
# NOW FILL IN THE TIMES, MAGS, ERRS
#
# get the time columns
for tcind, tcol in enumerate(timecols):
if '.' in tcol:
tcolget = tcol.split('.')
else:
tcolget = [tcol]
if tcol not in fakelcdict:
fakelcdict[tcol] = _dict_get(lcdict, tcolget)
fakelcdict['columns'].append(tcol)
# update the ndet with the first time column's size. it's possible
# that different time columns have different lengths, but that would
# be weird and we won't deal with it for now
if tcind == 0:
fakelcdict['objectinfo']['ndet'] = fakelcdict[tcol].size
# get the mag columns
for mcol in magcols:
if '.' in mcol:
mcolget = mcol.split('.')
else:
mcolget = [mcol]
# put the mcol in only once
if mcol not in fakelcdict:
measuredmags = _dict_get(lcdict, mcolget)
measuredmags = measuredmags[np.isfinite(measuredmags)]
# if we're randomizing, get the mags from the interpolated mag-RMS
# relation
if (randomizemags and magrms and mcol in magrms and
'interpolated_magmad' in magrms[mcol] and
magrms[mcol]['interpolated_magmad'] is not None):
interpfunc = magrms[mcol]['interpolated_magmad']
lcmad = interpfunc(fakelcdict['objectinfo']['sdssr'])
fakelcdict['moments'][mcol] = {
'median': fakelcdict['objectinfo']['sdssr'],
'mad': lcmad
}
# if we're not randomizing, get the median and MAD from the light
# curve itself
else:
# we require at least 10 finite measurements
if measuredmags.size > 9:
measuredmedian = np.median(measuredmags)
measuredmad = np.median(
np.abs(measuredmags - measuredmedian)
)
fakelcdict['moments'][mcol] = {'median':measuredmedian,
'mad':measuredmad}
# if there aren't enough measurements in this LC, try to get the
# median and RMS from the interpolated mag-RMS relation first
else:
if (magrms and mcol in magrms and
'interpolated_magmad' in magrms[mcol] and
magrms[mcol]['interpolated_magmad'] is not None):
LOGWARNING(
'input LC %s does not have enough '
'finite measurements, '
'generating mag moments from '
'fakelc sdssr and the mag-RMS relation' % lcfile
)
interpfunc = magrms[mcol]['interpolated_magmad']
lcmad = interpfunc(fakelcdict['objectinfo']['sdssr'])
fakelcdict['moments'][mcol] = {
'median': fakelcdict['objectinfo']['sdssr'],
'mad': lcmad
}
# if we don't have the mag-RMS relation either, then we
# can't do anything for this light curve, generate a random
# MAD between 5e-4 and 0.1
else:
LOGWARNING(
'input LC %s does not have enough '
'finite measurements and '
                            'no mag-RMS relation provided, '
'assigning a random MAD between 5.0e-4 and 0.1'
% lcfile
)
fakelcdict['moments'][mcol] = {
'median':fakelcdict['objectinfo']['sdssr'],
'mad':npr.random()*(0.1 - 5.0e-4) + 5.0e-4
}
# the magnitude column is set to all zeros initially. this will be
# filled in by the add_fakelc_variability function below
fakelcdict[mcol] = np.full_like(_dict_get(lcdict, mcolget), 0.0)
fakelcdict['columns'].append(mcol)
# get the err columns
for mcol, ecol in zip(magcols, errcols):
if '.' in ecol:
ecolget = ecol.split('.')
else:
ecolget = [ecol]
if ecol not in fakelcdict:
measurederrs = _dict_get(lcdict, ecolget)
measurederrs = measurederrs[np.isfinite(measurederrs)]
# if we're randomizing, get the errs from the interpolated mag-RMS
# relation
if (randomizemags and magrms and mcol in magrms and
'interpolated_magmad' in magrms[mcol] and
magrms[mcol]['interpolated_magmad'] is not None):
interpfunc = magrms[mcol]['interpolated_magmad']
lcmad = interpfunc(fakelcdict['objectinfo']['sdssr'])
# the median of the errs = lcmad
# the mad of the errs is 0.1 x lcmad
fakelcdict['moments'][ecol] = {
'median': lcmad,
'mad': 0.1*lcmad
}
else:
# we require at least 10 finite measurements
# we'll calculate the median and MAD of the errs to use later on
if measurederrs.size > 9:
measuredmedian = np.median(measurederrs)
measuredmad = np.median(
np.abs(measurederrs - measuredmedian)
)
fakelcdict['moments'][ecol] = {'median':measuredmedian,
'mad':measuredmad}
else:
if (magrms and mcol in magrms and
'interpolated_magmad' in magrms[mcol] and
magrms[mcol]['interpolated_magmad'] is not None):
LOGWARNING(
'input LC %s does not have enough '
'finite measurements, '
'generating err moments from '
'the mag-RMS relation' % lcfile
)
interpfunc = magrms[mcol]['interpolated_magmad']
lcmad = interpfunc(fakelcdict['objectinfo']['sdssr'])
fakelcdict['moments'][ecol] = {
'median': lcmad,
'mad': 0.1*lcmad
}
# if we don't have the mag-RMS relation either, then we
# can't do anything for this light curve, generate a random
# MAD between 5e-4 and 0.1
else:
LOGWARNING(
'input LC %s does not have '
'enough finite measurements and '
'no mag-RMS relation provided, '
'generating errs randomly' % lcfile
)
fakelcdict['moments'][ecol] = {
'median':npr.random()*(0.01 - 5.0e-4) + 5.0e-4,
'mad':npr.random()*(0.01 - 5.0e-4) + 5.0e-4
}
# the errors column is set to all zeros initially. this will be
# filled in by the add_fakelc_variability function below.
fakelcdict[ecol] = np.full_like(_dict_get(lcdict, ecolget), 0.0)
fakelcdict['columns'].append(ecol)
# add the timecols, magcols, errcols to the lcdict
fakelcdict['timecols'] = timecols
fakelcdict['magcols'] = magcols
fakelcdict['errcols'] = errcols
# generate an output file name
fakelcfname = '%s-fakelc.pkl' % fakelcdict['objectid']
fakelcfpath = os.path.abspath(os.path.join(outdir, fakelcfname))
# write this out to the output directory
with open(fakelcfpath,'wb') as outfd:
pickle.dump(fakelcdict, outfd, protocol=pickle.HIGHEST_PROTOCOL)
# return the fakelc path, its columns, info, and moments so we can put them
# into a collection DB later on
LOGINFO('real LC %s -> fake LC %s OK' % (lcfile, fakelcfpath))
return (fakelcfpath, fakelcdict['columns'],
fakelcdict['objectinfo'], fakelcdict['moments'])
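# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the module): the input LC path and
# output directory below are hypothetical placeholders, and we assume that
# 'hat-sql' is an lcformat already registered via lcproc.register_lcformat.
if __name__ == '__main__':
    fakelc_path, fakelc_cols, fakelc_objinfo, fakelc_moments = make_fakelc(
        '/data/lcs/HAT-123-0000001-lc.sqlite',  # hypothetical real LC file
        '/data/fakelcs',                        # hypothetical output dir
        magrms=None,              # fall back to per-LC median/MAD moments
        randomizemags=False,
        randomizecoords=False,
        lcformat='hat-sql',
    )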
|
def function[make_fakelc, parameter[lcfile, outdir, magrms, randomizemags, randomizecoords, lcformat, lcformatdir, timecols, magcols, errcols]]:
constant[This preprocesses an input real LC and sets it up to be a fake LC.
Parameters
----------
lcfile : str
This is an input light curve file that will be used to copy over the
time-base. This will be used to generate the time-base for fake light
curves to provide a realistic simulation of the observing window
function.
outdir : str
        The output directory where the fake light curve will be written.
magrms : dict
This is a dict containing the SDSS r mag-RMS (SDSS rmag-MAD preferably)
relation based on all light curves that the input lcfile is from. This
will be used to generate the median mag and noise corresponding to the
magnitude chosen for this fake LC.
randomizemags : bool
If this is True, then a random mag between the first and last magbin in
magrms will be chosen as the median mag for this light curve. This
choice will be weighted by the mag bin probability obtained from the
magrms kwarg. Otherwise, the median mag will be taken from the input
lcfile's lcdict['objectinfo']['sdssr'] key or a transformed SDSS r mag
generated from the input lcfile's lcdict['objectinfo']['jmag'],
['hmag'], and ['kmag'] keys. The magrms relation for each magcol will be
used to generate Gaussian noise at the correct level for the magbin this
light curve's median mag falls into.
randomizecoords : bool
If this is True, will randomize the RA, DEC of the output fake object
and not copy over the RA/DEC from the real input object.
lcformat : str
This is the `formatkey` associated with your input real light curve
format, which you previously passed in to the `lcproc.register_lcformat`
function. This will be used to look up how to find and read the light
curve specified in `lcfile`.
lcformatdir : str or None
        If this is provided, gives the path to a directory where you've stored
your lcformat description JSONs, other than the usual directories lcproc
knows to search for them in. Use this along with `lcformat` to specify
an LC format JSON file that's not currently registered with lcproc.
timecols : list of str or None
The timecol keys to use from the input lcdict in generating the fake
        light curve. Fake LCs will be generated for each
timecol/magcol/errcol combination in the input light curve.
magcols : list of str or None
The magcol keys to use from the input lcdict in generating the fake
        light curve. Fake LCs will be generated for each
timecol/magcol/errcol combination in the input light curve.
errcols : list of str or None
The errcol keys to use from the input lcdict in generating the fake
        light curve. Fake LCs will be generated for each
timecol/magcol/errcol combination in the input light curve.
Returns
-------
tuple
A tuple of the following form is returned::
(fakelc_fpath,
fakelc_lcdict['columns'],
fakelc_lcdict['objectinfo'],
fakelc_lcdict['moments'])
]
<ast.Try object at 0x7da18eb555a0>
if compare[name[timecols] is constant[None]] begin[:]
variable[timecols] assign[=] name[dtimecols]
if compare[name[magcols] is constant[None]] begin[:]
variable[magcols] assign[=] name[dmagcols]
if compare[name[errcols] is constant[None]] begin[:]
variable[errcols] assign[=] name[derrcols]
variable[lcdict] assign[=] call[name[readerfunc], parameter[name[lcfile]]]
if <ast.BoolOp object at 0x7da18eb549d0> begin[:]
variable[lcdict] assign[=] call[name[lcdict]][constant[0]]
variable[fakeobjectid] assign[=] call[call[call[name[sha512], parameter[call[name[npr].bytes, parameter[constant[12]]]]].hexdigest, parameter[]]][<ast.Slice object at 0x7da18eb56110>]
variable[fakelcdict] assign[=] dictionary[[<ast.Constant object at 0x7da18eb57070>, <ast.Constant object at 0x7da18eb56170>, <ast.Constant object at 0x7da18eb566e0>, <ast.Constant object at 0x7da18eb549a0>, <ast.Constant object at 0x7da18eb54520>], [<ast.Name object at 0x7da18eb54f10>, <ast.Dict object at 0x7da18eb54fd0>, <ast.List object at 0x7da18eb54c40>, <ast.Dict object at 0x7da18eb55570>, <ast.Name object at 0x7da18eb55ea0>]]
if <ast.BoolOp object at 0x7da18eb553f0> begin[:]
variable[objectinfo] assign[=] call[name[lcdict]][constant[objectinfo]]
if <ast.BoolOp object at 0x7da18eb545e0> begin[:]
call[call[name[fakelcdict]][constant[objectinfo]]][constant[ra]] assign[=] call[name[objectinfo]][constant[ra]]
if <ast.BoolOp object at 0x7da18eb54a90> begin[:]
call[call[name[fakelcdict]][constant[objectinfo]]][constant[decl]] assign[=] call[name[objectinfo]][constant[decl]]
if <ast.BoolOp object at 0x7da18eb54af0> begin[:]
call[call[name[fakelcdict]][constant[objectinfo]]][constant[sdssr]] assign[=] call[name[objectinfo]][constant[sdssr]]
for taget[tuple[[<ast.Name object at 0x7da18f00f730>, <ast.Name object at 0x7da18f00f490>]]] in starred[call[name[enumerate], parameter[name[timecols]]]] begin[:]
if compare[constant[.] in name[tcol]] begin[:]
variable[tcolget] assign[=] call[name[tcol].split, parameter[constant[.]]]
if compare[name[tcol] <ast.NotIn object at 0x7da2590d7190> name[fakelcdict]] begin[:]
call[name[fakelcdict]][name[tcol]] assign[=] call[name[_dict_get], parameter[name[lcdict], name[tcolget]]]
call[call[name[fakelcdict]][constant[columns]].append, parameter[name[tcol]]]
if compare[name[tcind] equal[==] constant[0]] begin[:]
call[call[name[fakelcdict]][constant[objectinfo]]][constant[ndet]] assign[=] call[name[fakelcdict]][name[tcol]].size
for taget[name[mcol]] in starred[name[magcols]] begin[:]
if compare[constant[.] in name[mcol]] begin[:]
variable[mcolget] assign[=] call[name[mcol].split, parameter[constant[.]]]
if compare[name[mcol] <ast.NotIn object at 0x7da2590d7190> name[fakelcdict]] begin[:]
variable[measuredmags] assign[=] call[name[_dict_get], parameter[name[lcdict], name[mcolget]]]
variable[measuredmags] assign[=] call[name[measuredmags]][call[name[np].isfinite, parameter[name[measuredmags]]]]
if <ast.BoolOp object at 0x7da18f00f250> begin[:]
variable[interpfunc] assign[=] call[call[name[magrms]][name[mcol]]][constant[interpolated_magmad]]
variable[lcmad] assign[=] call[name[interpfunc], parameter[call[call[name[fakelcdict]][constant[objectinfo]]][constant[sdssr]]]]
call[call[name[fakelcdict]][constant[moments]]][name[mcol]] assign[=] dictionary[[<ast.Constant object at 0x7da18f00d6f0>, <ast.Constant object at 0x7da18f00fd00>], [<ast.Subscript object at 0x7da18f00f040>, <ast.Name object at 0x7da18f00dc90>]]
call[name[fakelcdict]][name[mcol]] assign[=] call[name[np].full_like, parameter[call[name[_dict_get], parameter[name[lcdict], name[mcolget]]], constant[0.0]]]
call[call[name[fakelcdict]][constant[columns]].append, parameter[name[mcol]]]
for taget[tuple[[<ast.Name object at 0x7da2054a4c10>, <ast.Name object at 0x7da2054a5870>]]] in starred[call[name[zip], parameter[name[magcols], name[errcols]]]] begin[:]
if compare[constant[.] in name[ecol]] begin[:]
variable[ecolget] assign[=] call[name[ecol].split, parameter[constant[.]]]
if compare[name[ecol] <ast.NotIn object at 0x7da2590d7190> name[fakelcdict]] begin[:]
variable[measurederrs] assign[=] call[name[_dict_get], parameter[name[lcdict], name[ecolget]]]
variable[measurederrs] assign[=] call[name[measurederrs]][call[name[np].isfinite, parameter[name[measurederrs]]]]
if <ast.BoolOp object at 0x7da2054a5b10> begin[:]
variable[interpfunc] assign[=] call[call[name[magrms]][name[mcol]]][constant[interpolated_magmad]]
variable[lcmad] assign[=] call[name[interpfunc], parameter[call[call[name[fakelcdict]][constant[objectinfo]]][constant[sdssr]]]]
call[call[name[fakelcdict]][constant[moments]]][name[ecol]] assign[=] dictionary[[<ast.Constant object at 0x7da2054a7340>, <ast.Constant object at 0x7da2054a7bb0>], [<ast.Name object at 0x7da2054a6c20>, <ast.BinOp object at 0x7da2054a5090>]]
call[name[fakelcdict]][name[ecol]] assign[=] call[name[np].full_like, parameter[call[name[_dict_get], parameter[name[lcdict], name[ecolget]]], constant[0.0]]]
call[call[name[fakelcdict]][constant[columns]].append, parameter[name[ecol]]]
call[name[fakelcdict]][constant[timecols]] assign[=] name[timecols]
call[name[fakelcdict]][constant[magcols]] assign[=] name[magcols]
call[name[fakelcdict]][constant[errcols]] assign[=] name[errcols]
variable[fakelcfname] assign[=] binary_operation[constant[%s-fakelc.pkl] <ast.Mod object at 0x7da2590d6920> call[name[fakelcdict]][constant[objectid]]]
variable[fakelcfpath] assign[=] call[name[os].path.abspath, parameter[call[name[os].path.join, parameter[name[outdir], name[fakelcfname]]]]]
with call[name[open], parameter[name[fakelcfpath], constant[wb]]] begin[:]
call[name[pickle].dump, parameter[name[fakelcdict], name[outfd]]]
call[name[LOGINFO], parameter[binary_operation[constant[real LC %s -> fake LC %s OK] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da204347760>, <ast.Name object at 0x7da204346710>]]]]]
return[tuple[[<ast.Name object at 0x7da204346d10>, <ast.Subscript object at 0x7da204347130>, <ast.Subscript object at 0x7da204345a50>, <ast.Subscript object at 0x7da204346ad0>]]]
|
keyword[def] identifier[make_fakelc] ( identifier[lcfile] ,
identifier[outdir] ,
identifier[magrms] = keyword[None] ,
identifier[randomizemags] = keyword[True] ,
identifier[randomizecoords] = keyword[False] ,
identifier[lcformat] = literal[string] ,
identifier[lcformatdir] = keyword[None] ,
identifier[timecols] = keyword[None] ,
identifier[magcols] = keyword[None] ,
identifier[errcols] = keyword[None] ):
literal[string]
keyword[try] :
identifier[formatinfo] = identifier[get_lcformat] ( identifier[lcformat] ,
identifier[use_lcformat_dir] = identifier[lcformatdir] )
keyword[if] identifier[formatinfo] :
( identifier[fileglob] , identifier[readerfunc] ,
identifier[dtimecols] , identifier[dmagcols] , identifier[derrcols] ,
identifier[magsarefluxes] , identifier[normfunc] )= identifier[formatinfo]
keyword[else] :
identifier[LOGERROR] ( literal[string] )
keyword[return] keyword[None]
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[LOGEXCEPTION] ( literal[string] )
keyword[return] keyword[None]
keyword[if] identifier[timecols] keyword[is] keyword[None] :
identifier[timecols] = identifier[dtimecols]
keyword[if] identifier[magcols] keyword[is] keyword[None] :
identifier[magcols] = identifier[dmagcols]
keyword[if] identifier[errcols] keyword[is] keyword[None] :
identifier[errcols] = identifier[derrcols]
identifier[lcdict] = identifier[readerfunc] ( identifier[lcfile] )
keyword[if] identifier[isinstance] ( identifier[lcdict] , identifier[tuple] ) keyword[and] identifier[isinstance] ( identifier[lcdict] [ literal[int] ], identifier[dict] ):
identifier[lcdict] = identifier[lcdict] [ literal[int] ]
identifier[fakeobjectid] = identifier[sha512] ( identifier[npr] . identifier[bytes] ( literal[int] )). identifier[hexdigest] ()[- literal[int] :]
identifier[fakelcdict] ={
literal[string] : identifier[fakeobjectid] ,
literal[string] :{ literal[string] : identifier[fakeobjectid] },
literal[string] :[],
literal[string] :{},
literal[string] : identifier[lcformat] ,
}
keyword[if] ( literal[string] keyword[in] identifier[lcdict] keyword[and]
identifier[isinstance] ( identifier[lcdict] [ literal[string] ], identifier[dict] )):
identifier[objectinfo] = identifier[lcdict] [ literal[string] ]
keyword[if] ( keyword[not] identifier[randomizecoords] keyword[and] literal[string] keyword[in] identifier[objectinfo] keyword[and]
identifier[objectinfo] [ literal[string] ] keyword[is] keyword[not] keyword[None] keyword[and]
identifier[np] . identifier[isfinite] ( identifier[objectinfo] [ literal[string] ])):
identifier[fakelcdict] [ literal[string] ][ literal[string] ]= identifier[objectinfo] [ literal[string] ]
keyword[else] :
identifier[LOGWARNING] ( literal[string] % identifier[lcfile] )
identifier[fakelcdict] [ literal[string] ][ literal[string] ]= identifier[npr] . identifier[random] ()* literal[int]
keyword[if] ( keyword[not] identifier[randomizecoords] keyword[and] literal[string] keyword[in] identifier[objectinfo] keyword[and]
identifier[objectinfo] [ literal[string] ] keyword[is] keyword[not] keyword[None] keyword[and]
identifier[np] . identifier[isfinite] ( identifier[objectinfo] [ literal[string] ])):
identifier[fakelcdict] [ literal[string] ][ literal[string] ]= identifier[objectinfo] [ literal[string] ]
keyword[else] :
identifier[LOGWARNING] ( literal[string] % identifier[lcfile] )
identifier[fakelcdict] [ literal[string] ][ literal[string] ]= identifier[npr] . identifier[random] ()* literal[int] - literal[int]
keyword[if] (( keyword[not] identifier[randomizemags] ) keyword[and] literal[string] keyword[in] identifier[objectinfo] keyword[and]
identifier[objectinfo] [ literal[string] ] keyword[is] keyword[not] keyword[None] keyword[and]
identifier[np] . identifier[isfinite] ( identifier[objectinfo] [ literal[string] ])):
identifier[fakelcdict] [ literal[string] ][ literal[string] ]= identifier[objectinfo] [ literal[string] ]
keyword[elif] (( keyword[not] identifier[randomizemags] ) keyword[and] ( literal[string] keyword[in] identifier[objectinfo] keyword[and]
identifier[objectinfo] [ literal[string] ] keyword[is] keyword[not] keyword[None] keyword[and]
identifier[np] . identifier[isfinite] ( identifier[objectinfo] [ literal[string] ])) keyword[and]
( literal[string] keyword[in] identifier[objectinfo] keyword[and]
identifier[objectinfo] [ literal[string] ] keyword[is] keyword[not] keyword[None] keyword[and]
identifier[np] . identifier[isfinite] ( identifier[objectinfo] [ literal[string] ])) keyword[and]
( literal[string] keyword[in] identifier[objectinfo] keyword[and]
identifier[objectinfo] [ literal[string] ] keyword[is] keyword[not] keyword[None] keyword[and]
identifier[np] . identifier[isfinite] ( identifier[objectinfo] [ literal[string] ]))):
identifier[LOGWARNING] ( literal[string] %
identifier[lcfile] )
identifier[fakelcdict] [ literal[string] ][ literal[string] ]= identifier[jhk_to_sdssr] (
identifier[objectinfo] [ literal[string] ],
identifier[objectinfo] [ literal[string] ],
identifier[objectinfo] [ literal[string] ]
)
keyword[elif] identifier[randomizemags] keyword[and] identifier[magrms] :
identifier[LOGWARNING] ( literal[string]
literal[string] % identifier[lcfile] )
identifier[magbins] = identifier[magrms] [ identifier[magcols] [ literal[int] ]][ literal[string] ]
identifier[binprobs] = identifier[magrms] [ identifier[magcols] [ literal[int] ]][ literal[string] ]
identifier[magbincenter] = identifier[npr] . identifier[choice] ( identifier[magbins] , identifier[size] = literal[int] , identifier[p] = identifier[binprobs] )
identifier[chosenmag] =(
identifier[npr] . identifier[random] ()*(( identifier[magbincenter] + literal[int] )-( identifier[magbincenter] - literal[int] ))+
( identifier[magbincenter] - literal[int] )
)
identifier[fakelcdict] [ literal[string] ][ literal[string] ]= identifier[np] . identifier[asscalar] ( identifier[chosenmag] )
keyword[else] :
identifier[LOGWARNING] ( literal[string]
literal[string] % identifier[lcfile] )
identifier[fakelcdict] [ literal[string] ][ literal[string] ]= identifier[npr] . identifier[random] ()* literal[int] + literal[int]
keyword[else] :
identifier[LOGWARNING] ( literal[string]
literal[string] %
identifier[lcfile] )
identifier[fakelcdict] [ literal[string] ][ literal[string] ]= identifier[npr] . identifier[random] ()* literal[int]
identifier[fakelcdict] [ literal[string] ][ literal[string] ]= identifier[npr] . identifier[random] ()* literal[int] - literal[int]
identifier[fakelcdict] [ literal[string] ][ literal[string] ]= identifier[npr] . identifier[random] ()* literal[int] + literal[int]
keyword[for] identifier[tcind] , identifier[tcol] keyword[in] identifier[enumerate] ( identifier[timecols] ):
keyword[if] literal[string] keyword[in] identifier[tcol] :
identifier[tcolget] = identifier[tcol] . identifier[split] ( literal[string] )
keyword[else] :
identifier[tcolget] =[ identifier[tcol] ]
keyword[if] identifier[tcol] keyword[not] keyword[in] identifier[fakelcdict] :
identifier[fakelcdict] [ identifier[tcol] ]= identifier[_dict_get] ( identifier[lcdict] , identifier[tcolget] )
identifier[fakelcdict] [ literal[string] ]. identifier[append] ( identifier[tcol] )
keyword[if] identifier[tcind] == literal[int] :
identifier[fakelcdict] [ literal[string] ][ literal[string] ]= identifier[fakelcdict] [ identifier[tcol] ]. identifier[size]
keyword[for] identifier[mcol] keyword[in] identifier[magcols] :
keyword[if] literal[string] keyword[in] identifier[mcol] :
identifier[mcolget] = identifier[mcol] . identifier[split] ( literal[string] )
keyword[else] :
identifier[mcolget] =[ identifier[mcol] ]
keyword[if] identifier[mcol] keyword[not] keyword[in] identifier[fakelcdict] :
identifier[measuredmags] = identifier[_dict_get] ( identifier[lcdict] , identifier[mcolget] )
identifier[measuredmags] = identifier[measuredmags] [ identifier[np] . identifier[isfinite] ( identifier[measuredmags] )]
keyword[if] ( identifier[randomizemags] keyword[and] identifier[magrms] keyword[and] identifier[mcol] keyword[in] identifier[magrms] keyword[and]
literal[string] keyword[in] identifier[magrms] [ identifier[mcol] ] keyword[and]
identifier[magrms] [ identifier[mcol] ][ literal[string] ] keyword[is] keyword[not] keyword[None] ):
identifier[interpfunc] = identifier[magrms] [ identifier[mcol] ][ literal[string] ]
identifier[lcmad] = identifier[interpfunc] ( identifier[fakelcdict] [ literal[string] ][ literal[string] ])
identifier[fakelcdict] [ literal[string] ][ identifier[mcol] ]={
literal[string] : identifier[fakelcdict] [ literal[string] ][ literal[string] ],
literal[string] : identifier[lcmad]
}
keyword[else] :
keyword[if] identifier[measuredmags] . identifier[size] > literal[int] :
identifier[measuredmedian] = identifier[np] . identifier[median] ( identifier[measuredmags] )
identifier[measuredmad] = identifier[np] . identifier[median] (
identifier[np] . identifier[abs] ( identifier[measuredmags] - identifier[measuredmedian] )
)
identifier[fakelcdict] [ literal[string] ][ identifier[mcol] ]={ literal[string] : identifier[measuredmedian] ,
literal[string] : identifier[measuredmad] }
keyword[else] :
keyword[if] ( identifier[magrms] keyword[and] identifier[mcol] keyword[in] identifier[magrms] keyword[and]
literal[string] keyword[in] identifier[magrms] [ identifier[mcol] ] keyword[and]
identifier[magrms] [ identifier[mcol] ][ literal[string] ] keyword[is] keyword[not] keyword[None] ):
identifier[LOGWARNING] (
literal[string]
literal[string]
literal[string]
literal[string] % identifier[lcfile]
)
identifier[interpfunc] = identifier[magrms] [ identifier[mcol] ][ literal[string] ]
identifier[lcmad] = identifier[interpfunc] ( identifier[fakelcdict] [ literal[string] ][ literal[string] ])
identifier[fakelcdict] [ literal[string] ][ identifier[mcol] ]={
literal[string] : identifier[fakelcdict] [ literal[string] ][ literal[string] ],
literal[string] : identifier[lcmad]
}
keyword[else] :
identifier[LOGWARNING] (
literal[string]
literal[string]
literal[string]
literal[string]
% identifier[lcfile]
)
identifier[fakelcdict] [ literal[string] ][ identifier[mcol] ]={
literal[string] : identifier[fakelcdict] [ literal[string] ][ literal[string] ],
literal[string] : identifier[npr] . identifier[random] ()*( literal[int] - literal[int] )+ literal[int]
}
identifier[fakelcdict] [ identifier[mcol] ]= identifier[np] . identifier[full_like] ( identifier[_dict_get] ( identifier[lcdict] , identifier[mcolget] ), literal[int] )
identifier[fakelcdict] [ literal[string] ]. identifier[append] ( identifier[mcol] )
keyword[for] identifier[mcol] , identifier[ecol] keyword[in] identifier[zip] ( identifier[magcols] , identifier[errcols] ):
keyword[if] literal[string] keyword[in] identifier[ecol] :
identifier[ecolget] = identifier[ecol] . identifier[split] ( literal[string] )
keyword[else] :
identifier[ecolget] =[ identifier[ecol] ]
keyword[if] identifier[ecol] keyword[not] keyword[in] identifier[fakelcdict] :
identifier[measurederrs] = identifier[_dict_get] ( identifier[lcdict] , identifier[ecolget] )
identifier[measurederrs] = identifier[measurederrs] [ identifier[np] . identifier[isfinite] ( identifier[measurederrs] )]
keyword[if] ( identifier[randomizemags] keyword[and] identifier[magrms] keyword[and] identifier[mcol] keyword[in] identifier[magrms] keyword[and]
literal[string] keyword[in] identifier[magrms] [ identifier[mcol] ] keyword[and]
identifier[magrms] [ identifier[mcol] ][ literal[string] ] keyword[is] keyword[not] keyword[None] ):
identifier[interpfunc] = identifier[magrms] [ identifier[mcol] ][ literal[string] ]
identifier[lcmad] = identifier[interpfunc] ( identifier[fakelcdict] [ literal[string] ][ literal[string] ])
identifier[fakelcdict] [ literal[string] ][ identifier[ecol] ]={
literal[string] : identifier[lcmad] ,
literal[string] : literal[int] * identifier[lcmad]
}
keyword[else] :
keyword[if] identifier[measurederrs] . identifier[size] > literal[int] :
identifier[measuredmedian] = identifier[np] . identifier[median] ( identifier[measurederrs] )
identifier[measuredmad] = identifier[np] . identifier[median] (
identifier[np] . identifier[abs] ( identifier[measurederrs] - identifier[measuredmedian] )
)
identifier[fakelcdict] [ literal[string] ][ identifier[ecol] ]={ literal[string] : identifier[measuredmedian] ,
literal[string] : identifier[measuredmad] }
keyword[else] :
keyword[if] ( identifier[magrms] keyword[and] identifier[mcol] keyword[in] identifier[magrms] keyword[and]
literal[string] keyword[in] identifier[magrms] [ identifier[mcol] ] keyword[and]
identifier[magrms] [ identifier[mcol] ][ literal[string] ] keyword[is] keyword[not] keyword[None] ):
identifier[LOGWARNING] (
literal[string]
literal[string]
literal[string]
literal[string] % identifier[lcfile]
)
identifier[interpfunc] = identifier[magrms] [ identifier[mcol] ][ literal[string] ]
identifier[lcmad] = identifier[interpfunc] ( identifier[fakelcdict] [ literal[string] ][ literal[string] ])
identifier[fakelcdict] [ literal[string] ][ identifier[ecol] ]={
literal[string] : identifier[lcmad] ,
literal[string] : literal[int] * identifier[lcmad]
}
keyword[else] :
identifier[LOGWARNING] (
literal[string]
literal[string]
literal[string]
literal[string] % identifier[lcfile]
)
identifier[fakelcdict] [ literal[string] ][ identifier[ecol] ]={
literal[string] : identifier[npr] . identifier[random] ()*( literal[int] - literal[int] )+ literal[int] ,
literal[string] : identifier[npr] . identifier[random] ()*( literal[int] - literal[int] )+ literal[int]
}
identifier[fakelcdict] [ identifier[ecol] ]= identifier[np] . identifier[full_like] ( identifier[_dict_get] ( identifier[lcdict] , identifier[ecolget] ), literal[int] )
identifier[fakelcdict] [ literal[string] ]. identifier[append] ( identifier[ecol] )
identifier[fakelcdict] [ literal[string] ]= identifier[timecols]
identifier[fakelcdict] [ literal[string] ]= identifier[magcols]
identifier[fakelcdict] [ literal[string] ]= identifier[errcols]
identifier[fakelcfname] = literal[string] % identifier[fakelcdict] [ literal[string] ]
identifier[fakelcfpath] = identifier[os] . identifier[path] . identifier[abspath] ( identifier[os] . identifier[path] . identifier[join] ( identifier[outdir] , identifier[fakelcfname] ))
keyword[with] identifier[open] ( identifier[fakelcfpath] , literal[string] ) keyword[as] identifier[outfd] :
identifier[pickle] . identifier[dump] ( identifier[fakelcdict] , identifier[outfd] , identifier[protocol] = identifier[pickle] . identifier[HIGHEST_PROTOCOL] )
identifier[LOGINFO] ( literal[string] %( identifier[lcfile] , identifier[fakelcfpath] ))
keyword[return] ( identifier[fakelcfpath] , identifier[fakelcdict] [ literal[string] ],
identifier[fakelcdict] [ literal[string] ], identifier[fakelcdict] [ literal[string] ])
|
def make_fakelc(lcfile, outdir, magrms=None, randomizemags=True, randomizecoords=False, lcformat='hat-sql', lcformatdir=None, timecols=None, magcols=None, errcols=None):
"""This preprocesses an input real LC and sets it up to be a fake LC.
Parameters
----------
lcfile : str
This is an input light curve file that will be used to copy over the
time-base. This will be used to generate the time-base for fake light
curves to provide a realistic simulation of the observing window
function.
outdir : str
        The output directory where the fake light curve will be written.
magrms : dict
This is a dict containing the SDSS r mag-RMS (SDSS rmag-MAD preferably)
relation based on all light curves that the input lcfile is from. This
will be used to generate the median mag and noise corresponding to the
magnitude chosen for this fake LC.
randomizemags : bool
If this is True, then a random mag between the first and last magbin in
magrms will be chosen as the median mag for this light curve. This
choice will be weighted by the mag bin probability obtained from the
magrms kwarg. Otherwise, the median mag will be taken from the input
lcfile's lcdict['objectinfo']['sdssr'] key or a transformed SDSS r mag
generated from the input lcfile's lcdict['objectinfo']['jmag'],
['hmag'], and ['kmag'] keys. The magrms relation for each magcol will be
used to generate Gaussian noise at the correct level for the magbin this
light curve's median mag falls into.
randomizecoords : bool
If this is True, will randomize the RA, DEC of the output fake object
and not copy over the RA/DEC from the real input object.
lcformat : str
This is the `formatkey` associated with your input real light curve
format, which you previously passed in to the `lcproc.register_lcformat`
function. This will be used to look up how to find and read the light
curve specified in `lcfile`.
lcformatdir : str or None
        If this is provided, gives the path to a directory where you've stored
your lcformat description JSONs, other than the usual directories lcproc
knows to search for them in. Use this along with `lcformat` to specify
an LC format JSON file that's not currently registered with lcproc.
timecols : list of str or None
The timecol keys to use from the input lcdict in generating the fake
        light curve. Fake LCs will be generated for each
timecol/magcol/errcol combination in the input light curve.
magcols : list of str or None
The magcol keys to use from the input lcdict in generating the fake
        light curve. Fake LCs will be generated for each
timecol/magcol/errcol combination in the input light curve.
errcols : list of str or None
The errcol keys to use from the input lcdict in generating the fake
        light curve. Fake LCs will be generated for each
timecol/magcol/errcol combination in the input light curve.
Returns
-------
tuple
A tuple of the following form is returned::
(fakelc_fpath,
fakelc_lcdict['columns'],
fakelc_lcdict['objectinfo'],
fakelc_lcdict['moments'])
"""
try:
formatinfo = get_lcformat(lcformat, use_lcformat_dir=lcformatdir)
if formatinfo:
(fileglob, readerfunc, dtimecols, dmagcols, derrcols, magsarefluxes, normfunc) = formatinfo # depends on [control=['if'], data=[]]
else:
LOGERROR("can't figure out the light curve format")
return None # depends on [control=['try'], data=[]]
except Exception as e:
LOGEXCEPTION("can't figure out the light curve format")
return None # depends on [control=['except'], data=[]]
# override the default timecols, magcols, and errcols
# using the ones provided to the function
if timecols is None:
timecols = dtimecols # depends on [control=['if'], data=['timecols']]
if magcols is None:
magcols = dmagcols # depends on [control=['if'], data=['magcols']]
if errcols is None:
errcols = derrcols # depends on [control=['if'], data=['errcols']]
# read in the light curve
lcdict = readerfunc(lcfile)
if isinstance(lcdict, tuple) and isinstance(lcdict[0], dict):
lcdict = lcdict[0] # depends on [control=['if'], data=[]]
# set up the fakelcdict with a randomly assigned objectid
fakeobjectid = sha512(npr.bytes(12)).hexdigest()[-8:]
fakelcdict = {'objectid': fakeobjectid, 'objectinfo': {'objectid': fakeobjectid}, 'columns': [], 'moments': {}, 'origformat': lcformat}
# now, get the actual mag of this object and other info and use that to
# populate the corresponding entries of the fakelcdict objectinfo
if 'objectinfo' in lcdict and isinstance(lcdict['objectinfo'], dict):
objectinfo = lcdict['objectinfo']
# get the RA
if not randomizecoords and 'ra' in objectinfo and (objectinfo['ra'] is not None) and np.isfinite(objectinfo['ra']):
fakelcdict['objectinfo']['ra'] = objectinfo['ra'] # depends on [control=['if'], data=[]]
else:
# if there's no RA available, we'll assign a random one between 0
# and 360.0
LOGWARNING('%s: assigning a random right ascension' % lcfile)
fakelcdict['objectinfo']['ra'] = npr.random() * 360.0
# get the DEC
if not randomizecoords and 'decl' in objectinfo and (objectinfo['decl'] is not None) and np.isfinite(objectinfo['decl']):
fakelcdict['objectinfo']['decl'] = objectinfo['decl'] # depends on [control=['if'], data=[]]
else:
# if there's no DECL available, we'll assign a random one between
# -90.0 and +90.0
LOGWARNING(' %s: assigning a random declination' % lcfile)
fakelcdict['objectinfo']['decl'] = npr.random() * 180.0 - 90.0
# get the SDSS r mag for this object
# this will be used for getting the eventual mag-RMS relation later
if not randomizemags and 'sdssr' in objectinfo and (objectinfo['sdssr'] is not None) and np.isfinite(objectinfo['sdssr']):
fakelcdict['objectinfo']['sdssr'] = objectinfo['sdssr'] # depends on [control=['if'], data=[]]
# if the SDSS r is unavailable, but we have J, H, K: use those to get
# the SDSS r by using transformations
elif not randomizemags and ('jmag' in objectinfo and objectinfo['jmag'] is not None and np.isfinite(objectinfo['jmag'])) and ('hmag' in objectinfo and objectinfo['hmag'] is not None and np.isfinite(objectinfo['hmag'])) and ('kmag' in objectinfo and objectinfo['kmag'] is not None and np.isfinite(objectinfo['kmag'])):
LOGWARNING('used JHK mags to generate an SDSS r mag for %s' % lcfile)
fakelcdict['objectinfo']['sdssr'] = jhk_to_sdssr(objectinfo['jmag'], objectinfo['hmag'], objectinfo['kmag']) # depends on [control=['if'], data=[]]
        # if we're specifically told to randomize mags and we have a mag-RMS
        # relation, choose a random mag weighted by the mag bin probabilities
elif randomizemags and magrms:
LOGWARNING(' %s: assigning a random mag weighted by mag bin probabilities' % lcfile)
magbins = magrms[magcols[0]]['binned_sdssr_median']
binprobs = magrms[magcols[0]]['magbin_probabilities']
# this is the center of the magbin chosen
magbincenter = npr.choice(magbins, size=1, p=binprobs)
# in this magbin, choose between center and -+ 0.25 mag
chosenmag = npr.random() * (magbincenter + 0.25 - (magbincenter - 0.25)) + (magbincenter - 0.25)
fakelcdict['objectinfo']['sdssr'] = np.asscalar(chosenmag) # depends on [control=['if'], data=[]]
else:
# if there are no mags available at all, generate a random mag
# between 8 and 16.0
LOGWARNING(' %s: assigning a random mag from uniform distribution between 8.0 and 16.0' % lcfile)
fakelcdict['objectinfo']['sdssr'] = npr.random() * 8.0 + 8.0 # depends on [control=['if'], data=[]]
else:
# if there's no info available, generate fake info
LOGWARNING('no object information found in %s, generating random ra, decl, sdssr' % lcfile)
fakelcdict['objectinfo']['ra'] = npr.random() * 360.0
fakelcdict['objectinfo']['decl'] = npr.random() * 180.0 - 90.0
fakelcdict['objectinfo']['sdssr'] = npr.random() * 8.0 + 8.0
#
# NOW FILL IN THE TIMES, MAGS, ERRS
#
# get the time columns
for (tcind, tcol) in enumerate(timecols):
if '.' in tcol:
tcolget = tcol.split('.') # depends on [control=['if'], data=['tcol']]
else:
tcolget = [tcol]
if tcol not in fakelcdict:
fakelcdict[tcol] = _dict_get(lcdict, tcolget)
fakelcdict['columns'].append(tcol)
# update the ndet with the first time column's size. it's possible
# that different time columns have different lengths, but that would
# be weird and we won't deal with it for now
if tcind == 0:
fakelcdict['objectinfo']['ndet'] = fakelcdict[tcol].size # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['tcol', 'fakelcdict']] # depends on [control=['for'], data=[]]
# get the mag columns
for mcol in magcols:
if '.' in mcol:
mcolget = mcol.split('.') # depends on [control=['if'], data=['mcol']]
else:
mcolget = [mcol]
# put the mcol in only once
if mcol not in fakelcdict:
measuredmags = _dict_get(lcdict, mcolget)
measuredmags = measuredmags[np.isfinite(measuredmags)]
# if we're randomizing, get the mags from the interpolated mag-RMS
# relation
if randomizemags and magrms and (mcol in magrms) and ('interpolated_magmad' in magrms[mcol]) and (magrms[mcol]['interpolated_magmad'] is not None):
interpfunc = magrms[mcol]['interpolated_magmad']
lcmad = interpfunc(fakelcdict['objectinfo']['sdssr'])
fakelcdict['moments'][mcol] = {'median': fakelcdict['objectinfo']['sdssr'], 'mad': lcmad} # depends on [control=['if'], data=[]]
# if we're not randomizing, get the median and MAD from the light
# curve itself
# we require at least 10 finite measurements
elif measuredmags.size > 9:
measuredmedian = np.median(measuredmags)
measuredmad = np.median(np.abs(measuredmags - measuredmedian))
fakelcdict['moments'][mcol] = {'median': measuredmedian, 'mad': measuredmad} # depends on [control=['if'], data=[]]
# if there aren't enough measurements in this LC, try to get the
# median and RMS from the interpolated mag-RMS relation first
elif magrms and mcol in magrms and ('interpolated_magmad' in magrms[mcol]) and (magrms[mcol]['interpolated_magmad'] is not None):
LOGWARNING('input LC %s does not have enough finite measurements, generating mag moments from fakelc sdssr and the mag-RMS relation' % lcfile)
interpfunc = magrms[mcol]['interpolated_magmad']
lcmad = interpfunc(fakelcdict['objectinfo']['sdssr'])
fakelcdict['moments'][mcol] = {'median': fakelcdict['objectinfo']['sdssr'], 'mad': lcmad} # depends on [control=['if'], data=[]]
else:
# if we don't have the mag-RMS relation either, then we
# can't do anything for this light curve, generate a random
# MAD between 5e-4 and 0.1
                LOGWARNING('input LC %s does not have enough finite measurements and no mag-RMS relation provided, assigning a random MAD between 5.0e-4 and 0.1' % lcfile)
fakelcdict['moments'][mcol] = {'median': fakelcdict['objectinfo']['sdssr'], 'mad': npr.random() * (0.1 - 0.0005) + 0.0005}
# the magnitude column is set to all zeros initially. this will be
# filled in by the add_fakelc_variability function below
fakelcdict[mcol] = np.full_like(_dict_get(lcdict, mcolget), 0.0)
fakelcdict['columns'].append(mcol) # depends on [control=['if'], data=['mcol', 'fakelcdict']] # depends on [control=['for'], data=['mcol']]
# get the err columns
for (mcol, ecol) in zip(magcols, errcols):
if '.' in ecol:
ecolget = ecol.split('.') # depends on [control=['if'], data=['ecol']]
else:
ecolget = [ecol]
if ecol not in fakelcdict:
measurederrs = _dict_get(lcdict, ecolget)
measurederrs = measurederrs[np.isfinite(measurederrs)]
# if we're randomizing, get the errs from the interpolated mag-RMS
# relation
if randomizemags and magrms and (mcol in magrms) and ('interpolated_magmad' in magrms[mcol]) and (magrms[mcol]['interpolated_magmad'] is not None):
interpfunc = magrms[mcol]['interpolated_magmad']
lcmad = interpfunc(fakelcdict['objectinfo']['sdssr'])
# the median of the errs = lcmad
# the mad of the errs is 0.1 x lcmad
fakelcdict['moments'][ecol] = {'median': lcmad, 'mad': 0.1 * lcmad} # depends on [control=['if'], data=[]]
# we require at least 10 finite measurements
# we'll calculate the median and MAD of the errs to use later on
elif measurederrs.size > 9:
measuredmedian = np.median(measurederrs)
measuredmad = np.median(np.abs(measurederrs - measuredmedian))
fakelcdict['moments'][ecol] = {'median': measuredmedian, 'mad': measuredmad} # depends on [control=['if'], data=[]]
elif magrms and mcol in magrms and ('interpolated_magmad' in magrms[mcol]) and (magrms[mcol]['interpolated_magmad'] is not None):
LOGWARNING('input LC %s does not have enough finite measurements, generating err moments from the mag-RMS relation' % lcfile)
interpfunc = magrms[mcol]['interpolated_magmad']
lcmad = interpfunc(fakelcdict['objectinfo']['sdssr'])
fakelcdict['moments'][ecol] = {'median': lcmad, 'mad': 0.1 * lcmad} # depends on [control=['if'], data=[]]
else:
# if we don't have the mag-RMS relation either, then we
# can't do anything for this light curve, generate a random
# MAD between 5e-4 and 0.1
LOGWARNING('input LC %s does not have enough finite measurements and no mag-RMS relation provided, generating errs randomly' % lcfile)
fakelcdict['moments'][ecol] = {'median': npr.random() * (0.01 - 0.0005) + 0.0005, 'mad': npr.random() * (0.01 - 0.0005) + 0.0005}
# the errors column is set to all zeros initially. this will be
# filled in by the add_fakelc_variability function below.
fakelcdict[ecol] = np.full_like(_dict_get(lcdict, ecolget), 0.0)
fakelcdict['columns'].append(ecol) # depends on [control=['if'], data=['ecol', 'fakelcdict']] # depends on [control=['for'], data=[]]
# add the timecols, magcols, errcols to the lcdict
fakelcdict['timecols'] = timecols
fakelcdict['magcols'] = magcols
fakelcdict['errcols'] = errcols
# generate an output file name
fakelcfname = '%s-fakelc.pkl' % fakelcdict['objectid']
fakelcfpath = os.path.abspath(os.path.join(outdir, fakelcfname))
# write this out to the output directory
with open(fakelcfpath, 'wb') as outfd:
pickle.dump(fakelcdict, outfd, protocol=pickle.HIGHEST_PROTOCOL) # depends on [control=['with'], data=['outfd']]
# return the fakelc path, its columns, info, and moments so we can put them
# into a collection DB later on
LOGINFO('real LC %s -> fake LC %s OK' % (lcfile, fakelcfpath))
return (fakelcfpath, fakelcdict['columns'], fakelcdict['objectinfo'], fakelcdict['moments'])
|
def update(self, alert_condition_infra_id, policy_id,
name, condition_type, alert_condition_configuration, enabled=True):
"""
        This API endpoint allows you to update an alert condition for infrastructure
:type alert_condition_infra_id: int
:param alert_condition_infra_id: Alert Condition Infra ID
:type policy_id: int
:param policy_id: Alert policy id
:type name: str
:param name: The name of the alert condition
:type condition_type: str
:param condition_type: The type of the alert condition can be
infra_process_running, infra_metric or infra_host_not_reporting
:type alert_condition_configuration: hash
:param alert_condition_configuration: hash containing config for the alert
:type enabled: bool
:param enabled: Whether to enable that alert condition
:rtype: dict
:return: The JSON response of the API
::
{
"data": {
"id": "integer",
"policy_id": "integer",
"type": "string",
"name": "string",
"enabled": "boolean",
"where_clause": "string",
"comparison": "string",
"filter": "hash",
"critical_threshold": "hash",
"event_type": "string",
"process_where_clause": "string",
"created_at_epoch_millis": "time",
"updated_at_epoch_millis": "time"
}
}
"""
data = {
"data": alert_condition_configuration
}
data['data']['type'] = condition_type
data['data']['policy_id'] = policy_id
data['data']['name'] = name
data['data']['enabled'] = enabled
return self._put(
url='{0}alerts/conditions/{1}'.format(self.URL, alert_condition_infra_id),
headers=self.headers,
data=data
)
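# ---------------------------------------------------------------------------
# Usage sketch (illustrative): `client` stands for an instance of whatever
# class exposes this method; the IDs and the configuration hash below are
# hypothetical placeholders chosen to match the fields listed in the
# docstring, not values taken from this module.
condition_config = {
    'event_type': 'SystemSample',
    'select_value': 'cpuPercent',
    'comparison': 'above',
    'critical_threshold': {
        'value': 90,
        'duration_minutes': 5,
        'time_function': 'all',
    },
    'where_clause': "(hostname LIKE '%db%')",
}
response = client.update(
    alert_condition_infra_id=123456,   # hypothetical condition id
    policy_id=7890,                    # hypothetical policy id
    name='High CPU on DB hosts',
    condition_type='infra_metric',
    alert_condition_configuration=condition_config,
)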
|
def function[update, parameter[self, alert_condition_infra_id, policy_id, name, condition_type, alert_condition_configuration, enabled]]:
constant[
        This API endpoint allows you to update an alert condition for infrastructure
:type alert_condition_infra_id: int
:param alert_condition_infra_id: Alert Condition Infra ID
:type policy_id: int
:param policy_id: Alert policy id
:type name: str
:param name: The name of the alert condition
:type condition_type: str
:param condition_type: The type of the alert condition can be
infra_process_running, infra_metric or infra_host_not_reporting
:type alert_condition_configuration: hash
:param alert_condition_configuration: hash containing config for the alert
:type enabled: bool
:param enabled: Whether to enable that alert condition
:rtype: dict
:return: The JSON response of the API
::
{
"data": {
"id": "integer",
"policy_id": "integer",
"type": "string",
"name": "string",
"enabled": "boolean",
"where_clause": "string",
"comparison": "string",
"filter": "hash",
"critical_threshold": "hash",
"event_type": "string",
"process_where_clause": "string",
"created_at_epoch_millis": "time",
"updated_at_epoch_millis": "time"
}
}
]
variable[data] assign[=] dictionary[[<ast.Constant object at 0x7da1b0e5a650>], [<ast.Name object at 0x7da1b0e5ae60>]]
call[call[name[data]][constant[data]]][constant[type]] assign[=] name[condition_type]
call[call[name[data]][constant[data]]][constant[policy_id]] assign[=] name[policy_id]
call[call[name[data]][constant[data]]][constant[name]] assign[=] name[name]
call[call[name[data]][constant[data]]][constant[enabled]] assign[=] name[enabled]
return[call[name[self]._put, parameter[]]]
|
keyword[def] identifier[update] ( identifier[self] , identifier[alert_condition_infra_id] , identifier[policy_id] ,
identifier[name] , identifier[condition_type] , identifier[alert_condition_configuration] , identifier[enabled] = keyword[True] ):
literal[string]
identifier[data] ={
literal[string] : identifier[alert_condition_configuration]
}
identifier[data] [ literal[string] ][ literal[string] ]= identifier[condition_type]
identifier[data] [ literal[string] ][ literal[string] ]= identifier[policy_id]
identifier[data] [ literal[string] ][ literal[string] ]= identifier[name]
identifier[data] [ literal[string] ][ literal[string] ]= identifier[enabled]
keyword[return] identifier[self] . identifier[_put] (
identifier[url] = literal[string] . identifier[format] ( identifier[self] . identifier[URL] , identifier[alert_condition_infra_id] ),
identifier[headers] = identifier[self] . identifier[headers] ,
identifier[data] = identifier[data]
)
|
def update(self, alert_condition_infra_id, policy_id, name, condition_type, alert_condition_configuration, enabled=True):
"""
        This API endpoint allows you to update an alert condition for infrastructure
:type alert_condition_infra_id: int
:param alert_condition_infra_id: Alert Condition Infra ID
:type policy_id: int
:param policy_id: Alert policy id
:type name: str
:param name: The name of the alert condition
:type condition_type: str
:param condition_type: The type of the alert condition can be
infra_process_running, infra_metric or infra_host_not_reporting
:type alert_condition_configuration: hash
:param alert_condition_configuration: hash containing config for the alert
:type enabled: bool
:param enabled: Whether to enable that alert condition
:rtype: dict
:return: The JSON response of the API
::
{
"data": {
"id": "integer",
"policy_id": "integer",
"type": "string",
"name": "string",
"enabled": "boolean",
"where_clause": "string",
"comparison": "string",
"filter": "hash",
"critical_threshold": "hash",
"event_type": "string",
"process_where_clause": "string",
"created_at_epoch_millis": "time",
"updated_at_epoch_millis": "time"
}
}
"""
data = {'data': alert_condition_configuration}
data['data']['type'] = condition_type
data['data']['policy_id'] = policy_id
data['data']['name'] = name
data['data']['enabled'] = enabled
return self._put(url='{0}alerts/conditions/{1}'.format(self.URL, alert_condition_infra_id), headers=self.headers, data=data)
|
def _update_docinfo(self):
"""Update the PDF's DocumentInfo dictionary to match XMP metadata
The standard mapping is described here:
https://www.pdfa.org/pdfa-metadata-xmp-rdf-dublin-core/
"""
self._pdf.docinfo # Touch object to ensure it exists
for uri, element, docinfo_name, converter in self.DOCINFO_MAPPING:
qname = QName(uri, element)
try:
value = self[qname]
except KeyError:
if docinfo_name in self._pdf.docinfo:
del self._pdf.docinfo[docinfo_name]
continue
if converter:
try:
value = converter.docinfo_from_xmp(value)
except ValueError:
warn(
"The DocumentInfo field {} could not be updated from XMP".format(
docinfo_name
)
)
value = None
if value is None:
if docinfo_name in self._pdf.docinfo:
del self._pdf.docinfo[docinfo_name]
continue
value = re_xml_illegal_chars.sub('', value)
try:
# Try to save pure ASCII
self._pdf.docinfo[docinfo_name] = value.encode('ascii')
except UnicodeEncodeError:
# qpdf will serialize this as a UTF-16 with BOM string
self._pdf.docinfo[docinfo_name] = value
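# ---------------------------------------------------------------------------
# Shape sketch (illustrative): _update_docinfo assumes DOCINFO_MAPPING is an
# iterable of (namespace URI, XMP element name, DocumentInfo key, converter)
# tuples. The entries below are hypothetical examples of that shape only; a
# converter of None simply falls through the `if converter:` branch.
EXAMPLE_DOCINFO_MAPPING = [
    ('http://purl.org/dc/elements/1.1/', 'creator', '/Author', None),
    ('http://ns.adobe.com/xap/1.0/', 'CreateDate', '/CreationDate', None),
]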
|
def function[_update_docinfo, parameter[self]]:
constant[Update the PDF's DocumentInfo dictionary to match XMP metadata
The standard mapping is described here:
https://www.pdfa.org/pdfa-metadata-xmp-rdf-dublin-core/
]
name[self]._pdf.docinfo
for taget[tuple[[<ast.Name object at 0x7da20c7c82e0>, <ast.Name object at 0x7da20c7c9270>, <ast.Name object at 0x7da20c7c9ae0>, <ast.Name object at 0x7da20c7ca410>]]] in starred[name[self].DOCINFO_MAPPING] begin[:]
variable[qname] assign[=] call[name[QName], parameter[name[uri], name[element]]]
<ast.Try object at 0x7da20c7c80a0>
if name[converter] begin[:]
<ast.Try object at 0x7da20c7cabf0>
if compare[name[value] is constant[None]] begin[:]
if compare[name[docinfo_name] in name[self]._pdf.docinfo] begin[:]
<ast.Delete object at 0x7da20c7c9420>
continue
variable[value] assign[=] call[name[re_xml_illegal_chars].sub, parameter[constant[], name[value]]]
<ast.Try object at 0x7da20c7c8af0>
|
keyword[def] identifier[_update_docinfo] ( identifier[self] ):
literal[string]
identifier[self] . identifier[_pdf] . identifier[docinfo]
keyword[for] identifier[uri] , identifier[element] , identifier[docinfo_name] , identifier[converter] keyword[in] identifier[self] . identifier[DOCINFO_MAPPING] :
identifier[qname] = identifier[QName] ( identifier[uri] , identifier[element] )
keyword[try] :
identifier[value] = identifier[self] [ identifier[qname] ]
keyword[except] identifier[KeyError] :
keyword[if] identifier[docinfo_name] keyword[in] identifier[self] . identifier[_pdf] . identifier[docinfo] :
keyword[del] identifier[self] . identifier[_pdf] . identifier[docinfo] [ identifier[docinfo_name] ]
keyword[continue]
keyword[if] identifier[converter] :
keyword[try] :
identifier[value] = identifier[converter] . identifier[docinfo_from_xmp] ( identifier[value] )
keyword[except] identifier[ValueError] :
identifier[warn] (
literal[string] . identifier[format] (
identifier[docinfo_name]
)
)
identifier[value] = keyword[None]
keyword[if] identifier[value] keyword[is] keyword[None] :
keyword[if] identifier[docinfo_name] keyword[in] identifier[self] . identifier[_pdf] . identifier[docinfo] :
keyword[del] identifier[self] . identifier[_pdf] . identifier[docinfo] [ identifier[docinfo_name] ]
keyword[continue]
identifier[value] = identifier[re_xml_illegal_chars] . identifier[sub] ( literal[string] , identifier[value] )
keyword[try] :
identifier[self] . identifier[_pdf] . identifier[docinfo] [ identifier[docinfo_name] ]= identifier[value] . identifier[encode] ( literal[string] )
keyword[except] identifier[UnicodeEncodeError] :
identifier[self] . identifier[_pdf] . identifier[docinfo] [ identifier[docinfo_name] ]= identifier[value]
|
def _update_docinfo(self):
"""Update the PDF's DocumentInfo dictionary to match XMP metadata
The standard mapping is described here:
https://www.pdfa.org/pdfa-metadata-xmp-rdf-dublin-core/
"""
self._pdf.docinfo # Touch object to ensure it exists
for (uri, element, docinfo_name, converter) in self.DOCINFO_MAPPING:
qname = QName(uri, element)
try:
value = self[qname] # depends on [control=['try'], data=[]]
except KeyError:
if docinfo_name in self._pdf.docinfo:
del self._pdf.docinfo[docinfo_name] # depends on [control=['if'], data=['docinfo_name']]
continue # depends on [control=['except'], data=[]]
if converter:
try:
value = converter.docinfo_from_xmp(value) # depends on [control=['try'], data=[]]
except ValueError:
warn('The DocumentInfo field {} could not be updated from XMP'.format(docinfo_name))
value = None # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
if value is None:
if docinfo_name in self._pdf.docinfo:
del self._pdf.docinfo[docinfo_name] # depends on [control=['if'], data=['docinfo_name']]
continue # depends on [control=['if'], data=[]]
value = re_xml_illegal_chars.sub('', value)
try:
# Try to save pure ASCII
self._pdf.docinfo[docinfo_name] = value.encode('ascii') # depends on [control=['try'], data=[]]
except UnicodeEncodeError:
# qpdf will serialize this as a UTF-16 with BOM string
self._pdf.docinfo[docinfo_name] = value # depends on [control=['except'], data=[]] # depends on [control=['for'], data=[]]
|
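Usage sketch for the record above: a self-contained rework of the XMP-to-DocInfo synchronisation loop. QName, DOCINFO_MAPPING, the converter, and the plain-dict docinfo are stand-ins for the real pikepdf-style objects, which this record does not define; the ASCII-encode fallback is omitted and the membership-test-plus-del is collapsed into dict.pop.

import re
from collections import namedtuple

QName = namedtuple('QName', 'uri element')
re_xml_illegal_chars = re.compile('[\x00-\x08\x0b\x0c\x0e-\x1f]')

class DateConverter:
    @staticmethod
    def docinfo_from_xmp(value):
        # Hypothetical converter: accept only ISO-like dates
        if not re.match(r'\d{4}-\d{2}-\d{2}', value):
            raise ValueError(value)
        return value.replace('-', '')

DOCINFO_MAPPING = [
    ('http://purl.org/dc/elements/1.1/', 'title', '/Title', None),
    ('http://ns.adobe.com/xap/1.0/', 'CreateDate', '/CreationDate', DateConverter),
]

def update_docinfo(xmp, docinfo):
    """Mirror XMP values into a DocumentInfo-style dict, deleting stale keys."""
    for uri, element, docinfo_name, converter in DOCINFO_MAPPING:
        try:
            value = xmp[QName(uri, element)]
        except KeyError:
            docinfo.pop(docinfo_name, None)  # no XMP value: drop stale entry
            continue
        if converter:
            try:
                value = converter.docinfo_from_xmp(value)
            except ValueError:
                value = None  # unconvertible: treated like a missing value
        if value is None:
            docinfo.pop(docinfo_name, None)
            continue
        docinfo[docinfo_name] = re_xml_illegal_chars.sub('', value)

xmp = {QName('http://purl.org/dc/elements/1.1/', 'title'): 'Example'}
docinfo = {'/CreationDate': 'stale'}
update_docinfo(xmp, docinfo)
print(docinfo)  # {'/Title': 'Example'}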
def time(self):
"Return the time part, with tzinfo None."
return time(self.hour, self.minute, self.second, self.microsecond)
|
def function[time, parameter[self]]:
constant[Return the time part, with tzinfo None.]
return[call[name[time], parameter[name[self].hour, name[self].minute, name[self].second, name[self].microsecond]]]
|
keyword[def] identifier[time] ( identifier[self] ):
literal[string]
keyword[return] identifier[time] ( identifier[self] . identifier[hour] , identifier[self] . identifier[minute] , identifier[self] . identifier[second] , identifier[self] . identifier[microsecond] )
|
def time(self):
"""Return the time part, with tzinfo None."""
return time(self.hour, self.minute, self.second, self.microsecond)
|
def create_oqhazardlib_source(self, tom, mesh_spacing, area_discretisation,
use_defaults=False):
"""
Converts the source model into an instance of the :class:
openquake.hazardlib.source.area.AreaSource
:param tom:
        Temporal Occurrence model as an instance of :class:
openquake.hazardlib.tom.TOM
:param float mesh_spacing:
Mesh spacing
"""
if not self.mfd:
raise ValueError("Cannot write to hazardlib without MFD")
return AreaSource(
self.id,
self.name,
self.trt,
self.mfd,
mesh_spacing,
conv.mag_scale_rel_to_hazardlib(self.mag_scale_rel, use_defaults),
conv.render_aspect_ratio(self.rupt_aspect_ratio, use_defaults),
tom,
self.upper_depth,
self.lower_depth,
conv.npd_to_pmf(self.nodal_plane_dist, use_defaults),
conv.hdd_to_pmf(self.hypo_depth_dist, use_defaults),
self.geometry,
area_discretisation)
|
def function[create_oqhazardlib_source, parameter[self, tom, mesh_spacing, area_discretisation, use_defaults]]:
constant[
Converts the source model into an instance of the :class:
openquake.hazardlib.source.area.AreaSource
:param tom:
    Temporal Occurrence model as an instance of :class:
openquake.hazardlib.tom.TOM
:param float mesh_spacing:
Mesh spacing
]
if <ast.UnaryOp object at 0x7da20c794be0> begin[:]
<ast.Raise object at 0x7da20c7947f0>
return[call[name[AreaSource], parameter[name[self].id, name[self].name, name[self].trt, name[self].mfd, name[mesh_spacing], call[name[conv].mag_scale_rel_to_hazardlib, parameter[name[self].mag_scale_rel, name[use_defaults]]], call[name[conv].render_aspect_ratio, parameter[name[self].rupt_aspect_ratio, name[use_defaults]]], name[tom], name[self].upper_depth, name[self].lower_depth, call[name[conv].npd_to_pmf, parameter[name[self].nodal_plane_dist, name[use_defaults]]], call[name[conv].hdd_to_pmf, parameter[name[self].hypo_depth_dist, name[use_defaults]]], name[self].geometry, name[area_discretisation]]]]
|
keyword[def] identifier[create_oqhazardlib_source] ( identifier[self] , identifier[tom] , identifier[mesh_spacing] , identifier[area_discretisation] ,
identifier[use_defaults] = keyword[False] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[mfd] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[return] identifier[AreaSource] (
identifier[self] . identifier[id] ,
identifier[self] . identifier[name] ,
identifier[self] . identifier[trt] ,
identifier[self] . identifier[mfd] ,
identifier[mesh_spacing] ,
identifier[conv] . identifier[mag_scale_rel_to_hazardlib] ( identifier[self] . identifier[mag_scale_rel] , identifier[use_defaults] ),
identifier[conv] . identifier[render_aspect_ratio] ( identifier[self] . identifier[rupt_aspect_ratio] , identifier[use_defaults] ),
identifier[tom] ,
identifier[self] . identifier[upper_depth] ,
identifier[self] . identifier[lower_depth] ,
identifier[conv] . identifier[npd_to_pmf] ( identifier[self] . identifier[nodal_plane_dist] , identifier[use_defaults] ),
identifier[conv] . identifier[hdd_to_pmf] ( identifier[self] . identifier[hypo_depth_dist] , identifier[use_defaults] ),
identifier[self] . identifier[geometry] ,
identifier[area_discretisation] )
|
def create_oqhazardlib_source(self, tom, mesh_spacing, area_discretisation, use_defaults=False):
"""
Converts the source model into an instance of the :class:
openquake.hazardlib.source.area.AreaSource
:param tom:
    Temporal Occurrence model as an instance of :class:
openquake.hazardlib.tom.TOM
:param float mesh_spacing:
Mesh spacing
"""
if not self.mfd:
raise ValueError('Cannot write to hazardlib without MFD') # depends on [control=['if'], data=[]]
return AreaSource(self.id, self.name, self.trt, self.mfd, mesh_spacing, conv.mag_scale_rel_to_hazardlib(self.mag_scale_rel, use_defaults), conv.render_aspect_ratio(self.rupt_aspect_ratio, use_defaults), tom, self.upper_depth, self.lower_depth, conv.npd_to_pmf(self.nodal_plane_dist, use_defaults), conv.hdd_to_pmf(self.hypo_depth_dist, use_defaults), self.geometry, area_discretisation)
|
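A minimal sketch of the guard-then-delegate pattern above, using hypothetical stand-in classes rather than the real openquake.hazardlib API (whose constructor arguments are not reproduced here): the export refuses to proceed until the magnitude-frequency distribution is set.

class AreaSourceStub:
    """Illustrative stand-in for openquake.hazardlib.source.area.AreaSource."""
    def __init__(self, source_id, name, trt, mfd, mesh_spacing, area_discretisation):
        self.source_id, self.name, self.trt = source_id, name, trt
        self.mfd, self.mesh_spacing = mfd, mesh_spacing
        self.area_discretisation = area_discretisation

class AreaSourceModel:
    def __init__(self, source_id, name, trt, mfd=None):
        self.id, self.name, self.trt, self.mfd = source_id, name, trt, mfd

    def create_source(self, mesh_spacing, area_discretisation):
        # Same guard as create_oqhazardlib_source: no MFD, no export
        if not self.mfd:
            raise ValueError("Cannot write to hazardlib without MFD")
        return AreaSourceStub(self.id, self.name, self.trt, self.mfd,
                              mesh_spacing, area_discretisation)

src = AreaSourceModel("AS1", "zone 1", "Active Shallow Crust", mfd={"a": 4.0, "b": 1.0})
print(src.create_source(mesh_spacing=2.0, area_discretisation=10.0).name)  # zone 1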
def render_entries(cls, entries, additional_columns=None,
only_show=None, numbers=False):
"""
Pretty-prints a list of entries. If the window is wide enough to
support printing as a table, runs the `print_table.render_table`
function on the table. Otherwise, constructs a line-by-line
        representation.
:param entries: A list of entries.
:type entries: [:py:class:`HostEntry`]
:param additional_columns: Columns to show in addition to defaults.
:type additional_columns: ``list`` of ``str``
:param only_show: A specific list of columns to show.
:type only_show: ``NoneType`` or ``list`` of ``str``
:param numbers: Whether to include a number column.
:type numbers: ``bool``
:return: A pretty-printed string.
:rtype: ``str``
"""
additional_columns = additional_columns or []
if only_show is not None:
columns = _uniquify(only_show)
else:
columns = _uniquify(cls.DEFAULT_COLUMNS + additional_columns)
top_row = [cls.prettyname(col) for col in columns]
table = [top_row] if numbers is False else [[''] + top_row]
for i, entry in enumerate(entries):
row = [entry._get_attrib(c, convert_to_str=True) for c in columns]
table.append(row if numbers is False else [i] + row)
cur_width = get_current_terminal_width()
colors = [get_color_hash(c, MIN_COLOR_BRIGHT, MAX_COLOR_BRIGHT)
for c in columns]
if cur_width >= get_table_width(table):
return render_table(table,
column_colors=colors if numbers is False
else [green] + colors)
else:
result = []
first_index = 1 if numbers is True else 0
for row in table[1:]:
rep = [green('%s:' % row[0] if numbers is True else '-----')]
for i, val in enumerate(row[first_index:]):
color = colors[i-1 if numbers is True else i]
name = columns[i]
rep.append(' %s: %s' % (name, color(val)))
result.append('\n'.join(rep))
return '\n'.join(result)
|
def function[render_entries, parameter[cls, entries, additional_columns, only_show, numbers]]:
constant[
Pretty-prints a list of entries. If the window is wide enough to
support printing as a table, runs the `print_table.render_table`
function on the table. Otherwise, constructs a line-by-line
    representation.
:param entries: A list of entries.
:type entries: [:py:class:`HostEntry`]
:param additional_columns: Columns to show in addition to defaults.
:type additional_columns: ``list`` of ``str``
:param only_show: A specific list of columns to show.
:type only_show: ``NoneType`` or ``list`` of ``str``
:param numbers: Whether to include a number column.
:type numbers: ``bool``
:return: A pretty-printed string.
:rtype: ``str``
]
variable[additional_columns] assign[=] <ast.BoolOp object at 0x7da20c76d1b0>
if compare[name[only_show] is_not constant[None]] begin[:]
variable[columns] assign[=] call[name[_uniquify], parameter[name[only_show]]]
variable[top_row] assign[=] <ast.ListComp object at 0x7da20c76f7c0>
variable[table] assign[=] <ast.IfExp object at 0x7da20c76f790>
for taget[tuple[[<ast.Name object at 0x7da20c76cdf0>, <ast.Name object at 0x7da20c76f280>]]] in starred[call[name[enumerate], parameter[name[entries]]]] begin[:]
variable[row] assign[=] <ast.ListComp object at 0x7da20c76f130>
call[name[table].append, parameter[<ast.IfExp object at 0x7da20c76ee90>]]
variable[cur_width] assign[=] call[name[get_current_terminal_width], parameter[]]
variable[colors] assign[=] <ast.ListComp object at 0x7da20c76c0a0>
if compare[name[cur_width] greater_or_equal[>=] call[name[get_table_width], parameter[name[table]]]] begin[:]
return[call[name[render_table], parameter[name[table]]]]
|
keyword[def] identifier[render_entries] ( identifier[cls] , identifier[entries] , identifier[additional_columns] = keyword[None] ,
identifier[only_show] = keyword[None] , identifier[numbers] = keyword[False] ):
literal[string]
identifier[additional_columns] = identifier[additional_columns] keyword[or] []
keyword[if] identifier[only_show] keyword[is] keyword[not] keyword[None] :
identifier[columns] = identifier[_uniquify] ( identifier[only_show] )
keyword[else] :
identifier[columns] = identifier[_uniquify] ( identifier[cls] . identifier[DEFAULT_COLUMNS] + identifier[additional_columns] )
identifier[top_row] =[ identifier[cls] . identifier[prettyname] ( identifier[col] ) keyword[for] identifier[col] keyword[in] identifier[columns] ]
identifier[table] =[ identifier[top_row] ] keyword[if] identifier[numbers] keyword[is] keyword[False] keyword[else] [[ literal[string] ]+ identifier[top_row] ]
keyword[for] identifier[i] , identifier[entry] keyword[in] identifier[enumerate] ( identifier[entries] ):
identifier[row] =[ identifier[entry] . identifier[_get_attrib] ( identifier[c] , identifier[convert_to_str] = keyword[True] ) keyword[for] identifier[c] keyword[in] identifier[columns] ]
identifier[table] . identifier[append] ( identifier[row] keyword[if] identifier[numbers] keyword[is] keyword[False] keyword[else] [ identifier[i] ]+ identifier[row] )
identifier[cur_width] = identifier[get_current_terminal_width] ()
identifier[colors] =[ identifier[get_color_hash] ( identifier[c] , identifier[MIN_COLOR_BRIGHT] , identifier[MAX_COLOR_BRIGHT] )
keyword[for] identifier[c] keyword[in] identifier[columns] ]
keyword[if] identifier[cur_width] >= identifier[get_table_width] ( identifier[table] ):
keyword[return] identifier[render_table] ( identifier[table] ,
identifier[column_colors] = identifier[colors] keyword[if] identifier[numbers] keyword[is] keyword[False]
keyword[else] [ identifier[green] ]+ identifier[colors] )
keyword[else] :
identifier[result] =[]
identifier[first_index] = literal[int] keyword[if] identifier[numbers] keyword[is] keyword[True] keyword[else] literal[int]
keyword[for] identifier[row] keyword[in] identifier[table] [ literal[int] :]:
identifier[rep] =[ identifier[green] ( literal[string] % identifier[row] [ literal[int] ] keyword[if] identifier[numbers] keyword[is] keyword[True] keyword[else] literal[string] )]
keyword[for] identifier[i] , identifier[val] keyword[in] identifier[enumerate] ( identifier[row] [ identifier[first_index] :]):
identifier[color] = identifier[colors] [ identifier[i] - literal[int] keyword[if] identifier[numbers] keyword[is] keyword[True] keyword[else] identifier[i] ]
identifier[name] = identifier[columns] [ identifier[i] ]
identifier[rep] . identifier[append] ( literal[string] %( identifier[name] , identifier[color] ( identifier[val] )))
identifier[result] . identifier[append] ( literal[string] . identifier[join] ( identifier[rep] ))
keyword[return] literal[string] . identifier[join] ( identifier[result] )
|
def render_entries(cls, entries, additional_columns=None, only_show=None, numbers=False):
"""
Pretty-prints a list of entries. If the window is wide enough to
support printing as a table, runs the `print_table.render_table`
function on the table. Otherwise, constructs a line-by-line
    representation.
:param entries: A list of entries.
:type entries: [:py:class:`HostEntry`]
:param additional_columns: Columns to show in addition to defaults.
:type additional_columns: ``list`` of ``str``
:param only_show: A specific list of columns to show.
:type only_show: ``NoneType`` or ``list`` of ``str``
:param numbers: Whether to include a number column.
:type numbers: ``bool``
:return: A pretty-printed string.
:rtype: ``str``
"""
additional_columns = additional_columns or []
if only_show is not None:
columns = _uniquify(only_show) # depends on [control=['if'], data=['only_show']]
else:
columns = _uniquify(cls.DEFAULT_COLUMNS + additional_columns)
top_row = [cls.prettyname(col) for col in columns]
table = [top_row] if numbers is False else [[''] + top_row]
for (i, entry) in enumerate(entries):
row = [entry._get_attrib(c, convert_to_str=True) for c in columns]
table.append(row if numbers is False else [i] + row) # depends on [control=['for'], data=[]]
cur_width = get_current_terminal_width()
colors = [get_color_hash(c, MIN_COLOR_BRIGHT, MAX_COLOR_BRIGHT) for c in columns]
if cur_width >= get_table_width(table):
return render_table(table, column_colors=colors if numbers is False else [green] + colors) # depends on [control=['if'], data=[]]
else:
result = []
first_index = 1 if numbers is True else 0
for row in table[1:]:
rep = [green('%s:' % row[0] if numbers is True else '-----')]
for (i, val) in enumerate(row[first_index:]):
color = colors[i - 1 if numbers is True else i]
name = columns[i]
rep.append(' %s: %s' % (name, color(val))) # depends on [control=['for'], data=[]]
result.append('\n'.join(rep)) # depends on [control=['for'], data=['row']]
return '\n'.join(result)
|
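A self-contained sketch of the width-based fallback that render_entries implements. get_current_terminal_width, render_table, and the colour helpers are external to this record, so shutil.get_terminal_size stands in and colouring is dropped.

import shutil

def render_rows(columns, rows, width=None):
    """Render as an aligned table if it fits the terminal, else line-by-line."""
    if width is None:
        width = shutil.get_terminal_size().columns
    widths = [max(len(str(v)) for v in [col] + [r[i] for r in rows])
              for i, col in enumerate(columns)]
    table_width = sum(widths) + 2 * (len(columns) - 1)
    if table_width <= width:
        fmt = '  '.join('{:%d}' % w for w in widths)
        lines = [fmt.format(*columns)]
        lines += [fmt.format(*(str(v) for v in r)) for r in rows]
        return '\n'.join(lines)
    # Narrow terminal: one "name: value" block per row, like the fallback branch
    blocks = ['\n'.join('%s: %s' % (c, v) for c, v in zip(columns, r)) for r in rows]
    return '\n-----\n'.join(blocks)

print(render_rows(['host', 'ip'], [['web-1', '10.0.0.2'], ['db-1', '10.0.0.3']]))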
def _create_ids(self, home_teams, away_teams):
"""
Creates IDs for both players/teams
"""
categories = pd.Categorical(np.append(home_teams,away_teams))
home_id, away_id = categories.codes[0:int(len(categories)/2)], categories.codes[int(len(categories)/2):len(categories)+1]
return home_id, away_id
|
def function[_create_ids, parameter[self, home_teams, away_teams]]:
constant[
Creates IDs for both players/teams
]
variable[categories] assign[=] call[name[pd].Categorical, parameter[call[name[np].append, parameter[name[home_teams], name[away_teams]]]]]
<ast.Tuple object at 0x7da1b170edd0> assign[=] tuple[[<ast.Subscript object at 0x7da1b170fca0>, <ast.Subscript object at 0x7da1b170fc10>]]
return[tuple[[<ast.Name object at 0x7da1b170e9b0>, <ast.Name object at 0x7da1b170de10>]]]
|
keyword[def] identifier[_create_ids] ( identifier[self] , identifier[home_teams] , identifier[away_teams] ):
literal[string]
identifier[categories] = identifier[pd] . identifier[Categorical] ( identifier[np] . identifier[append] ( identifier[home_teams] , identifier[away_teams] ))
identifier[home_id] , identifier[away_id] = identifier[categories] . identifier[codes] [ literal[int] : identifier[int] ( identifier[len] ( identifier[categories] )/ literal[int] )], identifier[categories] . identifier[codes] [ identifier[int] ( identifier[len] ( identifier[categories] )/ literal[int] ): identifier[len] ( identifier[categories] )+ literal[int] ]
keyword[return] identifier[home_id] , identifier[away_id]
|
def _create_ids(self, home_teams, away_teams):
"""
Creates IDs for both players/teams
"""
categories = pd.Categorical(np.append(home_teams, away_teams))
(home_id, away_id) = (categories.codes[0:int(len(categories) / 2)], categories.codes[int(len(categories) / 2):len(categories) + 1])
return (home_id, away_id)
|
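Runnable illustration of the shared-encoding trick in _create_ids: both columns are appended before categorising so the same team always maps to the same code, and int(len(...)/2) is just a midpoint split (written with plain slices here, assuming equal-length inputs as the function itself does).

import numpy as np
import pandas as pd

home = np.array(['Arsenal', 'Chelsea', 'Leeds'])
away = np.array(['Chelsea', 'Leeds', 'Arsenal'])

codes = pd.Categorical(np.append(home, away)).codes
home_id, away_id = codes[:len(home)], codes[len(home):]
print(home_id, away_id)  # [0 1 2] [1 2 0]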
def load_message_classes():
"""Load the 'fedora.messages' entry points and register the message classes."""
for message in pkg_resources.iter_entry_points("fedora.messages"):
cls = message.load()
_log.info(
"Registering the '%s' key as the '%r' class in the Message "
"class registry",
message.name,
cls,
)
_schema_name_to_class[message.name] = cls
_class_to_schema_name[cls] = message.name
global _registry_loaded
_registry_loaded = True
|
def function[load_message_classes, parameter[]]:
constant[Load the 'fedora.messages' entry points and register the message classes.]
for taget[name[message]] in starred[call[name[pkg_resources].iter_entry_points, parameter[constant[fedora.messages]]]] begin[:]
variable[cls] assign[=] call[name[message].load, parameter[]]
call[name[_log].info, parameter[constant[Registering the '%s' key as the '%r' class in the Message class registry], name[message].name, name[cls]]]
call[name[_schema_name_to_class]][name[message].name] assign[=] name[cls]
call[name[_class_to_schema_name]][name[cls]] assign[=] name[message].name
<ast.Global object at 0x7da1b05da710>
variable[_registry_loaded] assign[=] constant[True]
|
keyword[def] identifier[load_message_classes] ():
literal[string]
keyword[for] identifier[message] keyword[in] identifier[pkg_resources] . identifier[iter_entry_points] ( literal[string] ):
identifier[cls] = identifier[message] . identifier[load] ()
identifier[_log] . identifier[info] (
literal[string]
literal[string] ,
identifier[message] . identifier[name] ,
identifier[cls] ,
)
identifier[_schema_name_to_class] [ identifier[message] . identifier[name] ]= identifier[cls]
identifier[_class_to_schema_name] [ identifier[cls] ]= identifier[message] . identifier[name]
keyword[global] identifier[_registry_loaded]
identifier[_registry_loaded] = keyword[True]
|
def load_message_classes():
"""Load the 'fedora.messages' entry points and register the message classes."""
for message in pkg_resources.iter_entry_points('fedora.messages'):
cls = message.load()
_log.info("Registering the '%s' key as the '%r' class in the Message class registry", message.name, cls)
_schema_name_to_class[message.name] = cls
_class_to_schema_name[cls] = message.name # depends on [control=['for'], data=['message']]
global _registry_loaded
_registry_loaded = True
|
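The registry the loop fills is just a pair of dicts keyed in both directions. A minimal sketch with manual registration in place of the pkg_resources entry-point scan, so it runs without any installed plugins; register and BaseMessage are hypothetical names.

import logging

_log = logging.getLogger(__name__)
_schema_name_to_class = {}
_class_to_schema_name = {}

def register(name, cls):
    # One iteration of the entry-point loop, done by hand
    _log.info("Registering the '%s' key as the '%r' class", name, cls)
    _schema_name_to_class[name] = cls
    _class_to_schema_name[cls] = name

class BaseMessage:
    pass

register('base.message', BaseMessage)
assert _schema_name_to_class['base.message'] is BaseMessage
assert _class_to_schema_name[BaseMessage] == 'base.message'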
def to_dict(self, short_pred=True, properties=True):
"""
Encode the Dmrs as a dictionary suitable for JSON serialization.
"""
qs = set(self.nodeids(quantifier=True))
def _lnk(obj): return {'from': obj.cfrom, 'to': obj.cto}
def _node(node, short_pred=True):
p = node.pred.short_form() if short_pred else node.pred.string
d = dict(nodeid=node.nodeid, predicate=p)
if node.lnk is not None: d['lnk'] = _lnk(node)
if properties and node.sortinfo:
if node.nodeid not in qs:
d['sortinfo'] = node.sortinfo
if node.surface is not None: d['surface'] = node.surface
if node.base is not None: d['base'] = node.base
if node.carg is not None: d['carg'] = node.carg
return d
def _link(link): return {
'from': link.start, 'to': link.end,
'rargname': link.rargname, 'post': link.post
}
d = dict(
nodes=[_node(n) for n in nodes(self)],
links=[_link(l) for l in links(self)]
)
# if self.top is not None: ... currently handled by links
if self.index is not None:
idx = self.nodeid(self.index)
if idx is not None:
d['index'] = idx
if self.xarg is not None:
            xarg = self.nodeid(self.xarg)
            if xarg is not None:
                d['xarg'] = xarg
if self.lnk is not None: d['lnk'] = _lnk(self)
if self.surface is not None: d['surface'] = self.surface
if self.identifier is not None: d['identifier'] = self.identifier
return d
|
def function[to_dict, parameter[self, short_pred, properties]]:
constant[
Encode the Dmrs as a dictionary suitable for JSON serialization.
]
variable[qs] assign[=] call[name[set], parameter[call[name[self].nodeids, parameter[]]]]
def function[_lnk, parameter[obj]]:
return[dictionary[[<ast.Constant object at 0x7da1b0401630>, <ast.Constant object at 0x7da1b04002b0>], [<ast.Attribute object at 0x7da1b04010f0>, <ast.Attribute object at 0x7da1b0403820>]]]
def function[_node, parameter[node, short_pred]]:
variable[p] assign[=] <ast.IfExp object at 0x7da1b0400d30>
variable[d] assign[=] call[name[dict], parameter[]]
if compare[name[node].lnk is_not constant[None]] begin[:]
call[name[d]][constant[lnk]] assign[=] call[name[_lnk], parameter[name[node]]]
if <ast.BoolOp object at 0x7da1b04018a0> begin[:]
if compare[name[node].nodeid <ast.NotIn object at 0x7da2590d7190> name[qs]] begin[:]
call[name[d]][constant[sortinfo]] assign[=] name[node].sortinfo
if compare[name[node].surface is_not constant[None]] begin[:]
call[name[d]][constant[surface]] assign[=] name[node].surface
if compare[name[node].base is_not constant[None]] begin[:]
call[name[d]][constant[base]] assign[=] name[node].base
if compare[name[node].carg is_not constant[None]] begin[:]
call[name[d]][constant[carg]] assign[=] name[node].carg
return[name[d]]
def function[_link, parameter[link]]:
return[dictionary[[<ast.Constant object at 0x7da1b0403430>, <ast.Constant object at 0x7da1b0402a70>, <ast.Constant object at 0x7da1b0400160>, <ast.Constant object at 0x7da1b0401750>], [<ast.Attribute object at 0x7da1b0401d20>, <ast.Attribute object at 0x7da1b0401480>, <ast.Attribute object at 0x7da1b04030a0>, <ast.Attribute object at 0x7da1b0401570>]]]
variable[d] assign[=] call[name[dict], parameter[]]
if compare[name[self].index is_not constant[None]] begin[:]
variable[idx] assign[=] call[name[self].nodeid, parameter[name[self].index]]
if compare[name[idx] is_not constant[None]] begin[:]
call[name[d]][constant[index]] assign[=] name[idx]
if compare[name[self].xarg is_not constant[None]] begin[:]
        variable[xarg] assign[=] call[name[self].nodeid, parameter[name[self].xarg]]
if compare[name[xarg] is_not constant[None]] begin[:]
            call[name[d]][constant[xarg]] assign[=] name[xarg]
if compare[name[self].lnk is_not constant[None]] begin[:]
call[name[d]][constant[lnk]] assign[=] call[name[_lnk], parameter[name[self]]]
if compare[name[self].surface is_not constant[None]] begin[:]
call[name[d]][constant[surface]] assign[=] name[self].surface
if compare[name[self].identifier is_not constant[None]] begin[:]
call[name[d]][constant[identifier]] assign[=] name[self].identifier
return[name[d]]
|
keyword[def] identifier[to_dict] ( identifier[self] , identifier[short_pred] = keyword[True] , identifier[properties] = keyword[True] ):
literal[string]
identifier[qs] = identifier[set] ( identifier[self] . identifier[nodeids] ( identifier[quantifier] = keyword[True] ))
keyword[def] identifier[_lnk] ( identifier[obj] ): keyword[return] { literal[string] : identifier[obj] . identifier[cfrom] , literal[string] : identifier[obj] . identifier[cto] }
keyword[def] identifier[_node] ( identifier[node] , identifier[short_pred] = keyword[True] ):
identifier[p] = identifier[node] . identifier[pred] . identifier[short_form] () keyword[if] identifier[short_pred] keyword[else] identifier[node] . identifier[pred] . identifier[string]
identifier[d] = identifier[dict] ( identifier[nodeid] = identifier[node] . identifier[nodeid] , identifier[predicate] = identifier[p] )
keyword[if] identifier[node] . identifier[lnk] keyword[is] keyword[not] keyword[None] : identifier[d] [ literal[string] ]= identifier[_lnk] ( identifier[node] )
keyword[if] identifier[properties] keyword[and] identifier[node] . identifier[sortinfo] :
keyword[if] identifier[node] . identifier[nodeid] keyword[not] keyword[in] identifier[qs] :
identifier[d] [ literal[string] ]= identifier[node] . identifier[sortinfo]
keyword[if] identifier[node] . identifier[surface] keyword[is] keyword[not] keyword[None] : identifier[d] [ literal[string] ]= identifier[node] . identifier[surface]
keyword[if] identifier[node] . identifier[base] keyword[is] keyword[not] keyword[None] : identifier[d] [ literal[string] ]= identifier[node] . identifier[base]
keyword[if] identifier[node] . identifier[carg] keyword[is] keyword[not] keyword[None] : identifier[d] [ literal[string] ]= identifier[node] . identifier[carg]
keyword[return] identifier[d]
keyword[def] identifier[_link] ( identifier[link] ): keyword[return] {
literal[string] : identifier[link] . identifier[start] , literal[string] : identifier[link] . identifier[end] ,
literal[string] : identifier[link] . identifier[rargname] , literal[string] : identifier[link] . identifier[post]
}
identifier[d] = identifier[dict] (
identifier[nodes] =[ identifier[_node] ( identifier[n] ) keyword[for] identifier[n] keyword[in] identifier[nodes] ( identifier[self] )],
identifier[links] =[ identifier[_link] ( identifier[l] ) keyword[for] identifier[l] keyword[in] identifier[links] ( identifier[self] )]
)
keyword[if] identifier[self] . identifier[index] keyword[is] keyword[not] keyword[None] :
identifier[idx] = identifier[self] . identifier[nodeid] ( identifier[self] . identifier[index] )
keyword[if] identifier[idx] keyword[is] keyword[not] keyword[None] :
identifier[d] [ literal[string] ]= identifier[idx]
keyword[if] identifier[self] . identifier[xarg] keyword[is] keyword[not] keyword[None] :
        identifier[xarg] = identifier[self] . identifier[nodeid] ( identifier[self] . identifier[xarg] )
keyword[if] identifier[xarg] keyword[is] keyword[not] keyword[None] :
identifier[d] [ literal[string] ]= identifier[xarg]
keyword[if] identifier[self] . identifier[lnk] keyword[is] keyword[not] keyword[None] : identifier[d] [ literal[string] ]= identifier[_lnk] ( identifier[self] )
keyword[if] identifier[self] . identifier[surface] keyword[is] keyword[not] keyword[None] : identifier[d] [ literal[string] ]= identifier[self] . identifier[surface]
keyword[if] identifier[self] . identifier[identifier] keyword[is] keyword[not] keyword[None] : identifier[d] [ literal[string] ]= identifier[self] . identifier[identifier]
keyword[return] identifier[d]
|
def to_dict(self, short_pred=True, properties=True):
"""
Encode the Dmrs as a dictionary suitable for JSON serialization.
"""
qs = set(self.nodeids(quantifier=True))
def _lnk(obj):
return {'from': obj.cfrom, 'to': obj.cto}
def _node(node, short_pred=True):
p = node.pred.short_form() if short_pred else node.pred.string
d = dict(nodeid=node.nodeid, predicate=p)
if node.lnk is not None:
d['lnk'] = _lnk(node) # depends on [control=['if'], data=[]]
if properties and node.sortinfo:
if node.nodeid not in qs:
d['sortinfo'] = node.sortinfo # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if node.surface is not None:
d['surface'] = node.surface # depends on [control=['if'], data=[]]
if node.base is not None:
d['base'] = node.base # depends on [control=['if'], data=[]]
if node.carg is not None:
d['carg'] = node.carg # depends on [control=['if'], data=[]]
return d
def _link(link):
return {'from': link.start, 'to': link.end, 'rargname': link.rargname, 'post': link.post}
d = dict(nodes=[_node(n) for n in nodes(self)], links=[_link(l) for l in links(self)])
# if self.top is not None: ... currently handled by links
if self.index is not None:
idx = self.nodeid(self.index)
if idx is not None:
d['index'] = idx # depends on [control=['if'], data=['idx']] # depends on [control=['if'], data=[]]
if self.xarg is not None:
        xarg = self.nodeid(self.xarg)
        if xarg is not None:
            d['xarg'] = xarg # depends on [control=['if'], data=['xarg']] # depends on [control=['if'], data=[]]
if self.lnk is not None:
d['lnk'] = _lnk(self) # depends on [control=['if'], data=[]]
if self.surface is not None:
d['surface'] = self.surface # depends on [control=['if'], data=[]]
if self.identifier is not None:
d['identifier'] = self.identifier # depends on [control=['if'], data=[]]
return d
|
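to_dict repeats the same "if x is not None: d[key] = x" guard for every optional field. A tiny sketch of that skip-None pattern with a hypothetical helper:

def compact(**fields):
    """Build a dict that silently drops None values, field by field."""
    return {k: v for k, v in fields.items() if v is not None}

node = compact(nodeid=10000, predicate='_dog_n_1', surface=None, carg=None)
print(node)  # {'nodeid': 10000, 'predicate': '_dog_n_1'}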
def createAARText(self):
'''Creates the text for airspeed, altitude and climb rate.'''
self.airspeedText = self.axes.text(self.rightPos-(self.vertSize/10.0),-0.97+(2*self.vertSize)-(self.vertSize/10.0),'AS: %.1f m/s' % self.airspeed,color='w',size=self.fontSize,ha='right')
self.altitudeText = self.axes.text(self.rightPos-(self.vertSize/10.0),-0.97+self.vertSize-(0.5*self.vertSize/10.0),'ALT: %.1f m ' % self.relAlt,color='w',size=self.fontSize,ha='right')
self.climbRateText = self.axes.text(self.rightPos-(self.vertSize/10.0),-0.97,'CR: %.1f m/s' % self.climbRate,color='w',size=self.fontSize,ha='right')
self.airspeedText.set_path_effects([PathEffects.withStroke(linewidth=1,foreground='k')])
self.altitudeText.set_path_effects([PathEffects.withStroke(linewidth=1,foreground='k')])
self.climbRateText.set_path_effects([PathEffects.withStroke(linewidth=1,foreground='k')])
|
def function[createAARText, parameter[self]]:
constant[Creates the text for airspeed, altitude and climb rate.]
name[self].airspeedText assign[=] call[name[self].axes.text, parameter[binary_operation[name[self].rightPos - binary_operation[name[self].vertSize / constant[10.0]]], binary_operation[binary_operation[<ast.UnaryOp object at 0x7da1b17de500> + binary_operation[constant[2] * name[self].vertSize]] - binary_operation[name[self].vertSize / constant[10.0]]], binary_operation[constant[AS: %.1f m/s] <ast.Mod object at 0x7da2590d6920> name[self].airspeed]]]
name[self].altitudeText assign[=] call[name[self].axes.text, parameter[binary_operation[name[self].rightPos - binary_operation[name[self].vertSize / constant[10.0]]], binary_operation[binary_operation[<ast.UnaryOp object at 0x7da1b17f8640> + name[self].vertSize] - binary_operation[binary_operation[constant[0.5] * name[self].vertSize] / constant[10.0]]], binary_operation[constant[ALT: %.1f m ] <ast.Mod object at 0x7da2590d6920> name[self].relAlt]]]
name[self].climbRateText assign[=] call[name[self].axes.text, parameter[binary_operation[name[self].rightPos - binary_operation[name[self].vertSize / constant[10.0]]], <ast.UnaryOp object at 0x7da1b17f97b0>, binary_operation[constant[CR: %.1f m/s] <ast.Mod object at 0x7da2590d6920> name[self].climbRate]]]
call[name[self].airspeedText.set_path_effects, parameter[list[[<ast.Call object at 0x7da1b17f8f10>]]]]
call[name[self].altitudeText.set_path_effects, parameter[list[[<ast.Call object at 0x7da1b17f9180>]]]]
call[name[self].climbRateText.set_path_effects, parameter[list[[<ast.Call object at 0x7da1b17f9360>]]]]
|
keyword[def] identifier[createAARText] ( identifier[self] ):
literal[string]
identifier[self] . identifier[airspeedText] = identifier[self] . identifier[axes] . identifier[text] ( identifier[self] . identifier[rightPos] -( identifier[self] . identifier[vertSize] / literal[int] ),- literal[int] +( literal[int] * identifier[self] . identifier[vertSize] )-( identifier[self] . identifier[vertSize] / literal[int] ), literal[string] % identifier[self] . identifier[airspeed] , identifier[color] = literal[string] , identifier[size] = identifier[self] . identifier[fontSize] , identifier[ha] = literal[string] )
identifier[self] . identifier[altitudeText] = identifier[self] . identifier[axes] . identifier[text] ( identifier[self] . identifier[rightPos] -( identifier[self] . identifier[vertSize] / literal[int] ),- literal[int] + identifier[self] . identifier[vertSize] -( literal[int] * identifier[self] . identifier[vertSize] / literal[int] ), literal[string] % identifier[self] . identifier[relAlt] , identifier[color] = literal[string] , identifier[size] = identifier[self] . identifier[fontSize] , identifier[ha] = literal[string] )
identifier[self] . identifier[climbRateText] = identifier[self] . identifier[axes] . identifier[text] ( identifier[self] . identifier[rightPos] -( identifier[self] . identifier[vertSize] / literal[int] ),- literal[int] , literal[string] % identifier[self] . identifier[climbRate] , identifier[color] = literal[string] , identifier[size] = identifier[self] . identifier[fontSize] , identifier[ha] = literal[string] )
identifier[self] . identifier[airspeedText] . identifier[set_path_effects] ([ identifier[PathEffects] . identifier[withStroke] ( identifier[linewidth] = literal[int] , identifier[foreground] = literal[string] )])
identifier[self] . identifier[altitudeText] . identifier[set_path_effects] ([ identifier[PathEffects] . identifier[withStroke] ( identifier[linewidth] = literal[int] , identifier[foreground] = literal[string] )])
identifier[self] . identifier[climbRateText] . identifier[set_path_effects] ([ identifier[PathEffects] . identifier[withStroke] ( identifier[linewidth] = literal[int] , identifier[foreground] = literal[string] )])
|
def createAARText(self):
"""Creates the text for airspeed, altitude and climb rate."""
self.airspeedText = self.axes.text(self.rightPos - self.vertSize / 10.0, -0.97 + 2 * self.vertSize - self.vertSize / 10.0, 'AS: %.1f m/s' % self.airspeed, color='w', size=self.fontSize, ha='right')
self.altitudeText = self.axes.text(self.rightPos - self.vertSize / 10.0, -0.97 + self.vertSize - 0.5 * self.vertSize / 10.0, 'ALT: %.1f m ' % self.relAlt, color='w', size=self.fontSize, ha='right')
self.climbRateText = self.axes.text(self.rightPos - self.vertSize / 10.0, -0.97, 'CR: %.1f m/s' % self.climbRate, color='w', size=self.fontSize, ha='right')
self.airspeedText.set_path_effects([PathEffects.withStroke(linewidth=1, foreground='k')])
self.altitudeText.set_path_effects([PathEffects.withStroke(linewidth=1, foreground='k')])
self.climbRateText.set_path_effects([PathEffects.withStroke(linewidth=1, foreground='k')])
|
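Runnable reduction of the HUD-text trick above: white annotation text made legible on any background by a black stroke path effect. The coordinates and the Agg backend are example choices, not values from the record.

import matplotlib
matplotlib.use('Agg')  # headless backend so the snippet runs anywhere
import matplotlib.pyplot as plt
import matplotlib.patheffects as PathEffects

fig, ax = plt.subplots()
txt = ax.text(0.95, 0.05, 'AS: %.1f m/s' % 12.3, color='w', size=10,
              ha='right', transform=ax.transAxes)
# Same outline trick as createAARText: stroke the glyphs in black
txt.set_path_effects([PathEffects.withStroke(linewidth=1, foreground='k')])
fig.savefig('hud_text.png')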
def getConfigDirectory():
"""
Determines the platform-specific config directory location for ue4cli
"""
if platform.system() == 'Windows':
return os.path.join(os.environ['APPDATA'], 'ue4cli')
else:
return os.path.join(os.environ['HOME'], '.config', 'ue4cli')
|
def function[getConfigDirectory, parameter[]]:
constant[
Determines the platform-specific config directory location for ue4cli
]
if compare[call[name[platform].system, parameter[]] equal[==] constant[Windows]] begin[:]
return[call[name[os].path.join, parameter[call[name[os].environ][constant[APPDATA]], constant[ue4cli]]]]
|
keyword[def] identifier[getConfigDirectory] ():
literal[string]
keyword[if] identifier[platform] . identifier[system] ()== literal[string] :
keyword[return] identifier[os] . identifier[path] . identifier[join] ( identifier[os] . identifier[environ] [ literal[string] ], literal[string] )
keyword[else] :
keyword[return] identifier[os] . identifier[path] . identifier[join] ( identifier[os] . identifier[environ] [ literal[string] ], literal[string] , literal[string] )
|
def getConfigDirectory():
"""
Determines the platform-specific config directory location for ue4cli
"""
if platform.system() == 'Windows':
return os.path.join(os.environ['APPDATA'], 'ue4cli') # depends on [control=['if'], data=[]]
else:
return os.path.join(os.environ['HOME'], '.config', 'ue4cli')
|
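Usage is a straight call; a parameterised variant makes the convention explicit. Note that, like the original, anything that is not Windows (macOS included) falls through to the ~/.config layout.

import os
import platform

def get_config_directory(app='ue4cli'):
    if platform.system() == 'Windows':
        return os.path.join(os.environ['APPDATA'], app)
    # Everything else, macOS included, uses the ~/.config convention
    return os.path.join(os.environ['HOME'], '.config', app)

print(get_config_directory())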
def fetch_all_data(self, limit=50000):
"""
Fetch data for all entities.
"""
# Query text
query = text(
"""
SELECT domain, entity_id, state, last_changed
FROM states
WHERE
state NOT IN ('unknown', 'unavailable')
ORDER BY last_changed DESC
LIMIT :limit
"""
)
try:
print("Querying the database, this could take a while")
response = self.perform_query(query, limit=limit)
master_df = pd.DataFrame(response.fetchall())
print("master_df created successfully.")
self._master_df = master_df.copy()
self.parse_all_data()
except:
raise ValueError("Error querying the database.")
|
def function[fetch_all_data, parameter[self, limit]]:
constant[
Fetch data for all entities.
]
variable[query] assign[=] call[name[text], parameter[constant[
SELECT domain, entity_id, state, last_changed
FROM states
WHERE
state NOT IN ('unknown', 'unavailable')
ORDER BY last_changed DESC
LIMIT :limit
]]]
<ast.Try object at 0x7da1b13874f0>
|
keyword[def] identifier[fetch_all_data] ( identifier[self] , identifier[limit] = literal[int] ):
literal[string]
identifier[query] = identifier[text] (
literal[string]
)
keyword[try] :
identifier[print] ( literal[string] )
identifier[response] = identifier[self] . identifier[perform_query] ( identifier[query] , identifier[limit] = identifier[limit] )
identifier[master_df] = identifier[pd] . identifier[DataFrame] ( identifier[response] . identifier[fetchall] ())
identifier[print] ( literal[string] )
identifier[self] . identifier[_master_df] = identifier[master_df] . identifier[copy] ()
identifier[self] . identifier[parse_all_data] ()
keyword[except] :
keyword[raise] identifier[ValueError] ( literal[string] )
|
def fetch_all_data(self, limit=50000):
"""
Fetch data for all entities.
"""
# Query text
query = text("\n SELECT domain, entity_id, state, last_changed\n FROM states\n WHERE\n state NOT IN ('unknown', 'unavailable')\n ORDER BY last_changed DESC\n LIMIT :limit\n ")
try:
print('Querying the database, this could take a while')
response = self.perform_query(query, limit=limit)
master_df = pd.DataFrame(response.fetchall())
print('master_df created successfully.')
self._master_df = master_df.copy()
self.parse_all_data() # depends on [control=['try'], data=[]]
except:
raise ValueError('Error querying the database.') # depends on [control=['except'], data=[]]
|
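A runnable sketch of the same query path against a throwaway in-memory SQLite database standing in for the Home Assistant recorder schema (SQLAlchemy 1.4+ assumed; perform_query is presumed to bind :limit as shown). Unlike the record's bare except:, which hides the underlying error, this version lets exceptions propagate.

import pandas as pd
from sqlalchemy import create_engine, text

engine = create_engine('sqlite://')  # in-memory stand-in for the recorder DB
with engine.connect() as conn:
    conn.execute(text("CREATE TABLE states "
                      "(domain TEXT, entity_id TEXT, state TEXT, last_changed TEXT)"))
    conn.execute(text("INSERT INTO states VALUES "
                      "('light', 'light.kitchen', 'on', '2019-01-01')"))
    query = text("SELECT domain, entity_id, state, last_changed FROM states "
                 "WHERE state NOT IN ('unknown', 'unavailable') "
                 "ORDER BY last_changed DESC LIMIT :limit")
    response = conn.execute(query, {'limit': 50000})
    master_df = pd.DataFrame(response.fetchall(), columns=response.keys())
print(master_df)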
def write_csv(path, rows, dialect='excel', fieldnames=None, quoting=csv.QUOTE_ALL, extrasaction='ignore', *args, **kwargs):
''' Write rows data to a CSV file (with or without fieldnames) '''
if not quoting:
quoting = csv.QUOTE_MINIMAL
if 'lineterminator' not in kwargs:
kwargs['lineterminator'] = '\n' # use \n to fix double-line in Windows
with open(path, mode='wt', newline='') as csvfile:
if fieldnames:
writer = csv.DictWriter(csvfile, fieldnames=fieldnames, dialect=dialect, quoting=quoting, extrasaction=extrasaction, *args, **kwargs)
writer.writeheader()
for row in rows:
writer.writerow(row)
else:
writer = csv.writer(csvfile, dialect=dialect, quoting=quoting, *args, **kwargs)
for row in rows:
writer.writerow(row)
|
def function[write_csv, parameter[path, rows, dialect, fieldnames, quoting, extrasaction]]:
constant[ Write rows data to a CSV file (with or without fieldnames) ]
if <ast.UnaryOp object at 0x7da1b10616c0> begin[:]
variable[quoting] assign[=] name[csv].QUOTE_MINIMAL
if compare[constant[lineterminator] <ast.NotIn object at 0x7da2590d7190> name[kwargs]] begin[:]
call[name[kwargs]][constant[lineterminator]] assign[=] constant[
]
with call[name[open], parameter[name[path]]] begin[:]
if name[fieldnames] begin[:]
variable[writer] assign[=] call[name[csv].DictWriter, parameter[name[csvfile], <ast.Starred object at 0x7da1b1061270>]]
call[name[writer].writeheader, parameter[]]
for taget[name[row]] in starred[name[rows]] begin[:]
call[name[writer].writerow, parameter[name[row]]]
|
keyword[def] identifier[write_csv] ( identifier[path] , identifier[rows] , identifier[dialect] = literal[string] , identifier[fieldnames] = keyword[None] , identifier[quoting] = identifier[csv] . identifier[QUOTE_ALL] , identifier[extrasaction] = literal[string] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[if] keyword[not] identifier[quoting] :
identifier[quoting] = identifier[csv] . identifier[QUOTE_MINIMAL]
keyword[if] literal[string] keyword[not] keyword[in] identifier[kwargs] :
identifier[kwargs] [ literal[string] ]= literal[string]
keyword[with] identifier[open] ( identifier[path] , identifier[mode] = literal[string] , identifier[newline] = literal[string] ) keyword[as] identifier[csvfile] :
keyword[if] identifier[fieldnames] :
identifier[writer] = identifier[csv] . identifier[DictWriter] ( identifier[csvfile] , identifier[fieldnames] = identifier[fieldnames] , identifier[dialect] = identifier[dialect] , identifier[quoting] = identifier[quoting] , identifier[extrasaction] = identifier[extrasaction] ,* identifier[args] ,** identifier[kwargs] )
identifier[writer] . identifier[writeheader] ()
keyword[for] identifier[row] keyword[in] identifier[rows] :
identifier[writer] . identifier[writerow] ( identifier[row] )
keyword[else] :
identifier[writer] = identifier[csv] . identifier[writer] ( identifier[csvfile] , identifier[dialect] = identifier[dialect] , identifier[quoting] = identifier[quoting] ,* identifier[args] ,** identifier[kwargs] )
keyword[for] identifier[row] keyword[in] identifier[rows] :
identifier[writer] . identifier[writerow] ( identifier[row] )
|
def write_csv(path, rows, dialect='excel', fieldnames=None, quoting=csv.QUOTE_ALL, extrasaction='ignore', *args, **kwargs):
""" Write rows data to a CSV file (with or without fieldnames) """
if not quoting:
quoting = csv.QUOTE_MINIMAL # depends on [control=['if'], data=[]]
if 'lineterminator' not in kwargs:
kwargs['lineterminator'] = '\n' # use \n to fix double-line in Windows # depends on [control=['if'], data=['kwargs']]
with open(path, mode='wt', newline='') as csvfile:
if fieldnames:
writer = csv.DictWriter(csvfile, *args, fieldnames=fieldnames, dialect=dialect, quoting=quoting, extrasaction=extrasaction, **kwargs)
writer.writeheader()
for row in rows:
writer.writerow(row) # depends on [control=['for'], data=['row']] # depends on [control=['if'], data=[]]
else:
writer = csv.writer(csvfile, *args, dialect=dialect, quoting=quoting, **kwargs)
for row in rows:
writer.writerow(row) # depends on [control=['for'], data=['row']] # depends on [control=['with'], data=['csvfile']]
|
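Usage example, assuming the write_csv above is in scope: with fieldnames given, dict rows are written through DictWriter, and keys outside the header are dropped because extrasaction='ignore'.

rows = [
    {'name': 'ada', 'lang': 'python', 'extra': 'dropped'},
    {'name': 'grace', 'lang': 'cobol'},
]
write_csv('people.csv', rows, fieldnames=['name', 'lang'])
with open('people.csv') as f:
    print(f.read())
# "name","lang"
# "ada","python"
# "grace","cobol"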
def filter(self, table, idps, filter_string):
"""Naive case-insensitive search."""
q = filter_string.lower()
return [idp for idp in idps
if q in idp.ud.lower()]
|
def function[filter, parameter[self, table, idps, filter_string]]:
constant[Naive case-insensitive search.]
variable[q] assign[=] call[name[filter_string].lower, parameter[]]
return[<ast.ListComp object at 0x7da1b1985900>]
|
keyword[def] identifier[filter] ( identifier[self] , identifier[table] , identifier[idps] , identifier[filter_string] ):
literal[string]
identifier[q] = identifier[filter_string] . identifier[lower] ()
keyword[return] [ identifier[idp] keyword[for] identifier[idp] keyword[in] identifier[idps]
keyword[if] identifier[q] keyword[in] identifier[idp] . identifier[ud] . identifier[lower] ()]
|
def filter(self, table, idps, filter_string):
"""Naive case-insensitive search."""
q = filter_string.lower()
return [idp for idp in idps if q in idp.ud.lower()]
|
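Usage sketch with SimpleNamespace standing in for the IdP objects; only the .ud attribute the filter reads is assumed.

from types import SimpleNamespace

idps = [SimpleNamespace(ud='University of Example'),
        SimpleNamespace(ud='Example Corp IdP')]
q = 'corp'
matches = [idp for idp in idps if q in idp.ud.lower()]
print([m.ud for m in matches])  # ['Example Corp IdP']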
def worker(self):
"""
Calculates the quartet weights for the test at a random
subsampled chunk of loci.
"""
## subsample loci
fullseqs = self.sample_loci()
## find all iterations of samples for this quartet
liters = itertools.product(*self.imap.values())
## run tree inference for each iteration of sampledict
hashval = uuid.uuid4().hex
weights = []
for ridx, lidx in enumerate(liters):
## get subalignment for this iteration and make to nex
a,b,c,d = lidx
sub = {}
for i in lidx:
if self.rmap[i] == "p1":
sub["A"] = fullseqs[i]
elif self.rmap[i] == "p2":
sub["B"] = fullseqs[i]
elif self.rmap[i] == "p3":
sub["C"] = fullseqs[i]
else:
sub["D"] = fullseqs[i]
## write as nexus file
nex = []
for tax in list("ABCD"):
nex.append(">{} {}".format(tax, sub[tax]))
## check for too much missing or lack of variants
nsites, nvar = count_var(nex)
## only run test if there's variation present
if nvar > self.minsnps:
## format as nexus file
nexus = "{} {}\n".format(4, len(fullseqs[a])) + "\n".join(nex)
## infer ML tree
treeorder = self.run_tree_inference(nexus, "{}.{}".format(hashval, ridx))
## add to list
weights.append(treeorder)
## cleanup - remove all files with the hash val
rfiles = glob.glob(os.path.join(tempfile.tempdir, "*{}*".format(hashval)))
for rfile in rfiles:
if os.path.exists(rfile):
os.remove(rfile)
## return result as weights for the set topologies.
trees = ["ABCD", "ACBD", "ADBC"]
wdict = {i:float(weights.count(i))/len(weights) for i in trees}
return wdict
|
def function[worker, parameter[self]]:
constant[
Calculates the quartet weights for the test at a random
subsampled chunk of loci.
]
variable[fullseqs] assign[=] call[name[self].sample_loci, parameter[]]
variable[liters] assign[=] call[name[itertools].product, parameter[<ast.Starred object at 0x7da1aff35090>]]
variable[hashval] assign[=] call[name[uuid].uuid4, parameter[]].hex
variable[weights] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1aff35f90>, <ast.Name object at 0x7da1aff36e90>]]] in starred[call[name[enumerate], parameter[name[liters]]]] begin[:]
<ast.Tuple object at 0x7da1aff37cd0> assign[=] name[lidx]
variable[sub] assign[=] dictionary[[], []]
for taget[name[i]] in starred[name[lidx]] begin[:]
if compare[call[name[self].rmap][name[i]] equal[==] constant[p1]] begin[:]
call[name[sub]][constant[A]] assign[=] call[name[fullseqs]][name[i]]
variable[nex] assign[=] list[[]]
for taget[name[tax]] in starred[call[name[list], parameter[constant[ABCD]]]] begin[:]
call[name[nex].append, parameter[call[constant[>{} {}].format, parameter[name[tax], call[name[sub]][name[tax]]]]]]
<ast.Tuple object at 0x7da1aff35c00> assign[=] call[name[count_var], parameter[name[nex]]]
if compare[name[nvar] greater[>] name[self].minsnps] begin[:]
variable[nexus] assign[=] binary_operation[call[constant[{} {}
].format, parameter[constant[4], call[name[len], parameter[call[name[fullseqs]][name[a]]]]]] + call[constant[
].join, parameter[name[nex]]]]
variable[treeorder] assign[=] call[name[self].run_tree_inference, parameter[name[nexus], call[constant[{}.{}].format, parameter[name[hashval], name[ridx]]]]]
call[name[weights].append, parameter[name[treeorder]]]
variable[rfiles] assign[=] call[name[glob].glob, parameter[call[name[os].path.join, parameter[name[tempfile].tempdir, call[constant[*{}*].format, parameter[name[hashval]]]]]]]
for taget[name[rfile]] in starred[name[rfiles]] begin[:]
if call[name[os].path.exists, parameter[name[rfile]]] begin[:]
call[name[os].remove, parameter[name[rfile]]]
variable[trees] assign[=] list[[<ast.Constant object at 0x7da1b0065270>, <ast.Constant object at 0x7da1b0067010>, <ast.Constant object at 0x7da1b0065630>]]
variable[wdict] assign[=] <ast.DictComp object at 0x7da1b0065ff0>
return[name[wdict]]
|
keyword[def] identifier[worker] ( identifier[self] ):
literal[string]
identifier[fullseqs] = identifier[self] . identifier[sample_loci] ()
identifier[liters] = identifier[itertools] . identifier[product] (* identifier[self] . identifier[imap] . identifier[values] ())
identifier[hashval] = identifier[uuid] . identifier[uuid4] (). identifier[hex]
identifier[weights] =[]
keyword[for] identifier[ridx] , identifier[lidx] keyword[in] identifier[enumerate] ( identifier[liters] ):
identifier[a] , identifier[b] , identifier[c] , identifier[d] = identifier[lidx]
identifier[sub] ={}
keyword[for] identifier[i] keyword[in] identifier[lidx] :
keyword[if] identifier[self] . identifier[rmap] [ identifier[i] ]== literal[string] :
identifier[sub] [ literal[string] ]= identifier[fullseqs] [ identifier[i] ]
keyword[elif] identifier[self] . identifier[rmap] [ identifier[i] ]== literal[string] :
identifier[sub] [ literal[string] ]= identifier[fullseqs] [ identifier[i] ]
keyword[elif] identifier[self] . identifier[rmap] [ identifier[i] ]== literal[string] :
identifier[sub] [ literal[string] ]= identifier[fullseqs] [ identifier[i] ]
keyword[else] :
identifier[sub] [ literal[string] ]= identifier[fullseqs] [ identifier[i] ]
identifier[nex] =[]
keyword[for] identifier[tax] keyword[in] identifier[list] ( literal[string] ):
identifier[nex] . identifier[append] ( literal[string] . identifier[format] ( identifier[tax] , identifier[sub] [ identifier[tax] ]))
identifier[nsites] , identifier[nvar] = identifier[count_var] ( identifier[nex] )
keyword[if] identifier[nvar] > identifier[self] . identifier[minsnps] :
identifier[nexus] = literal[string] . identifier[format] ( literal[int] , identifier[len] ( identifier[fullseqs] [ identifier[a] ]))+ literal[string] . identifier[join] ( identifier[nex] )
identifier[treeorder] = identifier[self] . identifier[run_tree_inference] ( identifier[nexus] , literal[string] . identifier[format] ( identifier[hashval] , identifier[ridx] ))
identifier[weights] . identifier[append] ( identifier[treeorder] )
identifier[rfiles] = identifier[glob] . identifier[glob] ( identifier[os] . identifier[path] . identifier[join] ( identifier[tempfile] . identifier[tempdir] , literal[string] . identifier[format] ( identifier[hashval] )))
keyword[for] identifier[rfile] keyword[in] identifier[rfiles] :
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[rfile] ):
identifier[os] . identifier[remove] ( identifier[rfile] )
identifier[trees] =[ literal[string] , literal[string] , literal[string] ]
identifier[wdict] ={ identifier[i] : identifier[float] ( identifier[weights] . identifier[count] ( identifier[i] ))/ identifier[len] ( identifier[weights] ) keyword[for] identifier[i] keyword[in] identifier[trees] }
keyword[return] identifier[wdict]
|
def worker(self):
"""
Calculates the quartet weights for the test at a random
subsampled chunk of loci.
""" ## subsample loci
fullseqs = self.sample_loci()
## find all iterations of samples for this quartet
liters = itertools.product(*self.imap.values())
## run tree inference for each iteration of sampledict
hashval = uuid.uuid4().hex
weights = []
for (ridx, lidx) in enumerate(liters):
## get subalignment for this iteration and make to nex
(a, b, c, d) = lidx
sub = {}
for i in lidx:
if self.rmap[i] == 'p1':
sub['A'] = fullseqs[i] # depends on [control=['if'], data=[]]
elif self.rmap[i] == 'p2':
sub['B'] = fullseqs[i] # depends on [control=['if'], data=[]]
elif self.rmap[i] == 'p3':
sub['C'] = fullseqs[i] # depends on [control=['if'], data=[]]
else:
sub['D'] = fullseqs[i] # depends on [control=['for'], data=['i']]
## write as nexus file
nex = []
for tax in list('ABCD'):
nex.append('>{} {}'.format(tax, sub[tax])) # depends on [control=['for'], data=['tax']]
## check for too much missing or lack of variants
(nsites, nvar) = count_var(nex)
## only run test if there's variation present
if nvar > self.minsnps:
## format as nexus file
nexus = '{} {}\n'.format(4, len(fullseqs[a])) + '\n'.join(nex)
## infer ML tree
treeorder = self.run_tree_inference(nexus, '{}.{}'.format(hashval, ridx))
## add to list
weights.append(treeorder) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
## cleanup - remove all files with the hash val
rfiles = glob.glob(os.path.join(tempfile.tempdir, '*{}*'.format(hashval)))
for rfile in rfiles:
if os.path.exists(rfile):
os.remove(rfile) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['rfile']]
## return result as weights for the set topologies.
trees = ['ABCD', 'ACBD', 'ADBC']
wdict = {i: float(weights.count(i)) / len(weights) for i in trees}
return wdict
|
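The final step of worker reduces the list of inferred topologies to weights for the three possible quartet arrangements. Isolated below with a made-up sample (true division assumed, i.e. Python 3; the original wraps the count in float() for Python 2):

trees = ['ABCD', 'ACBD', 'ADBC']
weights = ['ABCD', 'ABCD', 'ADBC', 'ABCD']  # inferred topologies, invented here
wdict = {t: weights.count(t) / len(weights) for t in trees}
print(wdict)  # {'ABCD': 0.75, 'ACBD': 0.0, 'ADBC': 0.25}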
def utc(self, year, month=1, day=1, hour=0, minute=0, second=0.0):
"""Build a `Time` from a UTC calendar date.
You can either specify the date as separate components, or
provide a time zone aware Python datetime. The following two
calls are equivalent (the ``utc`` time zone object can be
imported from the ``skyfield.api`` module, or from ``pytz`` if
you have it)::
ts.utc(2014, 1, 18, 1, 35, 37.5)
ts.utc(datetime(2014, 1, 18, 1, 35, 37, 500000, tzinfo=utc))
Note that only by passing the components separately can you
specify a leap second, because a Python datetime will not allow
the value 60 in its seconds field.
"""
if isinstance(year, datetime):
dt = year
tai = _utc_datetime_to_tai(self.leap_dates, self.leap_offsets, dt)
elif isinstance(year, date):
d = year
tai = _utc_date_to_tai(self.leap_dates, self.leap_offsets, d)
elif hasattr(year, '__len__') and isinstance(year[0], datetime):
# TODO: clean this up and better document the possibilities.
list_of_datetimes = year
tai = array([
_utc_datetime_to_tai(self.leap_dates, self.leap_offsets, dt)
for dt in list_of_datetimes])
else:
tai = _utc_to_tai(self.leap_dates, self.leap_offsets,
_to_array(year), _to_array(month),
_to_array(day), _to_array(hour),
_to_array(minute), _to_array(second))
t = Time(self, tai + tt_minus_tai)
t.tai = tai
return t
|
def function[utc, parameter[self, year, month, day, hour, minute, second]]:
constant[Build a `Time` from a UTC calendar date.
You can either specify the date as separate components, or
provide a time zone aware Python datetime. The following two
calls are equivalent (the ``utc`` time zone object can be
imported from the ``skyfield.api`` module, or from ``pytz`` if
you have it)::
ts.utc(2014, 1, 18, 1, 35, 37.5)
ts.utc(datetime(2014, 1, 18, 1, 35, 37, 500000, tzinfo=utc))
Note that only by passing the components separately can you
specify a leap second, because a Python datetime will not allow
the value 60 in its seconds field.
]
if call[name[isinstance], parameter[name[year], name[datetime]]] begin[:]
variable[dt] assign[=] name[year]
variable[tai] assign[=] call[name[_utc_datetime_to_tai], parameter[name[self].leap_dates, name[self].leap_offsets, name[dt]]]
variable[t] assign[=] call[name[Time], parameter[name[self], binary_operation[name[tai] + name[tt_minus_tai]]]]
name[t].tai assign[=] name[tai]
return[name[t]]
|
keyword[def] identifier[utc] ( identifier[self] , identifier[year] , identifier[month] = literal[int] , identifier[day] = literal[int] , identifier[hour] = literal[int] , identifier[minute] = literal[int] , identifier[second] = literal[int] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[year] , identifier[datetime] ):
identifier[dt] = identifier[year]
identifier[tai] = identifier[_utc_datetime_to_tai] ( identifier[self] . identifier[leap_dates] , identifier[self] . identifier[leap_offsets] , identifier[dt] )
keyword[elif] identifier[isinstance] ( identifier[year] , identifier[date] ):
identifier[d] = identifier[year]
identifier[tai] = identifier[_utc_date_to_tai] ( identifier[self] . identifier[leap_dates] , identifier[self] . identifier[leap_offsets] , identifier[d] )
keyword[elif] identifier[hasattr] ( identifier[year] , literal[string] ) keyword[and] identifier[isinstance] ( identifier[year] [ literal[int] ], identifier[datetime] ):
identifier[list_of_datetimes] = identifier[year]
identifier[tai] = identifier[array] ([
identifier[_utc_datetime_to_tai] ( identifier[self] . identifier[leap_dates] , identifier[self] . identifier[leap_offsets] , identifier[dt] )
keyword[for] identifier[dt] keyword[in] identifier[list_of_datetimes] ])
keyword[else] :
identifier[tai] = identifier[_utc_to_tai] ( identifier[self] . identifier[leap_dates] , identifier[self] . identifier[leap_offsets] ,
identifier[_to_array] ( identifier[year] ), identifier[_to_array] ( identifier[month] ),
identifier[_to_array] ( identifier[day] ), identifier[_to_array] ( identifier[hour] ),
identifier[_to_array] ( identifier[minute] ), identifier[_to_array] ( identifier[second] ))
identifier[t] = identifier[Time] ( identifier[self] , identifier[tai] + identifier[tt_minus_tai] )
identifier[t] . identifier[tai] = identifier[tai]
keyword[return] identifier[t]
|
def utc(self, year, month=1, day=1, hour=0, minute=0, second=0.0):
"""Build a `Time` from a UTC calendar date.
You can either specify the date as separate components, or
provide a time zone aware Python datetime. The following two
calls are equivalent (the ``utc`` time zone object can be
imported from the ``skyfield.api`` module, or from ``pytz`` if
you have it)::
ts.utc(2014, 1, 18, 1, 35, 37.5)
ts.utc(datetime(2014, 1, 18, 1, 35, 37, 500000, tzinfo=utc))
Note that only by passing the components separately can you
specify a leap second, because a Python datetime will not allow
the value 60 in its seconds field.
"""
if isinstance(year, datetime):
dt = year
tai = _utc_datetime_to_tai(self.leap_dates, self.leap_offsets, dt) # depends on [control=['if'], data=[]]
elif isinstance(year, date):
d = year
tai = _utc_date_to_tai(self.leap_dates, self.leap_offsets, d) # depends on [control=['if'], data=[]]
elif hasattr(year, '__len__') and isinstance(year[0], datetime):
# TODO: clean this up and better document the possibilities.
list_of_datetimes = year
tai = array([_utc_datetime_to_tai(self.leap_dates, self.leap_offsets, dt) for dt in list_of_datetimes]) # depends on [control=['if'], data=[]]
else:
tai = _utc_to_tai(self.leap_dates, self.leap_offsets, _to_array(year), _to_array(month), _to_array(day), _to_array(hour), _to_array(minute), _to_array(second))
t = Time(self, tai + tt_minus_tai)
t.tai = tai
return t
|
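The interesting part of utc is the isinstance ladder that accepts a datetime, a date, a sequence of datetimes, or separate components. A stdlib-only sketch of that dispatch (the TAI conversion itself needs skyfield's leap-second tables and is not reproduced):

from datetime import date, datetime

def to_components(value):
    # datetime is a subclass of date, so it must be checked first,
    # exactly as in the original ladder
    if isinstance(value, datetime):
        return (value.year, value.month, value.day,
                value.hour, value.minute, value.second)
    if isinstance(value, date):
        return (value.year, value.month, value.day, 0, 0, 0)
    if hasattr(value, '__len__') and isinstance(value[0], datetime):
        return [to_components(dt) for dt in value]
    return value  # already separate components

print(to_components(datetime(2014, 1, 18, 1, 35, 37)))  # (2014, 1, 18, 1, 35, 37)
print(to_components(date(2014, 1, 18)))                 # (2014, 1, 18, 0, 0, 0)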
def clean_filters(filters: Mapping = None) -> str:
"""
Checks the values inside `filters`
https://docs.docker.com/engine/api/v1.29/#operation/ServiceList
    Returns the filters serialized as a JSON string in the `map[string][]string` format
"""
if filters and isinstance(filters, dict):
for k, v in filters.items():
if not isinstance(v, list):
v = [v]
filters[k] = v
return json.dumps(filters)
|
def function[clean_filters, parameter[filters]]:
constant[
Checks the values inside `filters`
https://docs.docker.com/engine/api/v1.29/#operation/ServiceList
    Returns the filters serialized as a JSON string in the `map[string][]string` format
]
if <ast.BoolOp object at 0x7da1b08d54e0> begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b08d6950>, <ast.Name object at 0x7da1b08d4c10>]]] in starred[call[name[filters].items, parameter[]]] begin[:]
if <ast.UnaryOp object at 0x7da1b08d57b0> begin[:]
variable[v] assign[=] list[[<ast.Name object at 0x7da1b0847b50>]]
call[name[filters]][name[k]] assign[=] name[v]
return[call[name[json].dumps, parameter[name[filters]]]]
|
keyword[def] identifier[clean_filters] ( identifier[filters] : identifier[Mapping] = keyword[None] )-> identifier[str] :
literal[string]
keyword[if] identifier[filters] keyword[and] identifier[isinstance] ( identifier[filters] , identifier[dict] ):
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[filters] . identifier[items] ():
keyword[if] keyword[not] identifier[isinstance] ( identifier[v] , identifier[list] ):
identifier[v] =[ identifier[v] ]
identifier[filters] [ identifier[k] ]= identifier[v]
keyword[return] identifier[json] . identifier[dumps] ( identifier[filters] )
|
def clean_filters(filters: Mapping=None) -> str:
"""
Checks the values inside `filters`
https://docs.docker.com/engine/api/v1.29/#operation/ServiceList
    Returns the filters as a JSON string in the `map[string][]string` format
"""
if filters and isinstance(filters, dict):
for (k, v) in filters.items():
if not isinstance(v, list):
v = [v] # depends on [control=['if'], data=[]]
filters[k] = v # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
return json.dumps(filters)
|
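A self-contained run of clean_filters, restated verbatim from the record above so the snippet executes on its own: scalar values get wrapped in one-element lists so the JSON matches Docker's map[string][]string filter shape.

import json
from typing import Mapping

def clean_filters(filters: Mapping = None) -> str:
    if filters and isinstance(filters, dict):
        for k, v in filters.items():
            if not isinstance(v, list):
                v = [v]
            filters[k] = v  # reassigning an existing key mid-iteration is safe
    return json.dumps(filters)

print(clean_filters({'name': 'web', 'label': ['a', 'b']}))
# {"name": ["web"], "label": ["a", "b"]}
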
def statement(self):
"""
statement : assign_statement
| expression
| control
| empty
Feature For Loop adds:
| loop
Feature Func adds:
| func
| return statement
"""
if self.cur_token.type == TokenTypes.VAR:
self.tokenizer.start_saving(self.cur_token)
self.variable()
peek_var = self.cur_token
self.tokenizer.replay()
self.eat()
if peek_var.type == TokenTypes.ASSIGN:
return self.assign_statement()
else:
return self.expression()
elif self.cur_token.type in TokenTypes.control(self.features):
return self.control()
elif self.cur_token.type in TokenTypes.loop(self.features):
return self.loop()
elif self.cur_token.type in TokenTypes.func(self.features):
if self.cur_token.type == TokenTypes.FUNC:
return self.func()
elif self.cur_token.type == TokenTypes.RETURN:
return self.return_statement()
self.error("Invalid token or unfinished statement")
|
def function[statement, parameter[self]]:
constant[
statement : assign_statement
| expression
| control
| empty
Feature For Loop adds:
| loop
Feature Func adds:
| func
| return statement
]
if compare[name[self].cur_token.type equal[==] name[TokenTypes].VAR] begin[:]
call[name[self].tokenizer.start_saving, parameter[name[self].cur_token]]
call[name[self].variable, parameter[]]
variable[peek_var] assign[=] name[self].cur_token
call[name[self].tokenizer.replay, parameter[]]
call[name[self].eat, parameter[]]
if compare[name[peek_var].type equal[==] name[TokenTypes].ASSIGN] begin[:]
return[call[name[self].assign_statement, parameter[]]]
call[name[self].error, parameter[constant[Invalid token or unfinished statement]]]
|
keyword[def] identifier[statement] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[cur_token] . identifier[type] == identifier[TokenTypes] . identifier[VAR] :
identifier[self] . identifier[tokenizer] . identifier[start_saving] ( identifier[self] . identifier[cur_token] )
identifier[self] . identifier[variable] ()
identifier[peek_var] = identifier[self] . identifier[cur_token]
identifier[self] . identifier[tokenizer] . identifier[replay] ()
identifier[self] . identifier[eat] ()
keyword[if] identifier[peek_var] . identifier[type] == identifier[TokenTypes] . identifier[ASSIGN] :
keyword[return] identifier[self] . identifier[assign_statement] ()
keyword[else] :
keyword[return] identifier[self] . identifier[expression] ()
keyword[elif] identifier[self] . identifier[cur_token] . identifier[type] keyword[in] identifier[TokenTypes] . identifier[control] ( identifier[self] . identifier[features] ):
keyword[return] identifier[self] . identifier[control] ()
keyword[elif] identifier[self] . identifier[cur_token] . identifier[type] keyword[in] identifier[TokenTypes] . identifier[loop] ( identifier[self] . identifier[features] ):
keyword[return] identifier[self] . identifier[loop] ()
keyword[elif] identifier[self] . identifier[cur_token] . identifier[type] keyword[in] identifier[TokenTypes] . identifier[func] ( identifier[self] . identifier[features] ):
keyword[if] identifier[self] . identifier[cur_token] . identifier[type] == identifier[TokenTypes] . identifier[FUNC] :
keyword[return] identifier[self] . identifier[func] ()
keyword[elif] identifier[self] . identifier[cur_token] . identifier[type] == identifier[TokenTypes] . identifier[RETURN] :
keyword[return] identifier[self] . identifier[return_statement] ()
identifier[self] . identifier[error] ( literal[string] )
|
def statement(self):
"""
statement : assign_statement
| expression
| control
| empty
Feature For Loop adds:
| loop
Feature Func adds:
| func
| return statement
"""
if self.cur_token.type == TokenTypes.VAR:
self.tokenizer.start_saving(self.cur_token)
self.variable()
peek_var = self.cur_token
self.tokenizer.replay()
self.eat()
if peek_var.type == TokenTypes.ASSIGN:
return self.assign_statement() # depends on [control=['if'], data=[]]
else:
return self.expression() # depends on [control=['if'], data=[]]
elif self.cur_token.type in TokenTypes.control(self.features):
return self.control() # depends on [control=['if'], data=[]]
elif self.cur_token.type in TokenTypes.loop(self.features):
return self.loop() # depends on [control=['if'], data=[]]
elif self.cur_token.type in TokenTypes.func(self.features):
if self.cur_token.type == TokenTypes.FUNC:
return self.func() # depends on [control=['if'], data=[]]
elif self.cur_token.type == TokenTypes.RETURN:
return self.return_statement() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
self.error('Invalid token or unfinished statement')
|
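The statement() parser above peeks past a variable with tokenizer.start_saving()/replay() to decide between an assignment and a bare expression. The toy class below (names and API simplified, not the project's real tokenizer) shows that save-and-rewind trick in isolation.

class ReplayTokenizer:
    def __init__(self, tokens):
        self.tokens = list(tokens)
        self.pos = 0
        self.mark = None

    def next(self):
        tok = self.tokens[self.pos]
        self.pos += 1
        return tok

    def start_saving(self):
        self.mark = self.pos  # remember where the lookahead began

    def replay(self):
        self.pos = self.mark  # rewind so the sub-parser re-reads the tokens

tz = ReplayTokenizer(['x', '=', '1'])
tz.start_saving()
tz.next()                      # consume the variable token ('x')
nxt = tz.next()                # peek: does '=' follow?
tz.replay()
print('assignment' if nxt == '=' else 'expression')  # assignment
print(tz.next())               # 'x' again, exactly as the chosen parser expects
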
def WriteFileFooter(self):
"""Writes the file footer (finished the file)."""
if not self._stream:
raise ArchiveAlreadyClosedError(
"Attempting to write to a ZIP archive that was already closed.")
if self.cur_cmpr:
buf = self.cur_cmpr.flush()
self.cur_compress_size += len(buf)
self.cur_zinfo.compress_size = self.cur_compress_size
self._stream.write(buf)
else:
self.cur_zinfo.compress_size = self.cur_file_size
self.cur_zinfo.CRC = self.cur_crc
self.cur_zinfo.file_size = self.cur_file_size
    # The zip footer has an 8-byte limit for sizes (4 bytes each), so if
    # we compress a file larger than 4 GB, the code below will not work.
    # The ZIP64 convention is to write 0xffffffff for the compressed and
    # uncompressed size in those cases. The actual size is written by
    # the library for us anyway, so those fields are redundant.
cur_file_size = min(0xffffffff, self.cur_file_size)
cur_compress_size = min(0xffffffff, self.cur_compress_size)
    # Write the data descriptor the ZIP64 way by default. We never know how
    # large the archive may become as we're generating it dynamically.
#
# crc-32 8 bytes (little endian)
# compressed size 8 bytes (little endian)
# uncompressed size 8 bytes (little endian)
self._stream.write(
struct.pack("<LLL", self.cur_crc, cur_compress_size, cur_file_size))
# Register the file in the zip file, so that central directory gets
# written correctly.
self._zip_fd.filelist.append(self.cur_zinfo)
self._zip_fd.NameToInfo[self.cur_zinfo.filename] = self.cur_zinfo
self._ResetState()
return self._stream.GetValueAndReset()
|
def function[WriteFileFooter, parameter[self]]:
    constant[Writes the file footer (finishes the file).]
if <ast.UnaryOp object at 0x7da1b1c0f610> begin[:]
<ast.Raise object at 0x7da1b1c0f550>
if name[self].cur_cmpr begin[:]
variable[buf] assign[=] call[name[self].cur_cmpr.flush, parameter[]]
<ast.AugAssign object at 0x7da1b1c0f5e0>
name[self].cur_zinfo.compress_size assign[=] name[self].cur_compress_size
call[name[self]._stream.write, parameter[name[buf]]]
name[self].cur_zinfo.CRC assign[=] name[self].cur_crc
name[self].cur_zinfo.file_size assign[=] name[self].cur_file_size
variable[cur_file_size] assign[=] call[name[min], parameter[constant[4294967295], name[self].cur_file_size]]
variable[cur_compress_size] assign[=] call[name[min], parameter[constant[4294967295], name[self].cur_compress_size]]
call[name[self]._stream.write, parameter[call[name[struct].pack, parameter[constant[<LLL], name[self].cur_crc, name[cur_compress_size], name[cur_file_size]]]]]
call[name[self]._zip_fd.filelist.append, parameter[name[self].cur_zinfo]]
call[name[self]._zip_fd.NameToInfo][name[self].cur_zinfo.filename] assign[=] name[self].cur_zinfo
call[name[self]._ResetState, parameter[]]
return[call[name[self]._stream.GetValueAndReset, parameter[]]]
|
keyword[def] identifier[WriteFileFooter] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[_stream] :
keyword[raise] identifier[ArchiveAlreadyClosedError] (
literal[string] )
keyword[if] identifier[self] . identifier[cur_cmpr] :
identifier[buf] = identifier[self] . identifier[cur_cmpr] . identifier[flush] ()
identifier[self] . identifier[cur_compress_size] += identifier[len] ( identifier[buf] )
identifier[self] . identifier[cur_zinfo] . identifier[compress_size] = identifier[self] . identifier[cur_compress_size]
identifier[self] . identifier[_stream] . identifier[write] ( identifier[buf] )
keyword[else] :
identifier[self] . identifier[cur_zinfo] . identifier[compress_size] = identifier[self] . identifier[cur_file_size]
identifier[self] . identifier[cur_zinfo] . identifier[CRC] = identifier[self] . identifier[cur_crc]
identifier[self] . identifier[cur_zinfo] . identifier[file_size] = identifier[self] . identifier[cur_file_size]
identifier[cur_file_size] = identifier[min] ( literal[int] , identifier[self] . identifier[cur_file_size] )
identifier[cur_compress_size] = identifier[min] ( literal[int] , identifier[self] . identifier[cur_compress_size] )
identifier[self] . identifier[_stream] . identifier[write] (
identifier[struct] . identifier[pack] ( literal[string] , identifier[self] . identifier[cur_crc] , identifier[cur_compress_size] , identifier[cur_file_size] ))
identifier[self] . identifier[_zip_fd] . identifier[filelist] . identifier[append] ( identifier[self] . identifier[cur_zinfo] )
identifier[self] . identifier[_zip_fd] . identifier[NameToInfo] [ identifier[self] . identifier[cur_zinfo] . identifier[filename] ]= identifier[self] . identifier[cur_zinfo]
identifier[self] . identifier[_ResetState] ()
keyword[return] identifier[self] . identifier[_stream] . identifier[GetValueAndReset] ()
|
def WriteFileFooter(self):
"""Writes the file footer (finished the file)."""
if not self._stream:
raise ArchiveAlreadyClosedError('Attempting to write to a ZIP archive that was already closed.') # depends on [control=['if'], data=[]]
if self.cur_cmpr:
buf = self.cur_cmpr.flush()
self.cur_compress_size += len(buf)
self.cur_zinfo.compress_size = self.cur_compress_size
self._stream.write(buf) # depends on [control=['if'], data=[]]
else:
self.cur_zinfo.compress_size = self.cur_file_size
self.cur_zinfo.CRC = self.cur_crc
self.cur_zinfo.file_size = self.cur_file_size
        # The zip footer has an 8-byte limit for sizes (4 bytes each), so if
        # we compress a file larger than 4 GB, the code below will not work.
        # The ZIP64 convention is to write 0xffffffff for the compressed and
        # uncompressed size in those cases. The actual size is written by
        # the library for us anyway, so those fields are redundant.
cur_file_size = min(4294967295, self.cur_file_size)
cur_compress_size = min(4294967295, self.cur_compress_size)
        # Write the data descriptor the ZIP64 way by default. We never know how
        # large the archive may become as we're generating it dynamically.
#
# crc-32 8 bytes (little endian)
# compressed size 8 bytes (little endian)
# uncompressed size 8 bytes (little endian)
self._stream.write(struct.pack('<LLL', self.cur_crc, cur_compress_size, cur_file_size))
# Register the file in the zip file, so that central directory gets
# written correctly.
self._zip_fd.filelist.append(self.cur_zinfo)
self._zip_fd.NameToInfo[self.cur_zinfo.filename] = self.cur_zinfo
self._ResetState()
return self._stream.GetValueAndReset()
|
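A standalone look at the data-descriptor packing WriteFileFooter performs, using only the standard library; the payload is made up. It mirrors the struct.pack('<LLL', ...) call and the min(0xffffffff, ...) clamp applied to oversized values.

import struct
import zlib

data = b'hello world'
crc = zlib.crc32(data) & 0xffffffff
compressed = zlib.compress(data)

descriptor = struct.pack('<LLL',
                         crc,
                         min(0xffffffff, len(compressed)),
                         min(0xffffffff, len(data)))
print(descriptor.hex())  # crc-32, compressed size, uncompressed size
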
def str(self, space=False):
"""String version. Set space=True to line them all up nicely."""
return "%s/%s (%s)" % (str(int(self.id)).rjust(space and 4),
self.name.ljust(space and 50),
"; ".join(str(a) for a in self.args))
|
def function[str, parameter[self, space]]:
constant[String version. Set space=True to line them all up nicely.]
return[binary_operation[constant[%s/%s (%s)] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da18f00e500>, <ast.Call object at 0x7da18f00d6c0>, <ast.Call object at 0x7da18f00e290>]]]]
|
keyword[def] identifier[str] ( identifier[self] , identifier[space] = keyword[False] ):
literal[string]
keyword[return] literal[string] %( identifier[str] ( identifier[int] ( identifier[self] . identifier[id] )). identifier[rjust] ( identifier[space] keyword[and] literal[int] ),
identifier[self] . identifier[name] . identifier[ljust] ( identifier[space] keyword[and] literal[int] ),
literal[string] . identifier[join] ( identifier[str] ( identifier[a] ) keyword[for] identifier[a] keyword[in] identifier[self] . identifier[args] ))
|
def str(self, space=False):
"""String version. Set space=True to line them all up nicely."""
return '%s/%s (%s)' % (str(int(self.id)).rjust(space and 4), self.name.ljust(space and 50), '; '.join((str(a) for a in self.args)))
|
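The `space and 4` idiom in str() above leans on Python treating False as 0: rjust(False) pads to width 0 (a no-op) while rjust(4) pads to four columns. A tiny stand-in class (hypothetical names) makes the two modes visible.

class Fn:
    def __init__(self, id, name, args):
        self.id, self.name, self.args = id, name, args

    def str(self, space=False):
        return "%s/%s (%s)" % (str(int(self.id)).rjust(space and 4),
                               self.name.ljust(space and 50),
                               "; ".join(str(a) for a in self.args))

f = Fn(7, "OBS_GetData", ["geomvals", "meta"])
print(f.str())            # 7/OBS_GetData (geomvals; meta)
print(f.str(space=True))  # padded id and name columns line up
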
def get_recurring_bill_by_subscription(self, subscription_id):
"""
        Queries invoices that are paid or pending payment. The query can be
        made by client, by subscription, or by date range.
Args:
subscription_id:
Returns:
"""
params = {
"subscriptionId": subscription_id,
}
return self.client._get(self.url + 'recurringBill', params=params, headers=self.get_headers())
|
def function[get_recurring_bill_by_subscription, parameter[self, subscription_id]]:
constant[
    Queries invoices that are paid or pending payment. The query can be
    made by client, by subscription, or by date range.
Args:
subscription_id:
Returns:
]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da204961f30>], [<ast.Name object at 0x7da204962a70>]]
return[call[name[self].client._get, parameter[binary_operation[name[self].url + constant[recurringBill]]]]]
|
keyword[def] identifier[get_recurring_bill_by_subscription] ( identifier[self] , identifier[subscription_id] ):
literal[string]
identifier[params] ={
literal[string] : identifier[subscription_id] ,
}
keyword[return] identifier[self] . identifier[client] . identifier[_get] ( identifier[self] . identifier[url] + literal[string] , identifier[params] = identifier[params] , identifier[headers] = identifier[self] . identifier[get_headers] ())
|
def get_recurring_bill_by_subscription(self, subscription_id):
"""
        Queries invoices that are paid or pending payment. The query can be
        made by client, by subscription, or by date range.
Args:
subscription_id:
Returns:
"""
params = {'subscriptionId': subscription_id}
return self.client._get(self.url + 'recurringBill', params=params, headers=self.get_headers())
|
def get(self, sid):
"""
Constructs a EventContext
:param sid: The sid
:returns: twilio.rest.taskrouter.v1.workspace.event.EventContext
:rtype: twilio.rest.taskrouter.v1.workspace.event.EventContext
"""
return EventContext(self._version, workspace_sid=self._solution['workspace_sid'], sid=sid, )
|
def function[get, parameter[self, sid]]:
constant[
Constructs a EventContext
:param sid: The sid
:returns: twilio.rest.taskrouter.v1.workspace.event.EventContext
:rtype: twilio.rest.taskrouter.v1.workspace.event.EventContext
]
return[call[name[EventContext], parameter[name[self]._version]]]
|
keyword[def] identifier[get] ( identifier[self] , identifier[sid] ):
literal[string]
keyword[return] identifier[EventContext] ( identifier[self] . identifier[_version] , identifier[workspace_sid] = identifier[self] . identifier[_solution] [ literal[string] ], identifier[sid] = identifier[sid] ,)
|
def get(self, sid):
"""
Constructs a EventContext
:param sid: The sid
:returns: twilio.rest.taskrouter.v1.workspace.event.EventContext
:rtype: twilio.rest.taskrouter.v1.workspace.event.EventContext
"""
return EventContext(self._version, workspace_sid=self._solution['workspace_sid'], sid=sid)
|
def create(self, **kwargs):
"""Create the resource on the BIG-IP®.
Uses HTTP POST to the `collection` URI to create a resource associated
with a new unique URI on the device.
..
If you do a create with inheritedTrafficGroup set to 'false' you
must also have a trafficGroup. This pattern generalizes like so:
If the presence of a param implies an additional required param,
then simply
self._meta_data['required_creation_params'].update(IMPLIED),
before the call to self._create(**kwargs), wherein req params are
checked.
We refer to this property as "implied-required parameters" because
the presence of one parameter, or parameter value (e.g.
inheritedTrafficGroup), implies that another parameter is required.
.. note::
            If you are creating with ``inheritedTrafficGroup`` set to
            :obj:`False` you must also supply a ``trafficGroup``.
:param kwargs: All the key-values needed to create the resource
:returns: ``self`` - A python object that represents the object's
configuration and state on the BIG-IP®.
"""
itg = kwargs.get('inheritedTrafficGroup', None)
if itg and itg == 'false':
self._meta_data['required_creation_parameters'].\
update(('trafficGroup',))
try:
if not kwargs['trafficGroup']:
raise MissingRequiredCreationParameter(
"trafficGroup must not be falsey but it's: %r"
% kwargs['trafficGroup'])
except KeyError:
pass
new_instance = self._create(**kwargs)
return new_instance
|
def function[create, parameter[self]]:
constant[Create the resource on the BIG-IP®.
Uses HTTP POST to the `collection` URI to create a resource associated
with a new unique URI on the device.
..
If you do a create with inheritedTrafficGroup set to 'false' you
must also have a trafficGroup. This pattern generalizes like so:
If the presence of a param implies an additional required param,
then simply
            self._meta_data['required_creation_parameters'].update(IMPLIED),
before the call to self._create(**kwargs), wherein req params are
checked.
We refer to this property as "implied-required parameters" because
the presence of one parameter, or parameter value (e.g.
inheritedTrafficGroup), implies that another parameter is required.
.. note::
            If you are creating with ``inheritedTrafficGroup`` set to
            :obj:`False` you must also supply a ``trafficGroup``.
:param kwargs: All the key-values needed to create the resource
:returns: ``self`` - A python object that represents the object's
configuration and state on the BIG-IP®.
]
variable[itg] assign[=] call[name[kwargs].get, parameter[constant[inheritedTrafficGroup], constant[None]]]
if <ast.BoolOp object at 0x7da20c6a8130> begin[:]
call[call[name[self]._meta_data][constant[required_creation_parameters]].update, parameter[tuple[[<ast.Constant object at 0x7da2043444c0>]]]]
<ast.Try object at 0x7da204346710>
variable[new_instance] assign[=] call[name[self]._create, parameter[]]
return[name[new_instance]]
|
keyword[def] identifier[create] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[itg] = identifier[kwargs] . identifier[get] ( literal[string] , keyword[None] )
keyword[if] identifier[itg] keyword[and] identifier[itg] == literal[string] :
identifier[self] . identifier[_meta_data] [ literal[string] ]. identifier[update] (( literal[string] ,))
keyword[try] :
keyword[if] keyword[not] identifier[kwargs] [ literal[string] ]:
keyword[raise] identifier[MissingRequiredCreationParameter] (
literal[string]
% identifier[kwargs] [ literal[string] ])
keyword[except] identifier[KeyError] :
keyword[pass]
identifier[new_instance] = identifier[self] . identifier[_create] (** identifier[kwargs] )
keyword[return] identifier[new_instance]
|
def create(self, **kwargs):
"""Create the resource on the BIG-IP®.
Uses HTTP POST to the `collection` URI to create a resource associated
with a new unique URI on the device.
..
If you do a create with inheritedTrafficGroup set to 'false' you
must also have a trafficGroup. This pattern generalizes like so:
If the presence of a param implies an additional required param,
then simply
            self._meta_data['required_creation_parameters'].update(IMPLIED),
before the call to self._create(**kwargs), wherein req params are
checked.
We refer to this property as "implied-required parameters" because
the presence of one parameter, or parameter value (e.g.
inheritedTrafficGroup), implies that another parameter is required.
.. note::
            If you are creating with ``inheritedTrafficGroup`` set to
            :obj:`False` you must also supply a ``trafficGroup``.
:param kwargs: All the key-values needed to create the resource
:returns: ``self`` - A python object that represents the object's
configuration and state on the BIG-IP®.
"""
itg = kwargs.get('inheritedTrafficGroup', None)
if itg and itg == 'false':
self._meta_data['required_creation_parameters'].update(('trafficGroup',))
try:
if not kwargs['trafficGroup']:
raise MissingRequiredCreationParameter("trafficGroup must not be falsey but it's: %r" % kwargs['trafficGroup']) # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except KeyError:
pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
new_instance = self._create(**kwargs)
return new_instance
|
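A minimal sketch of the "implied-required parameters" pattern the create() docstring describes, with made-up field names and no BIG-IP calls: seeing inheritedTrafficGroup == 'false' adds trafficGroup to the required set before validation runs.

class Resource:
    def __init__(self):
        self.required = {'name', 'partition'}

    def create(self, **kwargs):
        if kwargs.get('inheritedTrafficGroup') == 'false':
            # this value implies another parameter must be present
            self.required.update(('trafficGroup',))
        missing = self.required - set(kwargs)
        if missing:
            raise ValueError('missing required parameters: %s' % sorted(missing))
        return kwargs  # stand-in for the real _create() call

r = Resource()
print(r.create(name='vs1', partition='Common',
               inheritedTrafficGroup='false', trafficGroup='tg-1'))
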
async def loadCoreModule(self, ctor, conf=None):
'''
Load a single cortex module with the given ctor and conf.
Args:
ctor (str): The python module class path
conf (dict):Config dictionary for the module
'''
if conf is None:
conf = {}
modu = self._loadCoreModule(ctor, conf=conf)
try:
await s_coro.ornot(modu.preCoreModule)
except asyncio.CancelledError: # pragma: no cover
raise
except Exception:
logger.exception(f'module preCoreModule failed: {ctor}')
self.modules.pop(ctor, None)
return
mdefs = modu.getModelDefs()
self.model.addDataModels(mdefs)
cmds = modu.getStormCmds()
[self.addStormCmd(c) for c in cmds]
try:
await s_coro.ornot(modu.initCoreModule)
except asyncio.CancelledError: # pragma: no cover
raise
except Exception:
logger.exception(f'module initCoreModule failed: {ctor}')
self.modules.pop(ctor, None)
return
await self.fire('core:module:load', module=ctor)
return modu
|
<ast.AsyncFunctionDef object at 0x7da20c76e440>
|
keyword[async] keyword[def] identifier[loadCoreModule] ( identifier[self] , identifier[ctor] , identifier[conf] = keyword[None] ):
literal[string]
keyword[if] identifier[conf] keyword[is] keyword[None] :
identifier[conf] ={}
identifier[modu] = identifier[self] . identifier[_loadCoreModule] ( identifier[ctor] , identifier[conf] = identifier[conf] )
keyword[try] :
keyword[await] identifier[s_coro] . identifier[ornot] ( identifier[modu] . identifier[preCoreModule] )
keyword[except] identifier[asyncio] . identifier[CancelledError] :
keyword[raise]
keyword[except] identifier[Exception] :
identifier[logger] . identifier[exception] ( literal[string] )
identifier[self] . identifier[modules] . identifier[pop] ( identifier[ctor] , keyword[None] )
keyword[return]
identifier[mdefs] = identifier[modu] . identifier[getModelDefs] ()
identifier[self] . identifier[model] . identifier[addDataModels] ( identifier[mdefs] )
identifier[cmds] = identifier[modu] . identifier[getStormCmds] ()
[ identifier[self] . identifier[addStormCmd] ( identifier[c] ) keyword[for] identifier[c] keyword[in] identifier[cmds] ]
keyword[try] :
keyword[await] identifier[s_coro] . identifier[ornot] ( identifier[modu] . identifier[initCoreModule] )
keyword[except] identifier[asyncio] . identifier[CancelledError] :
keyword[raise]
keyword[except] identifier[Exception] :
identifier[logger] . identifier[exception] ( literal[string] )
identifier[self] . identifier[modules] . identifier[pop] ( identifier[ctor] , keyword[None] )
keyword[return]
keyword[await] identifier[self] . identifier[fire] ( literal[string] , identifier[module] = identifier[ctor] )
keyword[return] identifier[modu]
|
async def loadCoreModule(self, ctor, conf=None):
"""
Load a single cortex module with the given ctor and conf.
Args:
ctor (str): The python module class path
conf (dict):Config dictionary for the module
"""
if conf is None:
conf = {} # depends on [control=['if'], data=['conf']]
modu = self._loadCoreModule(ctor, conf=conf)
try:
await s_coro.ornot(modu.preCoreModule) # depends on [control=['try'], data=[]]
except asyncio.CancelledError: # pragma: no cover
raise # depends on [control=['except'], data=[]]
except Exception:
logger.exception(f'module preCoreModule failed: {ctor}')
self.modules.pop(ctor, None)
return # depends on [control=['except'], data=[]]
mdefs = modu.getModelDefs()
self.model.addDataModels(mdefs)
cmds = modu.getStormCmds()
[self.addStormCmd(c) for c in cmds]
try:
await s_coro.ornot(modu.initCoreModule) # depends on [control=['try'], data=[]]
except asyncio.CancelledError: # pragma: no cover
raise # depends on [control=['except'], data=[]]
except Exception:
logger.exception(f'module initCoreModule failed: {ctor}')
self.modules.pop(ctor, None)
return # depends on [control=['except'], data=[]]
await self.fire('core:module:load', module=ctor)
return modu
|
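loadCoreModule() runs its module hooks through s_coro.ornot(), which accepts either a plain callable or a coroutine function. Below is a minimal standard-library stand-in for that sync-or-async dispatch, not synapse's actual implementation.

import asyncio
import inspect

async def ornot(func, *args, **kwargs):
    ret = func(*args, **kwargs)
    if inspect.iscoroutine(ret):
        return await ret   # async hook: wait for the result
    return ret             # sync hook: the value is already final

def sync_hook():
    return 'sync ok'

async def async_hook():
    return 'async ok'

async def main():
    print(await ornot(sync_hook))
    print(await ornot(async_hook))

asyncio.run(main())
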
def _update_top_pipeline(self):
"""Helper function to update the _optimized_pipeline field."""
# Store the pipeline with the highest internal testing score
if self._pareto_front:
self._optimized_pipeline_score = -float('inf')
for pipeline, pipeline_scores in zip(self._pareto_front.items, reversed(self._pareto_front.keys)):
if pipeline_scores.wvalues[1] > self._optimized_pipeline_score:
self._optimized_pipeline = pipeline
self._optimized_pipeline_score = pipeline_scores.wvalues[1]
if not self._optimized_pipeline:
raise RuntimeError('There was an error in the TPOT optimization '
'process. This could be because the data was '
'not formatted properly, or because data for '
'a regression problem was provided to the '
'TPOTClassifier object. Please make sure you '
'passed the data to TPOT correctly.')
else:
pareto_front_wvalues = [pipeline_scores.wvalues[1] for pipeline_scores in self._pareto_front.keys]
if not self._last_optimized_pareto_front:
self._last_optimized_pareto_front = pareto_front_wvalues
elif self._last_optimized_pareto_front == pareto_front_wvalues:
self._last_optimized_pareto_front_n_gens += 1
else:
self._last_optimized_pareto_front = pareto_front_wvalues
self._last_optimized_pareto_front_n_gens = 0
else:
            # If the user presses CTRL+C during the initial generation, self._pareto_front (halloffame) should not be updated yet.
            # We need to raise RuntimeError because no pipeline has been optimized.
raise RuntimeError('A pipeline has not yet been optimized. Please call fit() first.')
|
def function[_update_top_pipeline, parameter[self]]:
constant[Helper function to update the _optimized_pipeline field.]
if name[self]._pareto_front begin[:]
name[self]._optimized_pipeline_score assign[=] <ast.UnaryOp object at 0x7da2041db760>
for taget[tuple[[<ast.Name object at 0x7da2041da380>, <ast.Name object at 0x7da2041d9720>]]] in starred[call[name[zip], parameter[name[self]._pareto_front.items, call[name[reversed], parameter[name[self]._pareto_front.keys]]]]] begin[:]
if compare[call[name[pipeline_scores].wvalues][constant[1]] greater[>] name[self]._optimized_pipeline_score] begin[:]
name[self]._optimized_pipeline assign[=] name[pipeline]
name[self]._optimized_pipeline_score assign[=] call[name[pipeline_scores].wvalues][constant[1]]
if <ast.UnaryOp object at 0x7da2041dabf0> begin[:]
<ast.Raise object at 0x7da204565d50>
|
keyword[def] identifier[_update_top_pipeline] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_pareto_front] :
identifier[self] . identifier[_optimized_pipeline_score] =- identifier[float] ( literal[string] )
keyword[for] identifier[pipeline] , identifier[pipeline_scores] keyword[in] identifier[zip] ( identifier[self] . identifier[_pareto_front] . identifier[items] , identifier[reversed] ( identifier[self] . identifier[_pareto_front] . identifier[keys] )):
keyword[if] identifier[pipeline_scores] . identifier[wvalues] [ literal[int] ]> identifier[self] . identifier[_optimized_pipeline_score] :
identifier[self] . identifier[_optimized_pipeline] = identifier[pipeline]
identifier[self] . identifier[_optimized_pipeline_score] = identifier[pipeline_scores] . identifier[wvalues] [ literal[int] ]
keyword[if] keyword[not] identifier[self] . identifier[_optimized_pipeline] :
keyword[raise] identifier[RuntimeError] ( literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string] )
keyword[else] :
identifier[pareto_front_wvalues] =[ identifier[pipeline_scores] . identifier[wvalues] [ literal[int] ] keyword[for] identifier[pipeline_scores] keyword[in] identifier[self] . identifier[_pareto_front] . identifier[keys] ]
keyword[if] keyword[not] identifier[self] . identifier[_last_optimized_pareto_front] :
identifier[self] . identifier[_last_optimized_pareto_front] = identifier[pareto_front_wvalues]
keyword[elif] identifier[self] . identifier[_last_optimized_pareto_front] == identifier[pareto_front_wvalues] :
identifier[self] . identifier[_last_optimized_pareto_front_n_gens] += literal[int]
keyword[else] :
identifier[self] . identifier[_last_optimized_pareto_front] = identifier[pareto_front_wvalues]
identifier[self] . identifier[_last_optimized_pareto_front_n_gens] = literal[int]
keyword[else] :
keyword[raise] identifier[RuntimeError] ( literal[string] )
|
def _update_top_pipeline(self):
"""Helper function to update the _optimized_pipeline field."""
# Store the pipeline with the highest internal testing score
if self._pareto_front:
self._optimized_pipeline_score = -float('inf')
for (pipeline, pipeline_scores) in zip(self._pareto_front.items, reversed(self._pareto_front.keys)):
if pipeline_scores.wvalues[1] > self._optimized_pipeline_score:
self._optimized_pipeline = pipeline
self._optimized_pipeline_score = pipeline_scores.wvalues[1] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
if not self._optimized_pipeline:
raise RuntimeError('There was an error in the TPOT optimization process. This could be because the data was not formatted properly, or because data for a regression problem was provided to the TPOTClassifier object. Please make sure you passed the data to TPOT correctly.') # depends on [control=['if'], data=[]]
else:
pareto_front_wvalues = [pipeline_scores.wvalues[1] for pipeline_scores in self._pareto_front.keys]
if not self._last_optimized_pareto_front:
self._last_optimized_pareto_front = pareto_front_wvalues # depends on [control=['if'], data=[]]
elif self._last_optimized_pareto_front == pareto_front_wvalues:
self._last_optimized_pareto_front_n_gens += 1 # depends on [control=['if'], data=[]]
else:
self._last_optimized_pareto_front = pareto_front_wvalues
self._last_optimized_pareto_front_n_gens = 0 # depends on [control=['if'], data=[]]
else:
            # If the user presses CTRL+C during the initial generation, self._pareto_front (halloffame) should not be updated yet.
            # We need to raise RuntimeError because no pipeline has been optimized.
raise RuntimeError('A pipeline has not yet been optimized. Please call fit() first.')
|
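The core of _update_top_pipeline() reduced to plain data, with invented scores: scan (pipeline, weighted-values) pairs and keep the one whose second objective is highest, mirroring the wvalues[1] comparison above.

pairs = [('pipe_a', (-2.0, 0.81)),
         ('pipe_b', (-1.0, 0.86)),
         ('pipe_c', (-3.0, 0.79))]

best_pipeline, best_score = None, -float('inf')
for pipeline, wvalues in pairs:
    if wvalues[1] > best_score:
        best_pipeline, best_score = pipeline, wvalues[1]

print(best_pipeline, best_score)  # pipe_b 0.86
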
def _set_uplink_switch(self, v, load=False):
"""
Setter method for uplink_switch, mapped from YANG variable /uplink_switch (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_uplink_switch is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_uplink_switch() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=uplink_switch.uplink_switch, is_container='container', presence=False, yang_name="uplink-switch", rest_name="uplink-switch", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Enable/Disable Protected ports capability', u'callpoint': u'global-uplink-switch-cfg-cp', u'sort-priority': u'RUNNCFG_LEVEL_ROUTER_GLOBAL'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """uplink_switch must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=uplink_switch.uplink_switch, is_container='container', presence=False, yang_name="uplink-switch", rest_name="uplink-switch", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Enable/Disable Protected ports capability', u'callpoint': u'global-uplink-switch-cfg-cp', u'sort-priority': u'RUNNCFG_LEVEL_ROUTER_GLOBAL'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)""",
})
self.__uplink_switch = t
if hasattr(self, '_set'):
self._set()
|
def function[_set_uplink_switch, parameter[self, v, load]]:
constant[
Setter method for uplink_switch, mapped from YANG variable /uplink_switch (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_uplink_switch is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_uplink_switch() directly.
]
if call[name[hasattr], parameter[name[v], constant[_utype]]] begin[:]
variable[v] assign[=] call[name[v]._utype, parameter[name[v]]]
<ast.Try object at 0x7da18f8137f0>
name[self].__uplink_switch assign[=] name[t]
if call[name[hasattr], parameter[name[self], constant[_set]]] begin[:]
call[name[self]._set, parameter[]]
|
keyword[def] identifier[_set_uplink_switch] ( identifier[self] , identifier[v] , identifier[load] = keyword[False] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[v] , literal[string] ):
identifier[v] = identifier[v] . identifier[_utype] ( identifier[v] )
keyword[try] :
identifier[t] = identifier[YANGDynClass] ( identifier[v] , identifier[base] = identifier[uplink_switch] . identifier[uplink_switch] , identifier[is_container] = literal[string] , identifier[presence] = keyword[False] , identifier[yang_name] = literal[string] , identifier[rest_name] = literal[string] , identifier[parent] = identifier[self] , identifier[path_helper] = identifier[self] . identifier[_path_helper] , identifier[extmethods] = identifier[self] . identifier[_extmethods] , identifier[register_paths] = keyword[True] , identifier[extensions] ={ literal[string] :{ literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] }}, identifier[namespace] = literal[string] , identifier[defining_module] = literal[string] , identifier[yang_type] = literal[string] , identifier[is_config] = keyword[True] )
keyword[except] ( identifier[TypeError] , identifier[ValueError] ):
keyword[raise] identifier[ValueError] ({
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
})
identifier[self] . identifier[__uplink_switch] = identifier[t]
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ):
identifier[self] . identifier[_set] ()
|
def _set_uplink_switch(self, v, load=False):
"""
Setter method for uplink_switch, mapped from YANG variable /uplink_switch (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_uplink_switch is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_uplink_switch() directly.
"""
if hasattr(v, '_utype'):
v = v._utype(v) # depends on [control=['if'], data=[]]
try:
t = YANGDynClass(v, base=uplink_switch.uplink_switch, is_container='container', presence=False, yang_name='uplink-switch', rest_name='uplink-switch', parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Enable/Disable Protected ports capability', u'callpoint': u'global-uplink-switch-cfg-cp', u'sort-priority': u'RUNNCFG_LEVEL_ROUTER_GLOBAL'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True) # depends on [control=['try'], data=[]]
except (TypeError, ValueError):
raise ValueError({'error-string': 'uplink_switch must be of a type compatible with container', 'defined-type': 'container', 'generated-type': 'YANGDynClass(base=uplink_switch.uplink_switch, is_container=\'container\', presence=False, yang_name="uplink-switch", rest_name="uplink-switch", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u\'tailf-common\': {u\'info\': u\'Enable/Disable Protected ports capability\', u\'callpoint\': u\'global-uplink-switch-cfg-cp\', u\'sort-priority\': u\'RUNNCFG_LEVEL_ROUTER_GLOBAL\'}}, namespace=\'urn:brocade.com:mgmt:brocade-interface\', defining_module=\'brocade-interface\', yang_type=\'container\', is_config=True)'}) # depends on [control=['except'], data=[]]
self.__uplink_switch = t
if hasattr(self, '_set'):
self._set() # depends on [control=['if'], data=[]]
|
def get_image_data(self, ids=None, voxels=None, dense=True):
""" Slices and returns a subset of image data.
Args:
ids (list, array): A list or 1D numpy array of study ids to
return. If None, returns data for all studies.
voxels (list, array): A list or 1D numpy array of voxel indices
(i.e., rows) to return. If None, returns data for all voxels.
dense (bool): Optional boolean. When True (default), convert the
result to a dense array before returning. When False, keep as
sparse matrix.
Returns:
A 2D numpy array with voxels in rows and studies in columns.
"""
if dense and ids is None and voxels is None:
logger.warning(
"Warning: get_image_data() is being called without specifying "
"a subset of studies or voxels to retrieve. This may result in"
" a very large amount of data (several GB) being read into "
"memory. If you experience any problems, consider returning a "
"sparse matrix by passing dense=False, or pass in a list of "
"ids of voxels to retrieve only a portion of the data.")
result = self.data
if ids is not None:
idxs = np.where(np.in1d(np.array(self.ids), np.array(ids)))[0]
result = result[:, idxs]
if voxels is not None:
result = result[voxels, :]
return result.toarray() if dense else result
|
def function[get_image_data, parameter[self, ids, voxels, dense]]:
constant[ Slices and returns a subset of image data.
Args:
ids (list, array): A list or 1D numpy array of study ids to
return. If None, returns data for all studies.
voxels (list, array): A list or 1D numpy array of voxel indices
(i.e., rows) to return. If None, returns data for all voxels.
dense (bool): Optional boolean. When True (default), convert the
result to a dense array before returning. When False, keep as
sparse matrix.
Returns:
A 2D numpy array with voxels in rows and studies in columns.
]
if <ast.BoolOp object at 0x7da18ede7a00> begin[:]
call[name[logger].warning, parameter[constant[Warning: get_image_data() is being called without specifying a subset of studies or voxels to retrieve. This may result in a very large amount of data (several GB) being read into memory. If you experience any problems, consider returning a sparse matrix by passing dense=False, or pass in a list of ids of voxels to retrieve only a portion of the data.]]]
variable[result] assign[=] name[self].data
if compare[name[ids] is_not constant[None]] begin[:]
variable[idxs] assign[=] call[call[name[np].where, parameter[call[name[np].in1d, parameter[call[name[np].array, parameter[name[self].ids]], call[name[np].array, parameter[name[ids]]]]]]]][constant[0]]
variable[result] assign[=] call[name[result]][tuple[[<ast.Slice object at 0x7da18ede67a0>, <ast.Name object at 0x7da18ede51b0>]]]
if compare[name[voxels] is_not constant[None]] begin[:]
variable[result] assign[=] call[name[result]][tuple[[<ast.Name object at 0x7da18ede7f70>, <ast.Slice object at 0x7da18ede6770>]]]
return[<ast.IfExp object at 0x7da18ede75b0>]
|
keyword[def] identifier[get_image_data] ( identifier[self] , identifier[ids] = keyword[None] , identifier[voxels] = keyword[None] , identifier[dense] = keyword[True] ):
literal[string]
keyword[if] identifier[dense] keyword[and] identifier[ids] keyword[is] keyword[None] keyword[and] identifier[voxels] keyword[is] keyword[None] :
identifier[logger] . identifier[warning] (
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string] )
identifier[result] = identifier[self] . identifier[data]
keyword[if] identifier[ids] keyword[is] keyword[not] keyword[None] :
identifier[idxs] = identifier[np] . identifier[where] ( identifier[np] . identifier[in1d] ( identifier[np] . identifier[array] ( identifier[self] . identifier[ids] ), identifier[np] . identifier[array] ( identifier[ids] )))[ literal[int] ]
identifier[result] = identifier[result] [:, identifier[idxs] ]
keyword[if] identifier[voxels] keyword[is] keyword[not] keyword[None] :
identifier[result] = identifier[result] [ identifier[voxels] ,:]
keyword[return] identifier[result] . identifier[toarray] () keyword[if] identifier[dense] keyword[else] identifier[result]
|
def get_image_data(self, ids=None, voxels=None, dense=True):
""" Slices and returns a subset of image data.
Args:
ids (list, array): A list or 1D numpy array of study ids to
return. If None, returns data for all studies.
voxels (list, array): A list or 1D numpy array of voxel indices
(i.e., rows) to return. If None, returns data for all voxels.
dense (bool): Optional boolean. When True (default), convert the
result to a dense array before returning. When False, keep as
sparse matrix.
Returns:
A 2D numpy array with voxels in rows and studies in columns.
"""
if dense and ids is None and (voxels is None):
logger.warning('Warning: get_image_data() is being called without specifying a subset of studies or voxels to retrieve. This may result in a very large amount of data (several GB) being read into memory. If you experience any problems, consider returning a sparse matrix by passing dense=False, or pass in a list of ids of voxels to retrieve only a portion of the data.') # depends on [control=['if'], data=[]]
result = self.data
if ids is not None:
idxs = np.where(np.in1d(np.array(self.ids), np.array(ids)))[0]
result = result[:, idxs] # depends on [control=['if'], data=['ids']]
if voxels is not None:
result = result[voxels, :] # depends on [control=['if'], data=['voxels']]
return result.toarray() if dense else result
|
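A self-contained demo of the slicing get_image_data() performs, assuming numpy and scipy are available: columns are selected by matching study ids via np.in1d, rows by voxel index, and the matrix is densified only at the end.

import numpy as np
from scipy import sparse

data = sparse.csr_matrix(np.arange(12).reshape(4, 3))  # 4 voxels x 3 studies
all_ids = np.array([101, 102, 103])

wanted = np.array([101, 103])
col_idx = np.where(np.in1d(all_ids, wanted))[0]
result = data[:, col_idx][[0, 2], :]   # voxels 0 and 2, studies 101 and 103

print(result.toarray())
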
def get_data_dirs(__pkg: str) -> List[str]:
"""Return all data directories for given package.
Args:
__pkg: Package name
"""
dirs = [user_data(__pkg), ]
dirs.extend(path.expanduser(path.sep.join([d, __pkg]))
for d in getenv('XDG_DATA_DIRS',
'/usr/local/share/:/usr/share/').split(':'))
return [d for d in dirs if path.isdir(d)]
|
def function[get_data_dirs, parameter[__pkg]]:
constant[Return all data directories for given package.
Args:
__pkg: Package name
]
variable[dirs] assign[=] list[[<ast.Call object at 0x7da18ede4820>]]
call[name[dirs].extend, parameter[<ast.GeneratorExp object at 0x7da18ede49d0>]]
return[<ast.ListComp object at 0x7da20c6c7c70>]
|
keyword[def] identifier[get_data_dirs] ( identifier[__pkg] : identifier[str] )-> identifier[List] [ identifier[str] ]:
literal[string]
identifier[dirs] =[ identifier[user_data] ( identifier[__pkg] ),]
identifier[dirs] . identifier[extend] ( identifier[path] . identifier[expanduser] ( identifier[path] . identifier[sep] . identifier[join] ([ identifier[d] , identifier[__pkg] ]))
keyword[for] identifier[d] keyword[in] identifier[getenv] ( literal[string] ,
literal[string] ). identifier[split] ( literal[string] ))
keyword[return] [ identifier[d] keyword[for] identifier[d] keyword[in] identifier[dirs] keyword[if] identifier[path] . identifier[isdir] ( identifier[d] )]
|
def get_data_dirs(__pkg: str) -> List[str]:
"""Return all data directories for given package.
Args:
__pkg: Package name
"""
dirs = [user_data(__pkg)]
dirs.extend((path.expanduser(path.sep.join([d, __pkg])) for d in getenv('XDG_DATA_DIRS', '/usr/local/share/:/usr/share/').split(':')))
return [d for d in dirs if path.isdir(d)]
|
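A quick standalone check of the XDG_DATA_DIRS expansion inside get_data_dirs(), with a made-up package name. Note the default value's trailing colon produces an empty entry, which becomes a bare '/examplepkg' candidate that the original's path.isdir() filter then discards; the doubled slashes are harmless on POSIX.

from os import path, getenv

pkg = 'examplepkg'  # hypothetical package name
candidates = [path.expanduser(path.sep.join([d, pkg]))
              for d in getenv('XDG_DATA_DIRS',
                              '/usr/local/share/:/usr/share/').split(':')]
print(candidates)
# with XDG_DATA_DIRS unset:
# ['/usr/local/share//examplepkg', '/usr/share//examplepkg', '/examplepkg']
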
def check_in(request, action):
"""This function checks for missing properties in the request dict
for the corresponding action."""
if not request:
req_str = ""
for idx, val in enumerate(actions[action]):
req_str += "\n" + val
erstr = "Provide a request dict with the following properties:" \
" %s" % req_str
raise ValueError(erstr)
required_fields = actions[action]
missing = []
for field in required_fields:
if not is_key_present(request, field):
missing.append(field)
if missing:
missing_string = ""
for idx, val in enumerate(missing):
missing_string += "\n" + val
erstr = "Provide the required request parameters to" \
" complete this request: %s" % missing_string
raise ValueError(erstr)
return True
|
def function[check_in, parameter[request, action]]:
constant[This function checks for missing properties in the request dict
for the corresponding action.]
if <ast.UnaryOp object at 0x7da1b236d720> begin[:]
variable[req_str] assign[=] constant[]
for taget[tuple[[<ast.Name object at 0x7da1b236dbd0>, <ast.Name object at 0x7da1b236ca30>]]] in starred[call[name[enumerate], parameter[call[name[actions]][name[action]]]]] begin[:]
<ast.AugAssign object at 0x7da1b236ea40>
variable[erstr] assign[=] binary_operation[constant[Provide a request dict with the following properties: %s] <ast.Mod object at 0x7da2590d6920> name[req_str]]
<ast.Raise object at 0x7da1b23ed480>
variable[required_fields] assign[=] call[name[actions]][name[action]]
variable[missing] assign[=] list[[]]
for taget[name[field]] in starred[name[required_fields]] begin[:]
if <ast.UnaryOp object at 0x7da1b23ec8b0> begin[:]
call[name[missing].append, parameter[name[field]]]
if name[missing] begin[:]
variable[missing_string] assign[=] constant[]
for taget[tuple[[<ast.Name object at 0x7da1b23ec6d0>, <ast.Name object at 0x7da1b23ec790>]]] in starred[call[name[enumerate], parameter[name[missing]]]] begin[:]
<ast.AugAssign object at 0x7da1b23ee5f0>
variable[erstr] assign[=] binary_operation[constant[Provide the required request parameters to complete this request: %s] <ast.Mod object at 0x7da2590d6920> name[missing_string]]
<ast.Raise object at 0x7da1b2290610>
return[constant[True]]
|
keyword[def] identifier[check_in] ( identifier[request] , identifier[action] ):
literal[string]
keyword[if] keyword[not] identifier[request] :
identifier[req_str] = literal[string]
keyword[for] identifier[idx] , identifier[val] keyword[in] identifier[enumerate] ( identifier[actions] [ identifier[action] ]):
identifier[req_str] += literal[string] + identifier[val]
identifier[erstr] = literal[string] literal[string] % identifier[req_str]
keyword[raise] identifier[ValueError] ( identifier[erstr] )
identifier[required_fields] = identifier[actions] [ identifier[action] ]
identifier[missing] =[]
keyword[for] identifier[field] keyword[in] identifier[required_fields] :
keyword[if] keyword[not] identifier[is_key_present] ( identifier[request] , identifier[field] ):
identifier[missing] . identifier[append] ( identifier[field] )
keyword[if] identifier[missing] :
identifier[missing_string] = literal[string]
keyword[for] identifier[idx] , identifier[val] keyword[in] identifier[enumerate] ( identifier[missing] ):
identifier[missing_string] += literal[string] + identifier[val]
identifier[erstr] = literal[string] literal[string] % identifier[missing_string]
keyword[raise] identifier[ValueError] ( identifier[erstr] )
keyword[return] keyword[True]
|
def check_in(request, action):
"""This function checks for missing properties in the request dict
for the corresponding action."""
if not request:
req_str = ''
for (idx, val) in enumerate(actions[action]):
req_str += '\n' + val # depends on [control=['for'], data=[]]
erstr = 'Provide a request dict with the following properties: %s' % req_str
raise ValueError(erstr) # depends on [control=['if'], data=[]]
required_fields = actions[action]
missing = []
for field in required_fields:
if not is_key_present(request, field):
missing.append(field) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['field']]
if missing:
missing_string = ''
for (idx, val) in enumerate(missing):
missing_string += '\n' + val # depends on [control=['for'], data=[]]
erstr = 'Provide the required request parameters to complete this request: %s' % missing_string
raise ValueError(erstr) # depends on [control=['if'], data=[]]
return True
|
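check_in() validates a request dict against a per-action field list. A self-contained run with stand-ins for the module-level `actions` table and `is_key_present` helper, and the validation loop condensed (both simplified relative to the original):

actions = {'pay': ['amount', 'currency', 'reference']}

def is_key_present(request, field):
    return field in request and request[field] is not None

def check_in(request, action):
    missing = [f for f in actions[action] if not is_key_present(request, f)]
    if missing:
        raise ValueError('Provide the required request parameters to '
                         'complete this request: %s' % '\n'.join(missing))
    return True

print(check_in({'amount': 10, 'currency': 'USD', 'reference': 'r1'}, 'pay'))
try:
    check_in({'amount': 10}, 'pay')
except ValueError as e:
    print(e)
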
def add_field(cls, name, descriptor):
"""Add a field to store custom tags.
:param name: the name of the property the field is accessed
through. It must not already exist on this class.
:param descriptor: an instance of :class:`MediaField`.
"""
if not isinstance(descriptor, MediaField):
raise ValueError(
u'{0} must be an instance of MediaField'.format(descriptor))
if name in cls.__dict__:
raise ValueError(
u'property "{0}" already exists on MediaField'.format(name))
setattr(cls, name, descriptor)
|
def function[add_field, parameter[cls, name, descriptor]]:
constant[Add a field to store custom tags.
:param name: the name of the property the field is accessed
through. It must not already exist on this class.
:param descriptor: an instance of :class:`MediaField`.
]
if <ast.UnaryOp object at 0x7da1b10ecc10> begin[:]
<ast.Raise object at 0x7da1b10ed6f0>
if compare[name[name] in name[cls].__dict__] begin[:]
<ast.Raise object at 0x7da1b10ef760>
call[name[setattr], parameter[name[cls], name[name], name[descriptor]]]
|
keyword[def] identifier[add_field] ( identifier[cls] , identifier[name] , identifier[descriptor] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[descriptor] , identifier[MediaField] ):
keyword[raise] identifier[ValueError] (
literal[string] . identifier[format] ( identifier[descriptor] ))
keyword[if] identifier[name] keyword[in] identifier[cls] . identifier[__dict__] :
keyword[raise] identifier[ValueError] (
literal[string] . identifier[format] ( identifier[name] ))
identifier[setattr] ( identifier[cls] , identifier[name] , identifier[descriptor] )
|
def add_field(cls, name, descriptor):
"""Add a field to store custom tags.
:param name: the name of the property the field is accessed
through. It must not already exist on this class.
:param descriptor: an instance of :class:`MediaField`.
"""
if not isinstance(descriptor, MediaField):
raise ValueError(u'{0} must be an instance of MediaField'.format(descriptor)) # depends on [control=['if'], data=[]]
if name in cls.__dict__:
raise ValueError(u'property "{0}" already exists on MediaField'.format(name)) # depends on [control=['if'], data=['name']]
setattr(cls, name, descriptor)
|
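add_field() above attaches a descriptor to the class at runtime after two guard checks. A toy version with stand-in MediaField/MediaFile classes (the real mediafile descriptors do much more) exercising both guards:

class MediaField:
    pass

class MediaFile:
    @classmethod
    def add_field(cls, name, descriptor):
        if not isinstance(descriptor, MediaField):
            raise ValueError('{0} must be an instance of MediaField'.format(descriptor))
        if name in cls.__dict__:
            raise ValueError('property "{0}" already exists'.format(name))
        setattr(cls, name, descriptor)

MediaFile.add_field('mood', MediaField())
print(isinstance(MediaFile.mood, MediaField))  # True
try:
    MediaFile.add_field('mood', MediaField())  # second add with the same name
except ValueError as err:
    print(err)
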
def popone(self, key, default=_marker):
"""Remove specified key and return the corresponding value.
        If key is not found, default is returned if given, otherwise
        KeyError is raised.
"""
identity = self._title(key)
for i in range(len(self._impl._items)):
if self._impl._items[i][0] == identity:
value = self._impl._items[i][2]
del self._impl._items[i]
self._impl.incr_version()
return value
if default is _marker:
raise KeyError(key)
else:
return default
|
def function[popone, parameter[self, key, default]]:
constant[Remove specified key and return the corresponding value.
        If key is not found, default is returned if given, otherwise
        KeyError is raised.
]
variable[identity] assign[=] call[name[self]._title, parameter[name[key]]]
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[self]._impl._items]]]]] begin[:]
if compare[call[call[name[self]._impl._items][name[i]]][constant[0]] equal[==] name[identity]] begin[:]
variable[value] assign[=] call[call[name[self]._impl._items][name[i]]][constant[2]]
<ast.Delete object at 0x7da1b11ba1a0>
call[name[self]._impl.incr_version, parameter[]]
return[name[value]]
if compare[name[default] is name[_marker]] begin[:]
<ast.Raise object at 0x7da1b12bd720>
|
keyword[def] identifier[popone] ( identifier[self] , identifier[key] , identifier[default] = identifier[_marker] ):
literal[string]
identifier[identity] = identifier[self] . identifier[_title] ( identifier[key] )
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[self] . identifier[_impl] . identifier[_items] )):
keyword[if] identifier[self] . identifier[_impl] . identifier[_items] [ identifier[i] ][ literal[int] ]== identifier[identity] :
identifier[value] = identifier[self] . identifier[_impl] . identifier[_items] [ identifier[i] ][ literal[int] ]
keyword[del] identifier[self] . identifier[_impl] . identifier[_items] [ identifier[i] ]
identifier[self] . identifier[_impl] . identifier[incr_version] ()
keyword[return] identifier[value]
keyword[if] identifier[default] keyword[is] identifier[_marker] :
keyword[raise] identifier[KeyError] ( identifier[key] )
keyword[else] :
keyword[return] identifier[default]
|
def popone(self, key, default=_marker):
"""Remove specified key and return the corresponding value.
        If key is not found, default is returned if given, otherwise
        KeyError is raised.
"""
identity = self._title(key)
for i in range(len(self._impl._items)):
if self._impl._items[i][0] == identity:
value = self._impl._items[i][2]
del self._impl._items[i]
self._impl.incr_version()
return value # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
if default is _marker:
raise KeyError(key) # depends on [control=['if'], data=[]]
else:
return default
|
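popone() scans an internal (identity, key, value) item list; a toy multidict with the same storage shape (title-casing as a stand-in for the original's _title()) shows its pop-first-match semantics:

_marker = object()

class ToyMultiDict:
    def __init__(self, items):
        # each entry: (normalized identity, original key, value)
        self._items = [(k.title(), k, v) for k, v in items]

    def popone(self, key, default=_marker):
        identity = key.title()
        for i in range(len(self._items)):
            if self._items[i][0] == identity:
                value = self._items[i][2]
                del self._items[i]
                return value
        if default is _marker:
            raise KeyError(key)
        return default

md = ToyMultiDict([('Accept', 'text/html'), ('accept', 'application/json')])
print(md.popone('ACCEPT'))        # text/html  (first match wins)
print(md.popone('ACCEPT'))        # application/json
print(md.popone('ACCEPT', None))  # None
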
def data(self, table_name, metadata, persist_as=None, how='the_geom'):
"""Get an augmented CARTO dataset with `Data Observatory
<https://carto.com/data-observatory>`__ measures. Use
`CartoContext.data_discovery
<#context.CartoContext.data_discovery>`__ to search for available
measures, or see the full `Data Observatory catalog
<https://cartodb.github.io/bigmetadata/index.html>`__. Optionally
persist the data as a new table.
Example:
Get a DataFrame with Data Observatory measures based on the
geometries in a CARTO table.
.. code::
cc = cartoframes.CartoContext(BASEURL, APIKEY)
median_income = cc.data_discovery('transaction_events',
regex='.*median income.*',
time='2011 - 2015')
df = cc.data('transaction_events',
median_income)
Pass in cherry-picked measures from the Data Observatory catalog.
The rest of the metadata will be filled in, but it's important to
specify the geographic level as this will not show up in the column
name.
.. code::
median_income = [{'numer_id': 'us.census.acs.B19013001',
'geom_id': 'us.census.tiger.block_group',
'numer_timespan': '2011 - 2015'}]
df = cc.data('transaction_events', median_income)
Args:
table_name (str): Name of table on CARTO account that Data
Observatory measures are to be added to.
metadata (pandas.DataFrame): List of all measures to add to
`table_name`. See :py:meth:`CartoContext.data_discovery
<cartoframes.context.CartoContext.data_discovery>` outputs
for a full list of metadata columns.
persist_as (str, optional): Output the results of augmenting
`table_name` to `persist_as` as a persistent table on CARTO.
Defaults to ``None``, which will not create a table.
how (str, optional): **Not fully implemented**. Column name for
identifying the geometry from which to fetch the data. Defaults
to `the_geom`, which results in measures that are spatially
interpolated (e.g., a neighborhood boundary's population will
be calculated from underlying census tracts). Specifying a
column that has the geometry identifier (for example, GEOID for
            US Census boundaries) results in measures taken directly from the
            Census for that GEOID, but normalized as specified in the
metadata.
Returns:
pandas.DataFrame: A DataFrame representation of `table_name` which
has new columns for each measure in `metadata`.
Raises:
NameError: If the columns in `table_name` are in the
``suggested_name`` column of `metadata`.
ValueError: If metadata object is invalid or empty, or if the
number of requested measures exceeds 50.
CartoException: If user account consumes all of Data Observatory
quota
"""
# if how != 'the_geom':
# raise NotImplementedError('Data gathering currently only works if '
# 'a geometry is present')
if isinstance(metadata, pd.DataFrame):
_meta = metadata.copy().reset_index()
elif isinstance(metadata, collections.Iterable):
query = utils.minify_sql((
'WITH envelope AS (',
' SELECT',
' ST_SetSRID(ST_Extent(the_geom)::geometry, 4326) AS env,',
' count(*)::int AS cnt',
' FROM {table_name}',
')',
'SELECT *',
' FROM json_to_recordset(',
' (SELECT OBS_GetMeta(',
' envelope.env,',
' (\'{meta}\')::json,',
' 10, 1, envelope.cnt',
' ) AS meta',
' FROM envelope',
' GROUP BY env, cnt)) as data(',
' denom_aggregate text, denom_colname text,',
' denom_description text, denom_geomref_colname text,',
' denom_id text, denom_name text, denom_reltype text,',
' denom_t_description text, denom_tablename text,',
' denom_type text, geom_colname text,',
' geom_description text,geom_geomref_colname text,',
' geom_id text, geom_name text, geom_t_description text,',
' geom_tablename text, geom_timespan text,',
' geom_type text, id numeric, max_score_rank text,',
' max_timespan_rank text, normalization text, num_geoms',
' numeric,numer_aggregate text, numer_colname text,',
' numer_description text, numer_geomref_colname text,',
' numer_id text, numer_name text, numer_t_description',
' text, numer_tablename text, numer_timespan text,',
' numer_type text, score numeric, score_rank numeric,',
' score_rownum numeric, suggested_name text,',
' target_area text, target_geoms text, timespan_rank',
' numeric, timespan_rownum numeric)',
)).format(table_name=table_name,
meta=json.dumps(metadata).replace('\'', '\'\''))
_meta = self.fetch(query)
if _meta.shape[0] == 0:
raise ValueError('There are no valid metadata entries. Check '
'inputs.')
elif _meta.shape[0] > 50:
raise ValueError('The number of metadata entries exceeds 50. Tip: '
'If `metadata` is a pandas.DataFrame, iterate '
'over this object using `metadata.groupby`. If '
'it is a list, iterate over chunks of it. Then '
'combine resulting DataFrames using '
'`pandas.concat`')
# get column names except the_geom_webmercator
dataset = Dataset(self, table_name)
table_columns = dataset.get_table_column_names(exclude=['the_geom_webmercator'])
names = {}
for suggested in _meta['suggested_name']:
if suggested in table_columns:
names[suggested] = utils.unique_colname(suggested, table_columns)
warn(
'{s0} was augmented as {s1} because of name '
'collision'.format(s0=suggested, s1=names[suggested])
)
else:
names[suggested] = suggested
# drop description columns to lighten the query
# FIXME https://github.com/CartoDB/cartoframes/issues/593
meta_columns = _meta.columns.values
drop_columns = []
for meta_column in meta_columns:
if meta_column.endswith('_description'):
drop_columns.append(meta_column)
if len(drop_columns) > 0:
_meta.drop(drop_columns, axis=1, inplace=True)
cols = ', '.join(
'(data->{n}->>\'value\')::{pgtype} AS {col}'.format(
n=row[0],
pgtype=row[1]['numer_type'],
col=names[row[1]['suggested_name']])
for row in _meta.iterrows())
query = utils.minify_sql((
'SELECT {table_cols}, {cols}',
' FROM OBS_GetData(',
' (SELECT array_agg({how})',
' FROM "{tablename}"),',
' (SELECT \'{meta}\'::json)) as m,',
' {tablename} as t',
' WHERE t."{rowid}" = m.id',)).format(
how=('(the_geom, cartodb_id)::geomval'
if how == 'the_geom' else how),
tablename=table_name,
rowid='cartodb_id' if how == 'the_geom' else how,
cols=cols,
table_cols=','.join('t.{}'.format(c) for c in table_columns),
meta=_meta.to_json(orient='records').replace('\'', '\'\''))
return self.query(query, table_name=persist_as, decode_geom=False, is_select=True)
|
def function[data, parameter[self, table_name, metadata, persist_as, how]]:
constant[Get an augmented CARTO dataset with `Data Observatory
<https://carto.com/data-observatory>`__ measures. Use
`CartoContext.data_discovery
<#context.CartoContext.data_discovery>`__ to search for available
measures, or see the full `Data Observatory catalog
<https://cartodb.github.io/bigmetadata/index.html>`__. Optionally
persist the data as a new table.
Example:
Get a DataFrame with Data Observatory measures based on the
geometries in a CARTO table.
.. code::
cc = cartoframes.CartoContext(BASEURL, APIKEY)
median_income = cc.data_discovery('transaction_events',
regex='.*median income.*',
time='2011 - 2015')
df = cc.data('transaction_events',
median_income)
Pass in cherry-picked measures from the Data Observatory catalog.
The rest of the metadata will be filled in, but it's important to
specify the geographic level as this will not show up in the column
name.
.. code::
median_income = [{'numer_id': 'us.census.acs.B19013001',
'geom_id': 'us.census.tiger.block_group',
'numer_timespan': '2011 - 2015'}]
df = cc.data('transaction_events', median_income)
Args:
table_name (str): Name of table on CARTO account that Data
Observatory measures are to be added to.
        metadata (pandas.DataFrame or list of dict): List of all measures to add to
`table_name`. See :py:meth:`CartoContext.data_discovery
<cartoframes.context.CartoContext.data_discovery>` outputs
for a full list of metadata columns.
persist_as (str, optional): Output the results of augmenting
`table_name` to `persist_as` as a persistent table on CARTO.
Defaults to ``None``, which will not create a table.
how (str, optional): **Not fully implemented**. Column name for
identifying the geometry from which to fetch the data. Defaults
to `the_geom`, which results in measures that are spatially
interpolated (e.g., a neighborhood boundary's population will
            be calculated from underlying census tracts). Specifying a
            column that holds the geometry identifier (for example,
            GEOID for US Census boundaries) results in measures taken
            directly from the Census for that GEOID, normalized as
            specified in the metadata.
Returns:
pandas.DataFrame: A DataFrame representation of `table_name` which
has new columns for each measure in `metadata`.
Raises:
NameError: If the columns in `table_name` are in the
``suggested_name`` column of `metadata`.
ValueError: If metadata object is invalid or empty, or if the
number of requested measures exceeds 50.
CartoException: If user account consumes all of Data Observatory
quota
]
if call[name[isinstance], parameter[name[metadata], name[pd].DataFrame]] begin[:]
variable[_meta] assign[=] call[call[name[metadata].copy, parameter[]].reset_index, parameter[]]
if compare[call[name[_meta].shape][constant[0]] equal[==] constant[0]] begin[:]
<ast.Raise object at 0x7da18dc060e0>
variable[dataset] assign[=] call[name[Dataset], parameter[name[self], name[table_name]]]
variable[table_columns] assign[=] call[name[dataset].get_table_column_names, parameter[]]
variable[names] assign[=] dictionary[[], []]
for taget[name[suggested]] in starred[call[name[_meta]][constant[suggested_name]]] begin[:]
if compare[name[suggested] in name[table_columns]] begin[:]
call[name[names]][name[suggested]] assign[=] call[name[utils].unique_colname, parameter[name[suggested], name[table_columns]]]
call[name[warn], parameter[call[constant[{s0} was augmented as {s1} because of name collision].format, parameter[]]]]
variable[meta_columns] assign[=] name[_meta].columns.values
variable[drop_columns] assign[=] list[[]]
for taget[name[meta_column]] in starred[name[meta_columns]] begin[:]
if call[name[meta_column].endswith, parameter[constant[_description]]] begin[:]
call[name[drop_columns].append, parameter[name[meta_column]]]
if compare[call[name[len], parameter[name[drop_columns]]] greater[>] constant[0]] begin[:]
call[name[_meta].drop, parameter[name[drop_columns]]]
variable[cols] assign[=] call[constant[, ].join, parameter[<ast.GeneratorExp object at 0x7da18dc079d0>]]
variable[query] assign[=] call[call[name[utils].minify_sql, parameter[tuple[[<ast.Constant object at 0x7da18dc05180>, <ast.Constant object at 0x7da18dc040d0>, <ast.Constant object at 0x7da18dc06f80>, <ast.Constant object at 0x7da18dc05bd0>, <ast.Constant object at 0x7da18dc04490>, <ast.Constant object at 0x7da18dc07c40>, <ast.Constant object at 0x7da18dc06710>]]]].format, parameter[]]
return[call[name[self].query, parameter[name[query]]]]
|
keyword[def] identifier[data] ( identifier[self] , identifier[table_name] , identifier[metadata] , identifier[persist_as] = keyword[None] , identifier[how] = literal[string] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[metadata] , identifier[pd] . identifier[DataFrame] ):
identifier[_meta] = identifier[metadata] . identifier[copy] (). identifier[reset_index] ()
keyword[elif] identifier[isinstance] ( identifier[metadata] , identifier[collections] . identifier[Iterable] ):
identifier[query] = identifier[utils] . identifier[minify_sql] ((
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
)). identifier[format] ( identifier[table_name] = identifier[table_name] ,
identifier[meta] = identifier[json] . identifier[dumps] ( identifier[metadata] ). identifier[replace] ( literal[string] , literal[string] ))
identifier[_meta] = identifier[self] . identifier[fetch] ( identifier[query] )
keyword[if] identifier[_meta] . identifier[shape] [ literal[int] ]== literal[int] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] )
keyword[elif] identifier[_meta] . identifier[shape] [ literal[int] ]> literal[int] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string] )
identifier[dataset] = identifier[Dataset] ( identifier[self] , identifier[table_name] )
identifier[table_columns] = identifier[dataset] . identifier[get_table_column_names] ( identifier[exclude] =[ literal[string] ])
identifier[names] ={}
keyword[for] identifier[suggested] keyword[in] identifier[_meta] [ literal[string] ]:
keyword[if] identifier[suggested] keyword[in] identifier[table_columns] :
identifier[names] [ identifier[suggested] ]= identifier[utils] . identifier[unique_colname] ( identifier[suggested] , identifier[table_columns] )
identifier[warn] (
literal[string]
literal[string] . identifier[format] ( identifier[s0] = identifier[suggested] , identifier[s1] = identifier[names] [ identifier[suggested] ])
)
keyword[else] :
identifier[names] [ identifier[suggested] ]= identifier[suggested]
identifier[meta_columns] = identifier[_meta] . identifier[columns] . identifier[values]
identifier[drop_columns] =[]
keyword[for] identifier[meta_column] keyword[in] identifier[meta_columns] :
keyword[if] identifier[meta_column] . identifier[endswith] ( literal[string] ):
identifier[drop_columns] . identifier[append] ( identifier[meta_column] )
keyword[if] identifier[len] ( identifier[drop_columns] )> literal[int] :
identifier[_meta] . identifier[drop] ( identifier[drop_columns] , identifier[axis] = literal[int] , identifier[inplace] = keyword[True] )
identifier[cols] = literal[string] . identifier[join] (
literal[string] . identifier[format] (
identifier[n] = identifier[row] [ literal[int] ],
identifier[pgtype] = identifier[row] [ literal[int] ][ literal[string] ],
identifier[col] = identifier[names] [ identifier[row] [ literal[int] ][ literal[string] ]])
keyword[for] identifier[row] keyword[in] identifier[_meta] . identifier[iterrows] ())
identifier[query] = identifier[utils] . identifier[minify_sql] ((
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,)). identifier[format] (
identifier[how] =( literal[string]
keyword[if] identifier[how] == literal[string] keyword[else] identifier[how] ),
identifier[tablename] = identifier[table_name] ,
identifier[rowid] = literal[string] keyword[if] identifier[how] == literal[string] keyword[else] identifier[how] ,
identifier[cols] = identifier[cols] ,
identifier[table_cols] = literal[string] . identifier[join] ( literal[string] . identifier[format] ( identifier[c] ) keyword[for] identifier[c] keyword[in] identifier[table_columns] ),
identifier[meta] = identifier[_meta] . identifier[to_json] ( identifier[orient] = literal[string] ). identifier[replace] ( literal[string] , literal[string] ))
keyword[return] identifier[self] . identifier[query] ( identifier[query] , identifier[table_name] = identifier[persist_as] , identifier[decode_geom] = keyword[False] , identifier[is_select] = keyword[True] )
|
def data(self, table_name, metadata, persist_as=None, how='the_geom'):
"""Get an augmented CARTO dataset with `Data Observatory
<https://carto.com/data-observatory>`__ measures. Use
`CartoContext.data_discovery
<#context.CartoContext.data_discovery>`__ to search for available
measures, or see the full `Data Observatory catalog
<https://cartodb.github.io/bigmetadata/index.html>`__. Optionally
persist the data as a new table.
Example:
Get a DataFrame with Data Observatory measures based on the
geometries in a CARTO table.
.. code::
cc = cartoframes.CartoContext(BASEURL, APIKEY)
median_income = cc.data_discovery('transaction_events',
regex='.*median income.*',
time='2011 - 2015')
df = cc.data('transaction_events',
median_income)
Pass in cherry-picked measures from the Data Observatory catalog.
The rest of the metadata will be filled in, but it's important to
specify the geographic level as this will not show up in the column
name.
.. code::
median_income = [{'numer_id': 'us.census.acs.B19013001',
'geom_id': 'us.census.tiger.block_group',
'numer_timespan': '2011 - 2015'}]
df = cc.data('transaction_events', median_income)
Args:
table_name (str): Name of table on CARTO account that Data
Observatory measures are to be added to.
        metadata (pandas.DataFrame or list of dict): List of all measures to add to
`table_name`. See :py:meth:`CartoContext.data_discovery
<cartoframes.context.CartoContext.data_discovery>` outputs
for a full list of metadata columns.
persist_as (str, optional): Output the results of augmenting
`table_name` to `persist_as` as a persistent table on CARTO.
Defaults to ``None``, which will not create a table.
how (str, optional): **Not fully implemented**. Column name for
identifying the geometry from which to fetch the data. Defaults
to `the_geom`, which results in measures that are spatially
interpolated (e.g., a neighborhood boundary's population will
            be calculated from underlying census tracts). Specifying a
            column that holds the geometry identifier (for example,
            GEOID for US Census boundaries) results in measures taken
            directly from the Census for that GEOID, normalized as
            specified in the metadata.
Returns:
pandas.DataFrame: A DataFrame representation of `table_name` which
has new columns for each measure in `metadata`.
Raises:
NameError: If the columns in `table_name` are in the
``suggested_name`` column of `metadata`.
ValueError: If metadata object is invalid or empty, or if the
number of requested measures exceeds 50.
CartoException: If user account consumes all of Data Observatory
quota
"""
# if how != 'the_geom':
# raise NotImplementedError('Data gathering currently only works if '
# 'a geometry is present')
if isinstance(metadata, pd.DataFrame):
_meta = metadata.copy().reset_index() # depends on [control=['if'], data=[]]
elif isinstance(metadata, collections.Iterable):
query = utils.minify_sql(('WITH envelope AS (', ' SELECT', ' ST_SetSRID(ST_Extent(the_geom)::geometry, 4326) AS env,', ' count(*)::int AS cnt', ' FROM {table_name}', ')', 'SELECT *', ' FROM json_to_recordset(', ' (SELECT OBS_GetMeta(', ' envelope.env,', " ('{meta}')::json,", ' 10, 1, envelope.cnt', ' ) AS meta', ' FROM envelope', ' GROUP BY env, cnt)) as data(', ' denom_aggregate text, denom_colname text,', ' denom_description text, denom_geomref_colname text,', ' denom_id text, denom_name text, denom_reltype text,', ' denom_t_description text, denom_tablename text,', ' denom_type text, geom_colname text,', ' geom_description text,geom_geomref_colname text,', ' geom_id text, geom_name text, geom_t_description text,', ' geom_tablename text, geom_timespan text,', ' geom_type text, id numeric, max_score_rank text,', ' max_timespan_rank text, normalization text, num_geoms', ' numeric,numer_aggregate text, numer_colname text,', ' numer_description text, numer_geomref_colname text,', ' numer_id text, numer_name text, numer_t_description', ' text, numer_tablename text, numer_timespan text,', ' numer_type text, score numeric, score_rank numeric,', ' score_rownum numeric, suggested_name text,', ' target_area text, target_geoms text, timespan_rank', ' numeric, timespan_rownum numeric)')).format(table_name=table_name, meta=json.dumps(metadata).replace("'", "''"))
_meta = self.fetch(query) # depends on [control=['if'], data=[]]
if _meta.shape[0] == 0:
raise ValueError('There are no valid metadata entries. Check inputs.') # depends on [control=['if'], data=[]]
elif _meta.shape[0] > 50:
raise ValueError('The number of metadata entries exceeds 50. Tip: If `metadata` is a pandas.DataFrame, iterate over this object using `metadata.groupby`. If it is a list, iterate over chunks of it. Then combine resulting DataFrames using `pandas.concat`') # depends on [control=['if'], data=[]]
# get column names except the_geom_webmercator
dataset = Dataset(self, table_name)
table_columns = dataset.get_table_column_names(exclude=['the_geom_webmercator'])
names = {}
for suggested in _meta['suggested_name']:
if suggested in table_columns:
names[suggested] = utils.unique_colname(suggested, table_columns)
warn('{s0} was augmented as {s1} because of name collision'.format(s0=suggested, s1=names[suggested])) # depends on [control=['if'], data=['suggested', 'table_columns']]
else:
names[suggested] = suggested # depends on [control=['for'], data=['suggested']]
# drop description columns to lighten the query
# FIXME https://github.com/CartoDB/cartoframes/issues/593
meta_columns = _meta.columns.values
drop_columns = []
for meta_column in meta_columns:
if meta_column.endswith('_description'):
drop_columns.append(meta_column) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['meta_column']]
if len(drop_columns) > 0:
_meta.drop(drop_columns, axis=1, inplace=True) # depends on [control=['if'], data=[]]
cols = ', '.join(("(data->{n}->>'value')::{pgtype} AS {col}".format(n=row[0], pgtype=row[1]['numer_type'], col=names[row[1]['suggested_name']]) for row in _meta.iterrows()))
query = utils.minify_sql(('SELECT {table_cols}, {cols}', ' FROM OBS_GetData(', ' (SELECT array_agg({how})', ' FROM "{tablename}"),', " (SELECT '{meta}'::json)) as m,", ' {tablename} as t', ' WHERE t."{rowid}" = m.id')).format(how='(the_geom, cartodb_id)::geomval' if how == 'the_geom' else how, tablename=table_name, rowid='cartodb_id' if how == 'the_geom' else how, cols=cols, table_cols=','.join(('t.{}'.format(c) for c in table_columns)), meta=_meta.to_json(orient='records').replace("'", "''"))
return self.query(query, table_name=persist_as, decode_geom=False, is_select=True)
|
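As a brief illustration of the `persist_as` option documented in the record above, here is a hedged usage sketch; BASEURL, APIKEY, and the table name are placeholders carried over from the docstring's own example, and the output table name is hypothetical:

import cartoframes

# Placeholder credentials and table name, as in the docstring example.
cc = cartoframes.CartoContext(BASEURL, APIKEY)
median_income = cc.data_discovery('transaction_events',
                                  regex='.*median income.*',
                                  time='2011 - 2015')
# persist_as keeps the augmented result as a new table on CARTO
# in addition to returning it as a DataFrame.
df = cc.data('transaction_events', median_income,
             persist_as='transaction_events_augmented')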
def CountHuntFlows(self,
hunt_id,
filter_condition=db.HuntFlowsCondition.UNSET):
"""Counts hunt flows matching given conditions."""
return len(
self.ReadHuntFlows(
hunt_id, 0, sys.maxsize, filter_condition=filter_condition))
|
def function[CountHuntFlows, parameter[self, hunt_id, filter_condition]]:
constant[Counts hunt flows matching given conditions.]
return[call[name[len], parameter[call[name[self].ReadHuntFlows, parameter[name[hunt_id], constant[0], name[sys].maxsize]]]]]
|
keyword[def] identifier[CountHuntFlows] ( identifier[self] ,
identifier[hunt_id] ,
identifier[filter_condition] = identifier[db] . identifier[HuntFlowsCondition] . identifier[UNSET] ):
literal[string]
keyword[return] identifier[len] (
identifier[self] . identifier[ReadHuntFlows] (
identifier[hunt_id] , literal[int] , identifier[sys] . identifier[maxsize] , identifier[filter_condition] = identifier[filter_condition] ))
|
def CountHuntFlows(self, hunt_id, filter_condition=db.HuntFlowsCondition.UNSET):
"""Counts hunt flows matching given conditions."""
return len(self.ReadHuntFlows(hunt_id, 0, sys.maxsize, filter_condition=filter_condition))
|
def get(self):
"""Return a Deferred that fires with a SourceStamp instance."""
d = self.getBaseRevision()
d.addCallback(self.getPatch)
d.addCallback(self.done)
return d
|
def function[get, parameter[self]]:
constant[Return a Deferred that fires with a SourceStamp instance.]
variable[d] assign[=] call[name[self].getBaseRevision, parameter[]]
call[name[d].addCallback, parameter[name[self].getPatch]]
call[name[d].addCallback, parameter[name[self].done]]
return[name[d]]
|
keyword[def] identifier[get] ( identifier[self] ):
literal[string]
identifier[d] = identifier[self] . identifier[getBaseRevision] ()
identifier[d] . identifier[addCallback] ( identifier[self] . identifier[getPatch] )
identifier[d] . identifier[addCallback] ( identifier[self] . identifier[done] )
keyword[return] identifier[d]
|
def get(self):
"""Return a Deferred that fires with a SourceStamp instance."""
d = self.getBaseRevision()
d.addCallback(self.getPatch)
d.addCallback(self.done)
return d
|
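For readers unfamiliar with the callback-chaining idiom in the `get` method above, this is a minimal, self-contained Twisted sketch of the same pattern; the stage functions are hypothetical stand-ins for `getBaseRevision`, `getPatch`, and `done`:

from twisted.internet import defer

def get_base_revision():
    # Each stage hands its result to the next callback in the chain.
    d = defer.Deferred()
    d.callback('rev-1234')  # fire synchronously for this sketch
    return d

def get_patch(revision):
    return {'revision': revision, 'patch': None}

def done(sourcestamp):
    print(sourcestamp)
    return sourcestamp

d = get_base_revision()
d.addCallback(get_patch)
d.addCallback(done)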
def Region2_cp0(Tr, Pr):
"""Ideal properties for Region 2
Parameters
----------
Tr : float
Reduced temperature, [-]
Pr : float
Reduced pressure, [-]
Returns
-------
prop : array
Array with ideal Gibbs energy partial derivatives:
* g: Ideal Specific Gibbs energy [kJ/kg]
* gp: ∂g/∂P|T
* gpp: ∂²g/∂P²|T
* gt: ∂g/∂T|P
* gtt: ∂²g/∂T²|P
* gpt: ∂²g/∂T∂P
References
----------
IAPWS, Revised Release on the IAPWS Industrial Formulation 1997 for the
Thermodynamic Properties of Water and Steam August 2007,
http://www.iapws.org/relguide/IF97-Rev.html, Eq 16
"""
Jo = [0, 1, -5, -4, -3, -2, -1, 2, 3]
no = [-0.96927686500217E+01, 0.10086655968018E+02, -0.56087911283020E-02,
0.71452738081455E-01, -0.40710498223928E+00, 0.14240819171444E+01,
-0.43839511319450E+01, -0.28408632460772E+00, 0.21268463753307E-01]
go = log(Pr)
gop = Pr**-1
gopp = -Pr**-2
got = gott = gopt = 0
for j, ni in zip(Jo, no):
go += ni * Tr**j
got += ni*j * Tr**(j-1)
gott += ni*j*(j-1) * Tr**(j-2)
return go, gop, gopp, got, gott, gopt
|
def function[Region2_cp0, parameter[Tr, Pr]]:
constant[Ideal properties for Region 2
Parameters
----------
Tr : float
Reduced temperature, [-]
Pr : float
Reduced pressure, [-]
Returns
-------
prop : array
Array with ideal Gibbs energy partial derivatives:
* g: Ideal Specific Gibbs energy [kJ/kg]
* gp: ∂g/∂P|T
* gpp: ∂²g/∂P²|T
* gt: ∂g/∂T|P
* gtt: ∂²g/∂T²|P
* gpt: ∂²g/∂T∂P
References
----------
IAPWS, Revised Release on the IAPWS Industrial Formulation 1997 for the
Thermodynamic Properties of Water and Steam August 2007,
http://www.iapws.org/relguide/IF97-Rev.html, Eq 16
]
variable[Jo] assign[=] list[[<ast.Constant object at 0x7da1b06d4820>, <ast.Constant object at 0x7da1b06d47f0>, <ast.UnaryOp object at 0x7da1b06d47c0>, <ast.UnaryOp object at 0x7da1b06d7ac0>, <ast.UnaryOp object at 0x7da1b06d7b20>, <ast.UnaryOp object at 0x7da1b06d7b80>, <ast.UnaryOp object at 0x7da1b06d7be0>, <ast.Constant object at 0x7da1b06d7c40>, <ast.Constant object at 0x7da1b06d7c70>]]
variable[no] assign[=] list[[<ast.UnaryOp object at 0x7da1b06d79d0>, <ast.Constant object at 0x7da1b06d7970>, <ast.UnaryOp object at 0x7da1b06d7940>, <ast.Constant object at 0x7da1b06d78e0>, <ast.UnaryOp object at 0x7da1b06d78b0>, <ast.Constant object at 0x7da1b06d7850>, <ast.UnaryOp object at 0x7da1b06d73a0>, <ast.UnaryOp object at 0x7da1b06d7340>, <ast.Constant object at 0x7da1b06d72e0>]]
variable[go] assign[=] call[name[log], parameter[name[Pr]]]
variable[gop] assign[=] binary_operation[name[Pr] ** <ast.UnaryOp object at 0x7da1b06d6e00>]
variable[gopp] assign[=] <ast.UnaryOp object at 0x7da1b06d6ec0>
variable[got] assign[=] constant[0]
for taget[tuple[[<ast.Name object at 0x7da1b06d7490>, <ast.Name object at 0x7da1b06d74c0>]]] in starred[call[name[zip], parameter[name[Jo], name[no]]]] begin[:]
<ast.AugAssign object at 0x7da1b06d75b0>
<ast.AugAssign object at 0x7da1b06d7700>
<ast.AugAssign object at 0x7da1b06d6050>
return[tuple[[<ast.Name object at 0x7da18bc73640>, <ast.Name object at 0x7da18bc70df0>, <ast.Name object at 0x7da18bc705b0>, <ast.Name object at 0x7da18bc72350>, <ast.Name object at 0x7da18bc71510>, <ast.Name object at 0x7da18bc723e0>]]]
|
keyword[def] identifier[Region2_cp0] ( identifier[Tr] , identifier[Pr] ):
literal[string]
identifier[Jo] =[ literal[int] , literal[int] ,- literal[int] ,- literal[int] ,- literal[int] ,- literal[int] ,- literal[int] , literal[int] , literal[int] ]
identifier[no] =[- literal[int] , literal[int] ,- literal[int] ,
literal[int] ,- literal[int] , literal[int] ,
- literal[int] ,- literal[int] , literal[int] ]
identifier[go] = identifier[log] ( identifier[Pr] )
identifier[gop] = identifier[Pr] **- literal[int]
identifier[gopp] =- identifier[Pr] **- literal[int]
identifier[got] = identifier[gott] = identifier[gopt] = literal[int]
keyword[for] identifier[j] , identifier[ni] keyword[in] identifier[zip] ( identifier[Jo] , identifier[no] ):
identifier[go] += identifier[ni] * identifier[Tr] ** identifier[j]
identifier[got] += identifier[ni] * identifier[j] * identifier[Tr] **( identifier[j] - literal[int] )
identifier[gott] += identifier[ni] * identifier[j] *( identifier[j] - literal[int] )* identifier[Tr] **( identifier[j] - literal[int] )
keyword[return] identifier[go] , identifier[gop] , identifier[gopp] , identifier[got] , identifier[gott] , identifier[gopt]
|
def Region2_cp0(Tr, Pr):
"""Ideal properties for Region 2
Parameters
----------
Tr : float
Reduced temperature, [-]
Pr : float
Reduced pressure, [-]
Returns
-------
prop : array
Array with ideal Gibbs energy partial derivatives:
* g: Ideal Specific Gibbs energy [kJ/kg]
* gp: ∂g/∂P|T
* gpp: ∂²g/∂P²|T
* gt: ∂g/∂T|P
* gtt: ∂²g/∂T²|P
* gpt: ∂²g/∂T∂P
References
----------
IAPWS, Revised Release on the IAPWS Industrial Formulation 1997 for the
Thermodynamic Properties of Water and Steam August 2007,
http://www.iapws.org/relguide/IF97-Rev.html, Eq 16
"""
Jo = [0, 1, -5, -4, -3, -2, -1, 2, 3]
no = [-9.6927686500217, 10.086655968018, -0.005608791128302, 0.071452738081455, -0.40710498223928, 1.4240819171444, -4.383951131945, -0.28408632460772, 0.021268463753307]
go = log(Pr)
gop = Pr ** (-1)
gopp = -Pr ** (-2)
got = gott = gopt = 0
for (j, ni) in zip(Jo, no):
go += ni * Tr ** j
got += ni * j * Tr ** (j - 1)
gott += ni * j * (j - 1) * Tr ** (j - 2) # depends on [control=['for'], data=[]]
return (go, gop, gopp, got, gott, gopt)
|
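In the notation of the IF97 release (π for reduced pressure, τ for reduced temperature), the loop in `Region2_cp0` above evaluates the ideal-gas Gibbs term of Eq. 16 and its derivatives; note that the cross derivative stays identically zero, matching `gopt = 0` in the code:

\gamma^o = \ln \pi + \sum_{i=1}^{9} n_i^o \, \tau^{J_i^o}, \qquad
\gamma^o_\pi = \frac{1}{\pi}, \qquad
\gamma^o_{\pi\pi} = -\frac{1}{\pi^2}

\gamma^o_\tau = \sum_{i=1}^{9} n_i^o J_i^o \, \tau^{J_i^o - 1}, \qquad
\gamma^o_{\tau\tau} = \sum_{i=1}^{9} n_i^o J_i^o (J_i^o - 1) \, \tau^{J_i^o - 2}, \qquad
\gamma^o_{\pi\tau} = 0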
def add_resource(self, resource):
"""Add a resource to the list of interesting resources"""
if resource.exists():
self.resources[resource] = self.timekeeper.get_indicator(resource)
else:
self.resources[resource] = None
|
def function[add_resource, parameter[self, resource]]:
constant[Add a resource to the list of interesting resources]
if call[name[resource].exists, parameter[]] begin[:]
call[name[self].resources][name[resource]] assign[=] call[name[self].timekeeper.get_indicator, parameter[name[resource]]]
|
keyword[def] identifier[add_resource] ( identifier[self] , identifier[resource] ):
literal[string]
keyword[if] identifier[resource] . identifier[exists] ():
identifier[self] . identifier[resources] [ identifier[resource] ]= identifier[self] . identifier[timekeeper] . identifier[get_indicator] ( identifier[resource] )
keyword[else] :
identifier[self] . identifier[resources] [ identifier[resource] ]= keyword[None]
|
def add_resource(self, resource):
"""Add a resource to the list of interesting resources"""
if resource.exists():
self.resources[resource] = self.timekeeper.get_indicator(resource) # depends on [control=['if'], data=[]]
else:
self.resources[resource] = None
|
def add_member_to_group(self, group_id, member_id):
"""AddMemberToGroup.
[Preview API] Add a member to a Group.
:param str group_id: Id of the Group.
:param str member_id: Id of the member to add.
"""
route_values = {}
if group_id is not None:
route_values['groupId'] = self._serialize.url('group_id', group_id, 'str')
if member_id is not None:
route_values['memberId'] = self._serialize.url('member_id', member_id, 'str')
self._send(http_method='PUT',
location_id='45a36e53-5286-4518-aa72-2d29f7acc5d8',
version='5.0-preview.1',
route_values=route_values)
|
def function[add_member_to_group, parameter[self, group_id, member_id]]:
constant[AddMemberToGroup.
[Preview API] Add a member to a Group.
:param str group_id: Id of the Group.
:param str member_id: Id of the member to add.
]
variable[route_values] assign[=] dictionary[[], []]
if compare[name[group_id] is_not constant[None]] begin[:]
call[name[route_values]][constant[groupId]] assign[=] call[name[self]._serialize.url, parameter[constant[group_id], name[group_id], constant[str]]]
if compare[name[member_id] is_not constant[None]] begin[:]
call[name[route_values]][constant[memberId]] assign[=] call[name[self]._serialize.url, parameter[constant[member_id], name[member_id], constant[str]]]
call[name[self]._send, parameter[]]
|
keyword[def] identifier[add_member_to_group] ( identifier[self] , identifier[group_id] , identifier[member_id] ):
literal[string]
identifier[route_values] ={}
keyword[if] identifier[group_id] keyword[is] keyword[not] keyword[None] :
identifier[route_values] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[url] ( literal[string] , identifier[group_id] , literal[string] )
keyword[if] identifier[member_id] keyword[is] keyword[not] keyword[None] :
identifier[route_values] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[url] ( literal[string] , identifier[member_id] , literal[string] )
identifier[self] . identifier[_send] ( identifier[http_method] = literal[string] ,
identifier[location_id] = literal[string] ,
identifier[version] = literal[string] ,
identifier[route_values] = identifier[route_values] )
|
def add_member_to_group(self, group_id, member_id):
"""AddMemberToGroup.
[Preview API] Add a member to a Group.
:param str group_id: Id of the Group.
:param str member_id: Id of the member to add.
"""
route_values = {}
if group_id is not None:
route_values['groupId'] = self._serialize.url('group_id', group_id, 'str') # depends on [control=['if'], data=['group_id']]
if member_id is not None:
route_values['memberId'] = self._serialize.url('member_id', member_id, 'str') # depends on [control=['if'], data=['member_id']]
self._send(http_method='PUT', location_id='45a36e53-5286-4518-aa72-2d29f7acc5d8', version='5.0-preview.1', route_values=route_values)
|
def dump_dh_parameters(dh_parameters, encoding='pem'):
"""
Serializes an asn1crypto.algos.DHParameters object into a byte string
:param dh_parameters:
An asn1crypto.algos.DHParameters object
:param encoding:
A unicode string of "pem" or "der"
:return:
A byte string of the encoded DH parameters
"""
if encoding not in set(['pem', 'der']):
raise ValueError(pretty_message(
'''
encoding must be one of "pem", "der", not %s
''',
repr(encoding)
))
if not isinstance(dh_parameters, algos.DHParameters):
raise TypeError(pretty_message(
'''
dh_parameters must be an instance of asn1crypto.algos.DHParameters,
not %s
''',
type_name(dh_parameters)
))
output = dh_parameters.dump()
if encoding == 'pem':
output = pem.armor('DH PARAMETERS', output)
return output
|
def function[dump_dh_parameters, parameter[dh_parameters, encoding]]:
constant[
Serializes an asn1crypto.algos.DHParameters object into a byte string
:param dh_parameters:
An asn1crypto.algos.DHParameters object
:param encoding:
A unicode string of "pem" or "der"
:return:
A byte string of the encoded DH parameters
]
if compare[name[encoding] <ast.NotIn object at 0x7da2590d7190> call[name[set], parameter[list[[<ast.Constant object at 0x7da1b00d8e50>, <ast.Constant object at 0x7da1b00dae60>]]]]] begin[:]
<ast.Raise object at 0x7da1b00dabc0>
if <ast.UnaryOp object at 0x7da1b00da770> begin[:]
<ast.Raise object at 0x7da1b00da890>
variable[output] assign[=] call[name[dh_parameters].dump, parameter[]]
if compare[name[encoding] equal[==] constant[pem]] begin[:]
variable[output] assign[=] call[name[pem].armor, parameter[constant[DH PARAMETERS], name[output]]]
return[name[output]]
|
keyword[def] identifier[dump_dh_parameters] ( identifier[dh_parameters] , identifier[encoding] = literal[string] ):
literal[string]
keyword[if] identifier[encoding] keyword[not] keyword[in] identifier[set] ([ literal[string] , literal[string] ]):
keyword[raise] identifier[ValueError] ( identifier[pretty_message] (
literal[string] ,
identifier[repr] ( identifier[encoding] )
))
keyword[if] keyword[not] identifier[isinstance] ( identifier[dh_parameters] , identifier[algos] . identifier[DHParameters] ):
keyword[raise] identifier[TypeError] ( identifier[pretty_message] (
literal[string] ,
identifier[type_name] ( identifier[dh_parameters] )
))
identifier[output] = identifier[dh_parameters] . identifier[dump] ()
keyword[if] identifier[encoding] == literal[string] :
identifier[output] = identifier[pem] . identifier[armor] ( literal[string] , identifier[output] )
keyword[return] identifier[output]
|
def dump_dh_parameters(dh_parameters, encoding='pem'):
"""
Serializes an asn1crypto.algos.DHParameters object into a byte string
:param dh_parameters:
An asn1crypto.algos.DHParameters object
:param encoding:
A unicode string of "pem" or "der"
:return:
A byte string of the encoded DH parameters
"""
if encoding not in set(['pem', 'der']):
raise ValueError(pretty_message('\n encoding must be one of "pem", "der", not %s\n ', repr(encoding))) # depends on [control=['if'], data=['encoding']]
if not isinstance(dh_parameters, algos.DHParameters):
raise TypeError(pretty_message('\n dh_parameters must be an instance of asn1crypto.algos.DHParameters,\n not %s\n ', type_name(dh_parameters))) # depends on [control=['if'], data=[]]
output = dh_parameters.dump()
if encoding == 'pem':
output = pem.armor('DH PARAMETERS', output) # depends on [control=['if'], data=[]]
return output
|
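A minimal usage sketch for the serializer above; it assumes asn1crypto's `algos.DHParameters` can be built from a dict of its `p` and `g` integer fields, and the toy values are illustrative only, never cryptographically safe:

from asn1crypto import algos

# Hypothetical, insecure toy parameters for demonstration only.
dh_params = algos.DHParameters({'p': 0xFFFFFFFFFFFFFFC5, 'g': 2})

pem_bytes = dump_dh_parameters(dh_params)                  # PEM by default
der_bytes = dump_dh_parameters(dh_params, encoding='der')  # raw DER
print(pem_bytes.splitlines()[0])  # b'-----BEGIN DH PARAMETERS-----'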
def poke(self, context):
"""
Pokes for a mail attachment on the mail server.
:param context: The context that is being provided when poking.
:type context: dict
:return: True if attachment with the given name is present and False if not.
:rtype: bool
"""
self.log.info('Poking for %s', self.attachment_name)
with ImapHook(imap_conn_id=self.conn_id) as imap_hook:
return imap_hook.has_mail_attachment(
name=self.attachment_name,
mail_folder=self.mail_folder,
check_regex=self.check_regex
)
|
def function[poke, parameter[self, context]]:
constant[
Pokes for a mail attachment on the mail server.
:param context: The context that is being provided when poking.
:type context: dict
:return: True if attachment with the given name is present and False if not.
:rtype: bool
]
call[name[self].log.info, parameter[constant[Poking for %s], name[self].attachment_name]]
with call[name[ImapHook], parameter[]] begin[:]
return[call[name[imap_hook].has_mail_attachment, parameter[]]]
|
keyword[def] identifier[poke] ( identifier[self] , identifier[context] ):
literal[string]
identifier[self] . identifier[log] . identifier[info] ( literal[string] , identifier[self] . identifier[attachment_name] )
keyword[with] identifier[ImapHook] ( identifier[imap_conn_id] = identifier[self] . identifier[conn_id] ) keyword[as] identifier[imap_hook] :
keyword[return] identifier[imap_hook] . identifier[has_mail_attachment] (
identifier[name] = identifier[self] . identifier[attachment_name] ,
identifier[mail_folder] = identifier[self] . identifier[mail_folder] ,
identifier[check_regex] = identifier[self] . identifier[check_regex]
)
|
def poke(self, context):
"""
Pokes for a mail attachment on the mail server.
:param context: The context that is being provided when poking.
:type context: dict
:return: True if attachment with the given name is present and False if not.
:rtype: bool
"""
self.log.info('Poking for %s', self.attachment_name)
with ImapHook(imap_conn_id=self.conn_id) as imap_hook:
return imap_hook.has_mail_attachment(name=self.attachment_name, mail_folder=self.mail_folder, check_regex=self.check_regex) # depends on [control=['with'], data=['imap_hook']]
|
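The sensor's `poke` above is a thin wrapper over the hook's context-manager API; a stand-alone sketch of the same check follows, where the import path and connection id are assumptions that vary across Airflow versions:

from airflow.contrib.hooks.imap_hook import ImapHook  # path differs in newer Airflow

with ImapHook(imap_conn_id='imap_default') as imap_hook:
    found = imap_hook.has_mail_attachment(
        name='report.csv',    # hypothetical attachment name
        mail_folder='INBOX',
        check_regex=False,
    )
print('attachment present:', found)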
def _new_master_key(self, key_id):
"""Returns a KMSMasterKey for the specified key_id.
:param bytes key_id: KMS CMK ID
:returns: KMS Master Key based on key_id
:rtype: aws_encryption_sdk.key_providers.kms.KMSMasterKey
:raises InvalidKeyIdError: if key_id is not a valid KMS CMK ID to which this key provider has access
"""
_key_id = to_str(key_id) # KMS client requires str, not bytes
return KMSMasterKey(config=KMSMasterKeyConfig(key_id=key_id, client=self._client(_key_id)))
|
def function[_new_master_key, parameter[self, key_id]]:
constant[Returns a KMSMasterKey for the specified key_id.
:param bytes key_id: KMS CMK ID
:returns: KMS Master Key based on key_id
:rtype: aws_encryption_sdk.key_providers.kms.KMSMasterKey
:raises InvalidKeyIdError: if key_id is not a valid KMS CMK ID to which this key provider has access
]
variable[_key_id] assign[=] call[name[to_str], parameter[name[key_id]]]
return[call[name[KMSMasterKey], parameter[]]]
|
keyword[def] identifier[_new_master_key] ( identifier[self] , identifier[key_id] ):
literal[string]
identifier[_key_id] = identifier[to_str] ( identifier[key_id] )
keyword[return] identifier[KMSMasterKey] ( identifier[config] = identifier[KMSMasterKeyConfig] ( identifier[key_id] = identifier[key_id] , identifier[client] = identifier[self] . identifier[_client] ( identifier[_key_id] )))
|
def _new_master_key(self, key_id):
"""Returns a KMSMasterKey for the specified key_id.
:param bytes key_id: KMS CMK ID
:returns: KMS Master Key based on key_id
:rtype: aws_encryption_sdk.key_providers.kms.KMSMasterKey
:raises InvalidKeyIdError: if key_id is not a valid KMS CMK ID to which this key provider has access
"""
_key_id = to_str(key_id) # KMS client requires str, not bytes
return KMSMasterKey(config=KMSMasterKeyConfig(key_id=key_id, client=self._client(_key_id)))
|
def parse_field(text, name=None, version=None, encoding_chars=None, validation_level=None,
reference=None, force_varies=False):
"""
Parse the given ER7-encoded field and return an instance of :class:`Field <hl7apy.core.Field>`.
:type text: ``str``
:param text: the ER7-encoded string containing the fields to be parsed
:type name: ``str``
:param name: the field name (e.g. MSH_7)
:type version: ``str``
:param version: the HL7 version (e.g. "2.5"), or ``None`` to use the default
(see :func:`set_default_version <hl7apy.set_default_version>`)
:type encoding_chars: ``dict``
:param encoding_chars: a dictionary containing the encoding chars or None to use the default
(see :func:`set_default_encoding_chars <hl7apy.set_default_encoding_chars>`)
:type validation_level: ``int``
:param validation_level: the validation level. Possible values are those defined in
:class:`VALIDATION_LEVEL <hl7apy.consts.VALIDATION_LEVEL>` class or ``None`` to use the default
validation level (see :func:`set_default_validation_level <hl7apy.set_default_validation_level>`)
:type reference: ``dict``
:param reference: a dictionary containing the element structure returned by
:func:`load_reference <hl7apy.load_reference>` or :func:`find_reference <hl7apy.find_reference>`
or belonging to a message profile
:type force_varies: ``boolean``
    :param force_varies: flag that forces the field to use a varies structure when no reference is found.
        It is used when a segment ends with a field of type varies, which thus supports an unlimited number of children
:return: an instance of :class:`Field <hl7apy.core.Field>`
>>> field = "NUCLEAR^NELDA^W"
>>> nk1_2 = parse_field(field, name="NK1_2")
>>> print(nk1_2)
<Field NK1_2 (NAME) of type XPN>
>>> print(nk1_2.to_er7())
NUCLEAR^NELDA^W
>>> unknown = parse_field(field)
>>> print(unknown)
<Field of type None>
>>> print(unknown.to_er7())
NUCLEAR^NELDA^W
"""
version = _get_version(version)
encoding_chars = _get_encoding_chars(encoding_chars, version)
validation_level = _get_validation_level(validation_level)
try:
field = Field(name, version=version, validation_level=validation_level, reference=reference)
except InvalidName:
if force_varies:
reference = ('leaf', None, 'varies', None, None, -1)
field = Field(name, version=version, validation_level=validation_level, reference=reference)
else:
field = Field(version=version, validation_level=validation_level, reference=reference)
if name in ('MSH_1', 'MSH_2'):
s = SubComponent(datatype='ST', value=text, validation_level=validation_level, version=version)
c = Component(datatype='ST', validation_level=validation_level, version=version)
c.add(s)
field.add(c)
else:
children = parse_components(text, field.datatype, version, encoding_chars, validation_level,
field.structure_by_name)
if Validator.is_tolerant(validation_level) and is_base_datatype(field.datatype, version) and \
len(children) > 1:
field.datatype = None
field.children = children
return field
|
def function[parse_field, parameter[text, name, version, encoding_chars, validation_level, reference, force_varies]]:
constant[
Parse the given ER7-encoded field and return an instance of :class:`Field <hl7apy.core.Field>`.
:type text: ``str``
:param text: the ER7-encoded string containing the fields to be parsed
:type name: ``str``
:param name: the field name (e.g. MSH_7)
:type version: ``str``
:param version: the HL7 version (e.g. "2.5"), or ``None`` to use the default
(see :func:`set_default_version <hl7apy.set_default_version>`)
:type encoding_chars: ``dict``
:param encoding_chars: a dictionary containing the encoding chars or None to use the default
(see :func:`set_default_encoding_chars <hl7apy.set_default_encoding_chars>`)
:type validation_level: ``int``
:param validation_level: the validation level. Possible values are those defined in
:class:`VALIDATION_LEVEL <hl7apy.consts.VALIDATION_LEVEL>` class or ``None`` to use the default
validation level (see :func:`set_default_validation_level <hl7apy.set_default_validation_level>`)
:type reference: ``dict``
:param reference: a dictionary containing the element structure returned by
:func:`load_reference <hl7apy.load_reference>` or :func:`find_reference <hl7apy.find_reference>`
or belonging to a message profile
:type force_varies: ``boolean``
    :param force_varies: flag that forces the field to use a varies structure when no reference is found.
        It is used when a segment ends with a field of type varies, which thus supports an unlimited number of children
:return: an instance of :class:`Field <hl7apy.core.Field>`
>>> field = "NUCLEAR^NELDA^W"
>>> nk1_2 = parse_field(field, name="NK1_2")
>>> print(nk1_2)
<Field NK1_2 (NAME) of type XPN>
>>> print(nk1_2.to_er7())
NUCLEAR^NELDA^W
>>> unknown = parse_field(field)
>>> print(unknown)
<Field of type None>
>>> print(unknown.to_er7())
NUCLEAR^NELDA^W
]
variable[version] assign[=] call[name[_get_version], parameter[name[version]]]
variable[encoding_chars] assign[=] call[name[_get_encoding_chars], parameter[name[encoding_chars], name[version]]]
variable[validation_level] assign[=] call[name[_get_validation_level], parameter[name[validation_level]]]
<ast.Try object at 0x7da1b0ebfbe0>
if compare[name[name] in tuple[[<ast.Constant object at 0x7da1b0ebf2b0>, <ast.Constant object at 0x7da1b0ebf1f0>]]] begin[:]
variable[s] assign[=] call[name[SubComponent], parameter[]]
variable[c] assign[=] call[name[Component], parameter[]]
call[name[c].add, parameter[name[s]]]
call[name[field].add, parameter[name[c]]]
return[name[field]]
|
keyword[def] identifier[parse_field] ( identifier[text] , identifier[name] = keyword[None] , identifier[version] = keyword[None] , identifier[encoding_chars] = keyword[None] , identifier[validation_level] = keyword[None] ,
identifier[reference] = keyword[None] , identifier[force_varies] = keyword[False] ):
literal[string]
identifier[version] = identifier[_get_version] ( identifier[version] )
identifier[encoding_chars] = identifier[_get_encoding_chars] ( identifier[encoding_chars] , identifier[version] )
identifier[validation_level] = identifier[_get_validation_level] ( identifier[validation_level] )
keyword[try] :
identifier[field] = identifier[Field] ( identifier[name] , identifier[version] = identifier[version] , identifier[validation_level] = identifier[validation_level] , identifier[reference] = identifier[reference] )
keyword[except] identifier[InvalidName] :
keyword[if] identifier[force_varies] :
identifier[reference] =( literal[string] , keyword[None] , literal[string] , keyword[None] , keyword[None] ,- literal[int] )
identifier[field] = identifier[Field] ( identifier[name] , identifier[version] = identifier[version] , identifier[validation_level] = identifier[validation_level] , identifier[reference] = identifier[reference] )
keyword[else] :
identifier[field] = identifier[Field] ( identifier[version] = identifier[version] , identifier[validation_level] = identifier[validation_level] , identifier[reference] = identifier[reference] )
keyword[if] identifier[name] keyword[in] ( literal[string] , literal[string] ):
identifier[s] = identifier[SubComponent] ( identifier[datatype] = literal[string] , identifier[value] = identifier[text] , identifier[validation_level] = identifier[validation_level] , identifier[version] = identifier[version] )
identifier[c] = identifier[Component] ( identifier[datatype] = literal[string] , identifier[validation_level] = identifier[validation_level] , identifier[version] = identifier[version] )
identifier[c] . identifier[add] ( identifier[s] )
identifier[field] . identifier[add] ( identifier[c] )
keyword[else] :
identifier[children] = identifier[parse_components] ( identifier[text] , identifier[field] . identifier[datatype] , identifier[version] , identifier[encoding_chars] , identifier[validation_level] ,
identifier[field] . identifier[structure_by_name] )
keyword[if] identifier[Validator] . identifier[is_tolerant] ( identifier[validation_level] ) keyword[and] identifier[is_base_datatype] ( identifier[field] . identifier[datatype] , identifier[version] ) keyword[and] identifier[len] ( identifier[children] )> literal[int] :
identifier[field] . identifier[datatype] = keyword[None]
identifier[field] . identifier[children] = identifier[children]
keyword[return] identifier[field]
|
def parse_field(text, name=None, version=None, encoding_chars=None, validation_level=None, reference=None, force_varies=False):
"""
Parse the given ER7-encoded field and return an instance of :class:`Field <hl7apy.core.Field>`.
:type text: ``str``
:param text: the ER7-encoded string containing the fields to be parsed
:type name: ``str``
:param name: the field name (e.g. MSH_7)
:type version: ``str``
:param version: the HL7 version (e.g. "2.5"), or ``None`` to use the default
(see :func:`set_default_version <hl7apy.set_default_version>`)
:type encoding_chars: ``dict``
:param encoding_chars: a dictionary containing the encoding chars or None to use the default
(see :func:`set_default_encoding_chars <hl7apy.set_default_encoding_chars>`)
:type validation_level: ``int``
:param validation_level: the validation level. Possible values are those defined in
:class:`VALIDATION_LEVEL <hl7apy.consts.VALIDATION_LEVEL>` class or ``None`` to use the default
validation level (see :func:`set_default_validation_level <hl7apy.set_default_validation_level>`)
:type reference: ``dict``
:param reference: a dictionary containing the element structure returned by
:func:`load_reference <hl7apy.load_reference>` or :func:`find_reference <hl7apy.find_reference>`
or belonging to a message profile
:type force_varies: ``boolean``
    :param force_varies: flag that forces the field to use a varies structure when no reference is found.
        It is used when a segment ends with a field of type varies, which thus supports an unlimited number of children
:return: an instance of :class:`Field <hl7apy.core.Field>`
>>> field = "NUCLEAR^NELDA^W"
>>> nk1_2 = parse_field(field, name="NK1_2")
>>> print(nk1_2)
<Field NK1_2 (NAME) of type XPN>
>>> print(nk1_2.to_er7())
NUCLEAR^NELDA^W
>>> unknown = parse_field(field)
>>> print(unknown)
<Field of type None>
>>> print(unknown.to_er7())
NUCLEAR^NELDA^W
"""
version = _get_version(version)
encoding_chars = _get_encoding_chars(encoding_chars, version)
validation_level = _get_validation_level(validation_level)
try:
field = Field(name, version=version, validation_level=validation_level, reference=reference) # depends on [control=['try'], data=[]]
except InvalidName:
if force_varies:
reference = ('leaf', None, 'varies', None, None, -1)
field = Field(name, version=version, validation_level=validation_level, reference=reference) # depends on [control=['if'], data=[]]
else:
field = Field(version=version, validation_level=validation_level, reference=reference) # depends on [control=['except'], data=[]]
if name in ('MSH_1', 'MSH_2'):
s = SubComponent(datatype='ST', value=text, validation_level=validation_level, version=version)
c = Component(datatype='ST', validation_level=validation_level, version=version)
c.add(s)
field.add(c) # depends on [control=['if'], data=[]]
else:
children = parse_components(text, field.datatype, version, encoding_chars, validation_level, field.structure_by_name)
if Validator.is_tolerant(validation_level) and is_base_datatype(field.datatype, version) and (len(children) > 1):
field.datatype = None # depends on [control=['if'], data=[]]
field.children = children
return field
|
def dict2obj(d):
"""Convert a dict to an object or namespace
>>> d = {'a': 1, 'b': {'c': 2}, 'd': ["hi", {'foo': "bar"}]}
>>> obj = dict2obj(d)
>>> obj.b.c
2
>>> obj.d
['hi', {'foo': 'bar'}]
>>> d = {'a': 1, 'b': {'c': 2}, 'd': [("hi", {'foo': "bar"})]}
>>> obj = dict2obj(d)
>>> obj.d.hi.foo
'bar'
"""
if isinstance(d, (Mapping, list, tuple)):
try:
d = dict(d)
except (ValueError, TypeError):
return d
else:
return d
obj = Object()
for k, v in viewitems(d):
obj.__dict__[k] = dict2obj(v)
return obj
|
def function[dict2obj, parameter[d]]:
constant[Convert a dict to an object or namespace
>>> d = {'a': 1, 'b': {'c': 2}, 'd': ["hi", {'foo': "bar"}]}
>>> obj = dict2obj(d)
>>> obj.b.c
2
>>> obj.d
['hi', {'foo': 'bar'}]
>>> d = {'a': 1, 'b': {'c': 2}, 'd': [("hi", {'foo': "bar"})]}
>>> obj = dict2obj(d)
>>> obj.d.hi.foo
'bar'
]
if call[name[isinstance], parameter[name[d], tuple[[<ast.Name object at 0x7da18fe91b70>, <ast.Name object at 0x7da18fe91c60>, <ast.Name object at 0x7da18fe93cd0>]]]] begin[:]
<ast.Try object at 0x7da18fe93070>
variable[obj] assign[=] call[name[Object], parameter[]]
for taget[tuple[[<ast.Name object at 0x7da1b25ef580>, <ast.Name object at 0x7da1b25ee290>]]] in starred[call[name[viewitems], parameter[name[d]]]] begin[:]
call[name[obj].__dict__][name[k]] assign[=] call[name[dict2obj], parameter[name[v]]]
return[name[obj]]
|
keyword[def] identifier[dict2obj] ( identifier[d] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[d] ,( identifier[Mapping] , identifier[list] , identifier[tuple] )):
keyword[try] :
identifier[d] = identifier[dict] ( identifier[d] )
keyword[except] ( identifier[ValueError] , identifier[TypeError] ):
keyword[return] identifier[d]
keyword[else] :
keyword[return] identifier[d]
identifier[obj] = identifier[Object] ()
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[viewitems] ( identifier[d] ):
identifier[obj] . identifier[__dict__] [ identifier[k] ]= identifier[dict2obj] ( identifier[v] )
keyword[return] identifier[obj]
|
def dict2obj(d):
"""Convert a dict to an object or namespace
>>> d = {'a': 1, 'b': {'c': 2}, 'd': ["hi", {'foo': "bar"}]}
>>> obj = dict2obj(d)
>>> obj.b.c
2
>>> obj.d
['hi', {'foo': 'bar'}]
>>> d = {'a': 1, 'b': {'c': 2}, 'd': [("hi", {'foo': "bar"})]}
>>> obj = dict2obj(d)
>>> obj.d.hi.foo
'bar'
"""
if isinstance(d, (Mapping, list, tuple)):
try:
d = dict(d) # depends on [control=['try'], data=[]]
except (ValueError, TypeError):
return d # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
else:
return d
obj = Object()
for (k, v) in viewitems(d):
obj.__dict__[k] = dict2obj(v) # depends on [control=['for'], data=[]]
return obj
|
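The helper above depends on two compatibility names (`Mapping` and `viewitems`, typically from a Py2/Py3 shim); a self-contained modern-Python variant of the same recursion might look like the following sketch, which is not the library's own code:

from collections.abc import Mapping

class Object:
    pass  # bare namespace to hang attributes on

def dict2obj(d):
    if isinstance(d, (Mapping, list, tuple)):
        try:
            d = dict(d)
        except (ValueError, TypeError):
            return d  # e.g. a plain list of values stays a list
    else:
        return d  # scalars pass through unchanged
    obj = Object()
    for k, v in d.items():  # viewitems(d) in the original shim
        obj.__dict__[k] = dict2obj(v)
    return obj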
def _compute_all_features(self):
"""Computes all the features (beatsync, framesync) from the audio."""
# Read actual audio waveform
self._audio, _ = librosa.load(self.file_struct.audio_file,
sr=self.sr)
# Get duration of audio file
self.dur = len(self._audio) / float(self.sr)
# Compute actual features
self._framesync_features = self.compute_features()
# Compute framesync times
self._compute_framesync_times()
# Compute/Read beats
self._est_beats_times, self._est_beats_frames = self.estimate_beats()
self._ann_beats_times, self._ann_beats_frames = self.read_ann_beats()
# Beat-Synchronize
pad = True # Always append to the end of the features
self._est_beatsync_features, self._est_beatsync_times = \
self.compute_beat_sync_features(self._est_beats_frames,
self._est_beats_times, pad)
self._ann_beatsync_features, self._ann_beatsync_times = \
self.compute_beat_sync_features(self._ann_beats_frames,
self._ann_beats_times, pad)
|
def function[_compute_all_features, parameter[self]]:
constant[Computes all the features (beatsync, framesync) from the audio.]
<ast.Tuple object at 0x7da1b0552110> assign[=] call[name[librosa].load, parameter[name[self].file_struct.audio_file]]
name[self].dur assign[=] binary_operation[call[name[len], parameter[name[self]._audio]] / call[name[float], parameter[name[self].sr]]]
name[self]._framesync_features assign[=] call[name[self].compute_features, parameter[]]
call[name[self]._compute_framesync_times, parameter[]]
<ast.Tuple object at 0x7da1b02ce7a0> assign[=] call[name[self].estimate_beats, parameter[]]
<ast.Tuple object at 0x7da1b02cc8e0> assign[=] call[name[self].read_ann_beats, parameter[]]
variable[pad] assign[=] constant[True]
<ast.Tuple object at 0x7da1b02ce7d0> assign[=] call[name[self].compute_beat_sync_features, parameter[name[self]._est_beats_frames, name[self]._est_beats_times, name[pad]]]
<ast.Tuple object at 0x7da1b02ce950> assign[=] call[name[self].compute_beat_sync_features, parameter[name[self]._ann_beats_frames, name[self]._ann_beats_times, name[pad]]]
|
keyword[def] identifier[_compute_all_features] ( identifier[self] ):
literal[string]
identifier[self] . identifier[_audio] , identifier[_] = identifier[librosa] . identifier[load] ( identifier[self] . identifier[file_struct] . identifier[audio_file] ,
identifier[sr] = identifier[self] . identifier[sr] )
identifier[self] . identifier[dur] = identifier[len] ( identifier[self] . identifier[_audio] )/ identifier[float] ( identifier[self] . identifier[sr] )
identifier[self] . identifier[_framesync_features] = identifier[self] . identifier[compute_features] ()
identifier[self] . identifier[_compute_framesync_times] ()
identifier[self] . identifier[_est_beats_times] , identifier[self] . identifier[_est_beats_frames] = identifier[self] . identifier[estimate_beats] ()
identifier[self] . identifier[_ann_beats_times] , identifier[self] . identifier[_ann_beats_frames] = identifier[self] . identifier[read_ann_beats] ()
identifier[pad] = keyword[True]
identifier[self] . identifier[_est_beatsync_features] , identifier[self] . identifier[_est_beatsync_times] = identifier[self] . identifier[compute_beat_sync_features] ( identifier[self] . identifier[_est_beats_frames] ,
identifier[self] . identifier[_est_beats_times] , identifier[pad] )
identifier[self] . identifier[_ann_beatsync_features] , identifier[self] . identifier[_ann_beatsync_times] = identifier[self] . identifier[compute_beat_sync_features] ( identifier[self] . identifier[_ann_beats_frames] ,
identifier[self] . identifier[_ann_beats_times] , identifier[pad] )
|
def _compute_all_features(self):
"""Computes all the features (beatsync, framesync) from the audio."""
# Read actual audio waveform
(self._audio, _) = librosa.load(self.file_struct.audio_file, sr=self.sr)
# Get duration of audio file
self.dur = len(self._audio) / float(self.sr)
# Compute actual features
self._framesync_features = self.compute_features()
# Compute framesync times
self._compute_framesync_times()
# Compute/Read beats
(self._est_beats_times, self._est_beats_frames) = self.estimate_beats()
(self._ann_beats_times, self._ann_beats_frames) = self.read_ann_beats()
# Beat-Synchronize
pad = True # Always append to the end of the features
(self._est_beatsync_features, self._est_beatsync_times) = self.compute_beat_sync_features(self._est_beats_frames, self._est_beats_times, pad)
(self._ann_beatsync_features, self._ann_beatsync_times) = self.compute_beat_sync_features(self._ann_beats_frames, self._ann_beats_times, pad)
|
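The beat-synchronization step in `_compute_all_features` boils down to aggregating frame-level features between consecutive beat frames; here is a minimal librosa sketch of that idea, where the median aggregate and padding are assumptions rather than necessarily what `compute_beat_sync_features` does internally:

import numpy as np
import librosa

y, sr = librosa.load(librosa.ex('trumpet'))              # example clip from librosa's registry
mfcc = librosa.feature.mfcc(y=y, sr=sr)                  # frame-synchronous features
tempo, beat_frames = librosa.beat.beat_track(y=y, sr=sr)

# Median-aggregate each feature dimension between consecutive beats.
beatsync = librosa.util.sync(mfcc, beat_frames, aggregate=np.median, pad=True)
beat_times = librosa.frames_to_time(beat_frames, sr=sr)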
def clone(self, label):
"""
Clones this volume to a new volume in the same region with the given label
:param label: The label for the new volume.
:returns: The new volume object.
"""
result = self._client.post('{}/clone'.format(Volume.api_endpoint),
model=self, data={'label': label})
if not 'id' in result:
raise UnexpectedResponseError('Unexpected response cloning volume!')
return Volume(self._client, result['id'], result)
|
def function[clone, parameter[self, label]]:
constant[
Clones this volume to a new volume in the same region with the given label
:param label: The label for the new volume.
:returns: The new volume object.
]
variable[result] assign[=] call[name[self]._client.post, parameter[call[constant[{}/clone].format, parameter[name[Volume].api_endpoint]]]]
if <ast.UnaryOp object at 0x7da18fe90ca0> begin[:]
<ast.Raise object at 0x7da18fe929e0>
return[call[name[Volume], parameter[name[self]._client, call[name[result]][constant[id]], name[result]]]]
|
keyword[def] identifier[clone] ( identifier[self] , identifier[label] ):
literal[string]
identifier[result] = identifier[self] . identifier[_client] . identifier[post] ( literal[string] . identifier[format] ( identifier[Volume] . identifier[api_endpoint] ),
identifier[model] = identifier[self] , identifier[data] ={ literal[string] : identifier[label] })
keyword[if] keyword[not] literal[string] keyword[in] identifier[result] :
keyword[raise] identifier[UnexpectedResponseError] ( literal[string] )
keyword[return] identifier[Volume] ( identifier[self] . identifier[_client] , identifier[result] [ literal[string] ], identifier[result] )
|
def clone(self, label):
"""
Clones this volume to a new volume in the same region with the given label
:param label: The label for the new volume.
:returns: The new volume object.
"""
result = self._client.post('{}/clone'.format(Volume.api_endpoint), model=self, data={'label': label})
        if 'id' not in result:
raise UnexpectedResponseError('Unexpected response cloning volume!') # depends on [control=['if'], data=[]]
return Volume(self._client, result['id'], result)
|
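The `clone` method above POSTs to the volume's `/clone` endpoint and wraps the JSON response in a new `Volume`. A hedged usage sketch, assuming a linode_api4-style client; the token and volume ID are placeholders:

# Illustrative only: the token and volume ID are placeholders.
from linode_api4 import LinodeClient, Volume

client = LinodeClient("my-api-token")   # hypothetical token
vol = client.load(Volume, 12345)        # hypothetical volume ID
copy = vol.clone("nightly-backup")      # POSTs to '{api_endpoint}/clone'
print(copy.id, copy.label)
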
def write_mates(self):
'''Scan the current chromosome for matches to any of the reads stored
in the read1s buffer'''
if self.chrom is not None:
U.debug("Dumping %i mates for contig %s" % (
len(self.read1s), self.chrom))
for read in self.infile.fetch(reference=self.chrom, multiple_iterators=True):
if any((read.is_unmapped, read.mate_is_unmapped, read.is_read1)):
continue
key = read.query_name, read.reference_name, read.reference_start
if key in self.read1s:
self.outfile.write(read)
self.read1s.remove(key)
U.debug("%i mates remaining" % len(self.read1s))
|
def function[write_mates, parameter[self]]:
constant[Scan the current chromosome for matches to any of the reads stored
in the read1s buffer]
if compare[name[self].chrom is_not constant[None]] begin[:]
call[name[U].debug, parameter[binary_operation[constant[Dumping %i mates for contig %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da20c7946a0>, <ast.Attribute object at 0x7da20c796050>]]]]]
for taget[name[read]] in starred[call[name[self].infile.fetch, parameter[]]] begin[:]
if call[name[any], parameter[tuple[[<ast.Attribute object at 0x7da20c7966b0>, <ast.Attribute object at 0x7da20c794f70>, <ast.Attribute object at 0x7da20c794d60>]]]] begin[:]
continue
variable[key] assign[=] tuple[[<ast.Attribute object at 0x7da20c795210>, <ast.Attribute object at 0x7da20c796a10>, <ast.Attribute object at 0x7da20c794cd0>]]
if compare[name[key] in name[self].read1s] begin[:]
call[name[self].outfile.write, parameter[name[read]]]
call[name[self].read1s.remove, parameter[name[key]]]
call[name[U].debug, parameter[binary_operation[constant[%i mates remaining] <ast.Mod object at 0x7da2590d6920> call[name[len], parameter[name[self].read1s]]]]]
|
keyword[def] identifier[write_mates] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[chrom] keyword[is] keyword[not] keyword[None] :
identifier[U] . identifier[debug] ( literal[string] %(
identifier[len] ( identifier[self] . identifier[read1s] ), identifier[self] . identifier[chrom] ))
keyword[for] identifier[read] keyword[in] identifier[self] . identifier[infile] . identifier[fetch] ( identifier[reference] = identifier[self] . identifier[chrom] , identifier[multiple_iterators] = keyword[True] ):
keyword[if] identifier[any] (( identifier[read] . identifier[is_unmapped] , identifier[read] . identifier[mate_is_unmapped] , identifier[read] . identifier[is_read1] )):
keyword[continue]
identifier[key] = identifier[read] . identifier[query_name] , identifier[read] . identifier[reference_name] , identifier[read] . identifier[reference_start]
keyword[if] identifier[key] keyword[in] identifier[self] . identifier[read1s] :
identifier[self] . identifier[outfile] . identifier[write] ( identifier[read] )
identifier[self] . identifier[read1s] . identifier[remove] ( identifier[key] )
identifier[U] . identifier[debug] ( literal[string] % identifier[len] ( identifier[self] . identifier[read1s] ))
|
def write_mates(self):
"""Scan the current chromosome for matches to any of the reads stored
in the read1s buffer"""
if self.chrom is not None:
U.debug('Dumping %i mates for contig %s' % (len(self.read1s), self.chrom)) # depends on [control=['if'], data=[]]
for read in self.infile.fetch(reference=self.chrom, multiple_iterators=True):
if any((read.is_unmapped, read.mate_is_unmapped, read.is_read1)):
continue # depends on [control=['if'], data=[]]
key = (read.query_name, read.reference_name, read.reference_start)
if key in self.read1s:
self.outfile.write(read)
self.read1s.remove(key) # depends on [control=['if'], data=['key']] # depends on [control=['for'], data=['read']]
U.debug('%i mates remaining' % len(self.read1s))
|
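`write_mates` above implements a standard pysam mate-rescue pass: keys of pending read-1 alignments are buffered, then the contig is scanned once and any second-in-pair read whose (name, contig, position) key is buffered gets written out. A self-contained sketch under those assumptions; the file paths and the pending-key set are hypothetical, and the input BAM must be indexed for fetch() to work:

# Sketch of the mate-rescue scan; file paths and `pending` are placeholders.
import pysam

def rescue_mates(bam_path, out_path, chrom, pending):
    # `pending` is a set of (query_name, reference_name, reference_start) keys
    with pysam.AlignmentFile(bam_path, "rb") as infile, \
         pysam.AlignmentFile(out_path, "wb", template=infile) as outfile:
        for read in infile.fetch(reference=chrom, multiple_iterators=True):
            # Skip unmapped pairs and first-in-pair reads, as above
            if read.is_unmapped or read.mate_is_unmapped or read.is_read1:
                continue
            key = (read.query_name, read.reference_name, read.reference_start)
            if key in pending:
                outfile.write(read)
                pending.remove(key)
    return pending  # keys still unmatched after the scan
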
def get_domain_realm(self, path_info, environ):
"""Resolve a relative url to the appropriate realm name."""
realm = self._calc_realm_from_path_provider(path_info, environ)
return realm
|
def function[get_domain_realm, parameter[self, path_info, environ]]:
constant[Resolve a relative url to the appropriate realm name.]
variable[realm] assign[=] call[name[self]._calc_realm_from_path_provider, parameter[name[path_info], name[environ]]]
return[name[realm]]
|
keyword[def] identifier[get_domain_realm] ( identifier[self] , identifier[path_info] , identifier[environ] ):
literal[string]
identifier[realm] = identifier[self] . identifier[_calc_realm_from_path_provider] ( identifier[path_info] , identifier[environ] )
keyword[return] identifier[realm]
|
def get_domain_realm(self, path_info, environ):
"""Resolve a relative url to the appropriate realm name."""
realm = self._calc_realm_from_path_provider(path_info, environ)
return realm
|
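`get_domain_realm` above simply delegates to a pluggable path-to-realm provider. A minimal sketch of what such a provider could look like, using a hypothetical longest-prefix mapping (the real provider's logic is not part of this sample):

# Hypothetical provider: REALM_MAP and the default realm are illustrative.
REALM_MAP = {"/admin": "admin-realm", "/api": "api-realm"}

def calc_realm_from_path(path_info, default="public"):
    # Longest matching prefix wins, e.g. '/admin/users' -> 'admin-realm'
    best = max(
        (p for p in REALM_MAP if path_info.startswith(p)),
        key=len,
        default=None,
    )
    return REALM_MAP[best] if best is not None else default
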