code
stringlengths 75
104k
| docstring
stringlengths 1
46.9k
|
|---|---|
def _cumsum(group_idx, a, size, fill_value=None, dtype=None):
    """Grouped cumulative sum (N-to-N aggregate operation).

    For each element, return the running sum of ``a`` restricted to the
    elements sharing the same label in ``group_idx``, in original order.

    >>> group_idx = np.array([4, 3, 3, 4, 4, 1, 1, 1, 7, 8, 7, 4, 3, 3, 1, 1])
    >>> a = np.array([3, 4, 1, 3, 9, 9, 6, 7, 7, 0, 8, 2, 1, 8, 9, 8])
    >>> _cumsum(group_idx, a, np.max(group_idx) + 1)
    array([ 3,  4,  5,  6, 15,  9, 15, 22,  7,  0, 15, 17,  6, 14, 31, 39])
    """
    # A stable sort brings equal group labels together while preserving the
    # original within-group order; the inverse permutation undoes it at the end.
    order = np.argsort(group_idx, kind='mergesort')
    undo_order = np.argsort(order, kind='mergesort')
    groups_sorted = group_idx[order]
    values_sorted = a[order]
    running = np.cumsum(values_sorted, dtype=dtype)
    positions = np.arange(len(a), dtype=int)
    # For every element, the sorted-order index of the first element of its
    # group (per-group minimum of positions, broadcast back onto the group).
    first_of_group = _min(groups_sorted, positions, size, fill_value=0)[groups_sorted]
    # Subtract the cumulative total accrued before each group started, and add
    # back the group's first value so the group's own sum starts from it.
    running += values_sorted[first_of_group] - running[first_of_group]
    return running[undo_order]
|
N to N aggregate operation of cumsum. Perform cumulative sum for each group.
group_idx = np.array([4, 3, 3, 4, 4, 1, 1, 1, 7, 8, 7, 4, 3, 3, 1, 1])
a = np.array([3, 4, 1, 3, 9, 9, 6, 7, 7, 0, 8, 2, 1, 8, 9, 8])
_cumsum(group_idx, a, np.max(group_idx) + 1)
>>> array([ 3, 4, 5, 6, 15, 9, 15, 22, 7, 0, 15, 17, 6, 14, 31, 39])
|
def get(self, sid):
    """
    Constructs a VerificationContext

    :param sid: The unique string that identifies the resource
    :returns: twilio.rest.verify.v2.service.verification.VerificationContext
    :rtype: twilio.rest.verify.v2.service.verification.VerificationContext
    """
    service_sid = self._solution['service_sid']
    return VerificationContext(self._version, service_sid=service_sid, sid=sid)
Constructs a VerificationContext
:param sid: The unique string that identifies the resource
:returns: twilio.rest.verify.v2.service.verification.VerificationContext
:rtype: twilio.rest.verify.v2.service.verification.VerificationContext
|
def is_possible_type(
    self, abstract_type: GraphQLAbstractType, possible_type: GraphQLObjectType
) -> bool:
    """Check whether a concrete type is possible for an abstract type.

    The set of possible type names for each abstract type is computed
    lazily on first use and memoized in ``self._possible_type_map``.
    """
    cache = self._possible_type_map
    names = cache.get(abstract_type.name)
    if names is None:
        # Cache miss: materialize and memoize the possible type names.
        names = {t.name for t in self.get_possible_types(abstract_type)}
        cache[abstract_type.name] = names
    return possible_type.name in names
|
Check whether a concrete type is possible for an abstract type.
|
def setup_random_seed(seed):
    """Seed numpy's global random number generator.

    Passing ``-1`` picks a fresh random seed for every run; any other value
    is used directly, making all runs reproducible.

    Parameters
    ----------
    seed : int
        The seed of the random number generator, or -1 for a random seed.
    """
    if seed == -1:
        # Draw one shared seed, so all regions have identical column
        # non-uniformity.
        seed = np.random.randint(0, int(1e9))
    np.random.seed(seed)
|
Setup the random seed. If the input seed is -1, the code will use a random seed for every run. If it is \
positive, that seed is used for all runs, thereby giving reproducible results.
Parameters
----------
seed : int
The seed of the random number generator.
|
def pickle_dict(items):
    '''Returns a new dictionary where values which aren't plain strings
    are pickled. Also, a new key '_pickled' contains a comma separated
    list of keys corresponding to the pickled values (only present when
    at least one value was pickled).

    :param items: mapping of keys to arbitrary values
    :returns: new dict with non-string values pickled
    '''
    # `basestring` only exists on Python 2; fall back to `str` on Python 3
    # so the function works on both without changing Python 2 behavior.
    try:
        string_types = basestring  # noqa: F821
    except NameError:
        string_types = str
    ret = {}
    pickled_keys = []
    for key, val in items.items():
        if isinstance(val, string_types):
            ret[key] = val
        else:
            pickled_keys.append(key)
            ret[key] = pickle.dumps(val)
    if pickled_keys:
        ret['_pickled'] = ','.join(pickled_keys)
    return ret
|
Returns a new dictionary where values which aren't instances of
basestring are pickled. Also, a new key '_pickled' contains a comma
separated list of keys corresponding to the pickled values.
|
def parse_string(self, string):
    """Parse ASCII output of JPrintMeta.

    Splits the raw output into per-application chunks and passes each
    chunk to ``self._record_app_data``. A chunk runs from the first line
    naming an application up to (and including) the line starting with
    "<application> Linux".

    :param string: raw output of JPrintMeta; nothing is recorded when
        it is empty.
    """
    self.log.info("Parsing ASCII data")
    if not string:
        self.log.warning("Empty metadata")
        return
    lines = string.splitlines()
    application_data = []
    # First token of the first line names the first application.
    # NOTE(review): `application + b' Linux'` below implies `string` and its
    # split tokens are bytes -- confirm callers pass bytes, not str.
    application = lines[0].split()[0]
    self.log.debug("Reading meta information for '%s'" % application)
    for line in lines:
        if application is None:
            # Previous chunk was flushed; this line starts the next one.
            # NOTE(review): this logs before `application` is reassigned,
            # so it always prints 'None' -- looks unintentional; confirm.
            self.log.debug(
                "Reading meta information for '%s'" % application
            )
            application = line.split()[0]
        application_data.append(line)
        if line.startswith(application + b' Linux'):
            # "<app> Linux" marks the end of this application's chunk.
            self._record_app_data(application_data)
            application_data = []
            application = None
|
Parse ASCII output of JPrintMeta
|
def execute(self, method, *args, **kargs):
    """Dispatch an API call by name, retrying on authentication errors.

    Looks up `method` in the dispatch table and calls it with the given
    arguments. Marketo error codes 601 (auth token not valid) and 602
    (auth token expired) trigger re-authentication and a retry, up to 10
    attempts; any other MarketoException is re-raised as a generic
    Exception carrying the message and code.

    :param method: name of the API method to invoke (a key of the
        dispatch table below)
    :raises KeyError: if `method` is not a known API method
    :raises Exception: wrapping any non-auth MarketoException
    """
    # Build the dispatch table once per call; previously it was rebuilt on
    # every retry iteration inside the loop.
    method_map = {
        'get_lead_by_id': self.get_lead_by_id,
        'get_multiple_leads_by_filter_type': self.get_multiple_leads_by_filter_type,
        'get_multiple_leads_by_list_id': self.get_multiple_leads_by_list_id,
        'get_multiple_leads_by_list_id_yield': self.get_multiple_leads_by_list_id_yield,
        'get_multiple_leads_by_program_id': self.get_multiple_leads_by_program_id,
        'get_multiple_leads_by_program_id_yield': self.get_multiple_leads_by_program_id_yield,
        'change_lead_program_status': self.change_lead_program_status,
        'create_update_leads': self.create_update_leads,
        'associate_lead': self.associate_lead,
        'push_lead': self.push_lead,
        'merge_lead': self.merge_lead,
        'get_lead_partitions': self.get_lead_partitions,
        'create_list': self.create_list,
        'update_list': self.update_list,
        'delete_list': self.delete_list,
        'get_list_by_id': self.get_list_by_id,
        'get_list_by_name': self.get_list_by_name,
        'get_multiple_lists': self.get_multiple_lists,
        'browse_lists': self.browse_lists,
        'add_leads_to_list': self.add_leads_to_list,
        'remove_leads_from_list': self.remove_leads_from_list,
        'member_of_list': self.member_of_list,
        'get_campaign_by_id': self.get_campaign_by_id,
        'get_multiple_campaigns': self.get_multiple_campaigns,
        'schedule_campaign': self.schedule_campaign,
        'request_campaign': self.request_campaign,
        'import_lead': self.import_lead,
        'get_import_lead_status': self.get_import_lead_status,
        'get_import_failure_file': self.get_import_failure_file,
        'get_import_warning_file': self.get_import_warning_file,
        'describe': self.describe,
        'get_activity_types': self.get_activity_types,
        'get_paging_token': self.get_paging_token,
        'get_lead_activities': self.get_lead_activities,
        'get_lead_activities_yield': self.get_lead_activities_yield,
        'get_lead_changes': self.get_lead_changes,
        'get_lead_changes_yield': self.get_lead_changes_yield,
        'add_custom_activities': self.add_custom_activities,
        'get_daily_usage': self.get_daily_usage,
        'get_last_7_days_usage': self.get_last_7_days_usage,
        'get_daily_errors': self.get_daily_errors,
        'get_last_7_days_errors': self.get_last_7_days_errors,
        'delete_lead': self.delete_lead,
        'get_deleted_leads': self.get_deleted_leads,
        'update_leads_partition': self.update_leads_partition,
        'create_folder': self.create_folder,
        'get_folder_by_id': self.get_folder_by_id,
        'get_folder_by_name': self.get_folder_by_name,
        'get_folder_contents': self.get_folder_contents,
        'update_folder': self.update_folder,
        'delete_folder': self.delete_folder,
        'browse_folders': self.browse_folders,
        'create_token': self.create_token,
        'get_tokens': self.get_tokens,
        'delete_tokens': self.delete_tokens,
        'create_email_template': self.create_email_template,
        'get_email_template_by_id': self.get_email_template_by_id,
        'get_email_template_by_name': self.get_email_template_by_name,
        'update_email_template': self.update_email_template,
        'delete_email_template': self.delete_email_template,
        'get_email_templates': self.get_email_templates,
        'get_email_templates_yield': self.get_email_templates_yield,
        'get_email_template_content': self.get_email_template_content,
        'update_email_template_content': self.update_email_template_content,
        'approve_email_template': self.approve_email_template,
        'unapprove_email_template': self.unapprove_email_template,
        'discard_email_template_draft': self.discard_email_template_draft,
        'clone_email_template': self.clone_email_template,
        'create_email': self.create_email,
        'get_email_by_id': self.get_email_by_id,
        'get_email_by_name': self.get_email_by_name,
        'delete_email': self.delete_email,
        'update_email': self.update_email,
        'get_emails': self.get_emails,
        'get_emails_yield': self.get_emails_yield,
        'get_email_content': self.get_email_content,
        'update_email_content': self.update_email_content,
        'update_email_content_in_editable_section': self.update_email_content_in_editable_section,
        'get_email_dynamic_content': self.get_email_dynamic_content,
        'update_email_dynamic_content': self.update_email_dynamic_content,
        'approve_email': self.approve_email,
        'unapprove_email': self.unapprove_email,
        'discard_email_draft': self.discard_email_draft,
        'clone_email': self.clone_email,
        'send_sample_email': self.send_sample_email,
        'get_email_full_content': self.get_email_full_content,
        'create_landing_page': self.create_landing_page,
        'get_landing_page_by_id': self.get_landing_page_by_id,
        'get_landing_page_by_name': self.get_landing_page_by_name,
        'delete_landing_page': self.delete_landing_page,
        'update_landing_page': self.update_landing_page,
        'get_landing_pages': self.get_landing_pages,
        'get_landing_pages_yield': self.get_landing_pages_yield,
        'get_landing_page_content': self.get_landing_page_content,
        'create_landing_page_content_section': self.create_landing_page_content_section,
        'update_landing_page_content_section': self.update_landing_page_content_section,
        'delete_landing_page_content_section': self.delete_landing_page_content_section,
        'get_landing_page_dynamic_content': self.get_landing_page_dynamic_content,
        'update_landing_page_dynamic_content': self.update_landing_page_dynamic_content,
        'approve_landing_page': self.approve_landing_page,
        'unapprove_landing_page': self.unapprove_landing_page,
        'discard_landing_page_draft': self.discard_landing_page_draft,
        'clone_landing_page': self.clone_landing_page,
        'create_form': self.create_form,
        'get_form_by_id': self.get_form_by_id,
        'get_form_by_name': self.get_form_by_name,
        'delete_form': self.delete_form,
        'update_form': self.update_form,
        'get_forms': self.get_forms,
        'get_forms_yield': self.get_forms_yield,
        'get_form_fields': self.get_form_fields,
        'create_form_field': self.create_form_field,
        'update_form_field': self.update_form_field,
        'delete_form_field': self.delete_form_field,
        'approve_form': self.approve_form,
        'unapprove_form': self.unapprove_form,
        'discard_form_draft': self.discard_form_draft,
        'clone_form': self.clone_form,
        'create_file': self.create_file,
        'get_file_by_id': self.get_file_by_id,
        'get_file_by_name': self.get_file_by_name,
        'list_files': self.list_files,
        'get_files_yield': self.get_files_yield,
        'update_file_content': self.update_file_content,
        'create_snippet': self.create_snippet,
        'get_snippet_by_id': self.get_snippet_by_id,
        'delete_snippet': self.delete_snippet,
        'update_snippet': self.update_snippet,
        'get_snippets': self.get_snippets,
        'get_snippets_yield': self.get_snippets_yield,
        'get_snippet_content': self.get_snippet_content,
        'update_snippet_content': self.update_snippet_content,
        'approve_snippet': self.approve_snippet,
        'unapprove_snippet': self.unapprove_snippet,
        'discard_snippet_draft': self.discard_snippet_draft,
        'clone_snippet': self.clone_snippet,
        'update_snippet_dynamic_content': self.update_snippet_dynamic_content,
        'get_snippet_dynamic_content': self.get_snippet_dynamic_content,
        'get_segmentations': self.get_segmentations,
        'get_segments': self.get_segments,
        'create_landing_page_template': self.create_landing_page_template,
        'get_landing_page_template_by_id': self.get_landing_page_template_by_id,
        'get_landing_page_template_by_name': self.get_landing_page_template_by_name,
        'get_landing_page_templates': self.get_landing_page_templates,
        'get_landing_page_templates_yield': self.get_landing_page_templates_yield,
        'get_landing_page_template_content': self.get_landing_page_template_content,
        'update_landing_page_template_content': self.update_landing_page_template_content,
        'update_landing_page_template': self.update_landing_page_template,
        'delete_landing_page_template': self.delete_landing_page_template,
        'approve_landing_page_template': self.approve_landing_page_template,
        'unapprove_landing_page_template': self.unapprove_landing_page_template,
        'discard_landing_page_template_draft': self.discard_landing_page_template_draft,
        'clone_landing_page_template': self.clone_landing_page_template,
        'create_program': self.create_program,
        'get_program_by_id': self.get_program_by_id,
        'get_program_by_name': self.get_program_by_name,
        'get_program_by_tag_type': self.get_program_by_tag_type,
        'update_program': self.update_program,
        'delete_program': self.delete_program,
        'browse_programs': self.browse_programs,
        'get_programs_yield': self.get_programs_yield,
        'clone_program': self.clone_program,
        'approve_program': self.approve_program,
        'unapprove_program': self.unapprove_program,
        'get_channels': self.get_channels,
        'get_channel_by_name': self.get_channel_by_name,
        'get_tags': self.get_tags,
        'get_tag_by_name': self.get_tag_by_name,
        'get_list_of_custom_objects': self.get_list_of_custom_objects,
        'describe_custom_object': self.describe_custom_object,
        'create_update_custom_objects': self.create_update_custom_objects,
        'delete_custom_objects': self.delete_custom_objects,
        'get_custom_objects': self.get_custom_objects,
        'describe_opportunity': self.describe_opportunity,
        'create_update_opportunities': self.create_update_opportunities,
        'delete_opportunities': self.delete_opportunities,
        'get_opportunities': self.get_opportunities,
        'describe_opportunity_role': self.describe_opportunity_role,
        'create_update_opportunities_roles': self.create_update_opportunities_roles,
        'delete_opportunity_roles': self.delete_opportunity_roles,
        'get_opportunity_roles': self.get_opportunity_roles,
        'describe_company': self.describe_company,
        'create_update_companies': self.create_update_companies,
        'delete_companies': self.delete_companies,
        'get_companies': self.get_companies,
        'describe_sales_person': self.describe_sales_person,
        'create_update_sales_persons': self.create_update_sales_persons,
        'delete_sales_persons': self.delete_sales_persons,
        'get_sales_persons': self.get_sales_persons,
        'get_custom_activity_types': self.get_custom_activity_types,
        'describe_custom_activity_type': self.describe_custom_activity_type,
        'create_custom_activity_type': self.create_custom_activity_type,
        'update_custom_activity_type': self.update_custom_activity_type,
        'approve_custom_activity_type': self.approve_custom_activity_type,
        'create_custom_activity_type_attribute': self.create_custom_activity_type_attribute,
        'discard_custom_activity_type_draft': self.discard_custom_activity_type_draft,
        'delete_custom_activity_type': self.delete_custom_activity_type,
        'update_custom_activity_type_attribute': self.update_custom_activity_type_attribute,
        'delete_custom_activity_type_attribute': self.delete_custom_activity_type_attribute,
        'get_leads_export_jobs_list': self.get_leads_export_jobs_list,
        'get_activities_export_jobs_list': self.get_activities_export_jobs_list,
        'create_leads_export_job': self.create_leads_export_job,
        'create_activities_export_job': self.create_activities_export_job,
        'enqueue_leads_export_job': self.enqueue_leads_export_job,
        'enqueue_activities_export_job': self.enqueue_activities_export_job,
        'cancel_leads_export_job': self.cancel_leads_export_job,
        'cancel_activities_export_job': self.cancel_activities_export_job,
        'get_leads_export_job_status': self.get_leads_export_job_status,
        'get_activities_export_job_status': self.get_activities_export_job_status,
        'get_leads_export_job_file': self.get_leads_export_job_file,
        'get_activities_export_job_file': self.get_activities_export_job_file
    }
    result = None
    # max 10 rechecks
    for i in range(0, 10):
        try:
            result = method_map[method](*args, **kargs)
        except MarketoException as e:
            # 601 -> auth token not valid
            # 602 -> auth token expired
            if e.code in ['601', '602']:
                self.authenticate()
                continue
            else:
                raise Exception({'message': e.message, 'code': e.code})
        break
    return result
|
max 10 rechecks
|
async def search_participant(self, name, force_update=False):
    """ search a participant by (display) name

    |methcoro|

    Args:
        name: display name of the participant
        force_update (default=False): True to force an update to the Challonge API

    Returns:
        Participant: None if not found

    Raises:
        APIException
    """
    if force_update or self.participants is None:
        await self.get_participants()
    if self.participants is None:
        return None
    # First participant whose display name matches exactly, else None.
    return next((p for p in self.participants if p.name == name), None)
|
search a participant by (display) name
|methcoro|
Args:
name: display name of the participant
force_update (default=False): True to force an update to the Challonge API
Returns:
Participant: None if not found
Raises:
APIException
|
def shape(self):
    """Returns a tuple of row, column, (band count if multidimensional)."""
    rows = self.ds.RasterYSize
    cols = self.ds.RasterXSize
    bands = self.ds.RasterCount
    # Drop the band axis for single-band (or empty) datasets.
    return (rows, cols, bands) if bands > 1 else (rows, cols)
|
Returns a tuple of row, column, (band count if multidimensional).
|
def default_start():
    """
    Use `sys.argv` for starting parameters. This is the entry-point of `vlcp-start`
    """
    config, daemon, pidfile, startup, fork = parsearg()
    default_conf = '/etc/vlcp.conf'
    if config is None:
        # No config given: fall back to the system-wide default if present.
        if os.path.isfile(default_conf):
            config = default_conf
        else:
            print('/etc/vlcp.conf is not found; start without configurations.')
    elif not config:
        # An explicit empty value means "run without any configuration file".
        config = None
    main(config, startup, daemon, pidfile, fork)
|
Use `sys.argv` for starting parameters. This is the entry-point of `vlcp-start`
|
def follow(ctx, nick, url, force):
    """Add a new source to your followings."""
    new_source = Source(nick, url)
    known_sources = ctx.obj['conf'].following
    if not force:
        # Ask before overwriting an existing follow with the same nick.
        if any(s.nick == new_source.nick for s in known_sources):
            click.confirm("➤ You’re already following {0}. Overwrite?".format(
                click.style(new_source.nick, bold=True)), default=False, abort=True)
        # Ask before following a feed that doesn't respond with HTTP 200.
        _, status = get_remote_status([new_source])[0]
        if not status or status.status_code != 200:
            click.confirm("➤ The feed of {0} at {1} is not available. Follow anyway?".format(
                click.style(new_source.nick, bold=True),
                click.style(new_source.url, bold=True)), default=False, abort=True)
    ctx.obj['conf'].add_source(new_source)
    click.echo("✓ You’re now following {0}.".format(
        click.style(new_source.nick, bold=True)))
|
Add a new source to your followings.
|
def fetchall(self, mode=5, after=0, parent='any', order_by='id',
             limit=100, page=0, asc=1):
    """
    Return comments for admin with :param:`mode`.

    Yields one dict per comment, joining the comment fields with the
    ``uri``/``title`` of the thread it belongs to.

    :param mode: comment mode to filter on
    :param after: unused in this method -- TODO confirm whether callers
        rely on it
    :param parent: 'any' for no parent filter, None for top-level
        comments only, or a specific parent id
    :param order_by: column to sort by; values outside the whitelist
        fall back to sorting by creation time
    :param limit: page size (falsy disables paging entirely)
    :param page: zero-based page number
    :param asc: truthy for ascending order, falsy for descending
    """
    fields_comments = ['tid', 'id', 'parent', 'created', 'modified',
                       'mode', 'remote_addr', 'text', 'author',
                       'email', 'website', 'likes', 'dislikes']
    fields_threads = ['uri', 'title']
    sql_comments_fields = ', '.join(['comments.' + f
                                     for f in fields_comments])
    sql_threads_fields = ', '.join(['threads.' + f
                                    for f in fields_threads])
    # NOTE(review): the SQL is assembled as a list of fragments; presumably
    # self.db.execute joins them with whitespace -- confirm against the
    # database wrapper.
    sql = ['SELECT ' + sql_comments_fields + ', ' + sql_threads_fields + ' '
           'FROM comments INNER JOIN threads '
           'ON comments.tid=threads.id '
           'WHERE comments.mode = ? ']
    sql_args = [mode]
    if parent != 'any':
        if parent is None:
            sql.append('AND comments.parent IS NULL')
        else:
            sql.append('AND comments.parent=?')
            sql_args.append(parent)
    # custom sanitization
    # The ORDER BY column is whitelisted so user input never reaches the
    # SQL text unescaped; unknown columns fall back to comments.created.
    if order_by not in ['id', 'created', 'modified', 'likes', 'dislikes', 'tid']:
        sql.append('ORDER BY ')
        sql.append("comments.created")
        if not asc:
            sql.append(' DESC')
    else:
        sql.append('ORDER BY ')
        sql.append('comments.' + order_by)
        if not asc:
            sql.append(' DESC')
        # Secondary sort keeps a stable order for equal primary-sort keys.
        sql.append(", comments.created")
    if limit:
        sql.append('LIMIT ?,?')
        sql_args.append(page * limit)
        sql_args.append(limit)
    rv = self.db.execute(sql, sql_args).fetchall()
    for item in rv:
        yield dict(zip(fields_comments + fields_threads, item))
|
Return comments for admin with :param:`mode`.
|
def type_validator(validator, types, instance, schema):
    """Validate the 'type' field, skipping Swagger 1.2 'File' parameters.

    Swagger 1.2 supports parameters of 'type': 'File'; there is nothing
    to check in that case, so no errors are produced. All other types
    are delegated to the draft-3 type validator.
    """
    is_file_param = schema.get('type') == 'File'
    if is_file_param:
        return []
    return _validators.type_draft3(validator, types, instance, schema)
|
Swagger 1.2 supports parameters of 'type': 'File'. Skip validation of
the 'type' field in this case.
|
def get_fallback_resolution(self):
    """Return the fallback resolution of this surface.

    This is whatever was last set by :meth:`set_fallback_resolution`,
    or the default fallback resolution if it was never set.

    :returns: ``(x_pixels_per_inch, y_pixels_per_inch)``
    """
    # cairo writes the two doubles through out-pointers; `out + 0` and
    # `out + 1` are cffi pointer arithmetic into the array's slots.
    out = ffi.new('double[2]')
    cairo.cairo_surface_get_fallback_resolution(
        self._pointer, out + 0, out + 1)
    return (out[0], out[1])
|
Returns the previous fallback resolution
set by :meth:`set_fallback_resolution`,
or default fallback resolution if never set.
:returns: ``(x_pixels_per_inch, y_pixels_per_inch)``
|
def as_xml_index(self, basename="/tmp/sitemap.xml"):
    """Return the XML sitemapindex for a large list that must be split.

    The number of component sitemaps is derived from the list size, and
    their URIs are generated from `basename` via ``self.part_name``.

    Q - should there be a flag to select generation of each component sitemap
    in order to calculate the md5sum?
    Q - what timestamp should be used?

    :raises ListBaseIndexError: when the list is small enough that it does
        not actually require multiple files
    """
    num_parts = self.requires_multifile()
    if not num_parts:
        raise ListBaseIndexError(
            "Request for sitemapindex for list with only %d entries when max_sitemap_entries is set to %s" %
            (len(self), str(self.max_sitemap_entries)))
    index = ListBase()
    index.sitemapindex = True
    index.capability_name = self.capability_name
    index.default_capability()
    # One Resource entry per component sitemap.
    for part in range(num_parts):
        index.add(Resource(uri=self.part_name(basename, part)))
    return index.as_xml()
|
Return a string of the index for a large list that is split.
All we need to do is determine the number of component sitemaps
and generate their URIs based on a pattern.
Q - should there be a flag to select generation of each component sitemap
in order to calculate the md5sum?
Q - what timestamp should be used?
|
def find():
    """Find a local spark installation.

    Will first check the SPARK_HOME env variable, and otherwise
    search common installation locations, e.g. from homebrew
    """
    candidates = [
        '/usr/local/opt/apache-spark/libexec',  # OS X Homebrew
        '/usr/lib/spark/',  # AWS Amazon EMR
        '/usr/local/spark/',  # common linux path for spark
        '/opt/spark/',  # other common linux path for spark
        # Any other common places to look?
    ]
    spark_home = os.environ.get('SPARK_HOME', None)
    if not spark_home:
        # Fall back to the first candidate directory that exists.
        spark_home = next((p for p in candidates if os.path.exists(p)), None)
    if not spark_home:
        raise ValueError("Couldn't find Spark, make sure SPARK_HOME env is set"
                         " or Spark is in an expected location (e.g. from homebrew installation).")
    return spark_home
|
Find a local spark installation.
Will first check the SPARK_HOME env variable, and otherwise
search common installation locations, e.g. from homebrew
|
def parse_timespan_value(s):
    """Parse a string that contains a time span, optionally with a unit like s.

    @return the number of seconds encoded by the string
    """
    # Multipliers to convert each accepted unit into seconds; a missing
    # unit means the value is already in seconds.
    seconds_per_unit = {
        "": 1,
        "s": 1,
        "min": 60,
        "h": 60 * 60,
        "d": 24 * 60 * 60,
    }
    number, unit = split_number_and_unit(s)
    multiplier = seconds_per_unit.get(unit or "")
    if multiplier is None:
        raise ValueError('unknown unit: {} (allowed are s, min, h, and d)'.format(unit))
    return number * multiplier
|
Parse a string that contains a time span, optionally with a unit like s.
@return the number of seconds encoded by the string
|
def adjustPoolSize(self, newsize):
    """
    Change the target pool size.

    If we have too many connections already, ask some to finish what
    they're doing and die (preferring to kill connections to the node
    that already has the most connections). If we have too few, create
    more.

    :param newsize: desired number of connections; must be nonnegative
    :raises ValueError: when `newsize` is negative
    """
    if newsize < 0:
        raise ValueError("pool size must be nonnegative")
    self.log("Adjust pool size from %d to %d." % (self.target_pool_size, newsize))
    self.target_pool_size = newsize
    # Shrink first (pending connections, then live ones), then grow.
    self.kill_excess_pending_conns()
    self.kill_excess_conns()
    self.fill_pool()
|
Change the target pool size. If we have too many connections already,
ask some to finish what they're doing and die (preferring to kill
connections to the node that already has the most connections). If
we have too few, create more.
|
def parse_segment(text, version=None, encoding_chars=None, validation_level=None, reference=None):
    """
    Parse the given ER7-encoded segment and return an instance of :class:`Segment <hl7apy.core.Segment>`.

    :type text: ``str``
    :param text: the ER7-encoded string containing the segment to be parsed
    :type version: ``str``
    :param version: the HL7 version (e.g. "2.5"), or ``None`` to use the default
        (see :func:`set_default_version <hl7apy.set_default_version>`)
    :type encoding_chars: ``dict``
    :param encoding_chars: a dictionary containing the encoding chars or None to use the default
        (see :func:`set_default_encoding_chars <hl7apy.set_default_encoding_chars>`)
    :type validation_level: ``int``
    :param validation_level: the validation level. Possible values are those defined in
        :class:`VALIDATION_LEVEL <hl7apy.consts.VALIDATION_LEVEL>` class or ``None`` to use the default
        validation level (see :func:`set_default_validation_level <hl7apy.set_default_validation_level>`)
    :type reference: ``dict``
    :param reference: a dictionary containing the element structure returned by
        :func:`load_reference <hl7apy.load_reference>`, :func:`find_reference <hl7apy.find_reference>` or
        belonging to a message profile
    :return: an instance of :class:`Segment <hl7apy.core.Segment>`

    >>> segment = "EVN||20080115153000||||20080114003000"
    >>> s = parse_segment(segment)
    >>> print(s)
    <Segment EVN>
    >>> print(s.to_er7())
    EVN||20080115153000||||20080114003000
    """
    version = _get_version(version)
    encoding_chars = _get_encoding_chars(encoding_chars, version)
    validation_level = _get_validation_level(validation_level)
    segment_name = text[:3]
    # For MSH the 4th character is the field separator itself and carries
    # data, so only the 3-char name is stripped; otherwise the separator
    # following the name is stripped too.
    fields_text = text[3:] if segment_name == 'MSH' else text[4:]
    segment = Segment(segment_name, version=version, validation_level=validation_level,
                      reference=reference)
    segment.children = parse_fields(fields_text, segment_name, version, encoding_chars,
                                    validation_level, segment.structure_by_name,
                                    segment.allow_infinite_children)
    return segment
|
Parse the given ER7-encoded segment and return an instance of :class:`Segment <hl7apy.core.Segment>`.
:type text: ``str``
:param text: the ER7-encoded string containing the segment to be parsed
:type version: ``str``
:param version: the HL7 version (e.g. "2.5"), or ``None`` to use the default
(see :func:`set_default_version <hl7apy.set_default_version>`)
:type encoding_chars: ``dict``
:param encoding_chars: a dictionary containing the encoding chars or None to use the default
(see :func:`set_default_encoding_chars <hl7apy.set_default_encoding_chars>`)
:type validation_level: ``int``
:param validation_level: the validation level. Possible values are those defined in
:class:`VALIDATION_LEVEL <hl7apy.consts.VALIDATION_LEVEL>` class or ``None`` to use the default
validation level (see :func:`set_default_validation_level <hl7apy.set_default_validation_level>`)
:type reference: ``dict``
:param reference: a dictionary containing the element structure returned by
:func:`load_reference <hl7apy.load_reference>`, :func:`find_reference <hl7apy.find_reference>` or
belonging to a message profile
:return: an instance of :class:`Segment <hl7apy.core.Segment>`
>>> segment = "EVN||20080115153000||||20080114003000"
>>> s = parse_segment(segment)
>>> print(s)
<Segment EVN>
>>> print(s.to_er7())
EVN||20080115153000||||20080114003000
|
def depth_profile(list_, max_depth=None, compress_homogenous=True, compress_consecutive=False, new_depth=False):
    r"""
    Returns a nested list corresponding the shape of the nested structures
    lists represent depth, tuples represent shape. The values of the items do
    not matter. only the lengths.

    Args:
        list_ (list):
        max_depth (None):
        compress_homogenous (bool):
        compress_consecutive (bool): experimental
        new_depth (bool): currently unused; kept for backward compatibility

    CommandLine:
        python -m utool.util_list --test-depth_profile

    Setup:
        >>> from utool.util_list import *  # NOQA

    Example0:
        >>> # ENABLE_DOCTEST
        >>> list_ = [[[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]], [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]]
        >>> result = depth_profile(list_)
        >>> print(result)
        (2, 3, 4)

    Example1:
        >>> # ENABLE_DOCTEST
        >>> list_ = [[[[[1]]], [3, 4, 33]], [[1], [2, 3], [4, [5, 5]]], [1, 3]]
        >>> result = depth_profile(list_)
        >>> print(result)
        [[(1, 1, 1), 3], [1, 2, [1, 2]], 2]

    Example2:
        >>> # ENABLE_DOCTEST
        >>> list_ = [[[[[1]]], [3, 4, 33]], [[1], [2, 3], [4, [5, 5]]], [1, 3]]
        >>> result = depth_profile(list_, max_depth=1)
        >>> print(result)
        [[(1, '1'), 3], [1, 2, [1, '2']], 2]

    Example3:
        >>> # ENABLE_DOCTEST
        >>> list_ = [[[1, 2], [1, 2, 3]], None]
        >>> result = depth_profile(list_, compress_homogenous=True)
        >>> print(result)
        [[2, 3], 1]

    Example4:
        >>> # ENABLE_DOCTEST
        >>> list_ = [[3, 2], [3, 2], [3, 2], [3, 2], [3, 2], [3, 2], [9, 5, 3], [2, 2]]
        >>> result = depth_profile(list_, compress_homogenous=True, compress_consecutive=True)
        >>> print(result)
        [2] * 6 + [3, 2]

    Example5:
        >>> # ENABLE_DOCTEST
        >>> list_ = [[[3, 9], 2], [[3, 9], 2], [[3, 9], 2], [[3, 9], 2]]  #, [3, 2], [3, 2]]
        >>> result = depth_profile(list_, compress_homogenous=True, compress_consecutive=True)
        >>> print(result)
        (4, [2, 1])

    Example6:
        >>> # ENABLE_DOCTEST
        >>> list_ = [[[[1, 2]], [1, 2]], [[[1, 2]], [1, 2]], [[[0, 2]], [1]]]
        >>> result1 = depth_profile(list_, compress_homogenous=True, compress_consecutive=False)
        >>> result2 = depth_profile(list_, compress_homogenous=True, compress_consecutive=True)
        >>> result = str(result1) + '\n' + str(result2)
        >>> print(result)
        [[(1, 2), 2], [(1, 2), 2], [(1, 2), 1]]
        [[(1, 2), 2]] * 2 + [[(1, 2), 1]]

    Example7:
        >>> # ENABLE_DOCTEST
        >>> list_ = [[{'a': [1, 2], 'b': [3, 4, 5]}, [1, 2, 3]], None]
        >>> result = depth_profile(list_, compress_homogenous=True)
        >>> print(result)

    Example8:
        >>> # ENABLE_DOCTEST
        >>> list_ = [[[1]], [[[1, 1], [1, 1]]], [[[[1, 3], 1], [[1, 3, 3], 1, 1]]]]
        >>> result = depth_profile(list_, compress_homogenous=True)
        >>> print(result)

    Example9:
        >>> # ENABLE_DOCTEST
        >>> list_ = []
        >>> result = depth_profile(list_)
        >>> print(result)

    Example10:
        >>> # ENABLE_DOCTEST
        >>> fm1 = [[0, 0], [0, 0]]
        >>> fm2 = [[0, 0], [0, 0], [0, 0]]
        >>> fm3 = [[0, 0], [0, 0], [0, 0], [0, 0]]
        >>> list_ = [0, 0, 0]
        >>> list_ = [fm1, fm2, fm3]
        >>> max_depth = 0
        >>> new_depth = True
        >>> result = depth_profile(list_, max_depth=max_depth, new_depth=new_depth)
        >>> print(result)
    """
    if isinstance(list_, dict):
        list_ = list(list_.values())  # handle dict
    level_shape_list = []
    # For a pure bottom level list return the length
    if not any(map(util_type.is_listlike, list_)):
        return len(list_)
    # NOTE: a dead `if False and new_depth:` branch (guarding a large
    # commented-out alternative implementation) was removed here;
    # `new_depth` is currently unused but kept in the signature for
    # backward compatibility.
    for item in list_:
        if isinstance(item, dict):
            item = list(item.values())  # handle dict
        if util_type.is_listlike(item):
            if max_depth is None:
                level_shape_list.append(depth_profile(item, None))
            else:
                if max_depth >= 0:
                    level_shape_list.append(depth_profile(item, max_depth - 1))
                else:
                    # Depth budget exhausted: record the length as a string
                    # marker instead of recursing further.
                    level_shape_list.append(str(len(item)))
        else:
            level_shape_list.append(1)
    if compress_homogenous:
        # removes redundant information by returning a shape tuple
        if allsame(level_shape_list):
            dim_ = level_shape_list[0]
            len_ = len(level_shape_list)
            if isinstance(dim_, tuple):
                level_shape_list = tuple([len_] + list(dim_))
            else:
                level_shape_list = tuple([len_, dim_])
    if compress_consecutive:
        # Collapse runs of identical shapes into a '[shape] * n' string.
        hash_list = list(map(hash, map(str, level_shape_list)))
        consec_list = group_consecutives(hash_list, 0)
        if len(consec_list) != len(level_shape_list):
            len_list = list(map(len, consec_list))
            cumsum_list = np.cumsum(len_list)
            consec_str = '['
            thresh = 1
            for len_, cumsum in zip(len_list, cumsum_list):
                value = level_shape_list[cumsum - 1]
                if len_ > thresh:
                    consec_str += str(value) + '] * ' + str(len_)
                    consec_str += ' + ['
                else:
                    consec_str += str(value) + ', '
            if consec_str.endswith(', '):
                consec_str = consec_str[:-2]
            consec_str += ']'
            level_shape_list = consec_str
    return level_shape_list
|
r"""
Returns a nested list corresponding the shape of the nested structures
lists represent depth, tuples represent shape. The values of the items do
not matter. only the lengths.
Args:
list_ (list):
max_depth (None):
compress_homogenous (bool):
compress_consecutive (bool): experimental
CommandLine:
python -m utool.util_list --test-depth_profile
Setup:
>>> from utool.util_list import * # NOQA
Example0:
>>> # ENABLE_DOCTEST
>>> list_ = [[[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]], [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]]
>>> result = depth_profile(list_)
>>> print(result)
(2, 3, 4)
Example1:
>>> # ENABLE_DOCTEST
>>> list_ = [[[[[1]]], [3, 4, 33]], [[1], [2, 3], [4, [5, 5]]], [1, 3]]
>>> result = depth_profile(list_)
>>> print(result)
[[(1, 1, 1), 3], [1, 2, [1, 2]], 2]
Example2:
>>> # ENABLE_DOCTEST
>>> list_ = [[[[[1]]], [3, 4, 33]], [[1], [2, 3], [4, [5, 5]]], [1, 3]]
>>> result = depth_profile(list_, max_depth=1)
>>> print(result)
[[(1, '1'), 3], [1, 2, [1, '2']], 2]
Example3:
>>> # ENABLE_DOCTEST
>>> list_ = [[[1, 2], [1, 2, 3]], None]
>>> result = depth_profile(list_, compress_homogenous=True)
>>> print(result)
[[2, 3], 1]
Example4:
>>> # ENABLE_DOCTEST
>>> list_ = [[3, 2], [3, 2], [3, 2], [3, 2], [3, 2], [3, 2], [9, 5, 3], [2, 2]]
>>> result = depth_profile(list_, compress_homogenous=True, compress_consecutive=True)
>>> print(result)
[2] * 6 + [3, 2]
Example5:
>>> # ENABLE_DOCTEST
>>> list_ = [[[3, 9], 2], [[3, 9], 2], [[3, 9], 2], [[3, 9], 2]] #, [3, 2], [3, 2]]
>>> result = depth_profile(list_, compress_homogenous=True, compress_consecutive=True)
>>> print(result)
(4, [2, 1])
Example6:
>>> # ENABLE_DOCTEST
>>> list_ = [[[[1, 2]], [1, 2]], [[[1, 2]], [1, 2]], [[[0, 2]], [1]]]
>>> result1 = depth_profile(list_, compress_homogenous=True, compress_consecutive=False)
>>> result2 = depth_profile(list_, compress_homogenous=True, compress_consecutive=True)
>>> result = str(result1) + '\n' + str(result2)
>>> print(result)
[[(1, 2), 2], [(1, 2), 2], [(1, 2), 1]]
[[(1, 2), 2]] * 2 + [[(1, 2), 1]]
Example7:
>>> # ENABLE_DOCTEST
>>> list_ = [[{'a': [1, 2], 'b': [3, 4, 5]}, [1, 2, 3]], None]
>>> result = depth_profile(list_, compress_homogenous=True)
>>> print(result)
Example8:
>>> # ENABLE_DOCTEST
>>> list_ = [[[1]], [[[1, 1], [1, 1]]], [[[[1, 3], 1], [[1, 3, 3], 1, 1]]]]
>>> result = depth_profile(list_, compress_homogenous=True)
>>> print(result)
Example9:
>>> # ENABLE_DOCTEST
>>> list_ = []
>>> result = depth_profile(list_)
>>> print(result)
# THIS IS AN ERROR???
SHOULD BE
#[1, 1], [1, 2, 2], (1, ([1, 2]), (
Example10:
>>> # ENABLE_DOCTEST
>>> fm1 = [[0, 0], [0, 0]]
>>> fm2 = [[0, 0], [0, 0], [0, 0]]
>>> fm3 = [[0, 0], [0, 0], [0, 0], [0, 0]]
>>> list_ = [0, 0, 0]
>>> list_ = [fm1, fm2, fm3]
>>> max_depth = 0
>>> new_depth = True
>>> result = depth_profile(list_, max_depth=max_depth, new_depth=new_depth)
>>> print(result)
|
def bs_values_df(run_list, estimator_list, estimator_names, n_simulate,
                 **kwargs):
    """Computes a data frame of bootstrap resampled values.
    Parameters
    ----------
    run_list: list of dicts
        List of nested sampling run dicts.
    estimator_list: list of functions
        Estimators to apply to runs.
    estimator_names: list of strs
        Name of each func in estimator_list.
    n_simulate: int
        Number of bootstrap replications to use on each run.
    kwargs:
        Kwargs to pass to parallel_apply.
    Returns
    -------
    bs_values_df: pandas data frame
        Columns represent estimators and rows represent runs.
        Each cell contains a 1d array of bootstrap resampled values for the run
        and estimator.
    """
    tqdm_kwargs = kwargs.pop('tqdm_kwargs', {'desc': 'bs values'})
    # BUG FIX: the message previously read "len(estimator_names = {1}" -
    # the closing parenthesis after "estimator_names" was missing.
    assert len(estimator_list) == len(estimator_names), (
        'len(estimator_list) = {0} != len(estimator_names) = {1}'
        .format(len(estimator_list), len(estimator_names)))
    # Each element of bs_values_list is a 2d array: rows are estimators,
    # columns are the n_simulate bootstrap replications for one run.
    bs_values_list = pu.parallel_apply(
        nestcheck.error_analysis.run_bootstrap_values, run_list,
        func_args=(estimator_list,), func_kwargs={'n_simulate': n_simulate},
        tqdm_kwargs=tqdm_kwargs, **kwargs)
    df = pd.DataFrame()
    for i, name in enumerate(estimator_names):
        # Column holds, for every run, the 1d bootstrap values of estimator i.
        df[name] = [arr[i, :] for arr in bs_values_list]
    # Check there are the correct number of bootstrap replications in each cell
    for vals_shape in df.loc[0].apply(lambda x: x.shape).values:
        assert vals_shape == (n_simulate,), (
            'Should be n_simulate=' + str(n_simulate) + ' values in ' +
            'each cell. The cell contains array with shape ' +
            str(vals_shape))
    return df
|
Computes a data frame of bootstrap resampled values.
Parameters
----------
run_list: list of dicts
List of nested sampling run dicts.
estimator_list: list of functions
Estimators to apply to runs.
estimator_names: list of strs
Name of each func in estimator_list.
n_simulate: int
Number of bootstrap replications to use on each run.
kwargs:
Kwargs to pass to parallel_apply.
Returns
-------
bs_values_df: pandas data frame
Columns represent estimators and rows represent runs.
Each cell contains a 1d array of bootstrap resampled values for the run
and estimator.
|
def find_modules_with_decorators(path, decorator_module, decorator_name):
    '''
    Finds all the modules decorated with the specified decorator in the path,
    file or module specified.
    Args :
        path : Either a single module file path, or a directory whose
            sub-directories will all be scanned.
        decorator_module : The full name of the module defining the decorator.
        decorator_name : The name of the decorator.
    '''
    modules_paths = []
    # A path to a single module file.
    if path.endswith('.py'):
        modules_paths.append(path)
    # Otherwise a directory: collect every .py file recursively.
    else:
        # Raw string avoids the invalid escape-sequence warning for ``\.``.
        modules_paths += find_file_regex(path, r'.*\.py$')
    # Return only the modules that actually use the decorator.
    return [module for module in modules_paths
            if is_module_has_decorated(module, decorator_module,
                                       decorator_name)]
|
Finds all the modules decorated with the specified decorator in the path, file or module specified.
Args :
path : All modules in the directory and its sub-directories will be scanned.
decorator_module : Then full name of the module defining the decorator.
decorator_name : The name of the decorator.
|
def cumulative_distance(lat, lon,dist_int=None):
'''
;+
; CUMULATIVE_DISTANCE : permet de calculer la distance le long d'une ligne.
;
; @Author : Renaud DUSSURGET, LEGOS/CTOH
; @History :
; - Feb. 2009 : First release (adapted from calcul_distance)
;-
'''
# Define constants
#Degree to radians conversion
#deg_rad = np.pi/360.0
#Earth radius (km)
rt = 6378.137
#;Remove repeated positions
#IF (KEYWORD_SET(NOREPEAT)) THEN id = WHERE(~((lon_b_in EQ lon_a_in[0]) * (lat_b_in EQ lat_a_in[0])),okCnt) ELSE okcnt = 1
#IF (okCnt EQ 0) THEN RETURN lon_a_in, DOUBLE(0.)
nelts=lon.size
#Degree to radians conversion
lon_a = np.deg2rad(lon[0:nelts - 1])
lon_b = np.deg2rad(lon[1:nelts])
lat_a = np.deg2rad(lat[0:nelts - 1])
lat_b = np.deg2rad(lat[1:nelts])
interm = np.cos(lat_a) * np.cos(lat_b) * np.cos(lon_a - lon_b) + np.sin(lat_a) * np.sin(lat_b) #Wolfram Math World definition
dist_int=np.append(0,rt*np.arccos(interm))
return dist_int.cumsum()
|
;+
; CUMULATIVE_DISTANCE : permet de calculer la distance le long d'une ligne.
;
; @Author : Renaud DUSSURGET, LEGOS/CTOH
; @History :
; - Feb. 2009 : First release (adapted from calcul_distance)
;-
|
def receive_message(self, message, data):
        """Handle an incoming message.

        Only tracks launch status: a status response marks the app as
        launched. Always reports the message as handled.
        """
        if data['type'] == TYPE_RESPONSE_STATUS:
            self.is_launched = True
        return True
|
Currently not doing anything with received messages.
|
def show_proportions(adata):
"""Fraction of spliced/unspliced/ambiguous abundances
Arguments
---------
adata: :class:`~anndata.AnnData`
Annotated data matrix.
Returns
-------
Prints the fractions of abundances.
"""
layers_keys = [key for key in ['spliced', 'unspliced', 'ambiguous'] if key in adata.layers.keys()]
tot_mol_cell_layers = [adata.layers[key].sum(1) for key in layers_keys]
mean_abundances = np.round(
[np.mean(tot_mol_cell / np.sum(tot_mol_cell_layers, 0)) for tot_mol_cell in tot_mol_cell_layers], 2)
print('Abundance of ' + str(layers_keys) + ': ' + str(mean_abundances))
|
Fraction of spliced/unspliced/ambiguous abundances
Arguments
---------
adata: :class:`~anndata.AnnData`
Annotated data matrix.
Returns
-------
Prints the fractions of abundances.
|
def _lexical_chains(self, doc, term_concept_map):
        """
        Builds lexical chains, as an adjacency matrix,
        using a disambiguated term-concept map.

        Edges encode the semantic relation between concept pairs
        (1 = identity/synonymy, 2 = hypernymy/hyponymy, 3 = meronymy,
        0 = no edge); chains are the connected components of that graph.

        Returns a list of (concept list, adjacency sub-matrix) tuples,
        one per chain.

        NOTE(review): ``doc`` is not used in this method - confirm whether
        it is needed for the interface or can be dropped.
        """
        # Deduplicate concepts; set comprehension removes repeats, list gives
        # a stable index for the adjacency matrix.
        concepts = list({c for c in term_concept_map.values()})
        # Build an adjacency matrix for the graph
        # Using the encoding:
        # 1 = identity/synonymy, 2 = hypernymy/hyponymy, 3 = meronymy, 0 = no edge
        n_cons = len(concepts)
        adj_mat = np.zeros((n_cons, n_cons))
        for i, c in enumerate(concepts):
            # TO DO can only do i >= j since the graph is undirected
            for j, c_ in enumerate(concepts):
                edge = 0
                if c == c_:
                    edge = 1
                # TO DO when should simulate root be True?
                # NOTE(review): concepts appear to be WordNet-synset-like
                # objects (hypernym paths, meronym accessors) - confirm.
                elif c_ in c._shortest_hypernym_paths(simulate_root=False).keys():
                    edge = 2
                elif c in c_._shortest_hypernym_paths(simulate_root=False).keys():
                    edge = 2
                elif c_ in c.member_meronyms() + c.part_meronyms() + c.substance_meronyms():
                    edge = 3
                elif c in c_.member_meronyms() + c_.part_meronyms() + c_.substance_meronyms():
                    edge = 3
                adj_mat[i,j] = edge
        # Group connected concepts by labels
        # (connected_components returns (n_components, labels); labels[i] is
        # the chain id of concepts[i])
        concept_labels = connected_components(adj_mat, directed=False)[1]
        # One (concepts, indices) pair per chain label.
        lexical_chains = [([], []) for i in range(max(concept_labels) + 1)]
        for i, concept in enumerate(concepts):
            label = concept_labels[i]
            lexical_chains[label][0].append(concept)
            lexical_chains[label][1].append(i)
        # Return the lexical chains as (concept list, adjacency sub-matrix) tuples
        return [(chain, adj_mat[indices][:,indices]) for chain, indices in lexical_chains]
|
Builds lexical chains, as an adjacency matrix,
using a disambiguated term-concept map.
|
def charge(self):
        """Return the species charge, or None if it is not specified."""
        if self._reader._level == 3:
            # SBML level 3: the charge lives in the FBC extension; prefer
            # the newer namespace.
            for namespace in (FBC_V2, FBC_V1):
                value = self._root.get(_tag('charge', namespace))
                if value is not None:
                    return self._parse_charge_string(value)
        else:
            # Earlier levels carry a plain ``charge`` attribute.
            value = self._root.get('charge')
            if value is not None:
                return self._parse_charge_string(value)
        return None
|
Species charge
|
def _mom(self, kloc, cache, **kwargs):
        """
        Raw statistical moment of an independent joint distribution.

        Example:
            >>> dist = chaospy.J(chaospy.Uniform(), chaospy.Normal())
            >>> print(numpy.around(dist.mom([[0, 0, 1], [0, 1, 1]]), 4))
            [1. 0. 0.]
            >>> d0 = chaospy.Uniform()
            >>> dist = chaospy.J(d0, d0+chaospy.Uniform())
            >>> print(numpy.around(dist.mom([1, 1]), 4))
            0.5833
        """
        # Moments only factorize when the components are independent.
        if evaluation.get_dependencies(*list(self.inverse_map)):
            raise StochasticallyDependentError(
                "Joint distribution with dependencies not supported.")
        result = 1.
        for component in evaluation.sorted_dependencies(self):
            if component not in self.inverse_map:
                continue
            dimension = self.inverse_map[component]
            order = kloc[dimension].reshape(1)
            # Product of per-component moments.
            result *= evaluation.evaluate_moment(component, order, cache=cache)
        return result
|
Example:
>>> dist = chaospy.J(chaospy.Uniform(), chaospy.Normal())
>>> print(numpy.around(dist.mom([[0, 0, 1], [0, 1, 1]]), 4))
[1. 0. 0.]
>>> d0 = chaospy.Uniform()
>>> dist = chaospy.J(d0, d0+chaospy.Uniform())
>>> print(numpy.around(dist.mom([1, 1]), 4))
0.5833
|
def parse(self,DXfield):
        """Parse the dx file and construct a DX field object with component classes.
        A :class:`field` instance *DXfield* must be provided to be
        filled by the parser::
        DXfield_object = OpenDX.field(*args)
        parse(DXfield_object)
        A tokenizer turns the dx file into a stream of tokens. A
        hierarchy of parsers examines the stream. The level-0 parser
        ('general') distinguishes comments and objects (level-1). The
        object parser calls level-3 parsers depending on the object
        found. The basic idea is that of a 'state machine'. There is
        one parser active at any time. The main loop is the general
        parser.
        * Constructing the dx objects with classtype and classid is
        not implemented yet.
        * Unknown tokens raise an exception.
        """
        self.DXfield = DXfield # OpenDX.field (used by comment parser)
        self.currentobject = None # container for the object currently being built
        self.objects = [] # all dx objects collected while parsing
        self.tokens = [] # token buffer
        # NOTE(review): the open file handle is stored on self so the active
        # sub-parsers can read from it; it is closed when the with-block ends.
        with open(self.filename,'r') as self.dxfile:
            self.use_parser('general') # parse the whole file and populate self.objects
        # assemble field from objects
        for o in self.objects:
            if o.type == 'field':
                # Almost ignore the field object; VMD, for instance,
                # does not write components. To make this work
                # seamlessly I have to think harder how to organize
                # and use the data, eg preping the field object
                # properly and the initializing. Probably should also
                # check uniqueness of ids etc.
                DXfield.id = o.id
                continue
            c = o.initialize()
            self.DXfield.add(c.component,c)
        # free space: drop parser state now that DXfield holds the components
        del self.currentobject, self.objects
|
Parse the dx file and construct a DX field object with component classes.
A :class:`field` instance *DXfield* must be provided to be
filled by the parser::
DXfield_object = OpenDX.field(*args)
parse(DXfield_object)
A tokenizer turns the dx file into a stream of tokens. A
hierarchy of parsers examines the stream. The level-0 parser
('general') distinguishes comments and objects (level-1). The
object parser calls level-3 parsers depending on the object
found. The basic idea is that of a 'state machine'. There is
one parser active at any time. The main loop is the general
parser.
* Constructing the dx objects with classtype and classid is
not implemented yet.
* Unknown tokens raise an exception.
|
def custom_to_pmrapmdec(pmphi1, pmphi2, phi1, phi2, T=None, degree=False):
    """
    NAME:
       custom_to_pmrapmdec
    PURPOSE:
       rotate proper motions in a custom set of sky coordinates (phi1,phi2)
       to ICRS (ra,dec)
    INPUT:
       pmphi1 - proper motion in phi1 (multiplied with cos(phi2)) [mas/yr]
       pmphi2 - proper motion in phi2 [mas/yr]
       phi1 - custom longitude
       phi2 - custom latitude
       T= matrix defining the transformation in cartesian coordinates:
          new_rect = T dot old_rect
          where old_rect = [cos(dec)cos(ra), cos(dec)sin(ra), sin(dec)] and
          similar for new_rect
       degree= (False) if True, phi1 and phi2 are given in degrees
    OUTPUT:
       (pmra x cos(dec), dec) for vector inputs [:,2]
    HISTORY:
       2019-03-02 - Written - Nathaniel Starkman (UofT)
    """
    if T is None:
        raise ValueError("Must set T= for custom_to_pmrapmdec")
    # A rotation matrix's inverse is its transpose, so the reverse
    # transformation is just the forward one applied with T^T.
    inverse_T = nu.transpose(T)
    return pmrapmdec_to_custom(pmphi1, pmphi2, phi1, phi2,
                               T=inverse_T, degree=degree)
|
NAME:
custom_to_pmrapmdec
PURPOSE:
rotate proper motions in a custom set of sky coordinates (phi1,phi2) to ICRS (ra,dec)
INPUT:
   pmphi1 - proper motion in phi1 (multiplied with cos(phi2)) [mas/yr]
pmphi2 - proper motion in phi2 [mas/yr]
phi1 - custom longitude
phi2 - custom latitude
T= matrix defining the transformation in cartesian coordinates:
new_rect = T dot old_rect
where old_rect = [cos(dec)cos(ra), cos(dec)sin(ra), sin(dec)] and similar for new_rect
degree= (False) if True, phi1 and phi2 are given in degrees (default=False)
OUTPUT:
(pmra x cos(dec), dec) for vector inputs [:,2]
HISTORY:
2019-03-02 - Written - Nathaniel Starkman (UofT)
|
def coarsen_all_traces(level=2, exponential=False, axes="all", figure=None):
    """
    This function does nearest-neighbor coarsening of the data. See
    spinmob.fun.coarsen_data for more information.
    Parameters
    ----------
    level=2
        How strongly to coarsen.
    exponential=False
        If True, use the exponential method (great for log-x plots).
    axes="all"
        Which axes to coarsen.
    figure=None
        Which figure to use (defaults to the current figure).
    """
    if axes == "gca":
        axes = _pylab.gca()
    if axes == "all":
        # BUG FIX: honor the ``figure`` argument. Previously ``f`` was only
        # assigned when figure was None, so passing an explicit figure
        # raised NameError on ``f.axes``.
        f = figure if figure else _pylab.gcf()
        axes = f.axes
    if not _fun.is_iterable(axes):
        axes = [axes]
    for a in axes:
        # Coarsen every plotted line on this axis; defer the redraw.
        for line in a.get_lines():
            if isinstance(line, _mpl.lines.Line2D):
                coarsen_line(line, level, exponential, draw=False)
    _pylab.draw()
|
This function does nearest-neighbor coarsening of the data. See
spinmob.fun.coarsen_data for more information.
Parameters
----------
level=2
How strongly to coarsen.
exponential=False
If True, use the exponential method (great for log-x plots).
axes="all"
Which axes to coarsen.
figure=None
Which figure to use.
|
def set_grade(
        self,
        assignment_id,
        student_id,
        grade_value,
        gradebook_id='',
        **kwargs
):
        """Set numerical grade for student and assignment.

        Posts a numerical grade for a student/assignment pair. The grade
        ``mode`` options are: OVERALL_GRADE = ``1``, REGULAR_GRADE = ``2``.
        To record an 'excused' grade, pass ``None`` for the letter and
        numeric grade values and ``x`` as the ``specialGradeValue``.
        The ``returnAffectedValues`` flag controls whether student
        cumulative points and impacted assignment category grades
        (average and student grade) are returned.

        Args:
            assignment_id (str): numerical ID for assignment
            student_id (str): numerical ID for student
            grade_value (str): numerical grade value
            gradebook_id (str): unique identifier for gradebook, i.e. ``2314``
            kwargs (dict): dictionary of additional parameters
                .. code-block:: python
                    {
                        u'letterGradeValue':None,
                        u'booleanGradeValue':None,
                        u'specialGradeValue':None,
                        u'mode':2,
                        u'isGradeApproved':False,
                        u'comment':None,
                        u'returnAffectedValues': True,
                    }
        Raises:
            requests.RequestException: Exception connection error
            ValueError: Unable to decode response content
        Returns:
            dict: dictionary containing response ``status`` and ``message``
            .. code-block:: python
                {
                    u'message': u'grade saved successfully',
                    u'status': 1
                }
        """
        # pylint: disable=too-many-arguments
        payload = {
            'studentId': student_id,
            'assignmentId': assignment_id,
            'mode': 2,
            'comment': 'from MITx {0}'.format(time.ctime(time.time())),
            # Stringified because 'x' is a possible value for excused grades.
            'numericGradeValue': str(grade_value),
            'isGradeApproved': False
        }
        payload.update(kwargs)
        log.info(
            "student %s set_grade=%s for assignment %s",
            student_id,
            grade_value,
            assignment_id)
        target_gradebook = gradebook_id or self.gradebook_id
        return self.post(
            'grades/{gradebookId}'.format(gradebookId=target_gradebook),
            data=payload,
        )
|
Set numerical grade for student and assignment.
Set a numerical grade for for a student and assignment. Additional
options
for grade ``mode`` are: OVERALL_GRADE = ``1``, REGULAR_GRADE = ``2``
To set 'excused' as the grade, enter ``None`` for letter and
numeric grade values,
and pass ``x`` as the ``specialGradeValue``.
``ReturnAffectedValues`` flag determines whether or not to return
student cumulative points and
impacted assignment category grades (average and student grade).
Args:
assignment_id (str): numerical ID for assignment
student_id (str): numerical ID for student
grade_value (str): numerical grade value
gradebook_id (str): unique identifier for gradebook, i.e. ``2314``
kwargs (dict): dictionary of additional parameters
.. code-block:: python
{
u'letterGradeValue':None,
u'booleanGradeValue':None,
u'specialGradeValue':None,
u'mode':2,
u'isGradeApproved':False,
u'comment':None,
u'returnAffectedValues': True,
}
Raises:
requests.RequestException: Exception connection error
ValueError: Unable to decode response content
Returns:
dict: dictionary containing response ``status`` and ``message``
.. code-block:: python
{
u'message': u'grade saved successfully',
u'status': 1
}
|
def CopyToDateTimeString(self):
        """Copies the APFS timestamp to a date and time string.
        Returns:
          str: date and time value formatted as: "YYYY-MM-DD hh:mm:ss.#########" or
              None if the timestamp is missing or invalid.
        """
        timestamp = self._timestamp
        # Missing or out-of-range timestamps cannot be formatted.
        if timestamp is None:
            return None
        if not self._INT64_MIN <= timestamp <= self._INT64_MAX:
            return None
        return super(APFSTime, self)._CopyToDateTimeString()
|
Copies the APFS timestamp to a date and time string.
Returns:
str: date and time value formatted as: "YYYY-MM-DD hh:mm:ss.#########" or
None if the timestamp is missing or invalid.
|
def chunk(self, regex):
        '''Split this sequence of string components on *regex*.

        Each component's text is split with ``regex.split`` and every piece
        is wrapped in a :class:`StringComponent` carrying the original
        component's placeholder. Adjacent pieces that belong to different
        placeholders and have no whitespace at the boundary are merged back
        into a single combined component.

        FIXME: (original note) the merge only considers whitespace, which
        somewhat defeats the point of using *regex* for splitting.

        NOTE(review): the loop replaces a 2-element slice of ``chunks`` with
        one element while ``zip`` is still iterating over ``chunks`` - the
        list shrinks during iteration; confirm that the subsequent pairings
        behave as intended.
        '''
        chunks = []
        for component in self._content:
            chunks.extend(
                StringComponent(component.placeholder, s) for s in
                regex.split(str(component)))
        for i, (chunk1, chunk2) in enumerate(
                zip(chunks, islice(chunks, 1, None))):
            if chunk1.placeholder is not chunk2.placeholder:
                # We're crossing a boundary between placeholders
                if chunk1.strip() and chunk2.strip():
                    # There's no whitespace at the boundary, so stick the
                    # chunks together
                    # FIXME: This sort of defeats the point of the regex, as we
                    # only look at whitespace:
                    chunks[i:i + 2] = [self.__class__((chunk1, chunk2))]
        return chunks
|
FIXME:
|
def switch_to_app(self, package):
        """
        activates an app that is specified by package. Selects the first
        app it finds in the app list
        :param package: name of package/app
        :type package: str
        :return: None
        :rtype: None
        """
        log.debug("switching to app '{}'...".format(package))
        command, url_template = DEVICE_URLS["switch_to_app"]
        # Use the first matching widget for this package.
        widget_id = self._get_widget_id(package)
        target_url = url_template.format('{}', package, widget_id)
        self.result = self._exec(command, target_url)
|
activates an app that is specified by package. Selects the first
app it finds in the app list
:param package: name of package/app
:type package: str
:return: None
:rtype: None
|
def list_js_files(dir):
    """
    Generator for all JavaScript files in the directory, recursively
    >>> 'examples/module.js' in list(list_js_files('examples'))
    True
    """
    for dirpath, _dirnames, filenames in os.walk(dir):
        # Yield the full path of every JavaScript file in this directory.
        yield from (os.path.join(dirpath, name)
                    for name in filenames if is_js_file(name))
|
Generator for all JavaScript files in the directory, recursively
>>> 'examples/module.js' in list(list_js_files('examples'))
True
|
def merge_dictionaries(a, b):
    """Merge two dictionaries; duplicate keys get value from b."""
    merged = dict(a)
    merged.update(b)
    return merged
|
Merge two dictionaries; duplicate keys get value from b.
|
def normalized(self):
        """:obj:`DualQuaternion`: This quaternion with qr normalized.
        """
        # Scale the real part to unit norm; the dual part is unchanged.
        unit_qr = self.qr / np.linalg.norm(self.qr)
        return DualQuaternion(unit_qr, self.qd, True)
|
:obj:`DualQuaternion`: This quaternion with qr normalized.
|
def from_directory(cls, directory):
        """Create a security object from a directory.
        Relies on standard names for each file (``skein.crt`` and
        ``skein.pem``)."""
        expected = [('cert', os.path.join(directory, 'skein.crt')),
                    ('key', os.path.join(directory, 'skein.pem'))]
        # Fail early with a clear message if either file is missing.
        for name, path in expected:
            if not os.path.exists(path):
                raise context.FileNotFoundError(
                    "Security %s file not found at %r" % (name, path)
                )
        return Security(cert_file=expected[0][1], key_file=expected[1][1])
|
Create a security object from a directory.
Relies on standard names for each file (``skein.crt`` and
``skein.pem``).
|
def find_raw_devices(vendor=None, product=None, serial_number=None,
                     custom_match=None, **kwargs):
    """Find connected USB RAW devices. See usbutil.find_devices for more info.
    """
    def matches_raw(device):
        # Respect the caller-supplied filter first.
        if custom_match and not custom_match(device):
            return False
        # RAW devices expose a vendor-specific (0xFF/0xFF) interface.
        return bool(find_interfaces(device, bInterfaceClass=0xFF,
                                    bInterfaceSubClass=0xFF))
    return find_devices(vendor, product, serial_number, matches_raw, **kwargs)
|
Find connected USB RAW devices. See usbutil.find_devices for more info.
|
def log(self, n=None, template=None):
        """
        Run the repository log command
        Returns:
            str: output of log command (``bzr log -l <n>``)
        """
        command = ['bzr', 'log']
        # Limit the number of revisions when requested.
        if n:
            command.append('-l%d' % n)
        return self.sh(command, shell=False)
|
Run the repository log command
Returns:
str: output of log command (``bzr log -l <n>``)
|
def readSTATION0(path, stations):
    """
    Read a Seisan STATION0.HYP file on the path given.
    Outputs the information, and writes to station.dat file.
    :type path: str
    :param path: Path to the STATION0.HYP file
    :type stations: list
    :param stations: Stations to look for
    :returns: List of tuples of station, lat, long, elevation
    :rtype: list
    >>> # Get the path to the test data
    >>> import eqcorrscan
    >>> import os
    >>> TEST_PATH = os.path.dirname(eqcorrscan.__file__) + '/tests/test_data'
    >>> readSTATION0(TEST_PATH, ['WHFS', 'WHAT2', 'BOB'])
    [('WHFS', -43.261, 170.359, 60.0), ('WHAT2', -43.2793, \
170.36038333333335, 95.0), ('BOB', 41.408166666666666, \
-174.87116666666665, 101.0)]
    """
    stalist = []
    # FIX: use context managers so the file handles are closed even if a
    # malformed line raises mid-parse (previously open/close were manual).
    with open(path + '/STATION0.HYP', 'r') as f:
        for line in f:
            if line[1:6].strip() in stations:
                station = line[1:6].strip()
                lat = line[6:14]  # Format is either ddmm.mmS/N or ddmm(.)mmmS/N
                # Southern latitudes are negative.
                NS = -1 if lat[-1] == 'S' else 1
                if lat[4] == '.':
                    lat = (int(lat[0:2]) + float(lat[2:-1]) / 60) * NS
                else:
                    lat = (int(lat[0:2]) +
                           float(lat[2:4] + '.' + lat[4:-1]) / 60) * NS
                lon = line[14:23]
                # Western longitudes are negative.
                EW = -1 if lon[-1] == 'W' else 1
                if lon[5] == '.':
                    lon = (int(lon[0:3]) + float(lon[3:-1]) / 60) * EW
                else:
                    lon = (int(lon[0:3]) +
                           float(lon[3:5] + '.' + lon[5:-1]) / 60) * EW
                elev = float(line[23:-1].strip())
                # Note, negative altitude can be indicated in 1st column
                if line[0] == '-':
                    elev *= -1
                stalist.append((station, lat, lon, elev))
    with open('station.dat', 'w') as f:
        for sta in stalist:
            # Fixed-width columns; elevation converted from m to km.
            line = ''.join([sta[0].ljust(5), _cc_round(sta[1], 4).ljust(10),
                            _cc_round(sta[2], 4).ljust(10),
                            _cc_round(sta[3] / 1000, 4).rjust(7), '\n'])
            f.write(line)
    return stalist
|
Read a Seisan STATION0.HYP file on the path given.
Outputs the information, and writes to station.dat file.
:type path: str
:param path: Path to the STATION0.HYP file
:type stations: list
:param stations: Stations to look for
:returns: List of tuples of station, lat, long, elevation
:rtype: list
>>> # Get the path to the test data
>>> import eqcorrscan
>>> import os
>>> TEST_PATH = os.path.dirname(eqcorrscan.__file__) + '/tests/test_data'
>>> readSTATION0(TEST_PATH, ['WHFS', 'WHAT2', 'BOB'])
[('WHFS', -43.261, 170.359, 60.0), ('WHAT2', -43.2793, \
170.36038333333335, 95.0), ('BOB', 41.408166666666666, \
-174.87116666666665, 101.0)]
|
def sg_int(tensor, opt):
    r"""Casts a tensor to intx.
    See `tf.cast()` in tensorflow.
    Args:
      tensor: A `Tensor` or `SparseTensor` (automatically given by chain).
      opt:
        name: If provided, it replaces current tensor's name.
    Returns:
      A `Tensor` or `SparseTensor` with same shape as `tensor`.
    """
    # Delegate to tf.cast with the library-wide default integer dtype.
    target_dtype = tf.sg_intx
    return tf.cast(tensor, target_dtype, name=opt.name)
|
r"""Casts a tensor to intx.
See `tf.cast()` in tensorflow.
Args:
tensor: A `Tensor` or `SparseTensor` (automatically given by chain).
opt:
name: If provided, it replaces current tensor's name.
Returns:
A `Tensor` or `SparseTensor` with same shape as `tensor`.
|
def write_squonk_datasetmetadata(outputBase, thinOutput, valueClassMappings, datasetMetaProps, fieldMetaProps):
    """This is a temp hack to write the minimal metadata that Squonk needs.
    Will need to be replaced with something that allows something more
    complete to be written.
    :param outputBase: Base name for the file to write to (a ``.metadata``
        suffix is appended).
    :param thinOutput: Write only new data, not structures. Result type will
        be BasicObject.
    :param valueClassMappings: A dict that describes the Java class of the
        value properties (used by Squonk).
    :param datasetMetaProps: A dict with metadata properties that describe the
        dataset as a whole. The keys used for these metadata are up to the
        user, but common ones include source, description, created, history.
    :param fieldMetaProps: A list of dicts with the additional field metadata.
        Each dict has a key named fieldName whose value is the name of the
        field being described, and a key named values whose value is a map of
        metadata properties. The keys used for these metadata are up to the
        user, but common ones include source, description, created, history.
    """
    meta = {}
    props = {}
    # TODO add created property - how to handle date formats?
    if datasetMetaProps:
        props.update(datasetMetaProps)
    if fieldMetaProps:
        meta["fieldMetaProps"] = fieldMetaProps
    if props:
        meta["properties"] = props
    if valueClassMappings:
        meta["valueClassMappings"] = valueClassMappings
    # Thin output carries no structures, so items are plain BasicObjects.
    if thinOutput:
        meta['type'] = 'org.squonk.types.BasicObject'
    else:
        meta['type'] = 'org.squonk.types.MoleculeObject'
    # FIX: previously the ``meta`` dict was shadowed by the file handle and
    # the file leaked on write errors; use a context manager instead.
    with open(outputBase + '.metadata', 'w') as meta_file:
        meta_file.write(json.dumps(meta))
|
This is a temp hack to write the minimal metadata that Squonk needs.
    Will need to be replaced with something that allows something more complete to be written.
:param outputBase: Base name for the file to write to
:param thinOutput: Write only new data, not structures. Result type will be BasicObject
    :param valueClassMappings: A dict that describes the Java class of the value properties (used by Squonk)
    :param datasetMetaProps: A dict with metadata properties that describe the dataset as a whole.
The keys used for these metadata are up to the user, but common ones include source, description, created, history.
    :param fieldMetaProps: A list of dicts with the additional field metadata. Each dict has a key named fieldName whose value
    is the name of the field being described, and a key named values whose value is a map of metadata properties.
The keys used for these metadata are up to the user, but common ones include source, description, created, history.
|
def convert(credentials):
    """Convert oauth2client credentials to google-auth credentials.
    Supported conversions:
    - :class:`oauth2client.client.OAuth2Credentials` to
      :class:`google.oauth2.credentials.Credentials`.
    - :class:`oauth2client.client.GoogleCredentials` to
      :class:`google.oauth2.credentials.Credentials`.
    - :class:`oauth2client.service_account.ServiceAccountCredentials` to
      :class:`google.oauth2.service_account.Credentials`.
    - :class:`oauth2client.service_account._JWTAccessCredentials` to
      :class:`google.oauth2.service_account.Credentials`.
    - :class:`oauth2client.contrib.gce.AppAssertionCredentials` to
      :class:`google.auth.compute_engine.Credentials`.
    - :class:`oauth2client.contrib.appengine.AppAssertionCredentials` to
      :class:`google.auth.app_engine.Credentials`.
    Returns:
        google.auth.credentials.Credentials: The converted credentials.
    Raises:
        ValueError: If the credentials could not be converted.
    """
    source_class = type(credentials)
    try:
        # Dispatch on the concrete oauth2client class.
        return _CLASS_CONVERSION_MAP[source_class](credentials)
    except KeyError as caught_exc:
        # Surface an unsupported class as ValueError, keeping the cause.
        new_exc = ValueError(_CONVERT_ERROR_TMPL.format(source_class))
        six.raise_from(new_exc, caught_exc)
|
Convert oauth2client credentials to google-auth credentials.
This class converts:
- :class:`oauth2client.client.OAuth2Credentials` to
:class:`google.oauth2.credentials.Credentials`.
- :class:`oauth2client.client.GoogleCredentials` to
:class:`google.oauth2.credentials.Credentials`.
- :class:`oauth2client.service_account.ServiceAccountCredentials` to
:class:`google.oauth2.service_account.Credentials`.
- :class:`oauth2client.service_account._JWTAccessCredentials` to
:class:`google.oauth2.service_account.Credentials`.
- :class:`oauth2client.contrib.gce.AppAssertionCredentials` to
:class:`google.auth.compute_engine.Credentials`.
- :class:`oauth2client.contrib.appengine.AppAssertionCredentials` to
:class:`google.auth.app_engine.Credentials`.
Returns:
google.auth.credentials.Credentials: The converted credentials.
Raises:
ValueError: If the credentials could not be converted.
|
def split_into(iterable, sizes):
    """Yield a list of sequential items from *iterable* of length 'n' for each
    integer 'n' in *sizes*.
    >>> list(split_into([1,2,3,4,5,6], [1,2,3]))
    [[1], [2, 3], [4, 5, 6]]
    If the sum of *sizes* is smaller than the length of *iterable*, then the
    remaining items of *iterable* will not be returned.
    >>> list(split_into([1,2,3,4,5,6], [2,3]))
    [[1, 2], [3, 4, 5]]
    If the sum of *sizes* is larger than the length of *iterable*, fewer items
    will be returned in the iteration that overruns *iterable* and further
    lists will be empty:
    >>> list(split_into([1,2,3,4], [1,2,3,4]))
    [[1], [2, 3], [4], []]
    When a ``None`` object is encountered in *sizes*, the returned list will
    contain items up to the end of *iterable* the same way that itertools.slice
    does:
    >>> list(split_into([1,2,3,4,5,6,7,8,9,0], [2,3,None]))
    [[1, 2], [3, 4, 5], [6, 7, 8, 9, 0]]
    :func:`split_into` can be useful for grouping a series of items where the
    sizes of the groups are not uniform.
    """
    # A single shared iterator lets each group pick up where the last left
    # off, even if *iterable* is a generator.
    iterator = iter(iterable)
    for group_size in sizes:
        if group_size is None:
            # None consumes everything that remains, like itertools.islice.
            yield list(iterator)
            return
        yield list(islice(iterator, group_size))
|
Yield a list of sequential items from *iterable* of length 'n' for each
integer 'n' in *sizes*.
>>> list(split_into([1,2,3,4,5,6], [1,2,3]))
[[1], [2, 3], [4, 5, 6]]
If the sum of *sizes* is smaller than the length of *iterable*, then the
remaining items of *iterable* will not be returned.
>>> list(split_into([1,2,3,4,5,6], [2,3]))
[[1, 2], [3, 4, 5]]
If the sum of *sizes* is larger than the length of *iterable*, fewer items
will be returned in the iteration that overruns *iterable* and further
lists will be empty:
>>> list(split_into([1,2,3,4], [1,2,3,4]))
[[1], [2, 3], [4], []]
When a ``None`` object is encountered in *sizes*, the returned list will
contain items up to the end of *iterable* the same way that itertools.islice
does:
>>> list(split_into([1,2,3,4,5,6,7,8,9,0], [2,3,None]))
[[1, 2], [3, 4, 5], [6, 7, 8, 9, 0]]
:func:`split_into` can be useful for grouping a series of items where the
sizes of the groups are not uniform. An example would be where in a row
from a table, multiple columns represent elements of the same feature
(e.g. a point represented by x,y,z) but, the format is not the same for
all columns.
|
def ls(args):
    """
    List S3 buckets. See also "aws s3 ls". Use "aws s3 ls NAME" to list bucket contents.
    """
    table = []
    for bucket in filter_collection(resources.s3.buckets, args):
        # get_bucket_location reports None for some buckets (the
        # `or "us-east-1"` fallback below suggests that is us-east-1).
        bucket.LocationConstraint = clients.s3.get_bucket_location(Bucket=bucket.name)["LocationConstraint"]
        cloudwatch = resources.cloudwatch
        bucket_region = bucket.LocationConstraint or "us-east-1"
        # Metrics are queried from the bucket's own region, so build a
        # region-specific CloudWatch resource when the default one differs.
        if bucket_region != cloudwatch.meta.client.meta.region_name:
            cloudwatch = boto3.Session(region_name=bucket_region).resource("cloudwatch")
        # Two-day lookback, latest datapoint wins; None when CloudWatch has
        # published no datapoints yet (metrics are presumably daily).
        data = get_cloudwatch_metric_stats("AWS/S3", "NumberOfObjects",
                                           start_time=datetime.utcnow() - timedelta(days=2),
                                           end_time=datetime.utcnow(), period=3600, BucketName=bucket.name,
                                           StorageType="AllStorageTypes", resource=cloudwatch)
        bucket.NumberOfObjects = int(data["Datapoints"][-1]["Average"]) if data["Datapoints"] else None
        data = get_cloudwatch_metric_stats("AWS/S3", "BucketSizeBytes",
                                           start_time=datetime.utcnow() - timedelta(days=2),
                                           end_time=datetime.utcnow(), period=3600, BucketName=bucket.name,
                                           StorageType="StandardStorage", resource=cloudwatch)
        bucket.BucketSizeBytes = format_number(data["Datapoints"][-1]["Average"]) if data["Datapoints"] else None
        table.append(bucket)
    page_output(tabulate(table, args))
|
List S3 buckets. See also "aws s3 ls". Use "aws s3 ls NAME" to list bucket contents.
|
def write_peps(self, peps, reverse_seqs):
    """Insert peptide sequences into the known_searchspace table.

    When *reverse_seqs* is true, each sequence is stored reversed so the
    database index can still be used to look up peptides that are missing
    amino acids at the N-terminal.
    """
    if reverse_seqs:
        peps = [(pep[0][::-1],) for pep in peps]
    cur = self.get_cursor()
    cur.executemany(
        'INSERT INTO known_searchspace(seqs) VALUES (?)', peps)
    self.conn.commit()
|
Writes peps to db. We can reverse to be able to look up
peptides that have some amino acids missing at the N-terminal.
This way we can still use the index.
|
def merge(self, data, clean=False, validate=False):
    """ Merge a dict with the model
    This is needed because schematics doesn't auto cast
    values when assigned. This method allows us to ensure
    incoming data & existing data on a model are always
    coerced properly.
    We create a temporary model instance with just the new
    data so all the features of schematics deserialization
    are still available.
    :param data:
        dict of potentially new different data to merge
    :param clean:
        set the dirty bit back to clean. This is useful
        when the merge is coming from the store where
        the data could have been mutated & the new merged
        in data is now the single source of truth.
    :param validate:
        run the schematics validate method
    :return:
        nothing.. it has mutation side effects
    """
    # Deserialize through a throwaway model so schematics coerces the
    # incoming values; plain setattr would skip type casting.
    try:
        model = self.__class__(data)
    except ConversionError as errors:
        # NOTE(review): abort() presumably raises and halts the request
        # with the converted error payload -- confirm it never returns.
        abort(self.to_exceptions(errors.messages))
    # Copy over only keys the caller actually supplied, using the
    # coerced values from the temporary model.
    for key, val in model.to_native().items():
        if key in data:
            setattr(self, key, val)
    if validate:
        try:
            self.validate()
        except ModelValidationError as errors:
            abort(self.to_exceptions(errors.messages))
    if clean:
        # Snapshot the merged state as the new baseline so the model no
        # longer reads as dirty.
        self._original = self.to_native()
|
Merge a dict with the model
This is needed because schematics doesn't auto cast
values when assigned. This method allows us to ensure
incoming data & existing data on a model are always
coerced properly.
We create a temporary model instance with just the new
data so all the features of schematics deserialization
are still available.
:param data:
dict of potentially new different data to merge
:param clean:
set the dirty bit back to clean. This is useful
when the merge is coming from the store where
the data could have been mutated & the new merged
in data is now the single source of truth.
:param validate:
run the schematics validate method
:return:
nothing.. it has mutation side effects
|
def image_list(auth=None, **kwargs):
    '''
    List images

    CLI Example:

    .. code-block:: bash

        salt '*' glanceng.image_list
    '''
    cloud = get_operator_cloud(auth)
    return cloud.list_images(**_clean_kwargs(**kwargs))
|
List images
CLI Example:
.. code-block:: bash
salt '*' glanceng.image_list
salt '*' glanceng.image_list
|
def _init(self):
    """Build a mini GO DAG from the source GO ids and their GOTerm objects."""
    for src_id in self.godag.go_sources:
        term = self.godag.go2obj[src_id]
        # Re-store the object under its id. This looks like a no-op for a
        # plain dict -- kept from the original in case go2obj is a
        # specialized mapping; TODO confirm.
        self.godag.go2obj[src_id] = term
        # Walk up toward the root, unless this id was already visited.
        if self.traverse_parent and src_id not in self.seen_cids:
            self._traverse_parent_objs(term)
        # Walk down toward the leaves, unless this id was already visited.
        if self.traverse_child and src_id not in self.seen_pids:
            self._traverse_child_objs(term)
|
Given GO ids and GOTerm objects, create mini GO dag.
|
def f1_score(y_true, y_pred, average='micro', suffix=False):
    """Compute the F1 score.

    The F1 score is the harmonic mean of precision and recall; its best
    value is 1 and its worst value is 0::

        F1 = 2 * (precision * recall) / (precision + recall)

    Args:
        y_true : 2d array. Ground truth (correct) target values.
        y_pred : 2d array. Estimated targets as returned by a tagger.

    Returns:
        score : float.

    Example:
        >>> from seqeval.metrics import f1_score
        >>> y_true = [['O', 'O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]
        >>> y_pred = [['O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]
        >>> f1_score(y_true, y_pred)
        0.50
    """
    true_entities = set(get_entities(y_true, suffix))
    pred_entities = set(get_entities(y_pred, suffix))
    correct = len(true_entities & pred_entities)
    precision = correct / len(pred_entities) if pred_entities else 0
    recall = correct / len(true_entities) if true_entities else 0
    denom = precision + recall
    return 2 * precision * recall / denom if denom > 0 else 0
|
Compute the F1 score.
The F1 score can be interpreted as a weighted average of the precision and
recall, where an F1 score reaches its best value at 1 and worst score at 0.
The relative contribution of precision and recall to the F1 score are
equal. The formula for the F1 score is::
F1 = 2 * (precision * recall) / (precision + recall)
Args:
y_true : 2d array. Ground truth (correct) target values.
y_pred : 2d array. Estimated targets as returned by a tagger.
Returns:
score : float.
Example:
>>> from seqeval.metrics import f1_score
>>> y_true = [['O', 'O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]
>>> y_pred = [['O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]
>>> f1_score(y_true, y_pred)
0.50
|
def _parse_memory_embedded_health(self, data):
    """Parse the get_host_health_data() output for the total memory size.

    :param data: the output returned by get_host_health_data()
    :returns: memory size in MB.
    :raises IloError, if unable to get the memory details.
    """
    memory = self._get_memory_details_value_based_on_model(data)
    if memory is None:
        msg = "Unable to get memory data. Error: Data missing"
        raise exception.IloError(msg)
    total_memory_mb = 0
    for memory_item in memory:
        size_str = memory_item[self.MEMORY_SIZE_TAG]["VALUE"]
        # Skip empty DIMM slots.
        if size_str == self.MEMORY_SIZE_NOT_PRESENT_TAG:
            continue
        # Values look like "16 GB"; strip the space so string_to_bytes
        # can parse them, then convert bytes -> MB.
        size_bytes = strutils.string_to_bytes(
            size_str.replace(' ', ''), return_int=True)
        total_memory_mb += int(size_bytes / (1024 * 1024))
    return total_memory_mb
|
Parse the get_host_health_data() for essential properties
:param data: the output returned by get_host_health_data()
:returns: memory size in MB.
:raises IloError, if unable to get the memory details.
|
def is_ipv6_ok(soft_fail=False):
    """
    Check if IPv6 support is present and ip6tables functional
    :param soft_fail: If set to True and IPv6 support is broken, then reports
                      that the host doesn't have IPv6 support, otherwise a
                      UFWIPv6Error exception is raised.
    :returns: True if IPv6 is working, False otherwise
    """
    # No IPv6 stack on this machine at all.
    if not os.path.isdir('/proc/sys/net/ipv6'):
        return False
    # Kernel module already loaded -> nothing more to do.
    if is_module_loaded('ip6_tables'):
        return True
    # ip6tables support isn't complete; try loading the module now.
    try:
        modprobe('ip6_tables')
        return True
    except subprocess.CalledProcessError as ex:
        hookenv.log("Couldn't load ip6_tables module: %s" % ex.output,
                    level="WARN")
        # ip6tables isn't working on this host.
        if soft_fail:
            # Report the machine as not having IPv6 rather than erroring.
            return False
        raise UFWIPv6Error("IPv6 firewall support broken")
|
Check if IPv6 support is present and ip6tables functional
:param soft_fail: If set to True and IPv6 support is broken, then reports
that the host doesn't have IPv6 support, otherwise a
UFWIPv6Error exception is raised.
:returns: True if IPv6 is working, False otherwise
|
def rowsBeforeValue(self, value, count):
    """
    Retrieve display data for rows whose sort-column value is less than
    the given value.

    @type value: Some type compatible with the current sort column.
    @param value: Starting value in the index for the current sort column
    at which to start returning results (rows strictly below it are
    returned). ``None`` means start from the end of the index.

    @type count: C{int}
    @param count: The number of rows to return.

    @return: A list of row data, ordered by the current sort column,
    ending at C{value} and containing at most C{count} elements.
    """
    if value is None:
        query = self.inequalityQuery(None, count, False)
    else:
        comparable = self._toComparableValue(value)
        sortAttribute = self.currentSortColumn.sortAttribute()
        query = self.inequalityQuery(
            sortAttribute < comparable, count, False)
    # The query yields rows nearest the boundary first; reverse so the
    # result ascends toward *value*.
    return list(reversed(self.constructRows(query)))
|
Retrieve display data for rows with sort-column values less than the
given value.
@type value: Some type compatible with the current sort column.
@param value: Starting value in the index for the current sort column
at which to start returning results. Rows with a column value for the
current sort column which is less than this value will be returned.
@type count: C{int}
@param count: The number of rows to return.
@return: A list of row data, ordered by the current sort column, ending
at C{value} and containing at most C{count} elements.
|
def set_wrappable_term(self, v, term):
    """Set *term* under Root, splitting long values across multiple
    80-column terms."""
    import textwrap
    # Drop any existing occurrences of the term first.
    for existing in self['Root'].find(term):
        self.remove_term(existing)
    # Re-add the value as one term per wrapped line.
    for chunk in textwrap.wrap(v, 80):
        self['Root'].new_term(term, chunk)
|
Set the Root.Description, possibly splitting long descriptions across multiple terms.
|
def _arburg2(X, order):
"""This version is 10 times faster than arburg, but the output rho is not correct.
returns [1 a0,a1, an-1]
"""
x = np.array(X)
N = len(x)
if order <= 0.:
raise ValueError("order must be > 0")
# Initialisation
# ------ rho, den
rho = sum(abs(x)**2.) / N # Eq 8.21 [Marple]_
den = rho * 2. * N
# ------ backward and forward errors
ef = np.zeros(N, dtype=complex)
eb = np.zeros(N, dtype=complex)
for j in range(0, N): #eq 8.11
ef[j] = x[j]
eb[j] = x[j]
# AR order to be stored
a = np.zeros(1, dtype=complex)
a[0] = 1
# ---- rflection coeff to be stored
ref = np.zeros(order, dtype=complex)
temp = 1.
E = np.zeros(order+1)
E[0] = rho
for m in range(0, order):
#print m
# Calculate the next order reflection (parcor) coefficient
efp = ef[1:]
ebp = eb[0:-1]
#print efp, ebp
num = -2.* np.dot(ebp.conj().transpose(), efp)
den = np.dot(efp.conj().transpose(), efp)
den += np.dot(ebp, ebp.conj().transpose())
ref[m] = num / den
# Update the forward and backward prediction errors
ef = efp + ref[m] * ebp
eb = ebp + ref[m].conj().transpose() * efp
# Update the AR coeff.
a.resize(len(a)+1)
a = a + ref[m] * np.flipud(a).conjugate()
# Update the prediction error
E[m+1] = (1 - ref[m].conj().transpose()*ref[m]) * E[m]
#print 'REF', ref, num, den
return a, E[-1], ref
|
This version is 10 times faster than arburg, but the output rho is not correct.
returns [1 a0,a1, an-1]
|
def join_state_collections( collection_a, collection_b):
    """
    Warning: This is a very naive join. Only use it when measures and groups
    will remain entirely within each subcollection. For example: if each
    collection has states grouped by date and both include the same date, the
    joined collection would contain both groups, likely breaking group
    measures and many other things.
    """
    combined_states = collection_a.states + collection_b.states
    # Union of grouping names from both collections.
    all_grouping_names = set(
        list(collection_a.groupings.keys()) + list(collection_b.groupings.keys()))
    combined_groupings = {
        name: _combined_grouping_values(name, collection_a, collection_b)
        for name in all_grouping_names}
    return StateCollection(combined_states, combined_groupings)
|
Warning: This is a very naive join. Only use it when measures and groups will remain entirely within each subcollection.
For example: if each collection has states grouped by date and both include the same date, then the new collection
would have both of those groups, likely causing problems for group measures and potentially breaking many things.
|
def RAMON(typ):
    """
    Resolve a RAMON class type from an int type code or a string name.
    Takes str or int, returns class type.
    """
    if six.PY2:
        string_types = [str, unicode]
    elif six.PY3:
        string_types = [str]
    if type(typ) is int:
        return _ramon_types[typ]
    if type(typ) in string_types:
        # Map the name to its numeric code first.
        return _ramon_types[_types[typ]]
|
Takes str or int, returns class type
|
def serach_path():
    """Return potential locations of an IACA installation.

    NOTE(review): the function name looks like a typo for ``search_path``;
    kept unchanged because callers may depend on it.
    """
    operating_system = get_os()
    # 1st choice: per-user install under ~/.kerncraft
    user_location = os.path.expanduser(
        "~/.kerncraft/iaca/{}/".format(operating_system))
    # 2nd choice: bundled inside the package directory
    package_location = os.path.abspath(
        os.path.dirname(os.path.realpath(__file__))) + '/iaca/{}/'.format(
        operating_system)
    return [user_location, package_location]
|
Return potential locations of IACA installation.
|
def subscribe_topic(self, topics=None, pattern=None):
    """Subscribe to a list of topics, or a topic regex pattern.

    - ``topics`` (list): List of topics for subscription.
    - ``pattern`` (str): Pattern to match available topics. You must provide
      either topics or pattern, but not both.
    """
    # Use None instead of a mutable default argument; a single topic
    # string is normalized to a one-element list.
    if topics is None:
        topics = []
    elif not isinstance(topics, list):
        topics = [topics]
    self.consumer.subscribe(topics, pattern=pattern)
|
Subscribe to a list of topics, or a topic regex pattern.
- ``topics`` (list): List of topics for subscription.
- ``pattern`` (str): Pattern to match available topics. You must provide either topics or pattern,
but not both.
|
def dispatch(self, receiver):
    ''' Dispatch handling of this event to a receiver.

    This method will invoke ``receiver._session_callback_added`` if
    it exists.

    '''
    super(SessionCallbackAdded, self).dispatch(receiver)
    try:
        handler = receiver._session_callback_added
    except AttributeError:
        pass
    else:
        handler(self)
|
Dispatch handling of this event to a receiver.
This method will invoke ``receiver._session_callback_added`` if
it exists.
|
def get_all_scores(self, motifs, dbmotifs, match, metric, combine,
                       pval=False, parallel=True, trim=None, ncpus=None):
    """Pairwise comparison of a set of motifs compared to reference motifs.

    Parameters
    ----------
    motifs : list
        List of Motif instances.
    dbmotifs : list
        List of Motif instances.
    match : str
        Match can be "partial", "subtotal" or "total". Not all metrics use
        this.
    metric : str
        Distance metric.
    combine : str
        Combine positional scores using "mean" or "sum". Not all metrics
        use this.
    pval : bool , optional
        Calculate p-value of match.
    parallel : bool , optional
        Use multiprocessing for parallel execution. True by default.
    trim : float or None
        If a float value is specified, motifs are trimmed using this IC
        cutoff before comparison.
    ncpus : int or None
        Specifies the number of cores to use for parallel execution.

    Returns
    -------
    scores : dict
        Dictionary with scores.
    """
    # trim motifs first, if specified (mutates the Motif objects in place)
    if trim:
        for m in motifs:
            m.trim(trim)
        for m in dbmotifs:
            m.trim(trim)
    # hash of result scores
    scores = {}
    if parallel:
        # Divide the job into big chunks, to keep parallel overhead to minimum
        # Number of chunks = number of processors available
        if ncpus is None:
            ncpus = int(MotifConfig().get_default_params()["ncpus"])
        pool = Pool(processes=ncpus, maxtasksperchild=1000)
        batch_len = len(dbmotifs) // ncpus
        if batch_len <= 0:
            batch_len = 1
        jobs = []
        for i in range(0, len(dbmotifs), batch_len):
            # submit jobs to the job server; each job scores all motifs
            # against one slice of the reference motifs
            p = pool.apply_async(_get_all_scores,
                args=(self, motifs, dbmotifs[i: i + batch_len], match, metric, combine, pval))
            jobs.append(p)
        pool.close()
        for job in jobs:
            # Get the job result
            result = job.get()
            # and merge the partial {motif1: {motif2: score}} dict into
            # the combined result
            for m1,v in result.items():
                for m2, s in v.items():
                    if m1 not in scores:
                        scores[m1] = {}
                    scores[m1][m2] = s
        pool.join()
    else:
        # Do the whole thing at once if we don't want parallel
        scores = _get_all_scores(self, motifs, dbmotifs, match, metric, combine, pval)
    return scores
|
Pairwise comparison of a set of motifs compared to reference motifs.
Parameters
----------
motifs : list
List of Motif instances.
dbmotifs : list
List of Motif instances.
match : str
Match can be "partial", "subtotal" or "total". Not all metrics use
this.
metric : str
Distance metric.
combine : str
Combine positional scores using "mean" or "sum". Not all metrics
use this.
pval : bool , optional
    Calculate p-value of match.
parallel : bool , optional
Use multiprocessing for parallel execution. True by default.
trim : float or None
    If a float value is specified, motifs are trimmed using this IC
cutoff before comparison.
ncpus : int or None
Specifies the number of cores to use for parallel execution.
Returns
-------
scores : dict
Dictionary with scores.
|
def xml_findall(xpath):
    """Build a transformer that returns all XML elements matching *xpath*."""
    def finder(value):
        # The input must be an ElementTree element.
        validate(ET.iselement, value)
        return value.findall(xpath)
    return transform(finder)
|
Find a list of XML elements via xpath.
|
def data(self):
    """Serialized payload for packet creation."""
    # Header: version byte (4), 32-bit creation timestamp, public-key
    # algorithm ID.
    return (struct.pack('>BLB', 4, self.created, self.algo_id)
            + util.prefix_len('>B', self.curve_info['oid'])
            + self.curve_info['serialize'](self.verifying_key)
            + self.ecdh_packet)
|
Data for packet creation.
|
def add_log_type(name, display, color, bcolor):
    """Register a new log type and expose it as an attribute on LOG.

    name : call name (A-Z and '_')
    display : display message in [-]
    color : text color (see bashutils.colors)
    bcolor : background color (see bashutils.colors)
    """
    global MESSAGE_LOG
    attr_name = name.replace(" ", "_").upper()
    # Pick the smallest unused integer key.
    used_keys = MESSAGE_LOG.keys()
    key = 0
    while key in used_keys:
        key += 1
    MESSAGE_LOG[key] = [attr_name, (display, color, bcolor,)]
    setattr(LOG, attr_name, key)
|
name : call name (A-Z and '_')
display : display message in [-]
color : text color (see bashutils.colors)
bcolor : background color (see bashutils.colors)
|
def do_macro_arg(parser, token):
    """Template-tag compiler: wrap everything up to {% endmacro_arg %}
    in a MacroArgNode.
    """
    # macro_arg takes no arguments, so the token needs no splitting or
    # validation.
    nodelist = parser.parse(('endmacro_arg',))
    # Consume the {% endmacro_arg %} token itself.
    parser.delete_first_token()
    return MacroArgNode(nodelist)
|
Function taking a parsed template tag
to a MacroArgNode.
|
def has_abiext(self, ext, single_file=True):
    """
    Returns the absolute path of the ABINIT file with extension ext.
    Support both Fortran files and netcdf files. In the later case,
    we check whether a file with extension ext + ".nc" is present
    in the directory. Returns empty string is file is not present.

    Raises:
        `ValueError` if multiple files with the given ext are found.
        This implies that this method is not compatible with multiple datasets.
    """
    # Extensions are normally matched with a leading underscore; "abo"
    # is the one extension matched verbatim.
    if ext != "abo":
        ext = ext if ext.startswith('_') else '_' + ext

    files = []
    for f in self.list_filepaths():
        # For the time being, we ignore DDB files in nc format.
        if ext == "_DDB" and f.endswith(".nc"): continue
        # Ignore BSE text files e.g. GW_NLF_MDF
        if ext == "_MDF" and not f.endswith(".nc"): continue
        # Ignore DDK.nc files (temporary workaround for v8.8.2 in which
        # the DFPT code produces a new file with DDK.nc extension that enters
        # into conflict with AbiPy convention.
        if ext == "_DDK" and f.endswith(".nc"): continue

        if f.endswith(ext) or f.endswith(ext + ".nc"):
            files.append(f)

    # This should fix the problem with the 1WF files in which the file extension convention is broken
    # Fall back to a loose substring match when the strict suffix match
    # found nothing.
    if not files:
        files = [f for f in self.list_filepaths() if fnmatch(f, "*%s*" % ext)]

    if not files:
        return ""

    if len(files) > 1 and single_file:
        # ABINIT users must learn that multiple datasets are bad!
        raise ValueError("Found multiple files with the same extensions:\n %s\n" % files +
                         "Please avoid using multiple datasets!")

    return files[0] if single_file else files
|
Returns the absolute path of the ABINIT file with extension ext.
Support both Fortran files and netcdf files. In the later case,
we check whether a file with extension ext + ".nc" is present
in the directory. Returns empty string is file is not present.
Raises:
`ValueError` if multiple files with the given ext are found.
This implies that this method is not compatible with multiple datasets.
|
def largest_graph(mol):
    """Return a molecule containing only the compound's largest graph.

    Passing a single-component molecule gives the same result as
    molutil.clone.
    """
    mol.require("Valence")
    mol.require("Topology")
    # Work on a copy so the original object is not modified.
    copied = clone(mol)
    if copied.isolated:
        # Strip every atom belonging to an isolated (non-largest) fragment.
        for atom_key in itertools.chain.from_iterable(copied.isolated):
            copied.remove_atom(atom_key)
    return copied
|
Return a molecule which has largest graph in the compound
Passing single molecule object will results as same as molutil.clone
|
def _pushMessages(self):
    """Internal callback used to make sure the msg list keeps moving."""
    self.showStatus('')
    # Re-schedule ourselves until the message queue drains.
    if self._statusMsgsToShow:
        self.top.after(200, self._pushMessages)
|
Internal callback used to make sure the msg list keeps moving.
|
def copy(self):
    """
    Copy the selected text to the clipboard. If no text was selected, the
    entire line is copied (this feature can be turned off by
    setting :attr:`select_line_on_copy_empty` to False.
    """
    nothing_selected = not self.textCursor().hasSelection()
    if nothing_selected and self.select_line_on_copy_empty:
        # Select the whole current line so the base copy() has content.
        TextHelper(self).select_whole_line()
    super(CodeEdit, self).copy()
|
Copy the selected text to the clipboard. If no text was selected, the
entire line is copied (this feature can be turned off by
setting :attr:`select_line_on_copy_empty` to False.
|
def _compute_ymean(self, **kwargs):
"""Compute the (weighted) mean of the y data"""
y = np.asarray(kwargs.get('y', self.y))
dy = np.asarray(kwargs.get('dy', self.dy))
if dy.size == 1:
return np.mean(y)
else:
return np.average(y, weights=1 / dy ** 2)
|
Compute the (weighted) mean of the y data
|
async def processClaims(self, allClaims: Dict[ID, Claims]):
    """
    Processes and saves received Claims.

    :param allClaims: claims to be processed and saved, keyed by claim
        definition (schema) id.
    """
    results = []
    for schema_id, (claim_signature, claim_attributes) in allClaims.items():
        processed = await self.processClaim(
            schema_id, claim_attributes, claim_signature)
        results.append(processed)
    return results
|
Processes and saves received Claims.
:param claims: claims to be processed and saved for each claim
definition.
|
def initiate_close(self):
    """Begin shutting down the sender.

    The close does not complete until all buffered data has been sent.
    """
    self._running = False
    # Stop accepting new records, then wake the send loop so it notices.
    self._accumulator.close()
    self.wakeup()
|
Start closing the sender (won't complete until all data is sent).
|
def list_ctx(self):
    """Return the list of contexts this parameter is initialized on."""
    if self._data is not None:
        return self._ctx_list
    # Not yet materialized: a pending deferred init still knows its
    # target contexts.
    if self._deferred_init:
        return self._deferred_init[1]
    raise RuntimeError("Parameter '%s' has not been initialized"%self.name)
|
Returns a list of contexts this parameter is initialized on.
|
def create_user(self, user_name, path='/'):
    """
    Create a user.

    :type user_name: string
    :param user_name: The name of the new user

    :type path: string
    :param path: The path in which the user will be created.
                 Defaults to /.
    """
    return self.get_response('CreateUser',
                             {'UserName' : user_name,
                              'Path' : path})
|
Create a user.
:type user_name: string
:param user_name: The name of the new user
:type path: string
:param path: The path in which the user will be created.
Defaults to /.
|
def _render(self, template, context, is_file, at_paths=None,
            at_encoding=ENCODING, **kwargs):
    """
    Render given template string and return the result.

    :param template: Template content
    :param context: A dict or dict-like object to instantiate given
        template file
    :param is_file: True if given `template` is a filename
    :param at_paths: Template search paths
    :param at_encoding: Template encoding
    :param kwargs: Keyword arguments passed to jinja2.Envrionment. Please
        note that 'loader' option is not supported because anytemplate does
        not support to load template except for files
    :return: Rendered string
    """
    eopts = self.filter_options(kwargs, self.engine_valid_options())
    # NOTE(review): this mutates the shared self._env_options dict, so
    # engine options from one render call persist into later calls --
    # confirm this accumulation is intended.
    self._env_options.update(eopts)
    # Use custom loader to allow glob include.
    loader = FileSystemExLoader(at_paths, encoding=at_encoding.lower(),
                                enable_glob=True)
    env = jinja2.Environment(loader=loader, **self._env_options)
    if kwargs:
        # NOTE(review): mutates the caller-supplied context dict.
        context.update(kwargs)
    try:
        # get_template loads from the search paths; from_string renders
        # the literal template text.
        tmpl = (env.get_template if is_file else env.from_string)(template)
        return tmpl.render(**context)
    except jinja2.exceptions.TemplateNotFound as exc:
        # Re-raise as anytemplate's own exception type.
        raise TemplateNotFound(str(exc))
|
Render given template string and return the result.
:param template: Template content
:param context: A dict or dict-like object to instantiate given
template file
:param is_file: True if given `template` is a filename
:param at_paths: Template search paths
:param at_encoding: Template encoding
:param kwargs: Keyword arguments passed to jinja2.Envrionment. Please
note that 'loader' option is not supported because anytemplate does
not support to load template except for files
:return: Rendered string
|
def get_all_instances(self, instance_ids=None, filters=None):
    """
    Retrieve all the instances associated with your account.

    :type instance_ids: list
    :param instance_ids: A list of strings of instance IDs

    :type filters: dict
    :param filters: Optional filters that can be used to limit
                    the results returned.  Filters are provided
                    in the form of a dictionary consisting of
                    filter names as the key and filter values
                    as the value.  The set of allowable filter
                    names/values is dependent on the request
                    being performed.  Check the EC2 API guide
                    for details.

    :rtype: list
    :return: A list of :class:`boto.ec2.instance.Reservation`
    """
    params = {}
    if instance_ids:
        self.build_list_params(params, instance_ids, 'InstanceId')
    if filters:
        # The EC2 API changed: 'group-id' now expects an sg-* security
        # group identifier, not a group name. Warn (but still send the
        # request) when the supplied value doesn't look like one.
        if 'group-id' in filters:
            gid = filters.get('group-id')
            if not gid.startswith('sg-') or len(gid) != 11:
                warnings.warn(
                    "The group-id filter now requires a security group "
                    "identifier (sg-*) instead of a group name. To filter "
                    "by group name use the 'group-name' filter instead.",
                    UserWarning)
        self.build_filter_params(params, filters)
    return self.get_list('DescribeInstances', params,
                         [('item', Reservation)], verb='POST')
|
Retrieve all the instances associated with your account.
:type instance_ids: list
:param instance_ids: A list of strings of instance IDs
:type filters: dict
:param filters: Optional filters that can be used to limit
the results returned. Filters are provided
in the form of a dictionary consisting of
filter names as the key and filter values
as the value. The set of allowable filter
names/values is dependent on the request
being performed. Check the EC2 API guide
for details.
:rtype: list
:return: A list of :class:`boto.ec2.instance.Reservation`
|
def predict(self, x):
    """
    Predict values for a single data point or an RDD of points
    using the model trained.
    """
    # Distributed input: predict each element recursively.
    if isinstance(x, RDD):
        return x.map(lambda v: self.predict(v))

    x = _convert_to_vector(x)
    if self.numClasses == 2:
        # Binary case: logistic sigmoid of the margin, computed in the
        # numerically stable branch for each sign of the margin.
        margin = self.weights.dot(x) + self._intercept
        if margin > 0:
            prob = 1 / (1 + exp(-margin))
        else:
            exp_margin = exp(margin)
            prob = exp_margin / (1 + exp_margin)
        # With no threshold set, return the raw probability; otherwise a
        # hard 0/1 label.
        if self._threshold is None:
            return prob
        else:
            return 1 if prob > self._threshold else 0
    else:
        # Multiclass one-vs-rest: class 0 is the baseline, so a point
        # whose margins are all <= 0 stays at best_class = 0.
        best_class = 0
        max_margin = 0.0
        if x.size + 1 == self._dataWithBiasSize:
            # Weights include a bias column: add it as the last term.
            for i in range(0, self._numClasses - 1):
                margin = x.dot(self._weightsMatrix[i][0:x.size]) + \
                    self._weightsMatrix[i][x.size]
                if margin > max_margin:
                    max_margin = margin
                    best_class = i + 1
        else:
            # No bias column in the weights matrix.
            for i in range(0, self._numClasses - 1):
                margin = x.dot(self._weightsMatrix[i])
                if margin > max_margin:
                    max_margin = margin
                    best_class = i + 1
        return best_class
|
Predict values for a single data point or an RDD of points
using the model trained.
|
def load(filename):
    """
    Load the state from the given file, moving to the file's directory during
    load (temporarily, moving back after loaded)

    Parameters
    ----------
    filename : string
        name of the file to open, should be a .pkl file

    Returns
    -------
    The unpickled object.
    """
    path, name = os.path.split(filename)
    path = path or '.'

    with util.indir(path):
        # Close the file deterministically instead of leaking the handle
        # until garbage collection.
        with open(name, 'rb') as handle:
            return pickle.load(handle)
|
Load the state from the given file, moving to the file's directory during
load (temporarily, moving back after loaded)
Parameters
----------
filename : string
name of the file to open, should be a .pkl file
|
def get_form(self, request, obj=None, **kwargs):
    """Get a :class:`Page <pages.admin.forms.PageForm>` for the
    :class:`Page <pages.models.Page>` and modify its fields depending on
    the request."""
    template = get_template_from_request(request, obj)
    #model = create_page_model(get_placeholders(template))
    # Build a form class with one field per placeholder in the template.
    form = make_form(self.model, get_placeholders(template))
    # bound the form
    language = get_language_from_request(request)
    form.base_fields['language'].initial = language
    if obj:
        # Pre-fill slug/title from the object's translation for the
        # current language only (no fallback to other languages).
        initial_slug = obj.slug(language=language, fallback=False)
        initial_title = obj.title(language=language, fallback=False)
        form.base_fields['slug'].initial = initial_slug
        form.base_fields['title'].initial = initial_title
        template = get_template_from_request(request, obj)
    page_templates = settings.get_page_templates()
    template_choices = list(page_templates)
    # is default template is not in the list add it
    if not [tpl for tpl in template_choices if tpl[0] == settings.PAGE_DEFAULT_TEMPLATE]:
        template_choices.insert(0, (settings.PAGE_DEFAULT_TEMPLATE,
                _('Default template')))
    form.base_fields['template'].choices = template_choices
    form.base_fields['template'].initial = force_text(template)
    # Add one content field per placeholder, pre-filled with the
    # existing content when editing an object.
    for placeholder in get_placeholders(template):
        ctype = placeholder.ctype
        if obj:
            initial = placeholder.get_content(obj, language, lang_fallback=False)
        else:
            initial = None
        form.base_fields[ctype] = placeholder.get_field(obj,
            language, initial=initial)
    return form
|
Get a :class:`Page <pages.admin.forms.PageForm>` for the
:class:`Page <pages.models.Page>` and modify its fields depending on
the request.
|
def abort_copy_file(self, share_name, directory_name, file_name, copy_id, timeout=None):
    '''
    Aborts a pending copy_file operation, and leaves a destination file
    with zero length and full metadata.

    :param str share_name:
        Name of destination share.
    :param str directory_name:
        The path to the directory.
    :param str file_name:
        Name of destination file.
    :param str copy_id:
        Copy identifier provided in the copy.id of the original
        copy_file operation.
    :param int timeout:
        The timeout parameter is expressed in seconds.
    '''
    _validate_not_none('share_name', share_name)
    _validate_not_none('file_name', file_name)
    _validate_not_none('copy_id', copy_id)

    abort_request = HTTPRequest()
    abort_request.method = 'PUT'
    abort_request.host_locations = self._get_host_locations()
    abort_request.path = _get_path(share_name, directory_name, file_name)
    # The abort is expressed as the 'copy' comp plus an explicit abort
    # action header, targeting the original copy operation's id.
    abort_request.query = {
        'comp': 'copy',
        'copyid': _to_str(copy_id),
        'timeout': _int_to_str(timeout),
    }
    abort_request.headers = {'x-ms-copy-action': 'abort'}

    self._perform_request(abort_request)
|
Aborts a pending copy_file operation, and leaves a destination file
with zero length and full metadata.
:param str share_name:
Name of destination share.
:param str directory_name:
The path to the directory.
:param str file_name:
Name of destination file.
:param str copy_id:
Copy identifier provided in the copy.id of the original
copy_file operation.
:param int timeout:
The timeout parameter is expressed in seconds.
|
def reconstructed_pixelization_from_solution_vector(self, solution_vector):
    """Map an inversion's solution vector (see *inversions.Inversion*) back onto the \
    rectangular pixelization's 2D grid via the mapper's geometry."""
    reconstructed_2d = mapping_util.map_unmasked_1d_array_to_2d_array_from_array_1d_and_shape(
        array_1d=solution_vector, shape=self.shape)
    return scaled_array.ScaledRectangularPixelArray(
        array=reconstructed_2d,
        pixel_scales=self.geometry.pixel_scales,
        origin=self.geometry.origin)
|
Given the solution vector of an inversion (see *inversions.Inversion*), determine the reconstructed \
pixelization of the rectangular pixelization by using the mapper.
|
def handle_exception(exc_info=None, source_hint=None, tb_override=_NO):
    """Exception handling helper. This is used internally to either raise
    rewritten exceptions or return a rendered traceback for the template.

    :param exc_info: an ``(exc_type, exc_value, tb)`` triple; defaults to
        ``sys.exc_info()`` when not supplied.
    :param source_hint: optional template source passed to the traceback
        rewriter to improve reported locations.
    :param tb_override: when given (the sentinel ``_NO`` means "not
        given"), replaces the traceback object from ``exc_info``.
    """
    global _make_traceback
    if exc_info is None:  # pragma: no cover
        exc_info = sys.exc_info()
    # the debugging module is imported when it's used for the first time.
    # we're doing a lot of stuff there and for applications that do not
    # get any exceptions in template rendering there is no need to load
    # all of that.
    if _make_traceback is None:
        from .runtime.debug import make_traceback as _make_traceback
    exc_type, exc_value, tb = exc_info
    if tb_override is not _NO:  # pragma: no cover
        tb = tb_override
    # Rewrite the traceback so frames point into template source, then
    # re-raise using the rewritten (standard) exc_info triple.
    traceback = _make_traceback((exc_type, exc_value, tb), source_hint)
    exc_type, exc_value, tb = traceback.standard_exc_info
    reraise(exc_type, exc_value, tb)
|
Exception handling helper. This is used internally to either raise
rewritten exceptions or return a rendered traceback for the template.
|
def traverse_layout(root, callback):
    """Depth-first walk over a pdf layout object tree, invoking *callback*
    on every node.

    ``callback`` is called on ``root`` first; the walk then recurses into
    each child when ``root`` is iterable.

    :param root: root layout object (container or leaf).
    :param callback: one-argument callable invoked for every node visited.

    NOTE(review): a bare string node would recurse forever (each character
    of a string is itself an iterable string); layout trees are assumed
    not to contain bare strings.
    """
    # Fix: ``collections.Iterable`` was a deprecated alias removed in
    # Python 3.10; ``collections.abc`` is the correct home on Python 3.
    from collections.abc import Iterable

    callback(root)
    if isinstance(root, Iterable):
        for child in root:
            traverse_layout(child, callback)
|
Tree walker and invokes the callback as it
traverse pdf object tree
|
def _check_file(self):
    """
    Check if the file exists and has the expected password reference.
    """
    if not os.path.exists(self.file_path):
        return False
    self._migrate()
    parser = configparser.RawConfigParser()
    parser.read(self.file_path)

    # The file must carry the keyring-setting / password-reference entry.
    section = escape_for_ini('keyring-setting')
    option = escape_for_ini('password reference')
    try:
        parser.get(section, option)
    except (configparser.NoSectionError, configparser.NoOptionError):
        return False

    try:
        self._check_scheme(parser)
    except AttributeError:
        # accept a missing scheme
        return True
    return self._check_version(parser)
|
Check if the file exists and has the expected password reference.
|
def deregister(self, reg_data, retry=True, interval=1, timeout=3):
    """
    Deregister model/view of this bundle
    """
    retry_options = {"retry": retry, "interval": interval}
    # Delete the registration on the controller, retrying per options.
    Retry(target=self.publish.direct.delete,
          args=("/controller/registration", reg_data,),
          kwargs={"timeout": timeout},
          options=retry_options)

    tunnel = self._conn.tunnels[reg_data["role"]][0]
    _logger.debug("Deregister successfully %s tunnel: %s" %
                  (reg_data["name"], tunnel,))
|
Deregister model/view of this bundle
|
def metadata(self):
    """return a cp:metadata element with the metadata in the document"""
    md = self.xml(src="docProps/core.xml")
    if md is not None:
        return md.root
    # No core.xml present: hand back an empty cp:metadata element.
    empty_element = etree.Element("{%(cp)s}metadata" % self.NS)
    return XML(root=empty_element).root
|
return a cp:metadata element with the metadata in the document
|
def _add_sub_elements_from_dict(parent, sub_dict):
    """
    Add SubElements to the parent element.

    Each key in ``sub_dict`` becomes one child tag.  A list value yields
    one repeated element per entry; any other value yields a single
    element.  Every entry dict may carry:

    * ``"attrs"``    -- dict of XML attributes for the new element;
    * ``"children"`` -- a nested dict (recursed into) or a string
                        (becomes the element's text).

    :param parent: ElementTree.Element: The parent element for the newly created SubElement.
    :param sub_dict: dict: Used to create a new SubElement. See `dict_to_xml_schema`
        method docstring for more information. e.g.:
            {"example": {
                "attrs": {
                    "key1": "value1",
                    ...
                },
                ...
            }}
    """
    for key, value in sub_dict.items():
        # The original duplicated the element-building logic across the
        # list and non-list branches; normalize to a list of entry specs
        # and handle both cases with one loop.
        entries = value if isinstance(value, list) else [value]
        for entry in entries:
            sub_element = ET.SubElement(parent, key)
            _add_element_attrs(sub_element, entry.get("attrs", {}))
            children = entry.get("children", None)
            if isinstance(children, dict):
                _add_sub_elements_from_dict(sub_element, children)
            elif isinstance(children, str):
                sub_element.text = children
|
Add SubElements to the parent element.
:param parent: ElementTree.Element: The parent element for the newly created SubElement.
:param sub_dict: dict: Used to create a new SubElement. See `dict_to_xml_schema`
method docstring for more information. e.g.:
{"example": {
"attrs": {
"key1": "value1",
...
},
...
}}
|
def recarrayequalspairs(X,Y,weak=True):
    """
    Indices of elements in a sorted numpy recarray (or ndarray with
    structured dtype) equal to those in another.
    Record array version of func:`tabular.fast.equalspairs`, but slightly
    different because the concept of being sorted is less well-defined for a
    record array.
    Given numpy recarray `X` and sorted numpy recarray `Y`, determine the
    indices in Y equal to indices in X.
    Returns `[A,B,s]` where `s` is a permutation of `Y` such that for::
            Y = X[s]
    we have::
            Y[A[i]:B[i]] = Y[Y == X[i]]
    `A[i] = B[i] = 0` if `X[i]` is not in `Y`.
    **Parameters**
            **X** :  numpy recarray
                    Numpy recarray to compare to the sorted numpy recarray `Y`.
            **Y** :  numpy recarray
                    Sorted numpy recarray.  Determine the indices of elements
                    of `Y` equal to those in numpy array `X`.
            **weak** :  bool, optional
                    If True (default), `X` and `Y` only need the same *set* of
                    column names (in any order); if False, the column order
                    must match exactly as well.
    **Returns**
            **A** :  numpy array
                    List of indices in `Y`, `len(A) = len(Y)`.
            **B** :  numpy array
                    List of indices in `Y`, `len(B) = len(Y)`.
            **s** :  numpy array
                    Permutation of `Y`.
    **See Also:**
            :func:`tabular.fast.recarrayequalspairs`
    """
    # Incompatible column names: no record of X can match any of Y, so
    # return all-zero index arrays and no permutation.
    if (weak and set(X.dtype.names) != set(Y.dtype.names)) or \
       (not weak and X.dtype.names != Y.dtype.names):
        return [np.zeros((len(X),),int),np.zeros((len(X),),int),None]
    else:
        if X.dtype.names != Y.dtype.names:
            # Same column set in a different order: reorder Y's columns to
            # X's order so whole records compare equal.
            Y = np.rec.fromarrays([Y[a] for a in X.dtype.names],
                          names= X.dtype.names)
        # Reduce the record-array problem to the plain-array one by
        # comparing each record through its string representation.
        NewX = np.array([str(l) for l in X])
        NewY = np.array([str(l) for l in Y])
        s = NewY.argsort() ; NewY.sort()
        [A,B] = equalspairs(NewX,NewY)
        return [A,B,s]
|
Indices of elements in a sorted numpy recarray (or ndarray with
structured dtype) equal to those in another.
Record array version of func:`tabular.fast.equalspairs`, but slightly
different because the concept of being sorted is less well-defined for a
record array.
Given numpy recarray `X` and sorted numpy recarray `Y`, determine the
indices in Y equal to indices in X.
Returns `[A,B,s]` where `s` is a permutation of `Y` such that for::
Y = X[s]
we have::
Y[A[i]:B[i]] = Y[Y == X[i]]
`A[i] = B[i] = 0` if `X[i]` is not in `Y`.
**Parameters**
**X** : numpy recarray
Numpy recarray to compare to the sorted numpy recarray `Y`.
**Y** : numpy recarray
Sorted numpy recarray. Determine the indices of elements
of `Y` equal to those in numpy array `X`.
**Returns**
**A** : numpy array
List of indices in `Y`, `len(A) = len(Y)`.
**B** : numpy array
List of indices in `Y`, `len(B) = len(Y)`.
**s** : numpy array
Permutation of `Y`.
**See Also:**
:func:`tabular.fast.recarrayequalspairs`
|
def replica_lag(self, **kwargs):
    """
    Returns the current replication lag in seconds between the master and replica databases.

    Queries PostgreSQL's ``pg_last_xact_replay_timestamp()`` on the
    replica.  Best-effort: returns 0 when no replica is configured or
    when the query fails for any reason; never raises.

    :returns: float
    """
    if not self._use_replica():
        return 0
    try:
        kwargs['stack'] = self.stack_mark(inspect.stack())

        sql = "select EXTRACT(EPOCH FROM NOW() - pg_last_xact_replay_timestamp()) AS replication_lag"

        return self.collection_instance(
            self.db_adapter().raw_query(
                sql=sql, **kwargs
            )
        ).squeeze()
    except Exception:
        # Fix: was a bare ``except:`` which also swallowed SystemExit and
        # KeyboardInterrupt.  Keep the best-effort contract (0 on any
        # query failure) while letting process-control exceptions through.
        return 0
|
Returns the current replication lag in seconds between the master and replica databases.
:returns: float
|
def disk_pick_polar(n=1, rng=None):
    """Return vectors uniformly picked on the unit disk.

    The unit disk is the space enclosed by the unit circle.
    Vectors are in a polar representation.

    Parameters
    ----------
    n: integer
        Number of points to return.
    rng: numpy random generator, optional
        Source of randomness; defaults to the global ``np.random`` module.

    Returns
    -------
    r: array, shape (n, 2)
        Sample vectors; column 0 is the radius, column 1 the angle.
    """
    if rng is None:
        rng = np.random
    # Fix: ``np.float`` was a deprecated alias for the builtin ``float``
    # and was removed in NumPy 1.24; the builtin is the documented
    # replacement (dtype is float64 either way).
    a = np.zeros([n, 2], dtype=float)
    # sqrt of a uniform variate gives a radius density proportional to r,
    # which uniform *area* coverage of the disk requires.
    a[:, 0] = np.sqrt(rng.uniform(size=n))
    a[:, 1] = rng.uniform(0.0, 2.0 * np.pi, size=n)
    return a
|
Return vectors uniformly picked on the unit disk.
The unit disk is the space enclosed by the unit circle.
Vectors are in a polar representation.
Parameters
----------
n: integer
Number of points to return.
Returns
-------
r: array, shape (n, 2)
Sample vectors.
|
def assert_dict_eq(expected, actual, number_tolerance=None, dict_path=None):
    """Asserts that two dictionaries are equal, producing a custom message if they are not.

    Keys must match exactly in both directions, corresponding values must
    have matching types, and nested dicts are compared recursively.
    Numeric leaves are compared with ``number_tolerance`` when given.

    :param expected: reference dict.
    :param actual: dict under test.
    :param number_tolerance: optional tolerance forwarded to ``assert_eq``
        for numeric values.
    :param dict_path: internal -- list of keys leading to the current
        nesting level, used only to build failure messages.
    """
    # Fix: the default was a shared mutable ``[]``; use None as the
    # sentinel and create a fresh list per call.  (The list was only read
    # here, but a mutable default is a latent-bug magnet.)
    if dict_path is None:
        dict_path = []
    assert_is_instance(expected, dict)
    assert_is_instance(actual, dict)

    expected_keys = set(expected.keys())
    actual_keys = set(actual.keys())
    assert expected_keys <= actual_keys, "Actual dict at %s is missing keys: %r" % (
        _dict_path_string(dict_path),
        expected_keys - actual_keys,
    )
    assert actual_keys <= expected_keys, "Actual dict at %s has extra keys: %r" % (
        _dict_path_string(dict_path),
        actual_keys - expected_keys,
    )
    for k in expected_keys:
        key_path = dict_path + [k]
        # Types must match in both directions (guards against subclass
        # asymmetry in isinstance checks).
        assert_is_instance(
            actual[k],
            type(expected[k]),
            extra="Types don't match for %s" % _dict_path_string(key_path),
        )
        assert_is_instance(
            expected[k],
            type(actual[k]),
            extra="Types don't match for %s" % _dict_path_string(key_path),
        )
        if isinstance(actual[k], dict):
            assert_dict_eq(
                expected[k],
                actual[k],
                number_tolerance=number_tolerance,
                dict_path=key_path,
            )
        elif isinstance(actual[k], _number_types):
            assert_eq(
                expected[k],
                actual[k],
                extra="Value doesn't match for %s" % _dict_path_string(key_path),
                tolerance=number_tolerance,
            )
        else:
            assert_eq(
                expected[k],
                actual[k],
                extra="Value doesn't match for %s" % _dict_path_string(key_path),
            )
|
Asserts that two dictionaries are equal, producing a custom message if they are not.
|
def _unascii(s):
    """Unpack `\\uNNNN` escapes in 's' and encode the result as UTF-8

    This method takes the output of the JSONEncoder and expands any \\uNNNN
    escapes it finds (except for \\u0000 to \\u001F, which are converted to
    \\xNN escapes).

    For performance, it assumes that the input is valid JSON, and performs few
    sanity checks.

    :param s: str -- JSON text containing ``\\uNNNN`` escapes.
    :returns: the expanded text, UTF-8 encoded as bytes on Python 3
        (on Python 2 a plain str is returned when no escapes are found).
    """
    # make the fast path fast: if there are no matches in the string, the
    # whole thing is ascii. On python 2, that means we're done. On python 3,
    # we have to turn it into a bytes, which is quickest with encode('utf-8')
    m = _U_ESCAPE.search(s)
    if not m:
        return s if PY2 else s.encode('utf-8')
    # appending to a string (or a bytes) is slooow, so we accumulate sections
    # of string result in 'chunks', and join them all together later.
    # (It doesn't seem to make much difference whether we accumulate
    # utf8-encoded bytes, or strings which we utf-8 encode after rejoining)
    #
    chunks = []
    # 'pos' tracks the index in 's' that we have processed into 'chunks' so
    # far.
    pos = 0
    while m:
        start = m.start()
        end = m.end()
        g = m.group(1)
        if g is None:
            # escaped backslash: pass it through along with anything before the
            # match
            chunks.append(s[pos:end])
        else:
            # \uNNNN, but we have to watch out for surrogate pairs.
            #
            # On python 2, str.encode("utf-8") will decode utf-16 surrogates
            # before re-encoding, so it's fine for us to pass the surrogates
            # through. (Indeed we must, to deal with UCS-2 python builds, per
            # https://github.com/matrix-org/python-canonicaljson/issues/12).
            #
            # On python 3, str.encode("utf-8") complains about surrogates, so
            # we have to unpack them.
            c = int(g, 16)
            if c < 0x20:
                # leave as a \uNNNN escape
                chunks.append(s[pos:end])
            else:
                if PY3:  # pragma nocover
                    # High surrogate followed by a literal '\u': try to
                    # combine with the following low surrogate into one
                    # astral code point, consuming both escapes.
                    if c & 0xfc00 == 0xd800 and s[end:end + 2] == '\\u':
                        esc2 = s[end + 2:end + 6]
                        c2 = int(esc2, 16)
                        if c2 & 0xfc00 == 0xdc00:
                            c = 0x10000 + (((c - 0xd800) << 10) |
                                           (c2 - 0xdc00))
                            end += 6
                # Emit the literal text before the escape, then the
                # decoded character itself.
                chunks.append(s[pos:start])
                chunks.append(unichr(c))
        pos = end
        m = _U_ESCAPE.search(s, pos)
    # pass through anything after the last match
    chunks.append(s[pos:])
    return (''.join(chunks)).encode("utf-8")
|
Unpack `\\uNNNN` escapes in 's' and encode the result as UTF-8
This method takes the output of the JSONEncoder and expands any \\uNNNN
escapes it finds (except for \\u0000 to \\u001F, which are converted to
\\xNN escapes).
For performance, it assumes that the input is valid JSON, and performs few
sanity checks.
|
def cat(ctx, archive_name, version):
    '''
    Echo the contents of an archive
    '''
    _generate_api(ctx)
    archive = ctx.obj.api.get_archive(archive_name)

    # Stream in 1 MB chunks so large archives are never fully in memory;
    # iter()'s sentinel '' stops the loop at end-of-file.
    with archive.open('r', version=version) as stream:
        for chunk in iter(lambda: stream.read(1024 * 1024), ''):
            click.echo(chunk)
|
Echo the contents of an archive
|
def metadata_converter_help_content():
    """Helper method that returns just the content in extent mode.

    This method was added so that the text could be reused in the
    wizard.

    :returns: A message object without brand element.
    :rtype: safe.messaging.message.Message
    """
    message = m.Message()
    # Build each help paragraph from its translated text in one pass.
    paragraph_texts = [
        tr(
            'This tool will convert InaSAFE 4.x keyword metadata into the '
            'metadata format used by InaSAFE 3.5. The primary reason for doing '
            'this is to prepare data for use in GeoSAFE - the online version of '
            'InaSAFE.'
        ),
        tr(
            'You should note that this tool will not touch the original data or '
            'metadata associated with a layer. Instead it will make a copy of the '
            'original layer to the place that you nominate, and create a new '
            'keywords XML file to accompany that data. This new keywords file '
            'will contain InaSAFE keywords in the 3.5 format.'
        ),
    ]
    for text in paragraph_texts:
        message.add(m.Paragraph(text))
    return message
|
Helper method that returns just the content in extent mode.
This method was added so that the text could be reused in the
wizard.
:returns: A message object without brand element.
:rtype: safe.messaging.message.Message
|
def dumps(data):  # type: (_TOMLDocument) -> str
    """
    Dumps a TOMLDocument into a string.
    """
    is_document = isinstance(data, _TOMLDocument)
    if not is_document and isinstance(data, dict):
        # A plain dict is first converted into a TOML item tree.
        data = item(data)

    return data.as_string()
|
Dumps a TOMLDocument into a string.
|
def _get_table_data(self):
    """Return clipboard processed as data.

    Depending on which import-mode button is checked, the clipboard table
    is returned as a numpy array, a pandas DataFrame (re-parsed from the
    raw clipboard text with the widget's detected options), or the plain
    (shape-simplified) data.
    """
    data = self._simplify_shape(
        self.table_widget.get_data())
    if self.table_widget.array_btn.isChecked():
        # NumPy import requested.
        return array(data)
    elif pd and self.table_widget.df_btn.isChecked():
        # Re-parse the original clipboard text with pandas so dtype
        # inference matches pandas' own behaviour.
        info = self.table_widget.pd_info
        buf = io.StringIO(self.table_widget.pd_text)
        return pd.read_csv(buf, **info)
    return data
|
Return clipboard processed as data
|
def beam_search(self, text:str, n_words:int, no_unk:bool=True, top_k:int=10, beam_sz:int=1000, temperature:float=1.,
                sep:str=' ', decoder=decode_spec_tokens):
    """Return the `n_words` that come after `text` using beam search.

    At each step the `top_k` continuations of every live beam are scored
    by negative log-likelihood, and only the `beam_sz` lowest-score
    (most likely) beams survive.  The final continuation is *sampled*
    from the surviving beams (with `temperature` scaling), not argmaxed.
    Hidden state of the language model is re-indexed each step via
    ``select_hidden`` to follow the surviving beams.
    """
    ds = self.data.single_dl.dataset
    self.model.reset()
    xb, yb = self.data.one_item(text)
    nodes = None
    # Replicate the prompt so each of the top_k initial beams shares it.
    xb = xb.repeat(top_k, 1)
    nodes = xb.clone()
    scores = xb.new_zeros(1).float()
    with torch.no_grad():
        for k in progress_bar(range(n_words), leave=False):
            out = F.log_softmax(self.model(xb)[0][:,-1], dim=-1)
            if no_unk: out[:,self.data.vocab.stoi[UNK]] = -float('Inf')
            values, indices = out.topk(top_k, dim=-1)
            # Accumulate *negative* log-probs: lower score = more likely.
            scores = (-values + scores[:,None]).view(-1)
            # Parent-beam index for every (beam, top_k) expansion.
            indices_idx = torch.arange(0,nodes.size(0))[:,None].expand(nodes.size(0), top_k).contiguous().view(-1)
            # Keep the beam_sz best expansions.
            sort_idx = scores.argsort()[:beam_sz]
            scores = scores[sort_idx]
            # Append each candidate token to its parent beam's token row.
            nodes = torch.cat([nodes[:,None].expand(nodes.size(0),top_k,nodes.size(1)),
                            indices[:,:,None].expand(nodes.size(0),top_k,1),], dim=2)
            nodes = nodes.view(-1, nodes.size(2))[sort_idx]
            # Re-align the model's hidden state with the surviving beams.
            self.model[0].select_hidden(indices_idx[sort_idx])
            # Next input is the last token of each surviving beam.
            xb = nodes[:,-1][:,None]
    if temperature != 1.: scores.div_(temperature)
    # Sample one beam with probability proportional to exp(-score).
    # nodes[node_idx][1:] drops the first token -- presumably the BOS
    # marker prepended by one_item; TODO confirm against the tokenizer.
    node_idx = torch.multinomial(torch.exp(-scores), 1).item()
    return text + sep + sep.join(decoder(self.data.vocab.textify([i.item() for i in nodes[node_idx][1:] ], sep=None)))
|
Return the `n_words` that come after `text` using beam search.
|
def set_access_port(self, port_number, vlan_id):
    """
    Sets the specified port as an ACCESS port.

    Generator-based coroutine: issues the ``ethsw set_access_port``
    command to the Dynamips hypervisor and records the mapping locally.

    :param port_number: allocated port number
    :param vlan_id: VLAN number membership
    :raises DynamipsError: if ``port_number`` was never allocated a NIO.
    """
    if port_number not in self._nios:
        raise DynamipsError("Port {} is not allocated".format(port_number))
    nio = self._nios[port_number]
    yield from self._hypervisor.send('ethsw set_access_port "{name}" {nio} {vlan_id}'.format(name=self._name,
                                                                                             nio=nio,
                                                                                             vlan_id=vlan_id))
    log.info('Ethernet switch "{name}" [{id}]: port {port} set as an access port in VLAN {vlan_id}'.format(name=self._name,
                                                                                                           id=self._id,
                                                                                                           port=port_number,
                                                                                                           vlan_id=vlan_id))
    # Remember the port's mode so it can be reported/restored later.
    self._mappings[port_number] = ("access", vlan_id)
|
Sets the specified port as an ACCESS port.
:param port_number: allocated port number
:param vlan_id: VLAN number membership
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.