code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def solutionEmitter(target, source, env):
"""Sets up the DSW dependencies."""
# todo: Not sure what sets source to what user has passed as target,
# but this is what happens. When that is fixed, we also won't have
# to make the user always append env['MSVSSOLUTIONSUFFIX'] to target.
if source[0] == target[0]:
source = []
# make sure the suffix is correct for the version of MSVS we're running.
(base, suff) = SCons.Util.splitext(str(target[0]))
suff = env.subst('$MSVSSOLUTIONSUFFIX')
target[0] = base + suff
if not source:
source = 'sln_inputs:'
if 'name' in env:
if SCons.Util.is_String(env['name']):
source = source + ' "%s"' % env['name']
else:
raise SCons.Errors.InternalError("name must be a string")
if 'variant' in env:
if SCons.Util.is_String(env['variant']):
source = source + ' "%s"' % env['variant']
elif SCons.Util.is_List(env['variant']):
for variant in env['variant']:
if SCons.Util.is_String(variant):
source = source + ' "%s"' % variant
else:
raise SCons.Errors.InternalError("name must be a string or a list of strings")
else:
raise SCons.Errors.InternalError("variant must be a string or a list of strings")
else:
raise SCons.Errors.InternalError("variant must be specified")
if 'slnguid' in env:
if SCons.Util.is_String(env['slnguid']):
source = source + ' "%s"' % env['slnguid']
else:
raise SCons.Errors.InternalError("slnguid must be a string")
if 'projects' in env:
if SCons.Util.is_String(env['projects']):
source = source + ' "%s"' % env['projects']
elif SCons.Util.is_List(env['projects']):
for t in env['projects']:
if SCons.Util.is_String(t):
source = source + ' "%s"' % t
source = source + ' "%s"' % str(target[0])
source = [SCons.Node.Python.Value(source)]
return ([target[0]], source) | Sets up the DSW dependencies. | Below is the the instruction that describes the task:
### Input:
Sets up the DSW dependencies.
### Response:
def solutionEmitter(target, source, env):
"""Sets up the DSW dependencies."""
# todo: Not sure what sets source to what user has passed as target,
# but this is what happens. When that is fixed, we also won't have
# to make the user always append env['MSVSSOLUTIONSUFFIX'] to target.
if source[0] == target[0]:
source = []
# make sure the suffix is correct for the version of MSVS we're running.
(base, suff) = SCons.Util.splitext(str(target[0]))
suff = env.subst('$MSVSSOLUTIONSUFFIX')
target[0] = base + suff
if not source:
source = 'sln_inputs:'
if 'name' in env:
if SCons.Util.is_String(env['name']):
source = source + ' "%s"' % env['name']
else:
raise SCons.Errors.InternalError("name must be a string")
if 'variant' in env:
if SCons.Util.is_String(env['variant']):
source = source + ' "%s"' % env['variant']
elif SCons.Util.is_List(env['variant']):
for variant in env['variant']:
if SCons.Util.is_String(variant):
source = source + ' "%s"' % variant
else:
raise SCons.Errors.InternalError("name must be a string or a list of strings")
else:
raise SCons.Errors.InternalError("variant must be a string or a list of strings")
else:
raise SCons.Errors.InternalError("variant must be specified")
if 'slnguid' in env:
if SCons.Util.is_String(env['slnguid']):
source = source + ' "%s"' % env['slnguid']
else:
raise SCons.Errors.InternalError("slnguid must be a string")
if 'projects' in env:
if SCons.Util.is_String(env['projects']):
source = source + ' "%s"' % env['projects']
elif SCons.Util.is_List(env['projects']):
for t in env['projects']:
if SCons.Util.is_String(t):
source = source + ' "%s"' % t
source = source + ' "%s"' % str(target[0])
source = [SCons.Node.Python.Value(source)]
return ([target[0]], source) |
def get_all_zones(self, zones=None, filters=None):
    """
    Get all Availability Zones associated with the current region.
    :type zones: list
    :param zones: Optional list of zones. If this list is present,
                  only the Zones associated with these zone names
                  will be returned.
    :type filters: dict
    :param filters: Optional filters that can be used to limit
                    the results returned. Filters are provided
                    in the form of a dictionary consisting of
                    filter names as the key and filter values
                    as the value. The set of allowable filter
                    names/values is dependent on the request
                    being performed. Check the EC2 API guide
                    for details.
    :rtype: list of :class:`boto.ec2.zone.Zone`
    :return: The requested Zone objects
    """
    # Build the EC2 query parameters; both arguments only narrow the result.
    params = {}
    if zones:
        # Presumably encodes the names as ZoneName.N list parameters --
        # TODO confirm against build_list_params.
        self.build_list_params(params, zones, 'ZoneName')
    if filters:
        self.build_filter_params(params, filters)
    # Each <item> element of the response is unmarshalled into a Zone.
    return self.get_list('DescribeAvailabilityZones', params,
                         [('item', Zone)], verb='POST') | Get all Availability Zones associated with the current region.
:type zones: list
:param zones: Optional list of zones. If this list is present,
only the Zones associated with these zone names
will be returned.
:type filters: dict
:param filters: Optional filters that can be used to limit
the results returned. Filters are provided
in the form of a dictionary consisting of
filter names as the key and filter values
as the value. The set of allowable filter
names/values is dependent on the request
being performed. Check the EC2 API guide
for details.
:rtype: list of :class:`boto.ec2.zone.Zone`
:return: The requested Zone objects | Below is the instruction that describes the task:
### Input:
Get all Availability Zones associated with the current region.
:type zones: list
:param zones: Optional list of zones. If this list is present,
only the Zones associated with these zone names
will be returned.
:type filters: dict
:param filters: Optional filters that can be used to limit
the results returned. Filters are provided
in the form of a dictionary consisting of
filter names as the key and filter values
as the value. The set of allowable filter
names/values is dependent on the request
being performed. Check the EC2 API guide
for details.
:rtype: list of :class:`boto.ec2.zone.Zone`
:return: The requested Zone objects
### Response:
def get_all_zones(self, zones=None, filters=None):
"""
Get all Availability Zones associated with the current region.
:type zones: list
:param zones: Optional list of zones. If this list is present,
only the Zones associated with these zone names
will be returned.
:type filters: dict
:param filters: Optional filters that can be used to limit
the results returned. Filters are provided
in the form of a dictionary consisting of
filter names as the key and filter values
as the value. The set of allowable filter
names/values is dependent on the request
being performed. Check the EC2 API guide
for details.
:rtype: list of :class:`boto.ec2.zone.Zone`
:return: The requested Zone objects
"""
params = {}
if zones:
self.build_list_params(params, zones, 'ZoneName')
if filters:
self.build_filter_params(params, filters)
return self.get_list('DescribeAvailabilityZones', params,
[('item', Zone)], verb='POST') |
def get_oldest_commit(self):
    '''
    Get oldest commit involving this file
    :returns: Oldest commit
    '''
    # get_commits() appears to list commits newest-first, so the last
    # entry is the oldest -- TODO confirm against the git wrapper.
    return self.git.get_commits(self.content.source_path, self.follow)[-1] | Get oldest commit involving this file
:returns: Oldest commit | Below is the instruction that describes the task:
### Input:
Get oldest commit involving this file
:returns: Oldest commit
### Response:
def get_oldest_commit(self):
'''
Get oldest commit involving this file
:returns: Oldest commit
'''
return self.git.get_commits(self.content.source_path, self.follow)[-1] |
def delete_hook(self, auth, username, repo_name, hook_id):
    """
    Deletes the hook with id ``hook_id`` for repo with name ``repo_name``
    owned by the user with username ``username``.
    :param auth.Authentication auth: authentication object
    :param str username: username of owner of repository
    :param str repo_name: name of repository of hook to delete
    :param int hook_id: id of hook to delete
    :raises NetworkFailure: if there is an error communicating with the server
    :raises ApiFailure: if the request cannot be serviced
    """
    # Issues DELETE /repos/:username/:repo_name/hooks/:hook_id
    path = "/repos/{u}/{r}/hooks/{i}".format(u=username, r=repo_name, i=hook_id)
    self.delete(path, auth=auth) | Deletes the hook with id ``hook_id`` for repo with name ``repo_name``
owned by the user with username ``username``.
:param auth.Authentication auth: authentication object
:param str username: username of owner of repository
:param str repo_name: name of repository of hook to delete
:param int hook_id: id of hook to delete
:raises NetworkFailure: if there is an error communicating with the server
:raises ApiFailure: if the request cannot be serviced | Below is the instruction that describes the task:
### Input:
Deletes the hook with id ``hook_id`` for repo with name ``repo_name``
owned by the user with username ``username``.
:param auth.Authentication auth: authentication object
:param str username: username of owner of repository
:param str repo_name: name of repository of hook to delete
:param int hook_id: id of hook to delete
:raises NetworkFailure: if there is an error communicating with the server
:raises ApiFailure: if the request cannot be serviced
### Response:
def delete_hook(self, auth, username, repo_name, hook_id):
"""
Deletes the hook with id ``hook_id`` for repo with name ``repo_name``
owned by the user with username ``username``.
:param auth.Authentication auth: authentication object
:param str username: username of owner of repository
:param str repo_name: name of repository of hook to delete
:param int hook_id: id of hook to delete
:raises NetworkFailure: if there is an error communicating with the server
:raises ApiFailure: if the request cannot be serviced
"""
path = "/repos/{u}/{r}/hooks/{i}".format(u=username, r=repo_name, i=hook_id)
self.delete(path, auth=auth) |
def check_link_integrity(m, link):
    '''
    Check the model for integrity violations on an association in a particular direction.
    '''
    # Number of violations found.
    res = 0
    for inst in link.from_metaclass.select_many():
        q_set = list(link.navigate(inst))
        # A violation is either a missing target on an unconditional link,
        # or multiple targets on a link that is not declared 'many'.
        if(len(q_set) < 1 and not link.conditional) or (
                (len(q_set) > 1 and not link.many)):
            res += 1
            logger.warning('integrity violation in '
                           '%s --(%s)--> %s' % (pretty_from_link(inst, link),
                                                link.rel_id,
                                                pretty_to_link(inst, link)))
    return res | Check the model for integrity violations on an association in a particular direction.
### Input:
Check the model for integrity violations on an association in a particular direction.
### Response:
def check_link_integrity(m, link):
'''
Check the model for integrity violations on an association in a particular direction.
'''
res = 0
for inst in link.from_metaclass.select_many():
q_set = list(link.navigate(inst))
if(len(q_set) < 1 and not link.conditional) or (
(len(q_set) > 1 and not link.many)):
res += 1
logger.warning('integrity violation in '
'%s --(%s)--> %s' % (pretty_from_link(inst, link),
link.rel_id,
pretty_to_link(inst, link)))
return res |
def shard_data(self, region):
    """
    Get League of Legends status for the given shard.
    Requests to this API are not counted against the application Rate Limits.
    :param string region: the region to execute this request on
    :returns: ShardStatus
    """
    # Resolve the endpoint URL and query parameters for the status API.
    url, query = LolStatusApiV3Urls.shard_data(region=region)
    # The method name is passed along, presumably for request bookkeeping --
    # TODO confirm how _raw_request uses it.
    return self._raw_request(self.shard_data.__name__, region, url, query) | Get League of Legends status for the given shard.
Requests to this API are not counted against the application Rate Limits.
:param string region: the region to execute this request on
:returns: ShardStatus | Below is the instruction that describes the task:
### Input:
Get League of Legends status for the given shard.
Requests to this API are not counted against the application Rate Limits.
:param string region: the region to execute this request on
:returns: ShardStatus
### Response:
def shard_data(self, region):
"""
Get League of Legends status for the given shard.
Requests to this API are not counted against the application Rate Limits.
:param string region: the region to execute this request on
:returns: ShardStatus
"""
url, query = LolStatusApiV3Urls.shard_data(region=region)
return self._raw_request(self.shard_data.__name__, region, url, query) |
def build(self, targets: Iterable[str]) -> Iterable[str]:
    """
    Shell out to buck to build the targets, then yield the paths to the
    link trees.
    """
    # Delegates to the shared helper; self._build and self._prompt
    # presumably control whether buck builds and whether the user is
    # prompted -- TODO confirm in generate_source_directories.
    return generate_source_directories(
        targets, build=self._build, prompt=self._prompt
    ) | Shell out to buck to build the targets, then yield the paths to the
link trees. | Below is the instruction that describes the task:
### Input:
Shell out to buck to build the targets, then yield the paths to the
link trees.
### Response:
def build(self, targets: Iterable[str]) -> Iterable[str]:
"""
Shell out to buck to build the targets, then yield the paths to the
link trees.
"""
return generate_source_directories(
targets, build=self._build, prompt=self._prompt
) |
def create_injector(param_name, fun_param_value):
'''Dependency injection with Bottle.
This creates a simple dependency injector that will map
``param_name`` in routes to the value ``fun_param_value()``
each time the route is invoked.
``fun_param_value`` is a closure so that it is lazily evaluated.
This is useful for handling thread local services like database
connections.
:param str param_name: name of function parameter to inject into
:param fun_param_value: the value to insert
:type fun_param_value: a closure that can be applied with zero
arguments
'''
class _(object):
api = 2
def apply(self, callback, route):
if param_name not in inspect.getargspec(route.callback)[0]:
return callback
def _(*args, **kwargs):
pval = fun_param_value()
if pval is None:
logger.error('service "%s" unavailable', param_name)
bottle.abort(503, 'service "%s" unavailable' % param_name)
return
kwargs[param_name] = pval
return callback(*args, **kwargs)
return _
return _() | Dependency injection with Bottle.
This creates a simple dependency injector that will map
``param_name`` in routes to the value ``fun_param_value()``
each time the route is invoked.
``fun_param_value`` is a closure so that it is lazily evaluated.
This is useful for handling thread local services like database
connections.
:param str param_name: name of function parameter to inject into
:param fun_param_value: the value to insert
:type fun_param_value: a closure that can be applied with zero
arguments | Below is the instruction that describes the task:
### Input:
Dependency injection with Bottle.
This creates a simple dependency injector that will map
``param_name`` in routes to the value ``fun_param_value()``
each time the route is invoked.
``fun_param_value`` is a closure so that it is lazily evaluated.
This is useful for handling thread local services like database
connections.
:param str param_name: name of function parameter to inject into
:param fun_param_value: the value to insert
:type fun_param_value: a closure that can be applied with zero
arguments
### Response:
def create_injector(param_name, fun_param_value):
'''Dependency injection with Bottle.
This creates a simple dependency injector that will map
``param_name`` in routes to the value ``fun_param_value()``
each time the route is invoked.
``fun_param_value`` is a closure so that it is lazily evaluated.
This is useful for handling thread local services like database
connections.
:param str param_name: name of function parameter to inject into
:param fun_param_value: the value to insert
:type fun_param_value: a closure that can be applied with zero
arguments
'''
class _(object):
api = 2
def apply(self, callback, route):
if param_name not in inspect.getargspec(route.callback)[0]:
return callback
def _(*args, **kwargs):
pval = fun_param_value()
if pval is None:
logger.error('service "%s" unavailable', param_name)
bottle.abort(503, 'service "%s" unavailable' % param_name)
return
kwargs[param_name] = pval
return callback(*args, **kwargs)
return _
return _() |
def _prepare_method(self, pandas_func, **kwargs):
    """Prepares methods given various metadata.
    Args:
        pandas_func: The function to prepare.
    Returns
        Helper function which handles potential transpose.
    """
    if self._is_transposed:
        # Data is stored transposed: un-transpose each frame (df.T) before
        # applying the pandas function.
        def helper(df, internal_indices=[]):
            # NOTE(review): the mutable default is never mutated here, so
            # it is harmless, but an immutable default would be safer.
            if len(internal_indices) > 0:
                return pandas_func(
                    df.T, internal_indices=internal_indices, **kwargs
                )
            return pandas_func(df.T, **kwargs)
    else:
        def helper(df, internal_indices=[]):
            if len(internal_indices) > 0:
                return pandas_func(df, internal_indices=internal_indices, **kwargs)
            return pandas_func(df, **kwargs)
    return helper | Prepares methods given various metadata.
Args:
pandas_func: The function to prepare.
Returns
Helper function which handles potential transpose. | Below is the instruction that describes the task:
### Input:
Prepares methods given various metadata.
Args:
pandas_func: The function to prepare.
Returns
Helper function which handles potential transpose.
### Response:
def _prepare_method(self, pandas_func, **kwargs):
"""Prepares methods given various metadata.
Args:
pandas_func: The function to prepare.
Returns
Helper function which handles potential transpose.
"""
if self._is_transposed:
def helper(df, internal_indices=[]):
if len(internal_indices) > 0:
return pandas_func(
df.T, internal_indices=internal_indices, **kwargs
)
return pandas_func(df.T, **kwargs)
else:
def helper(df, internal_indices=[]):
if len(internal_indices) > 0:
return pandas_func(df, internal_indices=internal_indices, **kwargs)
return pandas_func(df, **kwargs)
return helper |
def get_updates_view(request):
    """Return a user's updates. AJAX."""
    # AJAX-only endpoint: reject ordinary requests outright.
    if not request.is_ajax():
        raise Http404
    # Anonymous users and users without a profile get an empty payload
    # rather than an error.
    if not request.user.is_authenticated():
        return HttpResponse(json.dumps(dict()),
                            content_type="application/json")
    try:
        user_profile = UserProfile.objects.get(user=request.user)
    except UserProfile.DoesNotExist:
        return HttpResponse(json.dumps(dict()),
                            content_type="application/json")
    response = dict()
    # Superusers additionally see the open profile-request badge.
    if request.user.is_superuser:
        num_of_profile_requests = ProfileRequest.objects.all().count()
        if num_of_profile_requests == 0:
            response['profile_requests_link'] = '''
            <span class="glyphicon glyphicon-inbox"></span>
            Profile Requests
            '''
        else:
            response['profile_requests_link'] = """
            <span title="{req_num} open profile request{mult}"
            class="badge pull-right">{req_num}</span>
            <span class="glyphicon glyphicon-inbox"></span>
            Profile Requests
            """.format(
                req_num=num_of_profile_requests,
                mult='s' if num_of_profile_requests > 1 else '',
            )
    # Unread-notification badge plus the navbar profile dropdown label.
    notification_count = request.user.notifications.unread().count()
    if notification_count == 0:
        response['notifications_link'] = '''
        <span class="glyphicon glyphicon-bell"></span>
        Notifications
        '''
        response['profile_dropdown_link'] = '''
        <span><span class="pull-right"><b class="caret"></b></span>
        <span class="glyphicon glyphicon-user"></span>
        {first_name} </span>
        '''.format(first_name=request.user.first_name)
    else:
        response['notifications_link'] = """
        <span title="{n_num} unread notification{mult}"
        class="badge pull-right">{n_num}</span>
        <span class="glyphicon glyphicon-bell"></span>
        Notifications
        """.format(
            n_num=notification_count,
            mult='s' if notification_count > 1 else '',
        )
        response['profile_dropdown_link'] = '''
        <span><span class="pull-right"><b class="caret"></b></span><span
        title="You have {n_num} unread notification{mult}."
        class="badge pull-right">{n_num}</span>
        <span class="glyphicon glyphicon-user"></span>
        {first_name} </span>
        '''.format(
            n_num=notification_count,
            mult='s' if notification_count > 1 else '',
            first_name=request.user.first_name,
        )
    # One badge link per enabled request type; private requests are hidden
    # from everyone but their owner and the type's managers.
    req_dict = dict()
    for req_type in RequestType.objects.filter(enabled=True):
        open_reqs = Request.objects.filter(request_type=req_type,
                                           status=Request.OPEN)
        if not req_type.managers.filter(incumbent__user=request.user):
            open_reqs = open_reqs.exclude(
                ~Q(owner__user=request.user), private=True,
            )
        num_open = open_reqs.count()
        if num_open == 0:
            req_dict['{rtype}_requests_link'.format(rtype=req_type.url_name)] \
                = """
                <span class="glyphicon glyphicon-{icon}"></span>
                {name}
                """.format(
                    icon=req_type.glyphicon if req_type.glyphicon else 'inbox',
                    name=req_type.name
                )
        else:
            req_dict['{rtype}_requests_link'.format(rtype=req_type.url_name)] \
                = """
                <span title="{num} open request{mult}"
                class="badge pull-right">{num}</span>
                <span class="glyphicon glyphicon-{icon}"></span>
                {name}
                """.format(
                    num=num_open,
                    mult='s' if num_open > 1 else '',
                    icon=req_type.glyphicon if req_type.glyphicon else 'inbox',
                    name=req_type.name,
                )
    if req_dict.keys():
        response['requests_dict'] = req_dict
    # Per-request vote refresh, driven by a comma-separated pk list.
    request_pk_list = request.GET.get('request_pk_list', False)
    if request_pk_list:
        request_pk_list = request_pk_list.split(',')
        for request_pk in request_pk_list:
            try:
                req = Request.objects.get(pk=request_pk)
            except Request.DoesNotExist:
                continue
            # NOTE(review): this assignment is immediately overwritten by
            # the build_ajax_votes() result below (same key).
            response['vote_count_{pk}'.format(pk=req.pk)] = req.upvotes.all().count()
            list_string = 'vote_list_{pk}'.format(pk=request_pk)
            vote_string = 'in_votes_{pk}'.format(pk=request_pk)
            count_string = 'vote_count_{pk}'.format(pk=request_pk)
            response[list_string], response[vote_string], \
                response[count_string] = build_ajax_votes(
                    req,
                    user_profile
                )
    # Per-event RSVP refresh, same comma-separated pk-list convention.
    event_pk_list = request.GET.get('event_pk_list', False)
    if event_pk_list:
        event_pk_list = event_pk_list.split(',')
        for event_pk in event_pk_list:
            try:
                event = Event.objects.get(pk=event_pk)
            except Event.DoesNotExist:
                continue
            link_string = 'rsvp_link_{pk}'.format(pk=event.pk)
            list_string = 'rsvp_list_{pk}'.format(pk=event.pk)
            response[link_string], response[list_string] = build_ajax_rsvps(
                event,
                user_profile
            )
    # Thread-follow status for a single thread, if requested.
    thread_pk = request.GET.get('thread_pk', False)
    if thread_pk:
        try:
            thread = Thread.objects.get(pk=thread_pk)
        except Thread.DoesNotExist:
            pass
        else:
            response['following'] = user_profile in thread.followers.all()
            response['num_of_followers'] = thread.followers.all().count()
    return HttpResponse(json.dumps(response),
                        content_type="application/json") | Return a user's updates. AJAX. | Below is the the instruction that describes the task:
### Input:
Return a user's updates. AJAX.
### Response:
def get_updates_view(request):
    """Return a user's updates. AJAX."""
    # AJAX-only endpoint: reject ordinary requests outright.
    if not request.is_ajax():
        raise Http404
    # Anonymous users and users without a profile get an empty payload
    # rather than an error.
    if not request.user.is_authenticated():
        return HttpResponse(json.dumps(dict()),
                            content_type="application/json")
    try:
        user_profile = UserProfile.objects.get(user=request.user)
    except UserProfile.DoesNotExist:
        return HttpResponse(json.dumps(dict()),
                            content_type="application/json")
    response = dict()
    # Superusers additionally see the open profile-request badge.
    if request.user.is_superuser:
        num_of_profile_requests = ProfileRequest.objects.all().count()
        if num_of_profile_requests == 0:
            response['profile_requests_link'] = '''
            <span class="glyphicon glyphicon-inbox"></span>
            Profile Requests
            '''
        else:
            response['profile_requests_link'] = """
            <span title="{req_num} open profile request{mult}"
            class="badge pull-right">{req_num}</span>
            <span class="glyphicon glyphicon-inbox"></span>
            Profile Requests
            """.format(
                req_num=num_of_profile_requests,
                mult='s' if num_of_profile_requests > 1 else '',
            )
    # Unread-notification badge plus the navbar profile dropdown label.
    notification_count = request.user.notifications.unread().count()
    if notification_count == 0:
        response['notifications_link'] = '''
        <span class="glyphicon glyphicon-bell"></span>
        Notifications
        '''
        response['profile_dropdown_link'] = '''
        <span><span class="pull-right"><b class="caret"></b></span>
        <span class="glyphicon glyphicon-user"></span>
        {first_name} </span>
        '''.format(first_name=request.user.first_name)
    else:
        response['notifications_link'] = """
        <span title="{n_num} unread notification{mult}"
        class="badge pull-right">{n_num}</span>
        <span class="glyphicon glyphicon-bell"></span>
        Notifications
        """.format(
            n_num=notification_count,
            mult='s' if notification_count > 1 else '',
        )
        response['profile_dropdown_link'] = '''
        <span><span class="pull-right"><b class="caret"></b></span><span
        title="You have {n_num} unread notification{mult}."
        class="badge pull-right">{n_num}</span>
        <span class="glyphicon glyphicon-user"></span>
        {first_name} </span>
        '''.format(
            n_num=notification_count,
            mult='s' if notification_count > 1 else '',
            first_name=request.user.first_name,
        )
    # One badge link per enabled request type; private requests are hidden
    # from everyone but their owner and the type's managers.
    req_dict = dict()
    for req_type in RequestType.objects.filter(enabled=True):
        open_reqs = Request.objects.filter(request_type=req_type,
                                           status=Request.OPEN)
        if not req_type.managers.filter(incumbent__user=request.user):
            open_reqs = open_reqs.exclude(
                ~Q(owner__user=request.user), private=True,
            )
        num_open = open_reqs.count()
        if num_open == 0:
            req_dict['{rtype}_requests_link'.format(rtype=req_type.url_name)] \
                = """
                <span class="glyphicon glyphicon-{icon}"></span>
                {name}
                """.format(
                    icon=req_type.glyphicon if req_type.glyphicon else 'inbox',
                    name=req_type.name
                )
        else:
            req_dict['{rtype}_requests_link'.format(rtype=req_type.url_name)] \
                = """
                <span title="{num} open request{mult}"
                class="badge pull-right">{num}</span>
                <span class="glyphicon glyphicon-{icon}"></span>
                {name}
                """.format(
                    num=num_open,
                    mult='s' if num_open > 1 else '',
                    icon=req_type.glyphicon if req_type.glyphicon else 'inbox',
                    name=req_type.name,
                )
    if req_dict.keys():
        response['requests_dict'] = req_dict
    # Per-request vote refresh, driven by a comma-separated pk list.
    request_pk_list = request.GET.get('request_pk_list', False)
    if request_pk_list:
        request_pk_list = request_pk_list.split(',')
        for request_pk in request_pk_list:
            try:
                req = Request.objects.get(pk=request_pk)
            except Request.DoesNotExist:
                continue
            # NOTE(review): this assignment is immediately overwritten by
            # the build_ajax_votes() result below (same key).
            response['vote_count_{pk}'.format(pk=req.pk)] = req.upvotes.all().count()
            list_string = 'vote_list_{pk}'.format(pk=request_pk)
            vote_string = 'in_votes_{pk}'.format(pk=request_pk)
            count_string = 'vote_count_{pk}'.format(pk=request_pk)
            response[list_string], response[vote_string], \
                response[count_string] = build_ajax_votes(
                    req,
                    user_profile
                )
    # Per-event RSVP refresh, same comma-separated pk-list convention.
    event_pk_list = request.GET.get('event_pk_list', False)
    if event_pk_list:
        event_pk_list = event_pk_list.split(',')
        for event_pk in event_pk_list:
            try:
                event = Event.objects.get(pk=event_pk)
            except Event.DoesNotExist:
                continue
            link_string = 'rsvp_link_{pk}'.format(pk=event.pk)
            list_string = 'rsvp_list_{pk}'.format(pk=event.pk)
            response[link_string], response[list_string] = build_ajax_rsvps(
                event,
                user_profile
            )
    # Thread-follow status for a single thread, if requested.
    thread_pk = request.GET.get('thread_pk', False)
    if thread_pk:
        try:
            thread = Thread.objects.get(pk=thread_pk)
        except Thread.DoesNotExist:
            pass
        else:
            response['following'] = user_profile in thread.followers.all()
            response['num_of_followers'] = thread.followers.all().count()
    return HttpResponse(json.dumps(response),
                        content_type="application/json") |
def freeze_archive(tmp_dir, dest_prefix):
    """Generates a ZIP file of secrets"""
    # NOTE(review): the archive name is fixed, so concurrent calls with the
    # same tmp_dir would clobber each other -- confirm callers serialize.
    zip_filename = "%s/aomi-blah.zip" % tmp_dir
    archive = zipfile.ZipFile(zip_filename, 'w')
    for root, _dirnames, filenames in os.walk(dest_prefix):
        for filename in filenames:
            # Store entries relative to dest_prefix: drop the first
            # component of the subdir path.
            relative_path = subdir_path(root, dest_prefix).split(os.sep)[1:]
            relative_path = os.sep.join(relative_path)
            archive.write("%s/%s" % (root, filename),
                          "%s/%s" % (relative_path, filename))
    archive.close()
    return zip_filename | Generates a ZIP file of secrets
### Input:
Generates a ZIP file of secrets
### Response:
def freeze_archive(tmp_dir, dest_prefix):
"""Generates a ZIP file of secrets"""
zip_filename = "%s/aomi-blah.zip" % tmp_dir
archive = zipfile.ZipFile(zip_filename, 'w')
for root, _dirnames, filenames in os.walk(dest_prefix):
for filename in filenames:
relative_path = subdir_path(root, dest_prefix).split(os.sep)[1:]
relative_path = os.sep.join(relative_path)
archive.write("%s/%s" % (root, filename),
"%s/%s" % (relative_path, filename))
archive.close()
return zip_filename |
def p_array_literal_2(self, p):
    """array_literal : LBRACKET element_list RBRACKET
    | LBRACKET element_list COMMA elision_opt RBRACKET
    """
    # NOTE: the docstring above is the PLY grammar specification for this
    # rule -- it is consumed by the parser generator; do not edit it.
    items = p[2]
    # Six symbols means the second alternative matched, so append the
    # trailing elision items.
    if len(p) == 6:
        items.extend(p[4])
    p[0] = self.asttypes.Array(items=items)
    p[0].setpos(p) | array_literal : LBRACKET element_list RBRACKET
| LBRACKET element_list COMMA elision_opt RBRACKET | Below is the instruction that describes the task:
### Input:
array_literal : LBRACKET element_list RBRACKET
| LBRACKET element_list COMMA elision_opt RBRACKET
### Response:
def p_array_literal_2(self, p):
"""array_literal : LBRACKET element_list RBRACKET
| LBRACKET element_list COMMA elision_opt RBRACKET
"""
items = p[2]
if len(p) == 6:
items.extend(p[4])
p[0] = self.asttypes.Array(items=items)
p[0].setpos(p) |
def _get(self, *args, **kwargs):
    """
    Gets a single list of messages from all storage backends.
    """
    all_messages = []
    for storage in self.storages:
        messages, all_retrieved = storage._get()
        # If the backend hasn't been used, no more retrieval is necessary.
        if messages is None:
            break
        if messages:
            self._used_storages.add(storage)
        all_messages.extend(messages)
        # If this storage class contained all the messages, no further
        # retrieval is necessary
        if all_retrieved:
            break
    # NOTE(review): all_retrieved is only bound inside the loop; an empty
    # self.storages would raise NameError here -- confirm that cannot occur.
    return all_messages, all_retrieved | Gets a single list of messages from all storage backends.
### Input:
Gets a single list of messages from all storage backends.
### Response:
def _get(self, *args, **kwargs):
"""
Gets a single list of messages from all storage backends.
"""
all_messages = []
for storage in self.storages:
messages, all_retrieved = storage._get()
# If the backend hasn't been used, no more retrieval is necessary.
if messages is None:
break
if messages:
self._used_storages.add(storage)
all_messages.extend(messages)
# If this storage class contained all the messages, no further
# retrieval is necessary
if all_retrieved:
break
return all_messages, all_retrieved |
def is_allowed(self, role, method, resource):
    """Check whether role is allowed to access resource
    :param role: Role to be checked.
    :param method: Method to be checked.
    :param resource: View function to be checked.
    """
    # Simple membership test against the collection of allowed
    # (role, method, resource) triples.
    return (role, method, resource) in self._allowed | Check whether role is allowed to access resource
:param role: Role to be checked.
:param method: Method to be checked.
:param resource: View function to be checked. | Below is the instruction that describes the task:
### Input:
Check whether role is allowed to access resource
:param role: Role to be checked.
:param method: Method to be checked.
:param resource: View function to be checked.
### Response:
def is_allowed(self, role, method, resource):
"""Check whether role is allowed to access resource
:param role: Role to be checked.
:param method: Method to be checked.
:param resource: View function to be checked.
"""
return (role, method, resource) in self._allowed |
def _wrap(text, wrap_max=80, indent=4):
    """Wrap text at given width using textwrap module.
    text (unicode): Text to wrap. If it's a Path, it's converted to string.
    wrap_max (int): Maximum line length (indent is deducted).
    indent (int): Number of spaces for indentation.
    RETURNS (unicode): Wrapped text.
    """
    # Turn the indent count into the actual prefix string.
    indent = indent * ' '
    # Deduct the indent so indented output still fits within wrap_max.
    wrap_width = wrap_max - len(indent)
    if isinstance(text, Path):
        text = path2str(text)
    return textwrap.fill(text, width=wrap_width, initial_indent=indent,
                         subsequent_indent=indent, break_long_words=False,
                         break_on_hyphens=False) | Wrap text at given width using textwrap module.
text (unicode): Text to wrap. If it's a Path, it's converted to string.
wrap_max (int): Maximum line length (indent is deducted).
indent (int): Number of spaces for indentation.
RETURNS (unicode): Wrapped text. | Below is the the instruction that describes the task:
### Input:
Wrap text at given width using textwrap module.
text (unicode): Text to wrap. If it's a Path, it's converted to string.
wrap_max (int): Maximum line length (indent is deducted).
indent (int): Number of spaces for indentation.
RETURNS (unicode): Wrapped text.
### Response:
def _wrap(text, wrap_max=80, indent=4):
"""Wrap text at given width using textwrap module.
text (unicode): Text to wrap. If it's a Path, it's converted to string.
wrap_max (int): Maximum line length (indent is deducted).
indent (int): Number of spaces for indentation.
RETURNS (unicode): Wrapped text.
"""
indent = indent * ' '
wrap_width = wrap_max - len(indent)
if isinstance(text, Path):
text = path2str(text)
return textwrap.fill(text, width=wrap_width, initial_indent=indent,
subsequent_indent=indent, break_long_words=False,
break_on_hyphens=False) |
def file_add(self, ev, paths):
"""
Register for file change events. If there is a change to the file, all
registered tasks will be notified with a call t.event(action).
Note that as multiple tasks might register an event on the same
path, each path is mapped to a dict of tasks pointing at actions.
A task can only register a single action with each path.
"""
log = self._params.get('log', self._discard)
if not isinstance(paths, list):
paths = [paths]
for path in paths:
if path not in self._file_event_map:
self._watch_files.add(path)
self._file_event_map[path] = {}
self._file_event_map[path][ev.get_key()] = ev
log.debug("Added event key %r, action %r to path%s: %s", ev.get_key(), ev._handler_name, ses(len(paths)), paths) | Register for file change events. If there is a change to the file, all
registered tasks will be notified with a call t.event(action).
Note that as multiple tasks might register an event on the same
path, each path is mapped to a dict of tasks pointing at actions.
A task can only register a single action with each path. | Below is the instruction that describes the task:
### Input:
Register for file change events. If there is a change to the file, all
registered tasks will be notified with a call t.event(action).
Note that as multiple tasks might register an event on the same
path, each path is mapped to a dict of tasks pointing at actions.
A task can only register a single action with each path.
### Response:
def file_add(self, ev, paths):
"""
Register for file change events. If there is a change to the file, all
registered tasks will be notified with a call t.event(action).
Note that as multiple tasks might register an event on the same
path, each path is mapped to a dict of tasks pointing at actions.
A task can only register a single action with each path.
"""
log = self._params.get('log', self._discard)
if not isinstance(paths, list):
paths = [paths]
for path in paths:
if path not in self._file_event_map:
self._watch_files.add(path)
self._file_event_map[path] = {}
self._file_event_map[path][ev.get_key()] = ev
log.debug("Added event key %r, action %r to path%s: %s", ev.get_key(), ev._handler_name, ses(len(paths)), paths) |
def random_sample(self, elements=('a', 'b', 'c'), length=None):
"""
Returns a list of random unique elements for the specified length.
Multiple occurrences of the same value increase its probability to be in the output.
"""
return self.random_elements(elements, length, unique=True) | Returns a list of random unique elements for the specified length.
Multiple occurrences of the same value increase its probability to be in the output. | Below is the instruction that describes the task:
### Input:
Returns a list of random unique elements for the specified length.
Multiple occurrences of the same value increase its probability to be in the output.
### Response:
def random_sample(self, elements=('a', 'b', 'c'), length=None):
"""
Returns a list of random unique elements for the specified length.
Multiple occurrences of the same value increase its probability to be in the output.
"""
return self.random_elements(elements, length, unique=True) |
def execute(self):
"""
Given the command-line arguments, this creates a parser appropriate
to that command, and runs it.
"""
# retrieve default language from system environment
default_locale = os.environ.get('LANG', 'en_US').split('.')[0]
if default_locale not in AVAILABLE_LOCALES:
default_locale = DEFAULT_LOCALE
epilog = """supported locales:
{0}
Faker can take a locale as an optional argument, to return localized data. If
no locale argument is specified, the factory falls back to the user's OS
locale as long as it is supported by at least one of the providers.
- for this user, the default locale is {1}.
If the optional argument locale and/or user's default locale is not available
for the specified provider, the factory falls back to faker's default locale,
which is {2}.
examples:
$ faker address
968 Bahringer Garden Apt. 722
Kristinaland, NJ 09890
$ faker -l de_DE address
Samira-Niemeier-Allee 56
94812 Biedenkopf
$ faker profile ssn,birthdate
{{'ssn': u'628-10-1085', 'birthdate': '2008-03-29'}}
$ faker -r=3 -s=";" name
Willam Kertzmann;
Josiah Maggio;
Gayla Schmitt;
""".format(', '.join(sorted(AVAILABLE_LOCALES)),
default_locale,
DEFAULT_LOCALE)
formatter_class = argparse.RawDescriptionHelpFormatter
parser = argparse.ArgumentParser(
prog=self.prog_name,
description='{0} version {1}'.format(self.prog_name, VERSION),
epilog=epilog,
formatter_class=formatter_class)
parser.add_argument("--version", action="version",
version="%(prog)s {0}".format(VERSION))
parser.add_argument('-v',
'--verbose',
action='store_true',
help="show INFO logging events instead "
"of CRITICAL, which is the default. These logging "
"events provide insight into localization of "
"specific providers.")
parser.add_argument('-o', metavar="output",
type=argparse.FileType('w'),
default=sys.stdout,
help="redirect output to a file")
parser.add_argument('-l', '--lang',
choices=AVAILABLE_LOCALES,
default=default_locale,
metavar='LOCALE',
help="specify the language for a localized "
"provider (e.g. de_DE)")
parser.add_argument('-r', '--repeat',
default=1,
type=int,
help="generate the specified number of outputs")
parser.add_argument('-s', '--sep',
default='\n',
help="use the specified separator after each "
"output")
parser.add_argument('--seed', metavar='SEED',
type=int,
help="specify a seed for the random generator so "
"that results are repeatable. Also compatible "
"with 'repeat' option")
parser.add_argument('-i',
'--include',
default=META_PROVIDERS_MODULES,
nargs='*',
help="list of additional custom providers to "
"user, given as the import path of the module "
"containing your Provider class (not the provider "
"class itself)")
parser.add_argument('fake',
action='store',
nargs='?',
help="name of the fake to generate output for "
"(e.g. profile)")
parser.add_argument('fake_args',
metavar="fake argument",
action='store',
nargs='*',
help="optional arguments to pass to the fake "
"(e.g. the profile fake takes an optional "
"list of comma separated field names as the "
"first argument)")
arguments = parser.parse_args(self.argv[1:])
if arguments.verbose:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.CRITICAL)
random.seed(arguments.seed)
seeds = random.sample(range(arguments.repeat*10), arguments.repeat)
for i in range(arguments.repeat):
print_doc(arguments.fake,
arguments.fake_args,
lang=arguments.lang,
output=arguments.o,
seed=seeds[i],
includes=arguments.include,
)
print(arguments.sep, file=arguments.o)
if not arguments.fake:
# repeat not supported for all docs
break | Given the command-line arguments, this creates a parser appropriate
to that command, and runs it. | Below is the instruction that describes the task:
### Input:
Given the command-line arguments, this creates a parser appropriate
to that command, and runs it.
### Response:
def execute(self):
"""
Given the command-line arguments, this creates a parser appropriate
to that command, and runs it.
"""
# retrieve default language from system environment
default_locale = os.environ.get('LANG', 'en_US').split('.')[0]
if default_locale not in AVAILABLE_LOCALES:
default_locale = DEFAULT_LOCALE
epilog = """supported locales:
{0}
Faker can take a locale as an optional argument, to return localized data. If
no locale argument is specified, the factory falls back to the user's OS
locale as long as it is supported by at least one of the providers.
- for this user, the default locale is {1}.
If the optional argument locale and/or user's default locale is not available
for the specified provider, the factory falls back to faker's default locale,
which is {2}.
examples:
$ faker address
968 Bahringer Garden Apt. 722
Kristinaland, NJ 09890
$ faker -l de_DE address
Samira-Niemeier-Allee 56
94812 Biedenkopf
$ faker profile ssn,birthdate
{{'ssn': u'628-10-1085', 'birthdate': '2008-03-29'}}
$ faker -r=3 -s=";" name
Willam Kertzmann;
Josiah Maggio;
Gayla Schmitt;
""".format(', '.join(sorted(AVAILABLE_LOCALES)),
default_locale,
DEFAULT_LOCALE)
formatter_class = argparse.RawDescriptionHelpFormatter
parser = argparse.ArgumentParser(
prog=self.prog_name,
description='{0} version {1}'.format(self.prog_name, VERSION),
epilog=epilog,
formatter_class=formatter_class)
parser.add_argument("--version", action="version",
version="%(prog)s {0}".format(VERSION))
parser.add_argument('-v',
'--verbose',
action='store_true',
help="show INFO logging events instead "
"of CRITICAL, which is the default. These logging "
"events provide insight into localization of "
"specific providers.")
parser.add_argument('-o', metavar="output",
type=argparse.FileType('w'),
default=sys.stdout,
help="redirect output to a file")
parser.add_argument('-l', '--lang',
choices=AVAILABLE_LOCALES,
default=default_locale,
metavar='LOCALE',
help="specify the language for a localized "
"provider (e.g. de_DE)")
parser.add_argument('-r', '--repeat',
default=1,
type=int,
help="generate the specified number of outputs")
parser.add_argument('-s', '--sep',
default='\n',
help="use the specified separator after each "
"output")
parser.add_argument('--seed', metavar='SEED',
type=int,
help="specify a seed for the random generator so "
"that results are repeatable. Also compatible "
"with 'repeat' option")
parser.add_argument('-i',
'--include',
default=META_PROVIDERS_MODULES,
nargs='*',
help="list of additional custom providers to "
"user, given as the import path of the module "
"containing your Provider class (not the provider "
"class itself)")
parser.add_argument('fake',
action='store',
nargs='?',
help="name of the fake to generate output for "
"(e.g. profile)")
parser.add_argument('fake_args',
metavar="fake argument",
action='store',
nargs='*',
help="optional arguments to pass to the fake "
"(e.g. the profile fake takes an optional "
"list of comma separated field names as the "
"first argument)")
arguments = parser.parse_args(self.argv[1:])
if arguments.verbose:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.CRITICAL)
random.seed(arguments.seed)
seeds = random.sample(range(arguments.repeat*10), arguments.repeat)
for i in range(arguments.repeat):
print_doc(arguments.fake,
arguments.fake_args,
lang=arguments.lang,
output=arguments.o,
seed=seeds[i],
includes=arguments.include,
)
print(arguments.sep, file=arguments.o)
if not arguments.fake:
# repeat not supported for all docs
break |
def walk(self,depth=0,fsNode=None) :
"""Note, this is a filtered walk"""
if not fsNode :
fsNode = FSNode(self.init_path,self.init_path,0)
if fsNode.isdir() :
if self.check_dir(fsNode) :
if self.check_return(fsNode) :
yield fsNode
for n in fsNode.children() :
if n.islink() :
# currently we don't follow links
continue
for n2 in self.walk(depth+1,n) :
if self.check_return(n2) :
yield n2
else :
if self.check_file(fsNode) :
if self.check_return(fsNode) :
yield fsNode
raise StopIteration | Note, this is a filtered walk | Below is the instruction that describes the task:
### Input:
Note, this is a filtered walk
### Response:
def walk(self,depth=0,fsNode=None) :
"""Note, this is a filtered walk"""
if not fsNode :
fsNode = FSNode(self.init_path,self.init_path,0)
if fsNode.isdir() :
if self.check_dir(fsNode) :
if self.check_return(fsNode) :
yield fsNode
for n in fsNode.children() :
if n.islink() :
# currently we don't follow links
continue
for n2 in self.walk(depth+1,n) :
if self.check_return(n2) :
yield n2
else :
if self.check_file(fsNode) :
if self.check_return(fsNode) :
yield fsNode
raise StopIteration |
def install_language(cls, language_code):
"""Install the translations for language specified by `language_code`.
If we don't have translations for this language, then the default language will be used.
If the language specified is already installed, then this is a no-op.
"""
# Skip if the language is already installed
if language_code == cls.language:
return
try:
cls._active_catalogs = cls._translation_catalogs[language_code]
cls.language = language_code
log.debug('Installed language %s', language_code)
except KeyError:
default = settings.DEFAULT_LANG
log.warning('Unknown language %s, falling back to %s', language_code, default)
cls._active_catalogs = cls._translation_catalogs[default]
cls.language = default | Install the translations for language specified by `language_code`.
If we don't have translations for this language, then the default language will be used.
If the language specified is already installed, then this is a no-op. | Below is the instruction that describes the task:
### Input:
Install the translations for language specified by `language_code`.
If we don't have translations for this language, then the default language will be used.
If the language specified is already installed, then this is a no-op.
### Response:
def install_language(cls, language_code):
"""Install the translations for language specified by `language_code`.
If we don't have translations for this language, then the default language will be used.
If the language specified is already installed, then this is a no-op.
"""
# Skip if the language is already installed
if language_code == cls.language:
return
try:
cls._active_catalogs = cls._translation_catalogs[language_code]
cls.language = language_code
log.debug('Installed language %s', language_code)
except KeyError:
default = settings.DEFAULT_LANG
log.warning('Unknown language %s, falling back to %s', language_code, default)
cls._active_catalogs = cls._translation_catalogs[default]
cls.language = default |
def substrings_indexes(seq, reverse=False):
"""Yield all substrings and their positions in *seq*
The items yielded will be a tuple of the form ``(substr, i, j)``, where
``substr == seq[i:j]``.
This function only works for iterables that support slicing, such as
``str`` objects.
>>> for item in substrings_indexes('more'):
... print(item)
('m', 0, 1)
('o', 1, 2)
('r', 2, 3)
('e', 3, 4)
('mo', 0, 2)
('or', 1, 3)
('re', 2, 4)
('mor', 0, 3)
('ore', 1, 4)
('more', 0, 4)
Set *reverse* to ``True`` to yield the same items in the opposite order.
"""
r = range(1, len(seq) + 1)
if reverse:
r = reversed(r)
return (
(seq[i:i + L], i, i + L) for L in r for i in range(len(seq) - L + 1)
) | Yield all substrings and their positions in *seq*
The items yielded will be a tuple of the form ``(substr, i, j)``, where
``substr == seq[i:j]``.
This function only works for iterables that support slicing, such as
``str`` objects.
>>> for item in substrings_indexes('more'):
... print(item)
('m', 0, 1)
('o', 1, 2)
('r', 2, 3)
('e', 3, 4)
('mo', 0, 2)
('or', 1, 3)
('re', 2, 4)
('mor', 0, 3)
('ore', 1, 4)
('more', 0, 4)
Set *reverse* to ``True`` to yield the same items in the opposite order. | Below is the instruction that describes the task:
### Input:
Yield all substrings and their positions in *seq*
The items yielded will be a tuple of the form ``(substr, i, j)``, where
``substr == seq[i:j]``.
This function only works for iterables that support slicing, such as
``str`` objects.
>>> for item in substrings_indexes('more'):
... print(item)
('m', 0, 1)
('o', 1, 2)
('r', 2, 3)
('e', 3, 4)
('mo', 0, 2)
('or', 1, 3)
('re', 2, 4)
('mor', 0, 3)
('ore', 1, 4)
('more', 0, 4)
Set *reverse* to ``True`` to yield the same items in the opposite order.
### Response:
def substrings_indexes(seq, reverse=False):
"""Yield all substrings and their positions in *seq*
The items yielded will be a tuple of the form ``(substr, i, j)``, where
``substr == seq[i:j]``.
This function only works for iterables that support slicing, such as
``str`` objects.
>>> for item in substrings_indexes('more'):
... print(item)
('m', 0, 1)
('o', 1, 2)
('r', 2, 3)
('e', 3, 4)
('mo', 0, 2)
('or', 1, 3)
('re', 2, 4)
('mor', 0, 3)
('ore', 1, 4)
('more', 0, 4)
Set *reverse* to ``True`` to yield the same items in the opposite order.
"""
r = range(1, len(seq) + 1)
if reverse:
r = reversed(r)
return (
(seq[i:i + L], i, i + L) for L in r for i in range(len(seq) - L + 1)
) |
def freeze(self, progressbar=None):
"""
Convert :class:`dtoolcore.ProtoDataSet` to :class:`dtoolcore.DataSet`.
"""
# Call the storage broker pre_freeze hook.
self._storage_broker.pre_freeze_hook()
if progressbar:
progressbar.label = "Freezing dataset"
# Generate and persist the manifest.
manifest = self.generate_manifest(progressbar=progressbar)
self._storage_broker.put_manifest(manifest)
# Generate and persist overlays from any item metadata that has been
# added.
overlays = self._generate_overlays()
for overlay_name, overlay in overlays.items():
self._put_overlay(overlay_name, overlay)
# Change the type of the dataset from "protodataset" to "dataset" and
# add a "frozen_at" time stamp to the administrative metadata.
datetime_obj = datetime.datetime.utcnow()
metadata_update = {
"type": "dataset",
"frozen_at": dtoolcore.utils.timestamp(datetime_obj)
}
self._admin_metadata.update(metadata_update)
self._storage_broker.put_admin_metadata(self._admin_metadata)
# Clean up using the storage broker's post freeze hook.
self._storage_broker.post_freeze_hook() | Convert :class:`dtoolcore.ProtoDataSet` to :class:`dtoolcore.DataSet`. | Below is the instruction that describes the task:
### Input:
Convert :class:`dtoolcore.ProtoDataSet` to :class:`dtoolcore.DataSet`.
### Response:
def freeze(self, progressbar=None):
"""
Convert :class:`dtoolcore.ProtoDataSet` to :class:`dtoolcore.DataSet`.
"""
# Call the storage broker pre_freeze hook.
self._storage_broker.pre_freeze_hook()
if progressbar:
progressbar.label = "Freezing dataset"
# Generate and persist the manifest.
manifest = self.generate_manifest(progressbar=progressbar)
self._storage_broker.put_manifest(manifest)
# Generate and persist overlays from any item metadata that has been
# added.
overlays = self._generate_overlays()
for overlay_name, overlay in overlays.items():
self._put_overlay(overlay_name, overlay)
# Change the type of the dataset from "protodataset" to "dataset" and
# add a "frozen_at" time stamp to the administrative metadata.
datetime_obj = datetime.datetime.utcnow()
metadata_update = {
"type": "dataset",
"frozen_at": dtoolcore.utils.timestamp(datetime_obj)
}
self._admin_metadata.update(metadata_update)
self._storage_broker.put_admin_metadata(self._admin_metadata)
# Clean up using the storage broker's post freeze hook.
self._storage_broker.post_freeze_hook() |
def report(self): # pragma: nocover
"""Print a report to stdout, listing the found modules with their
paths, as well as modules that are missing, or seem to be missing.
"""
print()
print(" %-25s %s" % ("Name", "File"))
print(" %-25s %s" % ("----", "----"))
# Print modules found
keys = list(self.modules.keys())
keys.sort()
for key in keys:
m = self.modules[key]
if m.__path__:
print("P", end=' ')
else:
print("m", end=' ')
print("%-25s" % key, m.__file__ or "")
# Print missing modules
missing, maybe = self.any_missing_maybe()
if missing:
print()
print("Missing modules:")
for name in missing:
mods = list(self.badmodules[name].keys())
mods.sort()
print("?", name, "imported from", ', '.join(mods))
# Print modules that may be missing, but then again, maybe not...
if maybe:
print()
print("Submodules thay appear to be missing, but could also be", end=' ')
print("global names in the parent package:")
for name in maybe:
mods = list(self.badmodules[name].keys())
mods.sort()
print("?", name, "imported from", ', '.join(mods)) | Print a report to stdout, listing the found modules with their
paths, as well as modules that are missing, or seem to be missing. | Below is the instruction that describes the task:
### Input:
Print a report to stdout, listing the found modules with their
paths, as well as modules that are missing, or seem to be missing.
### Response:
def report(self): # pragma: nocover
"""Print a report to stdout, listing the found modules with their
paths, as well as modules that are missing, or seem to be missing.
"""
print()
print(" %-25s %s" % ("Name", "File"))
print(" %-25s %s" % ("----", "----"))
# Print modules found
keys = list(self.modules.keys())
keys.sort()
for key in keys:
m = self.modules[key]
if m.__path__:
print("P", end=' ')
else:
print("m", end=' ')
print("%-25s" % key, m.__file__ or "")
# Print missing modules
missing, maybe = self.any_missing_maybe()
if missing:
print()
print("Missing modules:")
for name in missing:
mods = list(self.badmodules[name].keys())
mods.sort()
print("?", name, "imported from", ', '.join(mods))
# Print modules that may be missing, but then again, maybe not...
if maybe:
print()
print("Submodules thay appear to be missing, but could also be", end=' ')
print("global names in the parent package:")
for name in maybe:
mods = list(self.badmodules[name].keys())
mods.sort()
print("?", name, "imported from", ', '.join(mods)) |
def jupyter_notebook(script_blocks, gallery_conf):
"""Generate a Jupyter notebook file cell-by-cell
Parameters
----------
script_blocks : list
Script execution cells.
gallery_conf : dict
The sphinx-gallery configuration dictionary.
"""
first_cell = gallery_conf.get("first_notebook_cell", "%matplotlib inline")
work_notebook = jupyter_notebook_skeleton()
if first_cell is not None:
add_code_cell(work_notebook, first_cell)
fill_notebook(work_notebook, script_blocks)
return work_notebook | Generate a Jupyter notebook file cell-by-cell
Parameters
----------
script_blocks : list
Script execution cells.
gallery_conf : dict
The sphinx-gallery configuration dictionary. | Below is the instruction that describes the task:
### Input:
Generate a Jupyter notebook file cell-by-cell
Parameters
----------
script_blocks : list
Script execution cells.
gallery_conf : dict
The sphinx-gallery configuration dictionary.
### Response:
def jupyter_notebook(script_blocks, gallery_conf):
"""Generate a Jupyter notebook file cell-by-cell
Parameters
----------
script_blocks : list
Script execution cells.
gallery_conf : dict
The sphinx-gallery configuration dictionary.
"""
first_cell = gallery_conf.get("first_notebook_cell", "%matplotlib inline")
work_notebook = jupyter_notebook_skeleton()
if first_cell is not None:
add_code_cell(work_notebook, first_cell)
fill_notebook(work_notebook, script_blocks)
return work_notebook |
def interfaces(self):
"""
Interfaces as a :class:`list` of the
:class:`shelter.core.config.Config.Interface` instances.
"""
if 'interfaces' not in self._cached_values:
self._cached_values['interfaces'] = []
for name, interface in six.iteritems(self.settings.INTERFACES):
interface_name = 'interface_%s' % name
# Hostname:port + unix socket
try:
listen = self.config_parser.get(interface_name, 'Listen')
except CONFIGPARSER_EXC:
listen = interface.get('LISTEN')
try:
unix_socket = self.config_parser.get(
interface_name, 'UnixSocket')
except CONFIGPARSER_EXC:
unix_socket = interface.get('UNIX_SOCKET')
if not listen and not unix_socket:
raise ValueError(
'Interface MUST listen either on TCP '
'or UNIX socket or both')
host, port = parse_host(listen) if listen else (None, None)
# Processes
try:
processes = self.config_parser.getint(
interface_name, 'Processes')
except CONFIGPARSER_EXC:
processes = int(interface.get('PROCESSES', 1))
# Urls
try:
urls_obj_name = self.config_parser.get(
interface_name, 'Urls')
except CONFIGPARSER_EXC:
urls_obj_name = interface.get('URLS', '')
if urls_obj_name:
urls = import_object(urls_obj_name)
else:
urls = ()
self._cached_values['interfaces'].append(
self.Interface(
name, host, port, unix_socket, processes, urls)
)
return self._cached_values['interfaces'] | Interfaces as a :class:`list` of the
:class:`shelter.core.config.Config.Interface` instances. | Below is the instruction that describes the task:
### Input:
Interfaces as a :class:`list` of the
:class:`shelter.core.config.Config.Interface` instances.
### Response:
def interfaces(self):
"""
Interfaces as a :class:`list` of the
:class:`shelter.core.config.Config.Interface` instances.
"""
if 'interfaces' not in self._cached_values:
self._cached_values['interfaces'] = []
for name, interface in six.iteritems(self.settings.INTERFACES):
interface_name = 'interface_%s' % name
# Hostname:port + unix socket
try:
listen = self.config_parser.get(interface_name, 'Listen')
except CONFIGPARSER_EXC:
listen = interface.get('LISTEN')
try:
unix_socket = self.config_parser.get(
interface_name, 'UnixSocket')
except CONFIGPARSER_EXC:
unix_socket = interface.get('UNIX_SOCKET')
if not listen and not unix_socket:
raise ValueError(
'Interface MUST listen either on TCP '
'or UNIX socket or both')
host, port = parse_host(listen) if listen else (None, None)
# Processes
try:
processes = self.config_parser.getint(
interface_name, 'Processes')
except CONFIGPARSER_EXC:
processes = int(interface.get('PROCESSES', 1))
# Urls
try:
urls_obj_name = self.config_parser.get(
interface_name, 'Urls')
except CONFIGPARSER_EXC:
urls_obj_name = interface.get('URLS', '')
if urls_obj_name:
urls = import_object(urls_obj_name)
else:
urls = ()
self._cached_values['interfaces'].append(
self.Interface(
name, host, port, unix_socket, processes, urls)
)
return self._cached_values['interfaces'] |
def read_column_data_from_txt(fname):
"""
Read data from a simple text file.
Format should be just numbers.
First column is the dependent variable. others are independent.
Whitespace delimited.
Returns
-------
x_values : list
List of x columns
y_values : list
list of y values
"""
datafile = open(fname)
datarows = []
for line in datafile:
datarows.append([float(li) for li in line.split()])
datacols = list(zip(*datarows))
x_values = datacols[1:]
y_values = datacols[0]
return x_values, y_values | Read data from a simple text file.
Format should be just numbers.
First column is the dependent variable. others are independent.
Whitespace delimited.
Returns
-------
x_values : list
List of x columns
y_values : list
list of y values | Below is the instruction that describes the task:
### Input:
Read data from a simple text file.
Format should be just numbers.
First column is the dependent variable. others are independent.
Whitespace delimited.
Returns
-------
x_values : list
List of x columns
y_values : list
list of y values
### Response:
def read_column_data_from_txt(fname):
"""
Read data from a simple text file.
Format should be just numbers.
First column is the dependent variable. others are independent.
Whitespace delimited.
Returns
-------
x_values : list
List of x columns
y_values : list
list of y values
"""
datafile = open(fname)
datarows = []
for line in datafile:
datarows.append([float(li) for li in line.split()])
datacols = list(zip(*datarows))
x_values = datacols[1:]
y_values = datacols[0]
return x_values, y_values |
def detectVerbChainsFromSent( self, sentence, **kwargs):
''' Detect verb chains from given sentence.
Parameters
----------
sentence: list of dict
A list of sentence words, each word in form of a dictionary containing
morphological analysis and clause boundary annotations (must have CLAUSE_IDX);
Keyword parameters
------------------
expand2ndTime: boolean
If True, regular verb chains (chains not ending with 'olema') are expanded twice.
(default: False)
breakOnPunctuation: boolean
If True, expansion of regular verb chains will be broken in case of intervening punctuation.
(default: False)
removeSingleAraEi: boolean
if True, verb chains consisting of a single word, 'ära' or 'ei', will be removed.
(default: True)
removeOverlapping: boolean
If True, overlapping verb chains will be removed.
(default: True)
Returns
-------
list of dict
List of detected verb chains, each verb chain has following attributes (keys):
PHRASE -- list of int : indexes pointing to elements in sentence that belong
to the chain;
PATTERN -- list of str : for each word in phrase, marks whether it is 'ega', 'ei',
'ära', 'pole', 'ole', '&' (conjunction: ja/ning/ega/või)
'verb' (verb different than 'ole') or 'nom/adv';
ANALYSIS_IDS -- list of (list of int) : for each word in phrase, points to index(es) of
morphological analyses that correspond to words
in the verb chains;
ROOTS -- list of str : for each word in phrase, lists its corresponding ROOT
value from the morphological analysis; e.g. for the verb
chain 'püüab kodeerida', the ROOT will be ['püüd',
'kodeeri'];
MORPH -- list of str : for each word in phrase, lists its part-of-speech value
and morphological form (in one string, separated by '_',
and multiple variants of the pos/form separated by '/');
e.g. for the verb chain 'on tulnud', the MORPH value
will be ['V_vad/b', 'V_nud'];
OTHER_VERBS -- bool : whether there are other verbs in the context, potentially being
part of the verb chain; if this is True, it is uncertain whether
the chain is complete or not;
POLARITY -- 'POS', 'NEG' or '??' : grammatical polarity of the verb chain; Negative
polarity indicates that the verb phrase begins
with 'ei', 'ega', 'ära' or 'pole';
TENSE -- tense of the main verb: 'present', 'imperfect', 'perfect',
'pluperfect', 'past', '??';
MOOD -- mood of the main verb: 'indic', 'imper', 'condit', 'quotat', '??';
VOICE -- voice of the main verb: 'personal', 'impersonal', '??';
'''
# 0) Parse given arguments
expand2ndTime = False
removeOverlapping = True
removeSingleAraEi = True
breakOnPunctuation = False
for argName, argVal in kwargs.items():
if argName == 'expand2ndTime':
expand2ndTime = bool(argVal)
elif argName == 'removeOverlapping':
removeOverlapping = bool(argVal)
elif argName == 'removeSingleAraEi':
removeSingleAraEi = bool(argVal)
elif argName == 'breakOnPunctuation':
breakOnPunctuation = bool(argVal)
else:
raise Exception(' Unsupported argument given: '+argName)
# 1) Preprocessing
sentence = addWordIDs( sentence )
clauses = getClausesByClauseIDs( sentence )
# 2) Extract predicate-centric verb chains within each clause
allDetectedVerbChains = []
for clauseID in clauses:
clause = clauses[clauseID]
# 2.1) Extract predicate-centric verb chains within each clause
detectedBasicChains = _extractBasicPredicateFromClause(clause, clauseID)
allDetectedVerbChains.extend( detectedBasicChains )
# 2.2) Extract 'saama' + 'tud' verb phrases (typically rare)
_expandSaamaWithTud( clause, clauseID, allDetectedVerbChains )
# 2.3) Extend 'olema' chains with 'nud/tud/mas/mata' verbs (if possible)
_expandOlemaVerbChains( clause, clauseID, allDetectedVerbChains )
# 2.4) Expand non-olema verb chains inside the clause where possible (verb+verb chains)
_expandVerbChainsBySubcat( clause, clauseID, allDetectedVerbChains, self.verbInfSubcatLexicon, False, breakOnPunctuation)
# 2.5) Determine for which verb chains the context should be clear
# (no additional verbs can be added to the phrase)
_determineVerbChainContextualAmbiguity( clause, clauseID, allDetectedVerbChains)
# 2.6) Expand non-olema verb chains inside the clause 2nd time (verb+verb+verb chains)
# (Note that while verb+verb+verb+verb+... chains are also possible, three verbs
# seems to be a critical length: longer chains are rare and thus making longer
# chains will likely lead to errors);
if expand2ndTime:
_expandVerbChainsBySubcat( clause, clauseID, allDetectedVerbChains, self.verbInfSubcatLexicon, False, breakOnPunctuation)
# 3) Extract 'ega' negations (considering the whole sentence context)
expandableEgaFound = _extractEgaNegFromSent( sentence, clauses, allDetectedVerbChains )
if expandableEgaFound:
for clauseID in clauses:
clause = clauses[clauseID]
# 3.1) Expand non-olema 'ega' verb chains inside the clause, if possible;
_expandVerbChainsBySubcat( clause, clauseID, allDetectedVerbChains, self.verbInfSubcatLexicon, False, breakOnPunctuation)
#_debugPrint(' | '+getJsonAsTextString(sentence, markTokens = [ verbObj[PHRASE] for verbObj in allDetectedVerbChains ]))
# 4) Extend chains with nom/adv + Vinf relations
if self.verbNomAdvVinfExtender:
addGrammaticalFeatsAndRoots( sentence, allDetectedVerbChains )
for clauseID in clauses:
clause = clauses[clauseID]
expansionPerformed = \
self.verbNomAdvVinfExtender.extendChainsInClause( clause, clauseID, allDetectedVerbChains )
if expansionPerformed:
_determineVerbChainContextualAmbiguity( clause, clauseID, allDetectedVerbChains)
# ) Remove redundant and overlapping verb phrases
removeRedundantVerbChains( allDetectedVerbChains, removeOverlapping = removeOverlapping, removeSingleAraAndEi = removeSingleAraEi )
# ) Add grammatical features (in the end)
addGrammaticalFeatsAndRoots( sentence, allDetectedVerbChains )
return allDetectedVerbChains | Detect verb chains from given sentence.
Parameters
----------
sentence: list of dict
A list of sentence words, each word in form of a dictionary containing
morphological analysis and clause boundary annotations (must have CLAUSE_IDX);
Keyword parameters
------------------
expand2ndTime: boolean
If True, regular verb chains (chains not ending with 'olema') are expanded twice.
(default: False)
breakOnPunctuation: boolean
If True, expansion of regular verb chains will be broken in case of intervening punctuation.
(default: False)
removeSingleAraEi: boolean
if True, verb chains consisting of a single word, 'ära' or 'ei', will be removed.
(default: True)
removeOverlapping: boolean
If True, overlapping verb chains will be removed.
(default: True)
Returns
-------
list of dict
List of detected verb chains, each verb chain has following attributes (keys):
PHRASE -- list of int : indexes pointing to elements in sentence that belong
to the chain;
PATTERN -- list of str : for each word in phrase, marks whether it is 'ega', 'ei',
'ära', 'pole', 'ole', '&' (conjunction: ja/ning/ega/või)
'verb' (verb different than 'ole') or 'nom/adv';
ANALYSIS_IDS -- list of (list of int) : for each word in phrase, points to index(es) of
morphological analyses that correspond to words
in the verb chains;
ROOTS -- list of str : for each word in phrase, lists its corresponding ROOT
value from the morphological analysis; e.g. for the verb
chain 'püüab kodeerida', the ROOT will be ['püüd',
'kodeeri'];
MORPH -- list of str : for each word in phrase, lists its part-of-speech value
and morphological form (in one string, separated by '_',
and multiple variants of the pos/form separated by '/');
e.g. for the verb chain 'on tulnud', the MORPH value
will be ['V_vad/b', 'V_nud'];
OTHER_VERBS -- bool : whether there are other verbs in the context, potentially being
part of the verb chain; if this is True, it is uncertain whether
the chain is complete or not;
POLARITY -- 'POS', 'NEG' or '??' : grammatical polarity of the verb chain; Negative
polarity indicates that the verb phrase begins
with 'ei', 'ega', 'ära' or 'pole';
TENSE -- tense of the main verb: 'present', 'imperfect', 'perfect',
'pluperfect', 'past', '??';
MOOD -- mood of the main verb: 'indic', 'imper', 'condit', 'quotat', '??';
VOICE -- voice of the main verb: 'personal', 'impersonal', '??'; | Below is the the instruction that describes the task:
### Input:
Detect verb chains from given sentence.
Parameters
----------
sentence: list of dict
A list of sentence words, each word in form of a dictionary containing
morphological analysis and clause boundary annotations (must have CLAUSE_IDX);
Keyword parameters
------------------
expand2ndTime: boolean
If True, regular verb chains (chains not ending with 'olema') are expanded twice.
(default: False)
breakOnPunctuation: boolean
If True, expansion of regular verb chains will be broken in case of intervening punctuation.
(default: False)
removeSingleAraEi: boolean
if True, verb chains consisting of a single word, 'ära' or 'ei', will be removed.
(default: True)
removeOverlapping: boolean
If True, overlapping verb chains will be removed.
(default: True)
Returns
-------
list of dict
List of detected verb chains, each verb chain has following attributes (keys):
PHRASE -- list of int : indexes pointing to elements in sentence that belong
to the chain;
PATTERN -- list of str : for each word in phrase, marks whether it is 'ega', 'ei',
'ära', 'pole', 'ole', '&' (conjunction: ja/ning/ega/või)
'verb' (verb different than 'ole') or 'nom/adv';
ANALYSIS_IDS -- list of (list of int) : for each word in phrase, points to index(es) of
morphological analyses that correspond to words
in the verb chains;
ROOTS -- list of str : for each word in phrase, lists its corresponding ROOT
value from the morphological analysis; e.g. for the verb
chain 'püüab kodeerida', the ROOT will be ['püüd',
'kodeeri'];
MORPH -- list of str : for each word in phrase, lists its part-of-speech value
and morphological form (in one string, separated by '_',
and multiple variants of the pos/form separated by '/');
e.g. for the verb chain 'on tulnud', the MORPH value
will be ['V_vad/b', 'V_nud'];
OTHER_VERBS -- bool : whether there are other verbs in the context, potentially being
part of the verb chain; if this is True, it is uncertain whether
the chain is complete or not;
POLARITY -- 'POS', 'NEG' or '??' : grammatical polarity of the verb chain; Negative
polarity indicates that the verb phrase begins
with 'ei', 'ega', 'ära' or 'pole';
TENSE -- tense of the main verb: 'present', 'imperfect', 'perfect',
'pluperfect', 'past', '??';
MOOD -- mood of the main verb: 'indic', 'imper', 'condit', 'quotat', '??';
VOICE -- voice of the main verb: 'personal', 'impersonal', '??';
### Response:
def detectVerbChainsFromSent( self, sentence, **kwargs):
        ''' Detect verb chains from given sentence.
        Runs the multi-stage chain-detection pipeline: per-clause basic
        predicates, 'olema'/'saama' expansions, subcategorisation-driven
        verb+verb expansion, sentence-level 'ega' negation handling, and
        optional nom/adv + Vinf extension, followed by clean-up and
        grammatical-feature annotation.
        Parameters
        ----------
        sentence: list of dict
            A list of sentence words, each word in form of a dictionary containing
            morphological analysis and clause boundary annotations (must have CLAUSE_IDX);
        Keyword parameters
        ------------------
        expand2ndTime: boolean
            If True, regular verb chains (chains not ending with 'olema') are
            expanded twice. (default: False)
        breakOnPunctuation: boolean
            If True, expansion of regular verb chains will be broken in case of
            intervening punctuation. (default: False)
        removeSingleAraEi: boolean
            If True, verb chains consisting of a single word, 'ära' or 'ei',
            will be removed. (default: True)
        removeOverlapping: boolean
            If True, overlapping verb chains will be removed. (default: True)
        Returns
        -------
        list of dict
            List of detected verb chains, each verb chain has following attributes (keys):
            PHRASE       -- list of int : indexes of sentence elements in the chain;
            PATTERN      -- list of str : per word: 'ega', 'ei', 'ära', 'pole', 'ole',
                            '&' (conjunction), 'verb' or 'nom/adv';
            ANALYSIS_IDS -- list of (list of int) : per word, indexes of the matching
                            morphological analyses;
            ROOTS        -- list of str : per word, its ROOT value from the
                            morphological analysis (e.g. ['püüd', 'kodeeri']);
            MORPH        -- list of str : per word, part-of-speech and form joined
                            by '_', variants joined by '/' (e.g. ['V_vad/b', 'V_nud']);
            OTHER_VERBS  -- bool : True when other context verbs could still belong
                            to the chain, i.e. the chain may be incomplete;
            POLARITY     -- 'POS', 'NEG' or '??' : negative means the phrase begins
                            with 'ei', 'ega', 'ära' or 'pole';
            TENSE        -- tense of the main verb: 'present', 'imperfect', 'perfect',
                            'pluperfect', 'past', '??';
            MOOD         -- mood of the main verb: 'indic', 'imper', 'condit',
                            'quotat', '??';
            VOICE        -- voice of the main verb: 'personal', 'impersonal', '??';
        '''
        # 0) Parse given arguments
        expand2ndTime = False
        removeOverlapping = True
        removeSingleAraEi = True
        breakOnPunctuation = False
        for argName, argVal in kwargs.items():
            if argName == 'expand2ndTime':
                expand2ndTime = bool(argVal)
            elif argName == 'removeOverlapping':
                removeOverlapping = bool(argVal)
            elif argName == 'removeSingleAraEi':
                removeSingleAraEi = bool(argVal)
            elif argName == 'breakOnPunctuation':
                breakOnPunctuation = bool(argVal)
            else:
                raise Exception(' Unsupported argument given: '+argName)
        # 1) Preprocessing: assign word IDs and split the sentence into clauses
        sentence = addWordIDs( sentence )
        clauses = getClausesByClauseIDs( sentence )
        # 2) Extract predicate-centric verb chains within each clause
        allDetectedVerbChains = []
        for clauseID in clauses:
            clause = clauses[clauseID]
            # 2.1) Extract predicate-centric verb chains within each clause
            detectedBasicChains = _extractBasicPredicateFromClause(clause, clauseID)
            allDetectedVerbChains.extend( detectedBasicChains )
            # 2.2) Extract 'saama' + 'tud' verb phrases (typically rare)
            _expandSaamaWithTud( clause, clauseID, allDetectedVerbChains )
            # 2.3) Extend 'olema' chains with 'nud/tud/mas/mata' verbs (if possible)
            _expandOlemaVerbChains( clause, clauseID, allDetectedVerbChains )
            # 2.4) Expand non-olema verb chains inside the clause where possible (verb+verb chains)
            _expandVerbChainsBySubcat( clause, clauseID, allDetectedVerbChains, self.verbInfSubcatLexicon, False, breakOnPunctuation)
            # 2.5) Determine for which verb chains the context should be clear
            #      (no additional verbs can be added to the phrase)
            _determineVerbChainContextualAmbiguity( clause, clauseID, allDetectedVerbChains)
            # 2.6) Expand non-olema verb chains inside the clause 2nd time (verb+verb+verb chains)
            #      (Note that while verb+verb+verb+verb+... chains are also possible, three verbs
            #       seems to be a critical length: longer chains are rare and thus making longer
            #       chains will likely lead to errors);
            if expand2ndTime:
                _expandVerbChainsBySubcat( clause, clauseID, allDetectedVerbChains, self.verbInfSubcatLexicon, False, breakOnPunctuation)
        # 3) Extract 'ega' negations (considering the whole sentence context)
        expandableEgaFound = _extractEgaNegFromSent( sentence, clauses, allDetectedVerbChains )
        if expandableEgaFound:
            for clauseID in clauses:
                clause = clauses[clauseID]
                # 3.1) Expand non-olema 'ega' verb chains inside the clause, if possible;
                _expandVerbChainsBySubcat( clause, clauseID, allDetectedVerbChains, self.verbInfSubcatLexicon, False, breakOnPunctuation)
        #_debugPrint(' | '+getJsonAsTextString(sentence, markTokens = [ verbObj[PHRASE] for verbObj in allDetectedVerbChains ]))
        # 4) Extend chains with nom/adv + Vinf relations (only when an extender is configured)
        if self.verbNomAdvVinfExtender:
            addGrammaticalFeatsAndRoots( sentence, allDetectedVerbChains )
            for clauseID in clauses:
                clause = clauses[clauseID]
                expansionPerformed = \
                    self.verbNomAdvVinfExtender.extendChainsInClause( clause, clauseID, allDetectedVerbChains )
                if expansionPerformed:
                    _determineVerbChainContextualAmbiguity( clause, clauseID, allDetectedVerbChains)
        # 5) Remove redundant and overlapping verb phrases
        removeRedundantVerbChains( allDetectedVerbChains, removeOverlapping = removeOverlapping, removeSingleAraAndEi = removeSingleAraEi )
        # 6) Add grammatical features (in the end)
        addGrammaticalFeatsAndRoots( sentence, allDetectedVerbChains )
        return allDetectedVerbChains
def add_agent(self, overall_index=None, team_index=None):
"""
Creates the agent using self.agent_class and adds it to the index manager.
:param overall_index: The index of the bot in the config file if it already exists.
:param team_index: The index of the team to place the agent in
:return agent: an Agent (gui_agent) with either given index or a free one, returns None if there is no index given and all indices are occupied
"""
if overall_index is None:
if not self.index_manager.has_free_slots():
return
overall_index = self.index_manager.get_new_index()
else:
self.index_manager.use_index(overall_index)
agent = GUIAgent(overall_index=overall_index)
if team_index is not None:
agent.set_team(team_index)
self.agents.append(agent)
self.overall_config.set_value(MATCH_CONFIGURATION_HEADER, PARTICIPANT_COUNT_KEY, len(self.agents))
return agent | Creates the agent using self.agent_class and adds it to the index manager.
:param overall_index: The index of the bot in the config file if it already exists.
:param team_index: The index of the team to place the agent in
:return agent: an Agent (gui_agent) with either given index or a free one, returns None if there is no index given and all indices are occupied | Below is the instruction that describes the task:
### Input:
Creates the agent using self.agent_class and adds it to the index manager.
:param overall_index: The index of the bot in the config file if it already exists.
:param team_index: The index of the team to place the agent in
:return agent: an Agent (gui_agent) with either given index or a free one, returns None if there is no index given and all indices are occupied
### Response:
def add_agent(self, overall_index=None, team_index=None):
        """
        Create a GUIAgent, register it, and sync the participant count.
        :param overall_index: Existing bot index from the config file, or None
            to allocate a fresh slot from the index manager.
        :param team_index: Team to place the agent in, or None to leave unset.
        :return: The new agent, or None when no index was given and all
            indices are already occupied.
        """
        if overall_index is not None:
            # Caller supplied a slot -- just mark it as taken.
            self.index_manager.use_index(overall_index)
        elif self.index_manager.has_free_slots():
            overall_index = self.index_manager.get_new_index()
        else:
            # No slot requested and none free: nothing to add.
            return
        new_agent = GUIAgent(overall_index=overall_index)
        if team_index is not None:
            new_agent.set_team(team_index)
        self.agents.append(new_agent)
        # Keep the config's participant count in step with the agent list.
        self.overall_config.set_value(
            MATCH_CONFIGURATION_HEADER, PARTICIPANT_COUNT_KEY, len(self.agents))
        return new_agent
async def _keepalive(self):
'''
Keep our connect to server alive forever, with some
pointless traffic.
'''
while self.protocol:
vers = await self.RPC('server.version')
logger.debug("Server version: " + repr(vers))
# Five minutes isn't really enough anymore; looks like
# servers are killing 2-minute old idle connections now.
# But decreasing interval this seems rude.
await asyncio.sleep(600) | Keep our connect to server alive forever, with some
        pointless traffic. | Below is the instruction that describes the task:
### Input:
Keep our connect to server alive forever, with some
pointless traffic.
### Response:
async def _keepalive(self):
        '''
        Keep our connection to the server alive forever, with some
        pointless traffic.
        Loops for as long as ``self.protocol`` is truthy (i.e. the
        connection exists), issuing a cheap 'server.version' RPC each
        round so the remote end sees activity.
        '''
        while self.protocol:
            vers = await self.RPC('server.version')
            logger.debug("Server version: " + repr(vers))
            # Five minutes isn't really enough anymore; looks like
            # servers are killing 2-minute old idle connections now.
            # But decreasing interval this seems rude.
            # NOTE(review): the sleep below is 600s (ten minutes), which does
            # not match the intervals discussed above -- confirm intended.
            await asyncio.sleep(600)
def _set_winning_team(self):
"""Mark the winning team."""
if not self._summary['finished']:
return
for team in self._summary['diplomacy']['teams']:
team['winner'] = False
for player_number in team['player_numbers']:
for player in self._summary['players']:
if player_number == player['number']:
if player['winner']:
team['winner'] = True | Mark the winning team. | Below is the the instruction that describes the task:
### Input:
Mark the winning team.
### Response:
def _set_winning_team(self):
"""Mark the winning team."""
if not self._summary['finished']:
return
for team in self._summary['diplomacy']['teams']:
team['winner'] = False
for player_number in team['player_numbers']:
for player in self._summary['players']:
if player_number == player['number']:
if player['winner']:
team['winner'] = True |
def _decode_sensor_data(properties):
"""Decode, decompress, and parse the data from the history API"""
b64_input = ""
for s in properties.get('payload'):
# pylint: disable=consider-using-join
b64_input += s
decoded = base64.b64decode(b64_input)
data = zlib.decompress(decoded)
points = []
i = 0
while i < len(data):
points.append({
'timestamp': int(1e3 * ArloBaseStation._parse_statistic(
data[i:(i + 4)], 0)),
'temperature': ArloBaseStation._parse_statistic(
data[(i + 8):(i + 10)], 1),
'humidity': ArloBaseStation._parse_statistic(
data[(i + 14):(i + 16)], 1),
'airQuality': ArloBaseStation._parse_statistic(
data[(i + 20):(i + 22)], 1)
})
i += 22
return points | Decode, decompress, and parse the data from the history API | Below is the the instruction that describes the task:
### Input:
Decode, decompress, and parse the data from the history API
### Response:
def _decode_sensor_data(properties):
        """Decode, decompress, and parse the data from the history API.
        The payload arrives as a list of base64 string fragments; joined
        together they form a zlib-compressed stream of fixed-size 22-byte
        records, each holding a timestamp plus three packed statistics.
        :param properties: message properties dict whose 'payload' entry is
            a list of base64 fragments
        :return: list of dicts with timestamp/temperature/humidity/airQuality
        """
        # Join the fragments in one pass instead of the previous quadratic
        # string concatenation loop (which needed a pylint suppression).
        b64_input = "".join(properties.get('payload'))
        data = zlib.decompress(base64.b64decode(b64_input))
        points = []
        # Each record is 22 bytes; the slices below pick the packed fields
        # out of the record at their fixed offsets.
        for i in range(0, len(data), 22):
            points.append({
                # Seconds -> milliseconds since epoch.
                'timestamp': int(1e3 * ArloBaseStation._parse_statistic(
                    data[i:(i + 4)], 0)),
                'temperature': ArloBaseStation._parse_statistic(
                    data[(i + 8):(i + 10)], 1),
                'humidity': ArloBaseStation._parse_statistic(
                    data[(i + 14):(i + 16)], 1),
                'airQuality': ArloBaseStation._parse_statistic(
                    data[(i + 20):(i + 22)], 1)
            })
        return points
def document_core_programs(p):
"""
Document a subset of core programs with purpose (and intent)
"""
p.comment('programs.py', 'collects list of aikif programs to show progress and allows comments to be added to each file')
p.comment('cls_file_mapping.py', 'uses ontology to get list of files to save data')
p.comment('index.py', 'rebuilds indexes')
p.comment('dataTools.py', 'data tools to manage database access')
p.comment('generateTestData.py', 'Tool to generate various test data')
p.comment('bias.py', '[DATA] weight the validity of source data based on location, person, timing')
p.comment('cls_collect_files.py', 'duplicate - see agent filelist collecting')
p.comment('config.py', '[DATA] central point for settings in AIKIF')
p.comment('cls_log.py', 'logging function to map to standard outputs. Almost provides auto aggregation')
p.comment('mapper.py', 'maps business rules and columns of source data to standard aikif logs')
p.comment('search.py', 'command line search tool [deprecated, but will be redone]')
p.comment('tools.py', '[DATA] uses the toolbox class to create list of programs used by aikif')
p.comment('check_redis_limit.py', 'starts reddis database and tests limits by repeatedly adding data until it breaks')
p.comment('cls_data.py', 'base class for data')
p.comment('cls_dataset.py', 'functions for a schema table - progress = stub only')
p.comment('cls_datatable.py', 'functions for a single table - progress = TOK')
p.comment('cls_sql_code_generator.py', 'useful generation of SQL commands')
p.comment('world_generator.py', 'generates a 2D grid world with random terrain - land, sea, blockages')
p.comment('gui_view_world.py', 'script to read a saved grid from world.py and show in gui. Good for seeing grids larger than 80x25')
p.comment('cls_file.py', 'TOK - class for handling file details - has subclasses for test, pictures and audio')
p.comment('cls_goal.py', 'base class for managing goals')
p.comment('cls_goal_friendly.py', 'STUB - test if a goal is friendly (needs 10-40 years work to be implemented properly)')
p.comment('cls_goal_money.py', 'example based on cls_goal to manage money goals')
p.comment('cls_goal_time.py', 'example based on cls_goal to manage time goals')
p.comment('cls_plan.py', 'STUB only at this stage - this should provide the link from goals to toolbox (somewhat tricky to say the least)')
p.comment('Toolbox.py', 'class to manage the toolbox - list of programs and functions aikif can use')
p.comment('cls_grid.py', 'base class for 2D grid for games - 2048, game of life, 2D terrain maps')
p.comment('cls_grid_life.py', 'game of life game')
p.comment('test_tool.py', 'tesing toolbox (SHOULD BE IN TESTS)')
p.comment('page_about.py', 'web_aikif - generates page using flask')
p.comment('page_agents.py', 'web_aikif - generates page using flask')
p.comment('page_data.py', 'web_aikif - generates page using flask')
p.comment('web_aikif.py', 'web_aikif - generates page using flask')
p.comment('web_utils.py', 'web_aikif - generates page using flask')
p.comment('check_python_env.py', 'script to test imports to ensure all correct packages are available')
p.comment('run_tests.py', 'runs all tests in /tests subfolder')
p.comment('if_database.py', 'dataTools - interface base class to a database')
p.comment('if_mssqlserver.py', 'dataTools - interface class to a mssql database')
p.comment('if_oracle.py', 'dataTools - interface class to an oracle database')
p.comment('if_redis.py', 'dataTools - interface class to a redis database')
p.comment('agent_browser.py', 'collects data from browser - bookmarks, visited sites')
p.comment('outlook_export.py', 'agent to connect to outlook and export emails')
p.comment('run_dummy_learn_1.py', 'sample code to call a learning algorithm')
p.comment('cls_collect.py', 'collect filelists')
p.comment('cls_context.py', 'estimate what the user and PC are currently actively working on')
p.comment('cls_filelist.py', 'fileslist class')
p.comment('cls_plan_BDI.py', 'stub for planner based on belief, desire, intent')
p.comment('cls_plan_search.py', 'AI planner search functions')
p.comment('project.py', 'Core module to manage projects - meta self documentation')
p.comment('data_structures.py', 'Node and Graph classes')
p.comment('knowledge.py', 'processs raw data to information')
p.comment('core_data.py', 'classes to manage the core data types')
p.comment('if_excel.py', 'data interface to excel')
p.comment('network_tools.py', 'toolbox method to download files')
p.comment('AI_CLI.py', 'Command Line Interface (IN PROGRESS)')
p.comment('install_data.py', 'script to setup data files (IN PROGRESS)') | Document a subset of core programs with purpose (and intent) | Below is the the instruction that describes the task:
### Input:
Document a subset of core programs with purpose (and intent)
### Response:
def document_core_programs(p):
    """
    Document a subset of core programs with purpose (and intent)
    Registers a one-line purpose comment for each known AIKIF source file
    on the supplied project object *p* (anything exposing a
    ``comment(filename, text)`` method).
    """
    # (filename, purpose) pairs, kept in the original registration order.
    entries = [
        ('programs.py', 'collects list of aikif programs to show progress and allows comments to be added to each file'),
        ('cls_file_mapping.py', 'uses ontology to get list of files to save data'),
        ('index.py', 'rebuilds indexes'),
        ('dataTools.py', 'data tools to manage database access'),
        ('generateTestData.py', 'Tool to generate various test data'),
        ('bias.py', '[DATA] weight the validity of source data based on location, person, timing'),
        ('cls_collect_files.py', 'duplicate - see agent filelist collecting'),
        ('config.py', '[DATA] central point for settings in AIKIF'),
        ('cls_log.py', 'logging function to map to standard outputs. Almost provides auto aggregation'),
        ('mapper.py', 'maps business rules and columns of source data to standard aikif logs'),
        ('search.py', 'command line search tool [deprecated, but will be redone]'),
        ('tools.py', '[DATA] uses the toolbox class to create list of programs used by aikif'),
        ('check_redis_limit.py', 'starts reddis database and tests limits by repeatedly adding data until it breaks'),
        ('cls_data.py', 'base class for data'),
        ('cls_dataset.py', 'functions for a schema table - progress = stub only'),
        ('cls_datatable.py', 'functions for a single table - progress = TOK'),
        ('cls_sql_code_generator.py', 'useful generation of SQL commands'),
        ('world_generator.py', 'generates a 2D grid world with random terrain - land, sea, blockages'),
        ('gui_view_world.py', 'script to read a saved grid from world.py and show in gui. Good for seeing grids larger than 80x25'),
        ('cls_file.py', 'TOK - class for handling file details - has subclasses for test, pictures and audio'),
        ('cls_goal.py', 'base class for managing goals'),
        ('cls_goal_friendly.py', 'STUB - test if a goal is friendly (needs 10-40 years work to be implemented properly)'),
        ('cls_goal_money.py', 'example based on cls_goal to manage money goals'),
        ('cls_goal_time.py', 'example based on cls_goal to manage time goals'),
        ('cls_plan.py', 'STUB only at this stage - this should provide the link from goals to toolbox (somewhat tricky to say the least)'),
        ('Toolbox.py', 'class to manage the toolbox - list of programs and functions aikif can use'),
        ('cls_grid.py', 'base class for 2D grid for games - 2048, game of life, 2D terrain maps'),
        ('cls_grid_life.py', 'game of life game'),
        ('test_tool.py', 'tesing toolbox (SHOULD BE IN TESTS)'),
        ('page_about.py', 'web_aikif - generates page using flask'),
        ('page_agents.py', 'web_aikif - generates page using flask'),
        ('page_data.py', 'web_aikif - generates page using flask'),
        ('web_aikif.py', 'web_aikif - generates page using flask'),
        ('web_utils.py', 'web_aikif - generates page using flask'),
        ('check_python_env.py', 'script to test imports to ensure all correct packages are available'),
        ('run_tests.py', 'runs all tests in /tests subfolder'),
        ('if_database.py', 'dataTools - interface base class to a database'),
        ('if_mssqlserver.py', 'dataTools - interface class to a mssql database'),
        ('if_oracle.py', 'dataTools - interface class to an oracle database'),
        ('if_redis.py', 'dataTools - interface class to a redis database'),
        ('agent_browser.py', 'collects data from browser - bookmarks, visited sites'),
        ('outlook_export.py', 'agent to connect to outlook and export emails'),
        ('run_dummy_learn_1.py', 'sample code to call a learning algorithm'),
        ('cls_collect.py', 'collect filelists'),
        ('cls_context.py', 'estimate what the user and PC are currently actively working on'),
        ('cls_filelist.py', 'fileslist class'),
        ('cls_plan_BDI.py', 'stub for planner based on belief, desire, intent'),
        ('cls_plan_search.py', 'AI planner search functions'),
        ('project.py', 'Core module to manage projects - meta self documentation'),
        ('data_structures.py', 'Node and Graph classes'),
        ('knowledge.py', 'processs raw data to information'),
        ('core_data.py', 'classes to manage the core data types'),
        ('if_excel.py', 'data interface to excel'),
        ('network_tools.py', 'toolbox method to download files'),
        ('AI_CLI.py', 'Command Line Interface (IN PROGRESS)'),
        ('install_data.py', 'script to setup data files (IN PROGRESS)'),
    ]
    for filename, purpose in entries:
        p.comment(filename, purpose)
def on_train_begin(self, **kwargs: Any) -> None:
"Prepare MLflow experiment and log params"
self.client = mlflow.tracking.MlflowClient(self.uri)
exp = self.client.get_experiment_by_name(self.exp_name)
self.exp_id = self.client.create_experiment(self.exp_name) if exp is None else exp.experiment_id
run = self.client.create_run(experiment_id=self.exp_id)
self.run = run.info.run_uuid
for k,v in self.params.items():
        self.client.log_param(run_id=self.run, key=k, value=v) | Prepare MLflow experiment and log params | Below is the instruction that describes the task:
### Input:
Prepare MLflow experiment and log params
### Response:
def on_train_begin(self, **kwargs: Any) -> None:
    """Prepare the MLflow experiment/run and log the training params."""
    client = mlflow.tracking.MlflowClient(self.uri)
    self.client = client
    # Reuse the experiment when it already exists, otherwise create it.
    existing = client.get_experiment_by_name(self.exp_name)
    if existing is None:
        self.exp_id = client.create_experiment(self.exp_name)
    else:
        self.exp_id = existing.experiment_id
    self.run = client.create_run(experiment_id=self.exp_id).info.run_uuid
    for name, value in self.params.items():
        client.log_param(run_id=self.run, key=name, value=value)
def _do_config_packet(self, packet, ip, port):
"""
Apply config to this instance
:param packet: Packet with config
:type packet: paps.si.app.message.APPMessage
:param ip: Ip of server
:type ip: str
:param port: Port of server
:type port: int
:rtype: None
"""
self.debug("()")
if packet.header.device_id != Id.SERVER:
# Only allow config packets from server
self.warning("Config packets only allowed from server")
return
try:
config = packet.payload
self.debug(u"{}".format(config))
if not isinstance(config, dict):
self.error("Wrong payload type")
raise RuntimeError("Wrong type")
config.setdefault("server_ip", ip)
config.setdefault("server_port", port)
self.config(config)
self._joined.set()
except:
self.exception("Failed to configure")
self.error(u"Faulty packet {}".format(format_data(packet.payload)))
return | Apply config to this instance
:param packet: Packet with config
:type packet: paps.si.app.message.APPMessage
:param ip: Ip of server
:type ip: str
:param port: Port of server
:type port: int
:rtype: None | Below is the the instruction that describes the task:
### Input:
Apply config to this instance
:param packet: Packet with config
:type packet: paps.si.app.message.APPMessage
:param ip: Ip of server
:type ip: str
:param port: Port of server
:type port: int
:rtype: None
### Response:
def _do_config_packet(self, packet, ip, port):
        """
        Apply config to this instance
        Only packets whose header identifies the server are honoured; the
        payload must be a dict of config values. The source address is
        merged in as a fallback before applying, and ``self._joined`` is
        set once configuration succeeds.
        :param packet: Packet with config
        :type packet: paps.si.app.message.APPMessage
        :param ip: Ip of server
        :type ip: str
        :param port: Port of server
        :type port: int
        :rtype: None
        """
        self.debug("()")
        if packet.header.device_id != Id.SERVER:
            # Only allow config packets from server
            self.warning("Config packets only allowed from server")
            return
        try:
            config = packet.payload
            self.debug(u"{}".format(config))
            if not isinstance(config, dict):
                self.error("Wrong payload type")
                raise RuntimeError("Wrong type")
            # Fall back to the address the packet arrived from when the
            # payload does not name the server explicitly.
            config.setdefault("server_ip", ip)
            config.setdefault("server_port", port)
            self.config(config)
            self._joined.set()
        except Exception:
            # Narrowed from a bare ``except:`` so SystemExit and
            # KeyboardInterrupt are no longer swallowed here.
            self.exception("Failed to configure")
            self.error(u"Faulty packet {}".format(format_data(packet.payload)))
def nl_list_for_each_entry_safe(pos, n, head, member):
"""https://github.com/thom311/libnl/blob/libnl3_2_25/include/netlink/list.h#L84.
Positional arguments:
pos -- class instance holding an nl_list_head instance.
n -- class instance holding an nl_list_head instance.
head -- nl_list_head class instance.
member -- attribute (string).
Returns:
Generator yielding a class instances.
"""
pos = nl_list_entry(head.next_, type(pos), member)
n = nl_list_entry(pos.member.next_, type(pos), member)
while True:
yield pos
if getattr(pos, member) != head:
pos = n
n = nl_list_entry(n.member.next_, type(n), member)
continue
break | https://github.com/thom311/libnl/blob/libnl3_2_25/include/netlink/list.h#L84.
Positional arguments:
pos -- class instance holding an nl_list_head instance.
n -- class instance holding an nl_list_head instance.
head -- nl_list_head class instance.
member -- attribute (string).
Returns:
Generator yielding a class instances. | Below is the the instruction that describes the task:
### Input:
https://github.com/thom311/libnl/blob/libnl3_2_25/include/netlink/list.h#L84.
Positional arguments:
pos -- class instance holding an nl_list_head instance.
n -- class instance holding an nl_list_head instance.
head -- nl_list_head class instance.
member -- attribute (string).
Returns:
Generator yielding a class instances.
### Response:
def nl_list_for_each_entry_safe(pos, n, head, member):
    """https://github.com/thom311/libnl/blob/libnl3_2_25/include/netlink/list.h#L84.
    Python port of the ``nl_list_for_each_entry_safe`` C macro: ``n`` always
    holds the entry *after* ``pos``, so the consumer may unlink the current
    entry without breaking traversal.
    Positional arguments:
    pos -- class instance holding an nl_list_head instance.
    n -- class instance holding an nl_list_head instance.
    head -- nl_list_head class instance.
    member -- attribute (string).
    Returns:
    Generator yielding a class instances.
    """
    # Prime the sliding window: pos = first real entry, n = the one after it.
    pos = nl_list_entry(head.next_, type(pos), member)
    # NOTE(review): ``pos.member`` / ``n.member`` read a literal attribute
    # named 'member', whereas the loop condition uses getattr(pos, member)
    # with the name passed in -- confirm the entries really expose an
    # attribute called 'member'.
    n = nl_list_entry(pos.member.next_, type(pos), member)
    while True:
        yield pos
        # Stop once the list head is reached again (circular list).
        if getattr(pos, member) != head:
            pos = n
            n = nl_list_entry(n.member.next_, type(n), member)
            continue
        break
def _antenna_uvw(uvw, antenna1, antenna2, chunks, nr_of_antenna):
""" numba implementation of antenna_uvw """
if antenna1.ndim != 1:
raise ValueError("antenna1 shape should be (row,)")
if antenna2.ndim != 1:
raise ValueError("antenna2 shape should be (row,)")
if uvw.ndim != 2 or uvw.shape[1] != 3:
raise ValueError("uvw shape should be (row, 3)")
if not (uvw.shape[0] == antenna1.shape[0] == antenna2.shape[0]):
raise ValueError("First dimension of uvw, antenna1 "
"and antenna2 do not match")
if chunks.ndim != 1:
raise ValueError("chunks shape should be (utime,)")
if nr_of_antenna < 1:
raise ValueError("nr_of_antenna < 1")
ant_uvw_shape = (chunks.shape[0], nr_of_antenna, 3)
antenna_uvw = np.full(ant_uvw_shape, np.nan, dtype=uvw.dtype)
start = 0
for ci, chunk in enumerate(chunks):
end = start + chunk
# one pass should be enough!
_antenna_uvw_loop(uvw, antenna1, antenna2, antenna_uvw, ci, start, end)
start = end
    return antenna_uvw | numba implementation of antenna_uvw | Below is the instruction that describes the task:
### Input:
numba implementation of antenna_uvw
### Response:
def _antenna_uvw(uvw, antenna1, antenna2, chunks, nr_of_antenna):
    """ numba implementation of antenna_uvw """
    # --- argument validation, done before any allocation ---
    if antenna1.ndim != 1:
        raise ValueError("antenna1 shape should be (row,)")
    if antenna2.ndim != 1:
        raise ValueError("antenna2 shape should be (row,)")
    if uvw.ndim != 2 or uvw.shape[1] != 3:
        raise ValueError("uvw shape should be (row, 3)")
    if not (uvw.shape[0] == antenna1.shape[0] == antenna2.shape[0]):
        raise ValueError("First dimension of uvw, antenna1 "
                         "and antenna2 do not match")
    if chunks.ndim != 1:
        raise ValueError("chunks shape should be (utime,)")
    if nr_of_antenna < 1:
        raise ValueError("nr_of_antenna < 1")
    # Per-chunk antenna coordinates; NaN marks antennas that never appear.
    ant_uvw = np.full((chunks.shape[0], nr_of_antenna, 3), np.nan,
                      dtype=uvw.dtype)
    row_start = 0
    for chunk_idx in range(chunks.shape[0]):
        row_end = row_start + chunks[chunk_idx]
        # one pass should be enough!
        _antenna_uvw_loop(uvw, antenna1, antenna2, ant_uvw,
                          chunk_idx, row_start, row_end)
        row_start = row_end
    return ant_uvw
def run(self, request, tempdir, opts):
"""
Constructs a command to run a cwl/json from requests and opts,
runs it, and deposits the outputs in outdir.
Runner:
opts.getopt("runner", default="cwl-runner")
CWL (url):
request["workflow_url"] == a url to a cwl file
or
request["workflow_attachment"] == input cwl text (written to a file and a url constructed for that file)
JSON File:
request["workflow_params"] == input json text (to be written to a file)
:param dict request: A dictionary containing the cwl/json information.
:param wes_service.util.WESBackend opts: contains the user's arguments;
specifically the runner and runner options
:return: {"run_id": self.run_id, "state": state}
"""
with open(os.path.join(self.workdir, "request.json"), "w") as f:
json.dump(request, f)
with open(os.path.join(self.workdir, "cwl.input.json"), "w") as inputtemp:
json.dump(request["workflow_params"], inputtemp)
workflow_url = request.get("workflow_url") # Will always be local path to descriptor cwl, or url.
output = open(os.path.join(self.workdir, "cwl.output.json"), "w")
stderr = open(os.path.join(self.workdir, "stderr"), "w")
runner = opts.getopt("runner", default="cwl-runner")
extra = opts.getoptlist("extra")
# replace any locally specified outdir with the default
for e in extra:
if e.startswith('--outdir='):
extra.remove(e)
extra.append('--outdir=' + self.outdir)
# link the cwl and json into the tempdir/cwd
if workflow_url.startswith('file://'):
os.symlink(workflow_url[7:], os.path.join(tempdir, "wes_workflow.cwl"))
workflow_url = os.path.join(tempdir, "wes_workflow.cwl")
os.symlink(inputtemp.name, os.path.join(tempdir, "cwl.input.json"))
jsonpath = os.path.join(tempdir, "cwl.input.json")
# build args and run
command_args = [runner] + extra + [workflow_url, jsonpath]
proc = subprocess.Popen(command_args,
stdout=output,
stderr=stderr,
close_fds=True,
cwd=tempdir)
output.close()
stderr.close()
with open(os.path.join(self.workdir, "pid"), "w") as pid:
pid.write(str(proc.pid))
return self.getstatus() | Constructs a command to run a cwl/json from requests and opts,
runs it, and deposits the outputs in outdir.
Runner:
opts.getopt("runner", default="cwl-runner")
CWL (url):
request["workflow_url"] == a url to a cwl file
or
request["workflow_attachment"] == input cwl text (written to a file and a url constructed for that file)
JSON File:
request["workflow_params"] == input json text (to be written to a file)
:param dict request: A dictionary containing the cwl/json information.
:param wes_service.util.WESBackend opts: contains the user's arguments;
specifically the runner and runner options
    :return: {"run_id": self.run_id, "state": state} | Below is the instruction that describes the task:
### Input:
Constructs a command to run a cwl/json from requests and opts,
runs it, and deposits the outputs in outdir.
Runner:
opts.getopt("runner", default="cwl-runner")
CWL (url):
request["workflow_url"] == a url to a cwl file
or
request["workflow_attachment"] == input cwl text (written to a file and a url constructed for that file)
JSON File:
request["workflow_params"] == input json text (to be written to a file)
:param dict request: A dictionary containing the cwl/json information.
:param wes_service.util.WESBackend opts: contains the user's arguments;
specifically the runner and runner options
:return: {"run_id": self.run_id, "state": state}
### Response:
def run(self, request, tempdir, opts):
        """
        Constructs a command to run a cwl/json from requests and opts,
        runs it, and deposits the outputs in outdir.
        Runner:
        opts.getopt("runner", default="cwl-runner")
        CWL (url):
        request["workflow_url"] == a url to a cwl file
        or
        request["workflow_attachment"] == input cwl text (written to a file and a url constructed for that file)
        JSON File:
        request["workflow_params"] == input json text (to be written to a file)
        :param dict request: A dictionary containing the cwl/json information.
        :param wes_service.util.WESBackend opts: contains the user's arguments;
        specifically the runner and runner options
        :return: {"run_id": self.run_id, "state": state}
        """
        # Keep a copy of the raw request for later inspection/debugging.
        with open(os.path.join(self.workdir, "request.json"), "w") as f:
            json.dump(request, f)
        with open(os.path.join(self.workdir, "cwl.input.json"), "w") as inputtemp:
            json.dump(request["workflow_params"], inputtemp)
        workflow_url = request.get("workflow_url")  # Will always be local path to descriptor cwl, or url.
        # stdout/stderr of the runner are redirected to files in workdir;
        # the handles are passed to Popen and closed in the parent below.
        output = open(os.path.join(self.workdir, "cwl.output.json"), "w")
        stderr = open(os.path.join(self.workdir, "stderr"), "w")
        runner = opts.getopt("runner", default="cwl-runner")
        extra = opts.getoptlist("extra")
        # Replace any locally specified outdir with the default.
        # BUGFIX: the previous code removed elements from `extra` while
        # iterating over it, which skips the element following each removal
        # (so a second '--outdir=' entry could survive); rebuild the list
        # instead.
        extra = [e for e in extra if not e.startswith('--outdir=')]
        extra.append('--outdir=' + self.outdir)
        # link the cwl and json into the tempdir/cwd
        if workflow_url.startswith('file://'):
            os.symlink(workflow_url[7:], os.path.join(tempdir, "wes_workflow.cwl"))
            workflow_url = os.path.join(tempdir, "wes_workflow.cwl")
        # `inputtemp` is closed at this point, but its .name is still valid.
        os.symlink(inputtemp.name, os.path.join(tempdir, "cwl.input.json"))
        jsonpath = os.path.join(tempdir, "cwl.input.json")
        # build args and run
        command_args = [runner] + extra + [workflow_url, jsonpath]
        proc = subprocess.Popen(command_args,
                                stdout=output,
                                stderr=stderr,
                                close_fds=True,
                                cwd=tempdir)
        output.close()
        stderr.close()
        # Record the child's pid so the run can be polled/cancelled later.
        with open(os.path.join(self.workdir, "pid"), "w") as pid:
            pid.write(str(proc.pid))
        return self.getstatus()
def agg(self, *exprs):
"""Compute aggregates and returns the result as a :class:`DataFrame`.
The available aggregate functions can be:
1. built-in aggregation functions, such as `avg`, `max`, `min`, `sum`, `count`
2. group aggregate pandas UDFs, created with :func:`pyspark.sql.functions.pandas_udf`
.. note:: There is no partial aggregation with group aggregate UDFs, i.e.,
a full shuffle is required. Also, all the data of a group will be loaded into
memory, so the user should be aware of the potential OOM risk if data is skewed
and certain groups are too large to fit in memory.
.. seealso:: :func:`pyspark.sql.functions.pandas_udf`
If ``exprs`` is a single :class:`dict` mapping from string to string, then the key
is the column to perform aggregation on, and the value is the aggregate function.
Alternatively, ``exprs`` can also be a list of aggregate :class:`Column` expressions.
.. note:: Built-in aggregation functions and group aggregate pandas UDFs cannot be mixed
in a single call to this function.
:param exprs: a dict mapping from column name (string) to aggregate functions (string),
or a list of :class:`Column`.
>>> gdf = df.groupBy(df.name)
>>> sorted(gdf.agg({"*": "count"}).collect())
[Row(name=u'Alice', count(1)=1), Row(name=u'Bob', count(1)=1)]
>>> from pyspark.sql import functions as F
>>> sorted(gdf.agg(F.min(df.age)).collect())
[Row(name=u'Alice', min(age)=2), Row(name=u'Bob', min(age)=5)]
>>> from pyspark.sql.functions import pandas_udf, PandasUDFType
>>> @pandas_udf('int', PandasUDFType.GROUPED_AGG) # doctest: +SKIP
... def min_udf(v):
... return v.min()
>>> sorted(gdf.agg(min_udf(df.age)).collect()) # doctest: +SKIP
[Row(name=u'Alice', min_udf(age)=2), Row(name=u'Bob', min_udf(age)=5)]
"""
assert exprs, "exprs should not be empty"
if len(exprs) == 1 and isinstance(exprs[0], dict):
jdf = self._jgd.agg(exprs[0])
else:
# Columns
assert all(isinstance(c, Column) for c in exprs), "all exprs should be Column"
jdf = self._jgd.agg(exprs[0]._jc,
_to_seq(self.sql_ctx._sc, [c._jc for c in exprs[1:]]))
return DataFrame(jdf, self.sql_ctx) | Compute aggregates and returns the result as a :class:`DataFrame`.
The available aggregate functions can be:
1. built-in aggregation functions, such as `avg`, `max`, `min`, `sum`, `count`
2. group aggregate pandas UDFs, created with :func:`pyspark.sql.functions.pandas_udf`
.. note:: There is no partial aggregation with group aggregate UDFs, i.e.,
a full shuffle is required. Also, all the data of a group will be loaded into
memory, so the user should be aware of the potential OOM risk if data is skewed
and certain groups are too large to fit in memory.
.. seealso:: :func:`pyspark.sql.functions.pandas_udf`
If ``exprs`` is a single :class:`dict` mapping from string to string, then the key
is the column to perform aggregation on, and the value is the aggregate function.
Alternatively, ``exprs`` can also be a list of aggregate :class:`Column` expressions.
.. note:: Built-in aggregation functions and group aggregate pandas UDFs cannot be mixed
in a single call to this function.
:param exprs: a dict mapping from column name (string) to aggregate functions (string),
or a list of :class:`Column`.
>>> gdf = df.groupBy(df.name)
>>> sorted(gdf.agg({"*": "count"}).collect())
[Row(name=u'Alice', count(1)=1), Row(name=u'Bob', count(1)=1)]
>>> from pyspark.sql import functions as F
>>> sorted(gdf.agg(F.min(df.age)).collect())
[Row(name=u'Alice', min(age)=2), Row(name=u'Bob', min(age)=5)]
>>> from pyspark.sql.functions import pandas_udf, PandasUDFType
>>> @pandas_udf('int', PandasUDFType.GROUPED_AGG) # doctest: +SKIP
... def min_udf(v):
... return v.min()
>>> sorted(gdf.agg(min_udf(df.age)).collect()) # doctest: +SKIP
[Row(name=u'Alice', min_udf(age)=2), Row(name=u'Bob', min_udf(age)=5)] | Below is the instruction that describes the task:
### Input:
Compute aggregates and returns the result as a :class:`DataFrame`.
The available aggregate functions can be:
1. built-in aggregation functions, such as `avg`, `max`, `min`, `sum`, `count`
2. group aggregate pandas UDFs, created with :func:`pyspark.sql.functions.pandas_udf`
.. note:: There is no partial aggregation with group aggregate UDFs, i.e.,
a full shuffle is required. Also, all the data of a group will be loaded into
memory, so the user should be aware of the potential OOM risk if data is skewed
and certain groups are too large to fit in memory.
.. seealso:: :func:`pyspark.sql.functions.pandas_udf`
If ``exprs`` is a single :class:`dict` mapping from string to string, then the key
is the column to perform aggregation on, and the value is the aggregate function.
Alternatively, ``exprs`` can also be a list of aggregate :class:`Column` expressions.
.. note:: Built-in aggregation functions and group aggregate pandas UDFs cannot be mixed
in a single call to this function.
:param exprs: a dict mapping from column name (string) to aggregate functions (string),
or a list of :class:`Column`.
>>> gdf = df.groupBy(df.name)
>>> sorted(gdf.agg({"*": "count"}).collect())
[Row(name=u'Alice', count(1)=1), Row(name=u'Bob', count(1)=1)]
>>> from pyspark.sql import functions as F
>>> sorted(gdf.agg(F.min(df.age)).collect())
[Row(name=u'Alice', min(age)=2), Row(name=u'Bob', min(age)=5)]
>>> from pyspark.sql.functions import pandas_udf, PandasUDFType
>>> @pandas_udf('int', PandasUDFType.GROUPED_AGG) # doctest: +SKIP
... def min_udf(v):
... return v.min()
>>> sorted(gdf.agg(min_udf(df.age)).collect()) # doctest: +SKIP
[Row(name=u'Alice', min_udf(age)=2), Row(name=u'Bob', min_udf(age)=5)]
### Response:
def agg(self, *exprs):
        """Compute aggregates and returns the result as a :class:`DataFrame`.
        The available aggregate functions can be:
        1. built-in aggregation functions, such as `avg`, `max`, `min`, `sum`, `count`
        2. group aggregate pandas UDFs, created with :func:`pyspark.sql.functions.pandas_udf`
        .. note:: There is no partial aggregation with group aggregate UDFs, i.e.,
        a full shuffle is required. Also, all the data of a group will be loaded into
        memory, so the user should be aware of the potential OOM risk if data is skewed
        and certain groups are too large to fit in memory.
        .. seealso:: :func:`pyspark.sql.functions.pandas_udf`
        If ``exprs`` is a single :class:`dict` mapping from string to string, then the key
        is the column to perform aggregation on, and the value is the aggregate function.
        Alternatively, ``exprs`` can also be a list of aggregate :class:`Column` expressions.
        .. note:: Built-in aggregation functions and group aggregate pandas UDFs cannot be mixed
        in a single call to this function.
        :param exprs: a dict mapping from column name (string) to aggregate functions (string),
        or a list of :class:`Column`.
        >>> gdf = df.groupBy(df.name)
        >>> sorted(gdf.agg({"*": "count"}).collect())
        [Row(name=u'Alice', count(1)=1), Row(name=u'Bob', count(1)=1)]
        >>> from pyspark.sql import functions as F
        >>> sorted(gdf.agg(F.min(df.age)).collect())
        [Row(name=u'Alice', min(age)=2), Row(name=u'Bob', min(age)=5)]
        >>> from pyspark.sql.functions import pandas_udf, PandasUDFType
        >>> @pandas_udf('int', PandasUDFType.GROUPED_AGG)  # doctest: +SKIP
        ... def min_udf(v):
        ...     return v.min()
        >>> sorted(gdf.agg(min_udf(df.age)).collect())  # doctest: +SKIP
        [Row(name=u'Alice', min_udf(age)=2), Row(name=u'Bob', min_udf(age)=5)]
        """
        assert exprs, "exprs should not be empty"
        # Dict form: a single {"column": "agg_func"} mapping is handed
        # straight to the JVM-side agg overload.
        if len(exprs) == 1 and isinstance(exprs[0], dict):
            jdf = self._jgd.agg(exprs[0])
        else:
            # Columns
            # The JVM signature takes a head Column plus a Seq of the rest,
            # which is why the first expression is passed separately.
            assert all(isinstance(c, Column) for c in exprs), "all exprs should be Column"
            jdf = self._jgd.agg(exprs[0]._jc,
                                _to_seq(self.sql_ctx._sc, [c._jc for c in exprs[1:]]))
        return DataFrame(jdf, self.sql_ctx)
def info_to_datatype_v4(signed, little_endian):
"""map CAN signal to MDF integer types
Parameters
----------
signed : bool
signal is flagged as signed in the CAN database
little_endian : bool
signal is flagged as little endian (Intel) in the CAN database
Returns
-------
datatype : int
integer code for MDF channel data type
"""
if signed:
if little_endian:
datatype = v4c.DATA_TYPE_SIGNED_INTEL
else:
datatype = v4c.DATA_TYPE_SIGNED_MOTOROLA
else:
if little_endian:
datatype = v4c.DATA_TYPE_UNSIGNED_INTEL
else:
datatype = v4c.DATA_TYPE_UNSIGNED_MOTOROLA
return datatype | map CAN signal to MDF integer types
Parameters
----------
signed : bool
signal is flagged as signed in the CAN database
little_endian : bool
signal is flagged as little endian (Intel) in the CAN database
Returns
-------
datatype : int
        integer code for MDF channel data type | Below is the instruction that describes the task:
### Input:
map CAN signal to MDF integer types
Parameters
----------
signed : bool
signal is flagged as signed in the CAN database
little_endian : bool
signal is flagged as little endian (Intel) in the CAN database
Returns
-------
datatype : int
integer code for MDF channel data type
### Response:
def info_to_datatype_v4(signed, little_endian):
    """map CAN signal to MDF integer types
    Parameters
    ----------
    signed : bool
        signal is flagged as signed in the CAN database
    little_endian : bool
        signal is flagged as little endian (Intel) in the CAN database
    Returns
    -------
    datatype : int
        integer code for MDF channel data type
    """
    # Select along the (signedness, byte order) axes with conditional
    # expressions instead of nested if/else blocks.
    if signed:
        return (v4c.DATA_TYPE_SIGNED_INTEL if little_endian
                else v4c.DATA_TYPE_SIGNED_MOTOROLA)
    return (v4c.DATA_TYPE_UNSIGNED_INTEL if little_endian
            else v4c.DATA_TYPE_UNSIGNED_MOTOROLA)
def image_exists(self, image_name, tag='latest'):
"""
:param image_name:
:return: True the image_name location in docker.neg pos
"""
code, image = self.image_tags(image_name)
if code != httplib.OK:
return False
tag = tag.lower()
return any(x.lower() == tag for x in image.tags) | :param image_name:
    :return: True the image_name location in docker.neg pos | Below is the instruction that describes the task:
### Input:
:param image_name:
:return: True the image_name location in docker.neg pos
### Response:
def image_exists(self, image_name, tag='latest'):
        """Report whether the registry knows `image_name` under `tag`.
        :param image_name:
        :return: True the image_name location in docker.neg pos
        """
        status, image = self.image_tags(image_name)
        if status != httplib.OK:
            return False
        # Tag comparison is case-insensitive on both sides.
        wanted = tag.lower()
        for candidate in image.tags:
            if candidate.lower() == wanted:
                return True
        return False
def _integrate_plugins():
"""Integrate plugins to the context"""
import sys
from airflow.plugins_manager import operators_modules
for operators_module in operators_modules:
sys.modules[operators_module.__name__] = operators_module
        globals()[operators_module._name] = operators_module | Integrate plugins to the context | Below is the instruction that describes the task:
### Input:
Integrate plugins to the context
### Response:
def _integrate_plugins():
    """Expose plugin-provided operator modules in this module's namespace."""
    import sys
    from airflow.plugins_manager import operators_modules
    namespace = globals()
    for plugin_module in operators_modules:
        # Register under the fully-qualified name so regular imports resolve,
        # and bind the short name here for attribute-style access.
        sys.modules[plugin_module.__name__] = plugin_module
        namespace[plugin_module._name] = plugin_module
async def update_trend_data(self, startdate, enddate):
"""Update trends data json for specified time period."""
url = '{}/users/{}/trends'.format(API_URL, self.userid)
params = {
'tz': self.device.tzone,
'from': startdate,
'to': enddate
}
trends = await self.device.api_get(url, params)
if trends is None:
_LOGGER.error('Unable to fetch eight trend data.')
else:
        self.trends = trends['days'] | Update trends data json for specified time period. | Below is the instruction that describes the task:
### Input:
Update trends data json for specified time period.
### Response:
async def update_trend_data(self, startdate, enddate):
        """Fetch trend data for the given period and store it on `self`."""
        url = '{}/users/{}/trends'.format(API_URL, self.userid)
        query = {
            'tz': self.device.tzone,
            'from': startdate,
            'to': enddate
        }
        trends = await self.device.api_get(url, query)
        if trends is not None:
            self.trends = trends['days']
        else:
            _LOGGER.error('Unable to fetch eight trend data.')
def filter_off(self, filt=None, analyte=None, samples=None, subset=None, show_status=False):
"""
Turns data filters off for particular analytes and samples.
Parameters
----------
filt : optional, str or array_like
Name, partial name or list of names of filters. Supports
partial matching. i.e. if 'cluster' is specified, all
filters with 'cluster' in the name are activated.
Defaults to all filters.
analyte : optional, str or array_like
Name or list of names of analytes. Defaults to all analytes.
samples : optional, array_like or None
Which samples to apply this filter to. If None, applies to all
samples.
Returns
-------
None
"""
if samples is not None:
subset = self.make_subset(samples)
samples = self._get_samples(subset)
for s in samples:
try:
self.data[s].filt.off(analyte, filt)
except:
warnings.warn("filt.off failure in sample " + s)
if show_status:
self.filter_status(subset=subset)
return | Turns data filters off for particular analytes and samples.
Parameters
----------
filt : optional, str or array_like
Name, partial name or list of names of filters. Supports
partial matching. i.e. if 'cluster' is specified, all
filters with 'cluster' in the name are activated.
Defaults to all filters.
analyte : optional, str or array_like
Name or list of names of analytes. Defaults to all analytes.
samples : optional, array_like or None
Which samples to apply this filter to. If None, applies to all
samples.
Returns
-------
None | Below is the instruction that describes the task:
### Input:
Turns data filters off for particular analytes and samples.
Parameters
----------
filt : optional, str or array_like
Name, partial name or list of names of filters. Supports
partial matching. i.e. if 'cluster' is specified, all
filters with 'cluster' in the name are activated.
Defaults to all filters.
analyte : optional, str or array_like
Name or list of names of analytes. Defaults to all analytes.
samples : optional, array_like or None
Which samples to apply this filter to. If None, applies to all
samples.
Returns
-------
None
### Response:
def filter_off(self, filt=None, analyte=None, samples=None, subset=None, show_status=False):
        """
        Turns data filters off for particular analytes and samples.
        Parameters
        ----------
        filt : optional, str or array_like
            Name, partial name or list of names of filters. Supports
            partial matching. i.e. if 'cluster' is specified, all
            filters with 'cluster' in the name are activated.
            Defaults to all filters.
        analyte : optional, str or array_like
            Name or list of names of analytes. Defaults to all analytes.
        samples : optional, array_like or None
            Which samples to apply this filter to. If None, applies to all
            samples.
        subset : optional
            Subset to apply the filter to. Overridden by `samples` when
            that argument is given.
        show_status : bool
            If True, print the filter status afterwards.
        Returns
        -------
        None
        """
        if samples is not None:
            subset = self.make_subset(samples)
        samples = self._get_samples(subset)
        for s in samples:
            try:
                self.data[s].filt.off(analyte, filt)
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are no longer swallowed; a per-sample failure is reported as a
            # warning and the remaining samples are still processed.
            except Exception:
                warnings.warn("filt.off failure in sample " + s)
        if show_status:
            self.filter_status(subset=subset)
        return
def transmute(df, *keep_columns, **kwargs):
"""
Creates columns and then returns those new columns and optionally specified
original columns from the DataFrame.
This works like `mutate`, but designed to discard the original columns used
to create the new ones.
Args:
*keep_columns: Column labels to keep. Can be string, symbolic, or
integer position.
Kwargs:
**kwargs: keys are the names of the new columns, values indicate
what the new column values will be.
Example:
diamonds >> transmute(x_plus_y=X.x + X.y, y_div_z=(X.y / X.z)) >> head(3)
y_div_z x_plus_y
0 1.637860 7.93
1 1.662338 7.73
2 1.761905 8.12
"""
keep_cols = []
for col in flatten(keep_columns):
try:
keep_cols.append(col.name)
except:
if isinstance(col, str):
keep_cols.append(col)
elif isinstance(col, int):
keep_cols.append(df.columns[col])
df = df.assign(**kwargs)
columns = [k for k in kwargs.keys()] + list(keep_cols)
return df[columns] | Creates columns and then returns those new columns and optionally specified
original columns from the DataFrame.
This works like `mutate`, but designed to discard the original columns used
to create the new ones.
Args:
*keep_columns: Column labels to keep. Can be string, symbolic, or
integer position.
Kwargs:
**kwargs: keys are the names of the new columns, values indicate
what the new column values will be.
Example:
diamonds >> transmute(x_plus_y=X.x + X.y, y_div_z=(X.y / X.z)) >> head(3)
y_div_z x_plus_y
0 1.637860 7.93
1 1.662338 7.73
2  1.761905      8.12 | Below is the instruction that describes the task:
### Input:
Creates columns and then returns those new columns and optionally specified
original columns from the DataFrame.
This works like `mutate`, but designed to discard the original columns used
to create the new ones.
Args:
*keep_columns: Column labels to keep. Can be string, symbolic, or
integer position.
Kwargs:
**kwargs: keys are the names of the new columns, values indicate
what the new column values will be.
Example:
diamonds >> transmute(x_plus_y=X.x + X.y, y_div_z=(X.y / X.z)) >> head(3)
y_div_z x_plus_y
0 1.637860 7.93
1 1.662338 7.73
2 1.761905 8.12
### Response:
def transmute(df, *keep_columns, **kwargs):
    """
    Creates columns and then returns those new columns and optionally specified
    original columns from the DataFrame.
    This works like `mutate`, but designed to discard the original columns used
    to create the new ones.
    Args:
        *keep_columns: Column labels to keep. Can be string, symbolic, or
            integer position.
    Kwargs:
        **kwargs: keys are the names of the new columns, values indicate
            what the new column values will be.
    Example:
        diamonds >> transmute(x_plus_y=X.x + X.y, y_div_z=(X.y / X.z)) >> head(3)
        y_div_z  x_plus_y
        0  1.637860      7.93
        1  1.662338      7.73
        2  1.761905      8.12
    """
    keep_cols = []
    for col in flatten(keep_columns):
        try:
            # Symbolic/pandas column objects expose their label via `.name`.
            keep_cols.append(col.name)
        # Narrowed from a bare `except:`: only the missing-attribute case
        # (plain strings and integer positions) is expected here, and
        # KeyboardInterrupt/SystemExit are no longer swallowed.
        except AttributeError:
            if isinstance(col, str):
                keep_cols.append(col)
            elif isinstance(col, int):
                keep_cols.append(df.columns[col])
    df = df.assign(**kwargs)
    # New columns first, then the retained originals.
    columns = list(kwargs.keys()) + keep_cols
    return df[columns]
def add_years(date_obj, years_int):
"""
addition of a number of years
:param BaseDateTuple d:
:param int years_int:
:return BaseDatetimeDate:
"""
y, m, d = BaseDateTuple.to_ymd(date_obj)
y += years_int
if not is_leap_year(y) and m == 2:
d = min(28, d)
return BaseDateTuple.from_ymd(y, m, d) | addition of a number of years
:param BaseDateTuple d:
:param int years_int:
:return BaseDatetimeDate: | Below is the instruction that describes the task:
### Input:
addition of a number of years
:param BaseDateTuple d:
:param int years_int:
:return BaseDatetimeDate:
### Response:
def add_years(date_obj, years_int):
    """
    addition of a number of years
    :param BaseDateTuple d:
    :param int years_int:
    :return BaseDatetimeDate:
    """
    year, month, day = BaseDateTuple.to_ymd(date_obj)
    year += years_int
    # Clamp Feb 29 to Feb 28 when the target year is not a leap year.
    if month == 2 and not is_leap_year(year):
        day = min(28, day)
    return BaseDateTuple.from_ymd(year, month, day)
def parse_samtools_stats(self):
        """ Find Samtools stats logs and parse their data """
        self.samtools_stats = dict()
        # Each log contributes one dict of "SN" (summary number) fields,
        # keyed by sample name.
        for f in self.find_log_files('samtools/stats'):
            parsed_data = dict()
            for line in f['f'].splitlines():
                # Only the "SN" summary-number lines are of interest.
                if not line.startswith("SN"):
                    continue
                sections = line.split("\t")
                # Field label: drop the trailing ':' and normalise spaces.
                field = sections[1].strip()[:-1]
                field = field.replace(' ', '_')
                value = float(sections[2].strip())
                parsed_data[field] = value
            if len(parsed_data) > 0:
                # Work out some percentages
                if 'raw_total_sequences' in parsed_data:
                    for k in list(parsed_data.keys()):
                        if k.startswith('reads_') and k != 'raw_total_sequences' and parsed_data['raw_total_sequences'] > 0:
                            parsed_data['{}_percent'.format(k)] = (parsed_data[k] / parsed_data['raw_total_sequences']) * 100
                if f['s_name'] in self.samtools_stats:
                    log.debug("Duplicate sample name found! Overwriting: {}"
                              .format(f['s_name']))
                self.add_data_source(f, section='stats')
                self.samtools_stats[f['s_name']] = parsed_data
        # Filter to strip out ignored sample names
        self.samtools_stats = self.ignore_samples(self.samtools_stats)
        if len(self.samtools_stats) > 0:
            # Write parsed report data to a file
            self.write_data_file(self.samtools_stats, 'multiqc_samtools_stats')
            # General Stats Table
            stats_headers = OrderedDict()
            stats_headers['error_rate'] = {
                'title': 'Error rate',
                'description': 'Error rate: mismatches (NM) / bases mapped (CIGAR)',
                'min': 0,
                'max': 100,
                'suffix': '%',
                'scale': 'OrRd',
                'format': '{:,.2f}',
                'modify': lambda x: x * 100.0
            }
            stats_headers['non-primary_alignments'] = {
                'title': '{} Non-Primary'.format(config.read_count_prefix),
                'description': 'Non-primary alignments ({})'.format(config.read_count_desc),
                'min': 0,
                'scale': 'PuBu',
                'modify': lambda x: x * config.read_count_multiplier,
                'shared_key': 'read_count'
            }
            stats_headers['reads_mapped'] = {
                'title': '{} Reads Mapped'.format(config.read_count_prefix),
                'description': 'Reads Mapped in the bam file ({})'.format(config.read_count_desc),
                'min': 0,
                'modify': lambda x: x * config.read_count_multiplier,
                'shared_key': 'read_count'
            }
            stats_headers['reads_mapped_percent'] = {
                'title': '% Mapped',
                'description': '% Mapped Reads',
                'max': 100,
                'min': 0,
                'suffix': '%',
                'scale': 'RdYlGn'
            }
            stats_headers['reads_properly_paired_percent'] = {
                'title': '% Proper Pairs',
                'description': '% Properly Paired Reads',
                'max': 100,
                'min': 0,
                'suffix': '%',
                'scale': 'RdYlGn',
                # Hidden when no sample has paired reads at all.
                'hidden': True if (max([x['reads_mapped_and_paired'] for x in self.samtools_stats.values()]) == 0) else False
            }
            stats_headers['reads_MQ0_percent'] = {
                'title': '% MapQ 0 Reads',
                'description': '% of Reads that are Ambiguously Placed (MapQ=0)',
                'max': 100,
                'min': 0,
                'suffix': '%',
                'scale': 'OrRd',
                'hidden': True
            }
            stats_headers['raw_total_sequences'] = {
                'title': '{} Total seqs'.format(config.read_count_prefix),
                'description': 'Total sequences in the bam file ({})'.format(config.read_count_desc),
                'min': 0,
                'modify': lambda x: x * config.read_count_multiplier,
                'shared_key': 'read_count'
            }
            self.general_stats_addcols(self.samtools_stats, stats_headers, 'Samtools Stats')
            # Make bargraph plot of mapped/unmapped reads
            self.alignment_section(self.samtools_stats)
            # Make dot plot of counts
            keys = OrderedDict()
            # Shared column templates: counts are displayed in millions.
            reads = {
                'min': 0,
                'modify': lambda x: float(x) / 1000000.0,
                'suffix': 'M reads',
                'decimalPlaces': 2,
                'shared_key': 'read_count'
            }
            bases = {
                'min': 0,
                'modify': lambda x: float(x) / 1000000.0,
                'suffix': 'M bases',
                'decimalPlaces': 2,
                'shared_key': 'base_count'
            }
            keys['raw_total_sequences'] = dict(reads, **{'title': 'Total sequences'})
            keys['reads_mapped_and_paired'] = dict(reads, **{'title': 'Mapped & paired', 'description': 'Paired-end technology bit set + both mates mapped' })
            keys['reads_properly_paired'] = dict(reads, **{'title': 'Properly paired', 'description': 'Proper-pair bit set'})
            keys['reads_duplicated'] = dict(reads, **{'title': 'Duplicated', 'description': 'PCR or optical duplicate bit set'})
            keys['reads_QC_failed'] = dict(reads, **{'title': 'QC Failed'})
            keys['reads_MQ0'] = dict(reads, **{'title': 'Reads MQ0', 'description': 'Reads mapped and MQ=0'})
            keys['bases_mapped_(cigar)'] = dict(bases, **{'title': 'Mapped bases (CIGAR)', 'description': 'Mapped bases (CIGAR)'})
            keys['bases_trimmed'] = dict(bases, **{'title': 'Bases Trimmed'})
            keys['bases_duplicated'] = dict(bases, **{'title': 'Duplicated bases'})
            keys['pairs_on_different_chromosomes'] = dict(reads, **{'title': 'Diff chromosomes', 'description': 'Pairs on different chromosomes'})
            keys['pairs_with_other_orientation'] = dict(reads, **{'title': 'Other orientation', 'description': 'Pairs with other orientation'})
            keys['inward_oriented_pairs'] = dict(reads, **{'title': 'Inward pairs', 'description': 'Inward oriented pairs'})
            keys['outward_oriented_pairs'] = dict(reads, **{'title': 'Outward pairs', 'description': 'Outward oriented pairs'})
            self.add_section (
                name = 'Alignment metrics',
                anchor = 'samtools-stats',
                description = "This module parses the output from <code>samtools stats</code>. All numbers in millions.",
                plot = beeswarm.plot(self.samtools_stats, keys, {'id': 'samtools-stats-dp'})
            )
        # Return the number of logs that were found
        return len(self.samtools_stats) | Find Samtools stats logs and parse their data | Below is the instruction that describes the task:
### Input:
Find Samtools stats logs and parse their data
### Response:
def parse_samtools_stats(self):
    """ Find Samtools stats logs and parse their data """
    # Maps sample name -> {metric_name: float} for every `samtools stats`
    # log discovered.  `log`, `config`, `OrderedDict` and `beeswarm` are
    # module-level names from the enclosing MultiQC module.
    self.samtools_stats = dict()
    for f in self.find_log_files('samtools/stats'):
        parsed_data = dict()
        for line in f['f'].splitlines():
            # Only the summary-number ("SN") lines carry the metrics.
            if not line.startswith("SN"):
                continue
            sections = line.split("\t")
            # Metric name: drop the trailing ':' and snake_case the spaces.
            field = sections[1].strip()[:-1]
            field = field.replace(' ', '_')
            value = float(sections[2].strip())
            parsed_data[field] = value
        if len(parsed_data) > 0:
            # Work out some percentages
            # Derive `<metric>_percent` for every reads_* metric, relative
            # to the total sequence count.
            if 'raw_total_sequences' in parsed_data:
                for k in list(parsed_data.keys()):
                    if k.startswith('reads_') and k != 'raw_total_sequences' and parsed_data['raw_total_sequences'] > 0:
                        parsed_data['{}_percent'.format(k)] = (parsed_data[k] / parsed_data['raw_total_sequences']) * 100
            if f['s_name'] in self.samtools_stats:
                log.debug("Duplicate sample name found! Overwriting: {}"
                          .format(f['s_name']))
            self.add_data_source(f, section='stats')
            self.samtools_stats[f['s_name']] = parsed_data
    # Filter to strip out ignored sample names
    self.samtools_stats = self.ignore_samples(self.samtools_stats)
    if len(self.samtools_stats) > 0:
        # Write parsed report data to a file
        self.write_data_file(self.samtools_stats, 'multiqc_samtools_stats')
        # General Stats Table
        stats_headers = OrderedDict()
        stats_headers['error_rate'] = {
            'title': 'Error rate',
            'description': 'Error rate: mismatches (NM) / bases mapped (CIGAR)',
            'min': 0,
            'max': 100,
            'suffix': '%',
            'scale': 'OrRd',
            'format': '{:,.2f}',
            'modify': lambda x: x * 100.0
        }
        stats_headers['non-primary_alignments'] = {
            'title': '{} Non-Primary'.format(config.read_count_prefix),
            'description': 'Non-primary alignments ({})'.format(config.read_count_desc),
            'min': 0,
            'scale': 'PuBu',
            'modify': lambda x: x * config.read_count_multiplier,
            'shared_key': 'read_count'
        }
        stats_headers['reads_mapped'] = {
            'title': '{} Reads Mapped'.format(config.read_count_prefix),
            'description': 'Reads Mapped in the bam file ({})'.format(config.read_count_desc),
            'min': 0,
            'modify': lambda x: x * config.read_count_multiplier,
            'shared_key': 'read_count'
        }
        stats_headers['reads_mapped_percent'] = {
            'title': '% Mapped',
            'description': '% Mapped Reads',
            'max': 100,
            'min': 0,
            'suffix': '%',
            'scale': 'RdYlGn'
        }
        stats_headers['reads_properly_paired_percent'] = {
            'title': '% Proper Pairs',
            'description': '% Properly Paired Reads',
            'max': 100,
            'min': 0,
            'suffix': '%',
            'scale': 'RdYlGn',
            # NOTE(review): this assumes every parsed sample has a
            # 'reads_mapped_and_paired' key -- a sample missing it would
            # raise KeyError here.  Confirm samtools always emits that SN
            # line.
            'hidden': True if (max([x['reads_mapped_and_paired'] for x in self.samtools_stats.values()]) == 0) else False
        }
        stats_headers['reads_MQ0_percent'] = {
            'title': '% MapQ 0 Reads',
            'description': '% of Reads that are Ambiguously Placed (MapQ=0)',
            'max': 100,
            'min': 0,
            'suffix': '%',
            'scale': 'OrRd',
            'hidden': True
        }
        stats_headers['raw_total_sequences'] = {
            'title': '{} Total seqs'.format(config.read_count_prefix),
            'description': 'Total sequences in the bam file ({})'.format(config.read_count_desc),
            'min': 0,
            'modify': lambda x: x * config.read_count_multiplier,
            'shared_key': 'read_count'
        }
        self.general_stats_addcols(self.samtools_stats, stats_headers, 'Samtools Stats')
        # Make bargraph plot of mapped/unmapped reads
        self.alignment_section(self.samtools_stats)
        # Make dot plot of counts
        # `reads` / `bases` are shared header templates for the beeswarm
        # columns; dict(template, **overrides) produces per-key configs.
        keys = OrderedDict()
        reads = {
            'min': 0,
            'modify': lambda x: float(x) / 1000000.0,
            'suffix': 'M reads',
            'decimalPlaces': 2,
            'shared_key': 'read_count'
        }
        bases = {
            'min': 0,
            'modify': lambda x: float(x) / 1000000.0,
            'suffix': 'M bases',
            'decimalPlaces': 2,
            'shared_key': 'base_count'
        }
        keys['raw_total_sequences'] = dict(reads, **{'title': 'Total sequences'})
        keys['reads_mapped_and_paired'] = dict(reads, **{'title': 'Mapped & paired', 'description': 'Paired-end technology bit set + both mates mapped' })
        keys['reads_properly_paired'] = dict(reads, **{'title': 'Properly paired', 'description': 'Proper-pair bit set'})
        keys['reads_duplicated'] = dict(reads, **{'title': 'Duplicated', 'description': 'PCR or optical duplicate bit set'})
        keys['reads_QC_failed'] = dict(reads, **{'title': 'QC Failed'})
        keys['reads_MQ0'] = dict(reads, **{'title': 'Reads MQ0', 'description': 'Reads mapped and MQ=0'})
        keys['bases_mapped_(cigar)'] = dict(bases, **{'title': 'Mapped bases (CIGAR)', 'description': 'Mapped bases (CIGAR)'})
        keys['bases_trimmed'] = dict(bases, **{'title': 'Bases Trimmed'})
        keys['bases_duplicated'] = dict(bases, **{'title': 'Duplicated bases'})
        keys['pairs_on_different_chromosomes'] = dict(reads, **{'title': 'Diff chromosomes', 'description': 'Pairs on different chromosomes'})
        keys['pairs_with_other_orientation'] = dict(reads, **{'title': 'Other orientation', 'description': 'Pairs with other orientation'})
        keys['inward_oriented_pairs'] = dict(reads, **{'title': 'Inward pairs', 'description': 'Inward oriented pairs'})
        keys['outward_oriented_pairs'] = dict(reads, **{'title': 'Outward pairs', 'description': 'Outward oriented pairs'})
        self.add_section (
            name = 'Alignment metrics',
            anchor = 'samtools-stats',
            description = "This module parses the output from <code>samtools stats</code>. All numbers in millions.",
            plot = beeswarm.plot(self.samtools_stats, keys, {'id': 'samtools-stats-dp'})
        )
    # Return the number of logs that were found
    return len(self.samtools_stats)
def set_load_power(self, power_watts):
"""
Changes load to power mode and sets power value.
Rounds to nearest 0.1W.
:param power_watts: Power in Watts (0-200)
:return:
"""
new_val = int(round(power_watts * 10))
if not 0 <= new_val <= 2000:
raise ValueError("Load Power should be between 0-200 W")
self._load_mode = self.SET_TYPE_POWER
self._load_value = new_val
self.__set_parameters() | Changes load to power mode and sets power value.
Rounds to nearest 0.1W.
:param power_watts: Power in Watts (0-200)
:return: | Below is the the instruction that describes the task:
### Input:
Changes load to power mode and sets power value.
Rounds to nearest 0.1W.
:param power_watts: Power in Watts (0-200)
:return:
### Response:
def set_load_power(self, power_watts):
    """
    Switch the load into power mode and apply the requested power.

    The value is rounded to the nearest 0.1 W before being sent to the
    device.

    :param power_watts: Power in Watts (0-200)
    :return: None
    """
    # The device protocol expects tenths of a watt as an integer.
    tenths = int(round(power_watts * 10))
    if not 0 <= tenths <= 2000:
        raise ValueError("Load Power should be between 0-200 W")
    self._load_mode = self.SET_TYPE_POWER
    self._load_value = tenths
    self.__set_parameters()
def _handle_resize(self, signum=None, frame=None):
'Tries to catch resize signals sent from the terminal.'
w, h = utils.get_terminal_size()
self.term_width = w | Tries to catch resize signals sent from the terminal. | Below is the the instruction that describes the task:
### Input:
Tries to catch resize signals sent from the terminal.
### Response:
def _handle_resize(self, signum=None, frame=None):
    """Signal handler: refresh the cached terminal width after a resize.

    *signum* and *frame* are accepted (and ignored) so this can be
    registered directly as a signal handler.
    """
    width, _height = utils.get_terminal_size()
    self.term_width = width
def as_unicode(self):
"""Unicode string JID representation.
:return: JID as Unicode string."""
result = self.domain
if self.local:
result = self.local + u'@' + result
if self.resource:
result = result + u'/' + self.resource
if not JID.cache.has_key(result):
JID.cache[result] = self
return result | Unicode string JID representation.
:return: JID as Unicode string. | Below is the the instruction that describes the task:
### Input:
Unicode string JID representation.
:return: JID as Unicode string.
### Response:
def as_unicode(self):
    """Unicode string JID representation.

    Builds ``local@domain/resource``, omitting the local part and/or the
    resource when they are not set, and memoizes this instance in the
    class-level ``JID.cache`` keyed by the resulting string.

    :return: JID as Unicode string."""
    result = self.domain
    if self.local:
        result = self.local + u'@' + result
    if self.resource:
        result = result + u'/' + self.resource
    # dict.has_key() was removed in Python 3; the `in` operator is the
    # portable (and faster) spelling of the same membership test.
    if result not in JID.cache:
        JID.cache[result] = self
    return result
def as_dict(self):
"""
Json-serializable dict representation of BandStructureSymmLine.
"""
d = {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"lattice_rec": self.lattice_rec.as_dict(), "efermi": self.efermi,
"kpoints": []}
# kpoints are not kpoint objects dicts but are frac coords (this makes
# the dict smaller and avoids the repetition of the lattice
for k in self.kpoints:
d["kpoints"].append(k.as_dict()["fcoords"])
d["branches"] = self.branches
d["bands"] = {str(int(spin)): self.bands[spin].tolist()
for spin in self.bands}
d["is_metal"] = self.is_metal()
vbm = self.get_vbm()
d["vbm"] = {"energy": vbm["energy"],
"kpoint_index": vbm["kpoint_index"],
"band_index": {str(int(spin)): vbm["band_index"][spin]
for spin in vbm["band_index"]},
'projections': {str(spin): v.tolist() for spin, v in vbm[
'projections'].items()}}
cbm = self.get_cbm()
d['cbm'] = {'energy': cbm['energy'],
'kpoint_index': cbm['kpoint_index'],
'band_index': {str(int(spin)): cbm['band_index'][spin]
for spin in cbm['band_index']},
'projections': {str(spin): v.tolist() for spin, v in cbm[
'projections'].items()}}
d['band_gap'] = self.get_band_gap()
d['labels_dict'] = {}
d['is_spin_polarized'] = self.is_spin_polarized
# MongoDB does not accept keys starting with $. Add a blanck space to fix the problem
for c in self.labels_dict:
mongo_key = c if not c.startswith("$") else " " + c
d['labels_dict'][mongo_key] = self.labels_dict[c].as_dict()[
'fcoords']
if len(self.projections) != 0:
d['structure'] = self.structure.as_dict()
d['projections'] = {str(int(spin)): np.array(v).tolist()
for spin, v in self.projections.items()}
return d | Json-serializable dict representation of BandStructureSymmLine. | Below is the the instruction that describes the task:
### Input:
Json-serializable dict representation of BandStructureSymmLine.
### Response:
def as_dict(self):
    """
    Json-serializable dict representation of BandStructureSymmLine.
    """
    d = {"@module": self.__class__.__module__,
         "@class": self.__class__.__name__,
         "lattice_rec": self.lattice_rec.as_dict(), "efermi": self.efermi,
         "kpoints": []}
    # kpoints are not kpoint objects dicts but are frac coords (this makes
    # the dict smaller and avoids the repetition of the lattice
    for k in self.kpoints:
        d["kpoints"].append(k.as_dict()["fcoords"])
    d["branches"] = self.branches
    # Spin keys are stringified ints so the mapping is JSON-serializable.
    d["bands"] = {str(int(spin)): self.bands[spin].tolist()
                  for spin in self.bands}
    d["is_metal"] = self.is_metal()
    # Valence-band maximum: energy, k-point indices, per-spin band indices
    # and projections (arrays converted to nested lists).
    vbm = self.get_vbm()
    d["vbm"] = {"energy": vbm["energy"],
                "kpoint_index": vbm["kpoint_index"],
                "band_index": {str(int(spin)): vbm["band_index"][spin]
                               for spin in vbm["band_index"]},
                'projections': {str(spin): v.tolist() for spin, v in vbm[
                    'projections'].items()}}
    # Conduction-band minimum, same structure as "vbm" above.
    cbm = self.get_cbm()
    d['cbm'] = {'energy': cbm['energy'],
                'kpoint_index': cbm['kpoint_index'],
                'band_index': {str(int(spin)): cbm['band_index'][spin]
                               for spin in cbm['band_index']},
                'projections': {str(spin): v.tolist() for spin, v in cbm[
                    'projections'].items()}}
    d['band_gap'] = self.get_band_gap()
    d['labels_dict'] = {}
    d['is_spin_polarized'] = self.is_spin_polarized
    # MongoDB does not accept keys starting with $. Add a blank space to fix the problem
    for c in self.labels_dict:
        mongo_key = c if not c.startswith("$") else " " + c
        d['labels_dict'][mongo_key] = self.labels_dict[c].as_dict()[
            'fcoords']
    # Projections (and the structure they refer to) are only serialized
    # when present, as they can be large.
    if len(self.projections) != 0:
        d['structure'] = self.structure.as_dict()
        d['projections'] = {str(int(spin)): np.array(v).tolist()
                            for spin, v in self.projections.items()}
    return d
def get_url_kwargs(self, request_kwargs=None, **kwargs):
"""
If request_kwargs is not specified, self.kwargs is used instead.
If 'object' is one of the kwargs passed. Replaces it with
the value of 'self.slug_field' on the given object.
"""
if not request_kwargs:
request_kwargs = getattr(self, 'kwargs', {})
kwargs = super(ModelCMSView, self).get_url_kwargs(request_kwargs,
**kwargs)
obj = kwargs.pop('object', None)
if obj:
kwargs[self.slug_url_kwarg] = getattr(obj, self.slug_field, None)
elif self.slug_url_kwarg in request_kwargs:
kwargs[self.slug_url_kwarg] = request_kwargs[self.slug_url_kwarg]
return kwargs | If request_kwargs is not specified, self.kwargs is used instead.
If 'object' is one of the kwargs passed. Replaces it with
the value of 'self.slug_field' on the given object. | Below is the the instruction that describes the task:
### Input:
If request_kwargs is not specified, self.kwargs is used instead.
If 'object' is one of the kwargs passed. Replaces it with
the value of 'self.slug_field' on the given object.
### Response:
def get_url_kwargs(self, request_kwargs=None, **kwargs):
    """
    Build the URL kwargs for this view.

    Falls back to ``self.kwargs`` when *request_kwargs* is falsy.  When an
    ``object`` kwarg is present it is replaced by that object's
    ``self.slug_field`` value, stored under ``self.slug_url_kwarg``;
    otherwise any slug already present in *request_kwargs* is carried over.
    """
    request_kwargs = request_kwargs or getattr(self, 'kwargs', {})
    kwargs = super(ModelCMSView, self).get_url_kwargs(request_kwargs,
                                                      **kwargs)
    instance = kwargs.pop('object', None)
    if instance:
        kwargs[self.slug_url_kwarg] = getattr(instance, self.slug_field, None)
    elif self.slug_url_kwarg in request_kwargs:
        kwargs[self.slug_url_kwarg] = request_kwargs[self.slug_url_kwarg]
    return kwargs
def sum_data(filter_data, is_bw):
""" caculate sum"""
for index in range(len(filter_data) - 1):
if filter_data[index][0] > filter_data[index + 1][0]:
max_index = index + 1
break
else:
max_index = len(filter_data)
print("max_index: ", max_index + 1)
num_jobs = int(round(len(filter_data) * 1.0 / max_index))
print("num_jobs: ", num_jobs)
dict_time = Counter(filter_data[:, 0])
list_sum = []
for time_index in range(1, max_index + 1):
if dict_time.get(time_index * 1000, 0) != num_jobs:
print("[WARNING] Time %d, number of data %d != num_jobs %d" % (
time_index * 1000, dict_time.get(time_index * 1000, 0), num_jobs
))
continue
filter_mask = (filter_data[:, 0] == time_index * 1000)
sum_rst = np.sum(filter_data[filter_mask][:, 1])
if is_bw:
sum_rst = sum_rst / 1024
list_sum.append([time_index, sum_rst])
return np.array(list_sum) | caculate sum | Below is the the instruction that describes the task:
### Input:
caculate sum
### Response:
def sum_data(filter_data, is_bw):
    """Aggregate per-job samples into one total per timestep.

    *filter_data* is a 2-column array of ``(timestamp_ms, value)`` rows,
    concatenated job by job; the number of timesteps is detected from the
    first point where the timestamp column wraps around.  When *is_bw* is
    true the summed values are divided by 1024 (KiB -> MiB).
    """
    # Detect where the timestamp sequence wraps: that index marks one
    # job's worth of samples.  No wrap means there is a single job.
    max_index = len(filter_data)
    for pos in range(len(filter_data) - 1):
        if filter_data[pos][0] > filter_data[pos + 1][0]:
            max_index = pos + 1
            break
    print("max_index: ", max_index + 1)
    num_jobs = int(round(len(filter_data) * 1.0 / max_index))
    print("num_jobs: ", num_jobs)
    dict_time = Counter(filter_data[:, 0])
    list_sum = []
    for time_index in range(1, max_index + 1):
        # Skip (and warn about) timesteps where some job has no sample.
        if dict_time.get(time_index * 1000, 0) != num_jobs:
            print("[WARNING] Time %d, number of data %d != num_jobs %d" % (
                time_index * 1000, dict_time.get(time_index * 1000, 0), num_jobs
            ))
            continue
        mask = (filter_data[:, 0] == time_index * 1000)
        total = np.sum(filter_data[mask][:, 1])
        if is_bw:
            total = total / 1024
        list_sum.append([time_index, total])
    return np.array(list_sum)
def message(message_type, payload, payload_length):
""" Build a message. """
return packet.build(
Container(
type=message_type,
id=1,
refer=0,
sent=Container(
secs=0,
usecs=0
),
recv=Container(
secs=0,
usecs=0
),
payload_length=payload_length,
payload=payload
)
) | Build a message. | Below is the the instruction that describes the task:
### Input:
Build a message.
### Response:
def message(message_type, payload, payload_length):
    """Serialize a protocol message of the given type with *payload*."""
    # Timestamps are zeroed here; they are filled in elsewhere.
    sent_ts = Container(secs=0, usecs=0)
    recv_ts = Container(secs=0, usecs=0)
    envelope = Container(
        type=message_type,
        id=1,
        refer=0,
        sent=sent_ts,
        recv=recv_ts,
        payload_length=payload_length,
        payload=payload
    )
    return packet.build(envelope)
def show_portindex_interface_info_output_show_portindex_interface_show_portindex_port_type(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
show_portindex_interface_info = ET.Element("show_portindex_interface_info")
config = show_portindex_interface_info
output = ET.SubElement(show_portindex_interface_info, "output")
show_portindex_interface = ET.SubElement(output, "show-portindex-interface")
portsgroup_rbridgeid_key = ET.SubElement(show_portindex_interface, "portsgroup-rbridgeid")
portsgroup_rbridgeid_key.text = kwargs.pop('portsgroup_rbridgeid')
show_portindex = ET.SubElement(show_portindex_interface, "show-portindex")
port_index_key = ET.SubElement(show_portindex, "port-index")
port_index_key.text = kwargs.pop('port_index')
port_type = ET.SubElement(show_portindex, "port-type")
port_type.text = kwargs.pop('port_type')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def show_portindex_interface_info_output_show_portindex_interface_show_portindex_port_type(self, **kwargs):
    """Auto Generated Code

    Builds the ``show_portindex_interface_info`` request XML from the
    ``portsgroup_rbridgeid``, ``port_index`` and ``port_type`` keyword
    arguments and hands the element tree to the callback
    (``self._callback`` unless a ``callback`` kwarg is supplied).
    """
    root = ET.Element("show_portindex_interface_info")
    output_el = ET.SubElement(root, "output")
    iface = ET.SubElement(output_el, "show-portindex-interface")
    rbridge = ET.SubElement(iface, "portsgroup-rbridgeid")
    rbridge.text = kwargs.pop('portsgroup_rbridgeid')
    portindex = ET.SubElement(iface, "show-portindex")
    index_el = ET.SubElement(portindex, "port-index")
    index_el.text = kwargs.pop('port_index')
    ptype = ET.SubElement(portindex, "port-type")
    ptype.text = kwargs.pop('port_type')
    callback = kwargs.pop('callback', self._callback)
    return callback(root)
def whitespace_around_comma(logical_line):
r"""Avoid extraneous whitespace after a comma or a colon.
Note: these checks are disabled by default
Okay: a = (1, 2)
E241: a = (1, 2)
E242: a = (1,\t2)
"""
line = logical_line
for m in WHITESPACE_AFTER_COMMA_REGEX.finditer(line):
found = m.start() + 1
if '\t' in m.group():
yield found, "E242 tab after '%s'" % m.group()[0]
else:
yield found, "E241 multiple spaces after '%s'" % m.group()[0] | r"""Avoid extraneous whitespace after a comma or a colon.
Note: these checks are disabled by default
Okay: a = (1, 2)
E241: a = (1, 2)
E242: a = (1,\t2) | Below is the the instruction that describes the task:
### Input:
r"""Avoid extraneous whitespace after a comma or a colon.
Note: these checks are disabled by default
Okay: a = (1, 2)
E241: a = (1, 2)
E242: a = (1,\t2)
### Response:
def whitespace_around_comma(logical_line):
    r"""Avoid extraneous whitespace after a comma or a colon.
    Note: these checks are disabled by default
    Okay: a = (1, 2)
    E241: a = (1, 2)
    E242: a = (1,\t2)
    """
    # Each regex match is a separator followed by its trailing whitespace;
    # report a tab as E242, multiple spaces as E241.
    for match in WHITESPACE_AFTER_COMMA_REGEX.finditer(logical_line):
        whitespace = match.group()
        offset = match.start() + 1
        if '\t' in whitespace:
            yield offset, "E242 tab after '%s'" % whitespace[0]
        else:
            yield offset, "E241 multiple spaces after '%s'" % whitespace[0]
def energy_data():
"""
Connects to the database and loads Readings for device 8.
"""
cur = db.cursor().execute("""SELECT timestamp, current FROM Readings""")
original = TimeSeries()
original.initialize_from_sql_cursor(cur)
original.normalize("day", fusionMethod = "sum")
return itty.Response(json.dumps(original, cls=PycastEncoder), content_type='application/json') | Connects to the database and loads Readings for device 8. | Below is the the instruction that describes the task:
### Input:
Connects to the database and loads Readings for device 8.
### Response:
def energy_data():
    """
    Connects to the database and loads Readings for device 8.
    """
    cursor = db.cursor().execute("""SELECT timestamp, current FROM Readings""")
    series = TimeSeries()
    series.initialize_from_sql_cursor(cursor)
    # Collapse the raw readings to one summed value per day.
    series.normalize("day", fusionMethod = "sum")
    return itty.Response(json.dumps(series, cls=PycastEncoder), content_type='application/json')
def status(self, *msg):
"""
Prints a status message
"""
label = colors.yellow("STATUS")
self._msg(label, *msg) | Prints a status message | Below is the the instruction that describes the task:
### Input:
Prints a status message
### Response:
def status(self, *msg):
    """Emit *msg* through the message sink, tagged with a yellow STATUS label."""
    self._msg(colors.yellow("STATUS"), *msg)
def is_scalar(self):
"""
:return:
:rtype: bool
"""
return \
isinstance(self._element_template, Boolean) or \
isinstance(self._element_template, Float) or \
isinstance(self._element_template, Integer) or \
isinstance(self._element_template, String) | :return:
:rtype: bool | Below is the the instruction that describes the task:
### Input:
:return:
:rtype: bool
### Response:
def is_scalar(self):
    """
    Whether the wrapped element template is a scalar leaf type.

    :return: True if the template is a Boolean, Float, Integer or String.
    :rtype: bool
    """
    # isinstance accepts a tuple of classes: one call replaces the four
    # chained checks with identical semantics.
    return isinstance(self._element_template,
                      (Boolean, Float, Integer, String))
def toc_html(self):
"""Return the HTML for the current TOC.
This expects the `_toc` attribute to have been set on this instance.
"""
if self._toc is None:
return None
def indent():
return ' ' * (len(h_stack) - 1)
lines = []
h_stack = [0] # stack of header-level numbers
for level, id, name in self._toc:
if level > h_stack[-1]:
lines.append("%s<ul>" % indent())
h_stack.append(level)
elif level == h_stack[-1]:
lines[-1] += "</li>"
else:
while level < h_stack[-1]:
h_stack.pop()
if not lines[-1].endswith("</li>"):
lines[-1] += "</li>"
lines.append("%s</ul></li>" % indent())
lines.append('%s<li><a href="#%s">%s</a>' % (
indent(), id, name))
while len(h_stack) > 1:
h_stack.pop()
if not lines[-1].endswith("</li>"):
lines[-1] += "</li>"
lines.append("%s</ul>" % indent())
return '\n'.join(lines) + '\n' | Return the HTML for the current TOC.
This expects the `_toc` attribute to have been set on this instance. | Below is the the instruction that describes the task:
### Input:
Return the HTML for the current TOC.
This expects the `_toc` attribute to have been set on this instance.
### Response:
def toc_html(self):
    """Render the collected table of contents as nested HTML lists.

    Requires ``self._toc`` (a list of ``(level, id, name)`` tuples) to
    have been populated; returns None when it has not.
    """
    if self._toc is None:
        return None

    def pad():
        # One space of indent per currently-open list level.
        return ' ' * (len(open_levels) - 1)

    html = []
    open_levels = [0]  # stack of header levels with an open <ul>
    for level, anchor, title in self._toc:
        if level > open_levels[-1]:
            # Deeper heading: open a nested list.
            html.append("%s<ul>" % pad())
            open_levels.append(level)
        elif level == open_levels[-1]:
            # Sibling heading: close the previous item.
            html[-1] += "</li>"
        else:
            # Shallower heading: unwind the stack, closing lists.
            while level < open_levels[-1]:
                open_levels.pop()
                if not html[-1].endswith("</li>"):
                    html[-1] += "</li>"
                html.append("%s</ul></li>" % pad())
        html.append('%s<li><a href="#%s">%s</a>' % (
            pad(), anchor, title))
    # Close whatever is still open.
    while len(open_levels) > 1:
        open_levels.pop()
        if not html[-1].endswith("</li>"):
            html[-1] += "</li>"
        html.append("%s</ul>" % pad())
    return '\n'.join(html) + '\n'
def find(self, package, **kwargs):
"""
Find method.
Args:
package (str): package to find.
**kwargs (): additional keyword arguments.
Returns:
PackageSpec: the PackageSpec corresponding to the package, or None.
"""
if not exists(package):
return None
name, path = None, None
enforce_init = kwargs.pop('enforce_init', True)
if isdir(package):
if isfile(join(package, '__init__.py')) or not enforce_init:
name, path = basename(package), package
elif isfile(package) and package.endswith('.py'):
name, path = splitext(basename(package))[0], package
if name and path:
return PackageSpec(name, path)
return None | Find method.
Args:
package (str): package to find.
**kwargs (): additional keyword arguments.
Returns:
PackageSpec: the PackageSpec corresponding to the package, or None. | Below is the the instruction that describes the task:
### Input:
Find method.
Args:
package (str): package to find.
**kwargs (): additional keyword arguments.
Returns:
PackageSpec: the PackageSpec corresponding to the package, or None.
### Response:
def find(self, package, **kwargs):
    """
    Resolve *package* (a filesystem path) to a PackageSpec.

    Accepts either a directory (which must contain ``__init__.py`` unless
    ``enforce_init=False`` is passed) or a ``.py`` file.

    Args:
        package (str): package to find.
        **kwargs (): additional keyword arguments.

    Returns:
        PackageSpec: the PackageSpec corresponding to the package, or None.
    """
    if not exists(package):
        return None
    name = path = None
    enforce_init = kwargs.pop('enforce_init', True)
    if isdir(package):
        if isfile(join(package, '__init__.py')) or not enforce_init:
            name, path = basename(package), package
    elif isfile(package) and package.endswith('.py'):
        name, path = splitext(basename(package))[0], package
    return PackageSpec(name, path) if name and path else None
def html_to_rgb(colorstring):
""" convert #RRGGBB to an (R, G, B) tuple """
colorstring = colorstring.strip()
if colorstring[0] == '#':
colorstring = colorstring[1:]
if len(colorstring) != 6:
raise ValueError("input #%s is not in #RRGGBB format" % colorstring)
r, g, b = colorstring[:2], colorstring[2:4], colorstring[4:]
r, g, b = [int(n, 16) for n in (r, g, b)]
return (r, g, b, 255) | convert #RRGGBB to an (R, G, B) tuple | Below is the the instruction that describes the task:
### Input:
convert #RRGGBB to an (R, G, B) tuple
### Response:
def html_to_rgb(colorstring):
    """Convert an ``#RRGGBB`` (or bare ``RRGGBB``) string to an RGBA tuple.

    The alpha channel is always 255.
    """
    colorstring = colorstring.strip()
    if colorstring[0] == '#':
        colorstring = colorstring[1:]
    if len(colorstring) != 6:
        raise ValueError("input #%s is not in #RRGGBB format" % colorstring)
    # Parse the three hex byte pairs starting at offsets 0, 2 and 4.
    r, g, b = (int(colorstring[i:i + 2], 16) for i in (0, 2, 4))
    return (r, g, b, 255)
def recursive_path(pack, path):
"""Find paths recursively"""
matches = []
for root, _, filenames in os.walk(os.path.join(pack, path)):
for filename in filenames:
matches.append(os.path.join(root, filename)[len(pack) + 1:])
return matches | Find paths recursively | Below is the the instruction that describes the task:
### Input:
Find paths recursively
### Response:
def recursive_path(pack, path):
    """Collect every file under ``pack/path``, as paths relative to *pack*."""
    prefix_len = len(pack) + 1  # strip "<pack>/" from each result
    found = []
    for root, _dirs, files in os.walk(os.path.join(pack, path)):
        found.extend(os.path.join(root, fname)[prefix_len:] for fname in files)
    return found
def lazy_load_font(font_size=default_font_size):
"""
Lazy loading font according to system platform
"""
if font_size not in _font_cache:
if _platform.startswith("darwin"):
font_path = "/Library/Fonts/Arial.ttf"
elif _platform.startswith("linux"):
font_path = "/usr/share/fonts/truetype/ubuntu-font-family/UbuntuMono-R.ttf"
elif _platform.startswith("win32"):
font_path = "C:\\Windows\\Fonts\\arial.ttf"
_font_cache[font_size] = ImageFont.truetype(font_path, font_size)
return _font_cache[font_size] | Lazy loading font according to system platform | Below is the the instruction that describes the task:
### Input:
Lazy loading font according to system platform
### Response:
def lazy_load_font(font_size=default_font_size):
    """
    Lazy loading font according to system platform
    """
    # Caches one PIL ImageFont per size in the module-level `_font_cache`
    # so each TTF file is parsed at most once per size.  `_platform` is a
    # module-level platform string (presumably sys.platform -- confirm).
    if font_size not in _font_cache:
        # Pick a platform-specific font path.
        # NOTE(review): if none of the prefixes match (e.g. *BSD),
        # `font_path` is never bound and the truetype() call below raises
        # NameError -- presumably unsupported platforms are out of scope.
        if _platform.startswith("darwin"):
            font_path = "/Library/Fonts/Arial.ttf"
        elif _platform.startswith("linux"):
            font_path = "/usr/share/fonts/truetype/ubuntu-font-family/UbuntuMono-R.ttf"
        elif _platform.startswith("win32"):
            font_path = "C:\\Windows\\Fonts\\arial.ttf"
        _font_cache[font_size] = ImageFont.truetype(font_path, font_size)
    return _font_cache[font_size]
def main(arguments, toxinidir=None):
"ctox: tox with conda."
try: # pragma: no cover
# Exit on broken pipe.
import signal
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
except AttributeError: # pragma: no cover
# SIGPIPE is not available on Windows.
pass
try:
import sys
sys.exit(ctox(arguments, toxinidir))
except CalledProcessError as c:
print(c.output)
return 1
except NotImplementedError as e:
gh = "https://github.com/hayd/ctox/issues"
from colorama import Style
cprint(Style.BRIGHT + str(e), 'err')
cprint("If this is a valid tox.ini substitution, please open an issue on\n"
"github and request support: %s." % gh, 'warn')
return 1
except KeyboardInterrupt: # pragma: no cover
return 1 | ctox: tox with conda. | Below is the the instruction that describes the task:
### Input:
ctox: tox with conda.
### Response:
def main(arguments, toxinidir=None):
    "ctox: tox with conda."
    # CLI entry point.  Returns 1 on any handled failure; on success
    # sys.exit() propagates SystemExit with ctox's own exit status.
    try: # pragma: no cover
        # Exit on broken pipe.
        import signal
        signal.signal(signal.SIGPIPE, signal.SIG_DFL)
    except AttributeError: # pragma: no cover
        # SIGPIPE is not available on Windows.
        pass
    try:
        import sys
        sys.exit(ctox(arguments, toxinidir))
    except CalledProcessError as c:
        # A subprocess failed: surface its captured output and fail.
        print(c.output)
        return 1
    except NotImplementedError as e:
        # Unsupported tox.ini substitution: show the error brightly and
        # point the user at the issue tracker.
        gh = "https://github.com/hayd/ctox/issues"
        from colorama import Style
        cprint(Style.BRIGHT + str(e), 'err')
        cprint("If this is a valid tox.ini substitution, please open an issue on\n"
               "github and request support: %s." % gh, 'warn')
        return 1
    except KeyboardInterrupt: # pragma: no cover
        return 1
def calculate_basic_cost(self, d1, d2):
"""
Calculates assignment cost between two cells.
"""
distance = euclidean_dist(d1.center, d2.center) / self.scale
area_change = 1 - min(d1.area, d2.area) / max(d1.area, d2.area)
return distance + self.parameters_cost_initial["area_weight"] * area_change | Calculates assignment cost between two cells. | Below is the the instruction that describes the task:
### Input:
Calculates assignment cost between two cells.
### Response:
def calculate_basic_cost(self, d1, d2):
    """
    Assignment cost between two detected cells: scaled centre distance
    plus a weighted relative area change.
    """
    center_dist = euclidean_dist(d1.center, d2.center) / self.scale
    area_ratio = min(d1.area, d2.area) / max(d1.area, d2.area)
    return center_dist + self.parameters_cost_initial["area_weight"] * (1 - area_ratio)
def do_output(self, *args):
"""Pass a command directly to the current output processor
"""
if args:
action, params = args[0], args[1:]
log.debug("Pass %s directly to output with %s", action, params)
function = getattr(self.output, "do_" + action, None)
if function:
function(*params) | Pass a command directly to the current output processor | Below is the the instruction that describes the task:
### Input:
Pass a command directly to the current output processor
### Response:
def do_output(self, *args):
    """Forward a command (``args[0]``) with its parameters to the current
    output processor's matching ``do_<command>`` handler, if it has one.
    """
    if not args:
        return
    action, params = args[0], args[1:]
    log.debug("Pass %s directly to output with %s", action, params)
    handler = getattr(self.output, "do_" + action, None)
    if handler:
        handler(*params)
def get_ssh_client(ip,
ssh_private_key_file,
ssh_user='root',
port=22,
timeout=600,
wait_period=10):
"""Attempt to establish and test ssh connection."""
if ip in CLIENT_CACHE:
return CLIENT_CACHE[ip]
start = time.time()
end = start + timeout
client = None
while time.time() < end:
try:
client = establish_ssh_connection(
ip,
ssh_private_key_file,
ssh_user,
port,
timeout=wait_period
)
execute_ssh_command(client, 'ls')
except: # noqa: E722
if client:
client.close()
wait_period += wait_period
else:
CLIENT_CACHE[ip] = client
return client
raise IpaSSHException(
'Attempt to establish SSH connection failed.'
) | Attempt to establish and test ssh connection. | Below is the the instruction that describes the task:
### Input:
Attempt to establish and test ssh connection.
### Response:
def get_ssh_client(ip,
                   ssh_private_key_file,
                   ssh_user='root',
                   port=22,
                   timeout=600,
                   wait_period=10):
    """Attempt to establish and test ssh connection.

    Retries with an exponentially growing per-attempt timeout until
    *timeout* seconds have elapsed.  Successful clients are cached per IP
    in the module-level CLIENT_CACHE and reused on later calls.

    Raises:
        IpaSSHException: if no connection could be established in time.
    """
    if ip in CLIENT_CACHE:
        return CLIENT_CACHE[ip]
    deadline = time.time() + timeout
    client = None
    while time.time() < deadline:
        try:
            client = establish_ssh_connection(
                ip,
                ssh_private_key_file,
                ssh_user,
                port,
                timeout=wait_period
            )
            # Sanity-check the session with a trivial command.
            execute_ssh_command(client, 'ls')
        except Exception:
            # Was a bare `except:`; narrowed so KeyboardInterrupt and
            # SystemExit are no longer swallowed during the retry loop.
            if client:
                client.close()
            wait_period += wait_period  # exponential back-off
        else:
            CLIENT_CACHE[ip] = client
            return client
    raise IpaSSHException(
        'Attempt to establish SSH connection failed.'
    )
def _JzStaeckelIntegrandSquared(v,E,Lz,I3V,delta,u0,cosh2u0,sinh2u0,
potu0pi2,pot):
#potu0pi2= potentialStaeckel(u0,nu.pi/2.,pot,delta)
"""The J_z integrand: p_v(v)/2/delta^2"""
sin2v= nu.sin(v)**2.
dV= cosh2u0*potu0pi2\
-(sinh2u0+sin2v)*potentialStaeckel(u0,v,pot,delta)
return E*sin2v+I3V+dV-Lz**2./2./delta**2./sin2v | The J_z integrand: p_v(v)/2/delta^2 | Below is the the instruction that describes the task:
### Input:
The J_z integrand: p_v(v)/2/delta^2
### Response:
def _JzStaeckelIntegrandSquared(v,E,Lz,I3V,delta,u0,cosh2u0,sinh2u0,
                                potu0pi2,pot):
    #potu0pi2= potentialStaeckel(u0,nu.pi/2.,pot,delta)
    """The J_z integrand: p_v(v)/2/delta^2"""
    # Evaluates the squared vertical-action integrand at angle v in
    # prolate spheroidal coordinates (u, v) with focal distance delta.
    # `nu` is presumably numpy (module-level import); cosh2u0/sinh2u0 are
    # cosh^2(u0)/sinh^2(u0) and potu0pi2 = Phi(u0, pi/2), precomputed by
    # the caller so they are not re-evaluated per quadrature point.
    sin2v= nu.sin(v)**2.
    # Effective-potential difference between (u0, pi/2) and (u0, v).
    dV= cosh2u0*potu0pi2\
        -(sinh2u0+sin2v)*potentialStaeckel(u0,v,pot,delta)
    # Energy term + third integral + potential term - angular momentum barrier.
    return E*sin2v+I3V+dV-Lz**2./2./delta**2./sin2v
def prune(self, var, value, removals):
"Rule out var=value."
self.curr_domains[var].remove(value)
if removals is not None: removals.append((var, value)) | Rule out var=value. | Below is the the instruction that describes the task:
### Input:
Rule out var=value.
### Response:
def prune(self, var, value, removals):
"Rule out var=value."
self.curr_domains[var].remove(value)
if removals is not None: removals.append((var, value)) |
def list(self, roomId, mentionedPeople=None, before=None,
beforeMessage=None, max=None, **request_parameters):
"""Lists messages in a room.
Each message will include content attachments if present.
The list API sorts the messages in descending order by creation date.
This method supports Webex Teams's implementation of RFC5988 Web
Linking to provide pagination support. It returns a generator
container that incrementally yields all messages returned by the
query. The generator will automatically request additional 'pages' of
responses from Webex as needed until all responses have been returned.
The container makes the generator safe for reuse. A new API call will
be made, using the same parameters that were specified when the
generator was created, every time a new iterator is requested from the
container.
Args:
roomId(basestring): List messages for a room, by ID.
mentionedPeople(basestring): List messages where the caller is
mentioned by specifying "me" or the caller `personId`.
before(basestring): List messages sent before a date and time, in
ISO8601 format.
beforeMessage(basestring): List messages sent before a message,
by ID.
max(int): Limit the maximum number of items returned from the Webex
Teams service per request.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
GeneratorContainer: A GeneratorContainer which, when iterated,
yields the messages returned by the Webex Teams query.
Raises:
TypeError: If the parameter types are incorrect.
ApiError: If the Webex Teams cloud returns an error.
"""
check_type(roomId, basestring, may_be_none=False)
check_type(mentionedPeople, basestring)
check_type(before, basestring)
check_type(beforeMessage, basestring)
check_type(max, int)
params = dict_from_items_with_values(
request_parameters,
roomId=roomId,
mentionedPeople=mentionedPeople,
before=before,
beforeMessage=beforeMessage,
max=max,
)
# API request - get items
items = self._session.get_items(API_ENDPOINT, params=params)
# Yield message objects created from the returned items JSON objects
for item in items:
yield self._object_factory(OBJECT_TYPE, item) | Lists messages in a room.
Each message will include content attachments if present.
The list API sorts the messages in descending order by creation date.
This method supports Webex Teams's implementation of RFC5988 Web
Linking to provide pagination support. It returns a generator
container that incrementally yields all messages returned by the
query. The generator will automatically request additional 'pages' of
responses from Webex as needed until all responses have been returned.
The container makes the generator safe for reuse. A new API call will
be made, using the same parameters that were specified when the
generator was created, every time a new iterator is requested from the
container.
Args:
roomId(basestring): List messages for a room, by ID.
mentionedPeople(basestring): List messages where the caller is
mentioned by specifying "me" or the caller `personId`.
before(basestring): List messages sent before a date and time, in
ISO8601 format.
beforeMessage(basestring): List messages sent before a message,
by ID.
max(int): Limit the maximum number of items returned from the Webex
Teams service per request.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
GeneratorContainer: A GeneratorContainer which, when iterated,
yields the messages returned by the Webex Teams query.
Raises:
TypeError: If the parameter types are incorrect.
ApiError: If the Webex Teams cloud returns an error. | Below is the the instruction that describes the task:
### Input:
Lists messages in a room.
Each message will include content attachments if present.
The list API sorts the messages in descending order by creation date.
This method supports Webex Teams's implementation of RFC5988 Web
Linking to provide pagination support. It returns a generator
container that incrementally yields all messages returned by the
query. The generator will automatically request additional 'pages' of
responses from Webex as needed until all responses have been returned.
The container makes the generator safe for reuse. A new API call will
be made, using the same parameters that were specified when the
generator was created, every time a new iterator is requested from the
container.
Args:
roomId(basestring): List messages for a room, by ID.
mentionedPeople(basestring): List messages where the caller is
mentioned by specifying "me" or the caller `personId`.
before(basestring): List messages sent before a date and time, in
ISO8601 format.
beforeMessage(basestring): List messages sent before a message,
by ID.
max(int): Limit the maximum number of items returned from the Webex
Teams service per request.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
GeneratorContainer: A GeneratorContainer which, when iterated,
yields the messages returned by the Webex Teams query.
Raises:
TypeError: If the parameter types are incorrect.
ApiError: If the Webex Teams cloud returns an error.
### Response:
def list(self, roomId, mentionedPeople=None, before=None,
beforeMessage=None, max=None, **request_parameters):
"""Lists messages in a room.
Each message will include content attachments if present.
The list API sorts the messages in descending order by creation date.
This method supports Webex Teams's implementation of RFC5988 Web
Linking to provide pagination support. It returns a generator
container that incrementally yields all messages returned by the
query. The generator will automatically request additional 'pages' of
responses from Webex as needed until all responses have been returned.
The container makes the generator safe for reuse. A new API call will
be made, using the same parameters that were specified when the
generator was created, every time a new iterator is requested from the
container.
Args:
roomId(basestring): List messages for a room, by ID.
mentionedPeople(basestring): List messages where the caller is
mentioned by specifying "me" or the caller `personId`.
before(basestring): List messages sent before a date and time, in
ISO8601 format.
beforeMessage(basestring): List messages sent before a message,
by ID.
max(int): Limit the maximum number of items returned from the Webex
Teams service per request.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
GeneratorContainer: A GeneratorContainer which, when iterated,
yields the messages returned by the Webex Teams query.
Raises:
TypeError: If the parameter types are incorrect.
ApiError: If the Webex Teams cloud returns an error.
"""
check_type(roomId, basestring, may_be_none=False)
check_type(mentionedPeople, basestring)
check_type(before, basestring)
check_type(beforeMessage, basestring)
check_type(max, int)
params = dict_from_items_with_values(
request_parameters,
roomId=roomId,
mentionedPeople=mentionedPeople,
before=before,
beforeMessage=beforeMessage,
max=max,
)
# API request - get items
items = self._session.get_items(API_ENDPOINT, params=params)
# Yield message objects created from the returned items JSON objects
for item in items:
yield self._object_factory(OBJECT_TYPE, item) |
def generate_enum_doc(enum_descriptor, locations, path, name_prefix=''):
"""Generate doc for an enum.
Args:
enum_descriptor: descriptor_pb2.EnumDescriptorProto instance for enum
to generate docs for.
locations: Dictionary of location paths tuples to
descriptor_pb2.SourceCodeInfo.Location instances.
path: Path tuple to the enum definition.
name_prefix: Optional prefix for this enum's name.
"""
print(make_subsection(name_prefix + enum_descriptor.name))
location = locations[path]
if location.HasField('leading_comments'):
print(textwrap.dedent(location.leading_comments))
row_tuples = []
for value_index, value in enumerate(enum_descriptor.value):
field_location = locations[path + (2, value_index)]
row_tuples.append((
make_code(value.name),
value.number,
textwrap.fill(get_comment_from_location(field_location), INFINITY),
))
print_table(('Name', 'Number', 'Description'), row_tuples) | Generate doc for an enum.
Args:
enum_descriptor: descriptor_pb2.EnumDescriptorProto instance for enum
to generate docs for.
locations: Dictionary of location paths tuples to
descriptor_pb2.SourceCodeInfo.Location instances.
path: Path tuple to the enum definition.
name_prefix: Optional prefix for this enum's name. | Below is the the instruction that describes the task:
### Input:
Generate doc for an enum.
Args:
enum_descriptor: descriptor_pb2.EnumDescriptorProto instance for enum
to generate docs for.
locations: Dictionary of location paths tuples to
descriptor_pb2.SourceCodeInfo.Location instances.
path: Path tuple to the enum definition.
name_prefix: Optional prefix for this enum's name.
### Response:
def generate_enum_doc(enum_descriptor, locations, path, name_prefix=''):
"""Generate doc for an enum.
Args:
enum_descriptor: descriptor_pb2.EnumDescriptorProto instance for enum
to generate docs for.
locations: Dictionary of location paths tuples to
descriptor_pb2.SourceCodeInfo.Location instances.
path: Path tuple to the enum definition.
name_prefix: Optional prefix for this enum's name.
"""
print(make_subsection(name_prefix + enum_descriptor.name))
location = locations[path]
if location.HasField('leading_comments'):
print(textwrap.dedent(location.leading_comments))
row_tuples = []
for value_index, value in enumerate(enum_descriptor.value):
field_location = locations[path + (2, value_index)]
row_tuples.append((
make_code(value.name),
value.number,
textwrap.fill(get_comment_from_location(field_location), INFINITY),
))
print_table(('Name', 'Number', 'Description'), row_tuples) |
def customized_warning(message, category=UserWarning,
filename='', lineno=-1, file=None, line=None):
"""
Customized function to display warnings.
Monkey patch for `warnings.showwarning`.
"""
print("WARNING: {0}".format(message)) | Customized function to display warnings.
Monkey patch for `warnings.showwarning`. | Below is the the instruction that describes the task:
### Input:
Customized function to display warnings.
Monkey patch for `warnings.showwarning`.
### Response:
def customized_warning(message, category=UserWarning,
filename='', lineno=-1, file=None, line=None):
"""
Customized function to display warnings.
Monkey patch for `warnings.showwarning`.
"""
print("WARNING: {0}".format(message)) |
def get(key, default=-1):
"""Backport support for original codes."""
if isinstance(key, int):
return TOS_THR(key)
if key not in TOS_THR._member_map_:
extend_enum(TOS_THR, key, default)
return TOS_THR[key] | Backport support for original codes. | Below is the the instruction that describes the task:
### Input:
Backport support for original codes.
### Response:
def get(key, default=-1):
"""Backport support for original codes."""
if isinstance(key, int):
return TOS_THR(key)
if key not in TOS_THR._member_map_:
extend_enum(TOS_THR, key, default)
return TOS_THR[key] |
def FindExtensionByName(self, full_name):
"""Loads the named extension descriptor from the pool.
Args:
full_name: The full name of the extension descriptor to load.
Returns:
A FieldDescriptor, describing the named extension.
"""
full_name = _NormalizeFullyQualifiedName(full_name)
message_name, _, extension_name = full_name.rpartition('.')
try:
# Most extensions are nested inside a message.
scope = self.FindMessageTypeByName(message_name)
except KeyError:
# Some extensions are defined at file scope.
scope = self.FindFileContainingSymbol(full_name)
return scope.extensions_by_name[extension_name] | Loads the named extension descriptor from the pool.
Args:
full_name: The full name of the extension descriptor to load.
Returns:
A FieldDescriptor, describing the named extension. | Below is the the instruction that describes the task:
### Input:
Loads the named extension descriptor from the pool.
Args:
full_name: The full name of the extension descriptor to load.
Returns:
A FieldDescriptor, describing the named extension.
### Response:
def FindExtensionByName(self, full_name):
"""Loads the named extension descriptor from the pool.
Args:
full_name: The full name of the extension descriptor to load.
Returns:
A FieldDescriptor, describing the named extension.
"""
full_name = _NormalizeFullyQualifiedName(full_name)
message_name, _, extension_name = full_name.rpartition('.')
try:
# Most extensions are nested inside a message.
scope = self.FindMessageTypeByName(message_name)
except KeyError:
# Some extensions are defined at file scope.
scope = self.FindFileContainingSymbol(full_name)
return scope.extensions_by_name[extension_name] |
def extractblocks(img, blksz, stpsz=None):
"""Extract blocks from an ndarray signal into an ndarray.
Parameters
----------
img : ndarray or tuple of ndarrays
nd array of images, or tuple of images
blksz : tuple
tuple of block sizes, blocks are taken starting from the first index
of img
stpsz : tuple, optional (default None, corresponds to steps of 1)
tuple of step sizes between neighboring blocks
Returns
-------
blks : ndarray
image blocks
"""
# See http://stackoverflow.com/questions/16774148 and
# sklearn.feature_extraction.image.extract_patches_2d
if isinstance(img, tuple):
img = np.stack(img, axis=-1)
if stpsz is None:
stpsz = (1,) * len(blksz)
imgsz = img.shape
# Calculate the number of blocks that can fit in each dimension of
# the images
numblocks = tuple(int(np.floor((a - b) / c) + 1) for a, b, c in
zip_longest(imgsz, blksz, stpsz, fillvalue=1))
# Calculate the strides for blocks
blockstrides = tuple(a * b for a, b in zip_longest(img.strides, stpsz,
fillvalue=1))
new_shape = blksz + numblocks
new_strides = img.strides[:len(blksz)] + blockstrides
blks = np.lib.stride_tricks.as_strided(img, new_shape, new_strides)
return np.reshape(blks, blksz + (-1,)) | Extract blocks from an ndarray signal into an ndarray.
Parameters
----------
img : ndarray or tuple of ndarrays
nd array of images, or tuple of images
blksz : tuple
tuple of block sizes, blocks are taken starting from the first index
of img
stpsz : tuple, optional (default None, corresponds to steps of 1)
tuple of step sizes between neighboring blocks
Returns
-------
blks : ndarray
image blocks | Below is the the instruction that describes the task:
### Input:
Extract blocks from an ndarray signal into an ndarray.
Parameters
----------
img : ndarray or tuple of ndarrays
nd array of images, or tuple of images
blksz : tuple
tuple of block sizes, blocks are taken starting from the first index
of img
stpsz : tuple, optional (default None, corresponds to steps of 1)
tuple of step sizes between neighboring blocks
Returns
-------
blks : ndarray
image blocks
### Response:
def extractblocks(img, blksz, stpsz=None):
"""Extract blocks from an ndarray signal into an ndarray.
Parameters
----------
img : ndarray or tuple of ndarrays
nd array of images, or tuple of images
blksz : tuple
tuple of block sizes, blocks are taken starting from the first index
of img
stpsz : tuple, optional (default None, corresponds to steps of 1)
tuple of step sizes between neighboring blocks
Returns
-------
blks : ndarray
image blocks
"""
# See http://stackoverflow.com/questions/16774148 and
# sklearn.feature_extraction.image.extract_patches_2d
if isinstance(img, tuple):
img = np.stack(img, axis=-1)
if stpsz is None:
stpsz = (1,) * len(blksz)
imgsz = img.shape
# Calculate the number of blocks that can fit in each dimension of
# the images
numblocks = tuple(int(np.floor((a - b) / c) + 1) for a, b, c in
zip_longest(imgsz, blksz, stpsz, fillvalue=1))
# Calculate the strides for blocks
blockstrides = tuple(a * b for a, b in zip_longest(img.strides, stpsz,
fillvalue=1))
new_shape = blksz + numblocks
new_strides = img.strides[:len(blksz)] + blockstrides
blks = np.lib.stride_tricks.as_strided(img, new_shape, new_strides)
return np.reshape(blks, blksz + (-1,)) |
def get_nameserver_detail_output_show_nameserver_nameserver_fabric_portname(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_nameserver_detail = ET.Element("get_nameserver_detail")
config = get_nameserver_detail
output = ET.SubElement(get_nameserver_detail, "output")
show_nameserver = ET.SubElement(output, "show-nameserver")
nameserver_portid_key = ET.SubElement(show_nameserver, "nameserver-portid")
nameserver_portid_key.text = kwargs.pop('nameserver_portid')
nameserver_fabric_portname = ET.SubElement(show_nameserver, "nameserver-fabric-portname")
nameserver_fabric_portname.text = kwargs.pop('nameserver_fabric_portname')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def get_nameserver_detail_output_show_nameserver_nameserver_fabric_portname(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_nameserver_detail = ET.Element("get_nameserver_detail")
config = get_nameserver_detail
output = ET.SubElement(get_nameserver_detail, "output")
show_nameserver = ET.SubElement(output, "show-nameserver")
nameserver_portid_key = ET.SubElement(show_nameserver, "nameserver-portid")
nameserver_portid_key.text = kwargs.pop('nameserver_portid')
nameserver_fabric_portname = ET.SubElement(show_nameserver, "nameserver-fabric-portname")
nameserver_fabric_portname.text = kwargs.pop('nameserver_fabric_portname')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
async def get_cred_info_by_id(self, cred_id: str) -> str:
"""
Return cred-info json from wallet by wallet credential identifier.
Raise AbsentCred for no such credential. Raise WalletState if the wallet is closed.
:param cred_id: credential identifier of interest
:return: json with cred for input credential identifier
:return: cred-info json; i.e.,
::
{
"referent": string, # credential identifier in the wallet
"attrs": {
"attr1" : {"raw": "value1", "encoded": "value1_as_int" },
"attr2" : {"raw": "value2", "encoded": "value2_as_int" },
...
}
"schema_id": string,
"cred_def_id": string,
"rev_reg_id": Optional<string>,
"cred_rev_id": Optional<string>
}
"""
LOGGER.debug('HolderProver.get_cred_info_by_id >>> cred_id: %s', cred_id)
if not self.wallet.handle:
LOGGER.debug('HolderProver.get_cred_info_by_id <!< Wallet %s is closed', self.name)
raise WalletState('Wallet {} is closed'.format(self.name))
try:
rv_json = await anoncreds.prover_get_credential(self.wallet.handle, cred_id)
except IndyError as x_indy: # no such cred
if x_indy.error_code == ErrorCode.WalletItemNotFound:
LOGGER.debug(
'HolderProver.get_cred_info_by_id <!< no cred in wallet %s for cred id %s',
self.name,
cred_id)
raise AbsentCred('No cred in wallet for {}'.format(cred_id))
LOGGER.debug(
'HolderProver.get_cred_info_by_id <!< wallet %s, cred id %s: indy error code %s',
self.name,
cred_id,
x_indy.error_code)
raise
LOGGER.debug('HolderProver.get_cred_info_by_id <<< %s', rv_json)
return rv_json | Return cred-info json from wallet by wallet credential identifier.
Raise AbsentCred for no such credential. Raise WalletState if the wallet is closed.
:param cred_id: credential identifier of interest
:return: json with cred for input credential identifier
:return: cred-info json; i.e.,
::
{
"referent": string, # credential identifier in the wallet
"attrs": {
"attr1" : {"raw": "value1", "encoded": "value1_as_int" },
"attr2" : {"raw": "value2", "encoded": "value2_as_int" },
...
}
"schema_id": string,
"cred_def_id": string,
"rev_reg_id": Optional<string>,
"cred_rev_id": Optional<string>
} | Below is the the instruction that describes the task:
### Input:
Return cred-info json from wallet by wallet credential identifier.
Raise AbsentCred for no such credential. Raise WalletState if the wallet is closed.
:param cred_id: credential identifier of interest
:return: json with cred for input credential identifier
:return: cred-info json; i.e.,
::
{
"referent": string, # credential identifier in the wallet
"attrs": {
"attr1" : {"raw": "value1", "encoded": "value1_as_int" },
"attr2" : {"raw": "value2", "encoded": "value2_as_int" },
...
}
"schema_id": string,
"cred_def_id": string,
"rev_reg_id": Optional<string>,
"cred_rev_id": Optional<string>
}
### Response:
async def get_cred_info_by_id(self, cred_id: str) -> str:
"""
Return cred-info json from wallet by wallet credential identifier.
Raise AbsentCred for no such credential. Raise WalletState if the wallet is closed.
:param cred_id: credential identifier of interest
:return: json with cred for input credential identifier
:return: cred-info json; i.e.,
::
{
"referent": string, # credential identifier in the wallet
"attrs": {
"attr1" : {"raw": "value1", "encoded": "value1_as_int" },
"attr2" : {"raw": "value2", "encoded": "value2_as_int" },
...
}
"schema_id": string,
"cred_def_id": string,
"rev_reg_id": Optional<string>,
"cred_rev_id": Optional<string>
}
"""
LOGGER.debug('HolderProver.get_cred_info_by_id >>> cred_id: %s', cred_id)
if not self.wallet.handle:
LOGGER.debug('HolderProver.get_cred_info_by_id <!< Wallet %s is closed', self.name)
raise WalletState('Wallet {} is closed'.format(self.name))
try:
rv_json = await anoncreds.prover_get_credential(self.wallet.handle, cred_id)
except IndyError as x_indy: # no such cred
if x_indy.error_code == ErrorCode.WalletItemNotFound:
LOGGER.debug(
'HolderProver.get_cred_info_by_id <!< no cred in wallet %s for cred id %s',
self.name,
cred_id)
raise AbsentCred('No cred in wallet for {}'.format(cred_id))
LOGGER.debug(
'HolderProver.get_cred_info_by_id <!< wallet %s, cred id %s: indy error code %s',
self.name,
cred_id,
x_indy.error_code)
raise
LOGGER.debug('HolderProver.get_cred_info_by_id <<< %s', rv_json)
return rv_json |
def prevweekday(when, weekday, inclusive=True):
"""
Return the date for the most recent day of the week. If inclusive is True (the default) today
may count as the weekday we're looking for.
"""
if isinstance(when, datetime):
when = when.date()
delta = weekday - when.weekday()
if (inclusive and delta > 0) or (not inclusive and delta >= 0):
delta -= 7
return when + timedelta(days=delta) | Return the date for the most recent day of the week. If inclusive is True (the default) today
may count as the weekday we're looking for. | Below is the the instruction that describes the task:
### Input:
Return the date for the most recent day of the week. If inclusive is True (the default) today
may count as the weekday we're looking for.
### Response:
def prevweekday(when, weekday, inclusive=True):
"""
Return the date for the most recent day of the week. If inclusive is True (the default) today
may count as the weekday we're looking for.
"""
if isinstance(when, datetime):
when = when.date()
delta = weekday - when.weekday()
if (inclusive and delta > 0) or (not inclusive and delta >= 0):
delta -= 7
return when + timedelta(days=delta) |
def configs(max_configs=1, offset=None, serial=False, create_uuid=True):
"""Generate max configs, each one a dictionary. e.g. [{'x': 1}]
Will also add a config UUID, useful for tracking configs.
You can turn this off by passing create_uuid=False.
"""
global default_selector
return default_selector.configs(max_configs, offset, serial, create_uuid) | Generate max configs, each one a dictionary. e.g. [{'x': 1}]
Will also add a config UUID, useful for tracking configs.
You can turn this off by passing create_uuid=False. | Below is the the instruction that describes the task:
### Input:
Generate max configs, each one a dictionary. e.g. [{'x': 1}]
Will also add a config UUID, useful for tracking configs.
You can turn this off by passing create_uuid=False.
### Response:
def configs(max_configs=1, offset=None, serial=False, create_uuid=True):
"""Generate max configs, each one a dictionary. e.g. [{'x': 1}]
Will also add a config UUID, useful for tracking configs.
You can turn this off by passing create_uuid=False.
"""
global default_selector
return default_selector.configs(max_configs, offset, serial, create_uuid) |
def build_eval_path(self, epoch, iteration):
"""
Appends index of the current epoch and index of the current iteration
to the name of the file with results.
:param epoch: index of the current epoch
:param iteration: index of the current iteration
"""
if iteration is not None:
eval_fname = f'eval_epoch_{epoch}_iter_{iteration}'
else:
eval_fname = f'eval_epoch_{epoch}'
eval_path = os.path.join(self.save_path, eval_fname)
return eval_path | Appends index of the current epoch and index of the current iteration
to the name of the file with results.
:param epoch: index of the current epoch
:param iteration: index of the current iteration | Below is the the instruction that describes the task:
### Input:
Appends index of the current epoch and index of the current iteration
to the name of the file with results.
:param epoch: index of the current epoch
:param iteration: index of the current iteration
### Response:
def build_eval_path(self, epoch, iteration):
"""
Appends index of the current epoch and index of the current iteration
to the name of the file with results.
:param epoch: index of the current epoch
:param iteration: index of the current iteration
"""
if iteration is not None:
eval_fname = f'eval_epoch_{epoch}_iter_{iteration}'
else:
eval_fname = f'eval_epoch_{epoch}'
eval_path = os.path.join(self.save_path, eval_fname)
return eval_path |
def items_convert(self, fields=[], **kwargs):
'''taobao.taobaoke.items.convert 淘客商品转换
淘宝客商品转换'''
request = TOPRequest('taobao.taobaoke.items.convert')
if not fields:
taobaokeItem = TaobaokeItem()
fields = taobaokeItem.fields
request['fields'] = fields
for k, v in kwargs.iteritems():
if k not in ('nick', 'outer_code', 'num_iids', 'pid', 'is_mobile') and v==None: continue
request[k] = v
self.create(self.execute(request), fields=['taobaoke_items', 'total_results'], models={'taobaoke_items':TaobaokeItem})
return self.taobaoke_items | taobao.taobaoke.items.convert 淘客商品转换
淘宝客商品转换 | Below is the the instruction that describes the task:
### Input:
taobao.taobaoke.items.convert 淘客商品转换
淘宝客商品转换
### Response:
def items_convert(self, fields=[], **kwargs):
'''taobao.taobaoke.items.convert 淘客商品转换
淘宝客商品转换'''
request = TOPRequest('taobao.taobaoke.items.convert')
if not fields:
taobaokeItem = TaobaokeItem()
fields = taobaokeItem.fields
request['fields'] = fields
for k, v in kwargs.iteritems():
if k not in ('nick', 'outer_code', 'num_iids', 'pid', 'is_mobile') and v==None: continue
request[k] = v
self.create(self.execute(request), fields=['taobaoke_items', 'total_results'], models={'taobaoke_items':TaobaokeItem})
return self.taobaoke_items |
def set_free(self, free):
"""Set free/fixed status """
if free is None:
self.__free__ = False
return
self.__free__ = bool(free) | Set free/fixed status | Below is the the instruction that describes the task:
### Input:
Set free/fixed status
### Response:
def set_free(self, free):
"""Set free/fixed status """
if free is None:
self.__free__ = False
return
self.__free__ = bool(free) |
def parse(self, expression):
"""
Evaluates 'expression' and returns it's value(s)
"""
if isinstance(expression, (list, dict)):
return (True if expression else False, expression)
if sys.version_info[0] > 2:
self.next = self.tokenize(expression).__next__
else:
self.next = self.tokenize(expression).next
self.token = self.next()
return self.expression() | Evaluates 'expression' and returns it's value(s) | Below is the the instruction that describes the task:
### Input:
Evaluates 'expression' and returns it's value(s)
### Response:
def parse(self, expression):
"""
Evaluates 'expression' and returns it's value(s)
"""
if isinstance(expression, (list, dict)):
return (True if expression else False, expression)
if sys.version_info[0] > 2:
self.next = self.tokenize(expression).__next__
else:
self.next = self.tokenize(expression).next
self.token = self.next()
return self.expression() |
async def create_conversation(self, create_conversation_request):
"""Create a new conversation."""
response = hangouts_pb2.CreateConversationResponse()
await self._pb_request('conversations/createconversation',
create_conversation_request, response)
return response | Create a new conversation. | Below is the the instruction that describes the task:
### Input:
Create a new conversation.
### Response:
async def create_conversation(self, create_conversation_request):
"""Create a new conversation."""
response = hangouts_pb2.CreateConversationResponse()
await self._pb_request('conversations/createconversation',
create_conversation_request, response)
return response |
def __get_condition(self, url):
"""
Gets the condition for a url and validates it.
:param str url: The url to get the condition for
"""
if self.__heuristics_condition is not None:
return self.__heuristics_condition
if "pass_heuristics_condition" in self.__sites_object[url]:
condition = \
self.__sites_object[url]["pass_heuristics_condition"]
else:
condition = \
self.cfg_heuristics["pass_heuristics_condition"]
# Because the condition will be eval-ed (Yeah, eval is evil, BUT only
# when not filtered properly), we are filtering it here.
# Anyway, if that filter-method is not perfect: This is not any
# random user-input thats evaled. This is (hopefully still when you
# read this) not a webtool, where you need to filter everything 100%
# properly.
disalloweds = condition
heuristics = self.__get_enabled_heuristics(url)
for allowed in self.__condition_allowed:
disalloweds = disalloweds.replace(allowed, " ")
for heuristic, _ in heuristics.items():
disalloweds = re.sub(r"\b%s\b" % heuristic, " ", disalloweds)
disalloweds = disalloweds.split(" ")
for disallowed in disalloweds:
if disallowed != "":
self.log.error("Misconfiguration: In the condition,"
" an unknown heuristic was found and"
" will be ignored: %s", disallowed)
condition = re.sub(r"\b%s\b" % disallowed, "True", condition)
self.__heuristics_condition = condition
# Now condition should just consits of not, and, or, (, ), and all
# enabled heuristics.
return condition | Gets the condition for a url and validates it.
:param str url: The url to get the condition for | Below is the the instruction that describes the task:
### Input:
Gets the condition for a url and validates it.
:param str url: The url to get the condition for
### Response:
def __get_condition(self, url):
"""
Gets the condition for a url and validates it.
:param str url: The url to get the condition for
"""
if self.__heuristics_condition is not None:
return self.__heuristics_condition
if "pass_heuristics_condition" in self.__sites_object[url]:
condition = \
self.__sites_object[url]["pass_heuristics_condition"]
else:
condition = \
self.cfg_heuristics["pass_heuristics_condition"]
# Because the condition will be eval-ed (Yeah, eval is evil, BUT only
# when not filtered properly), we are filtering it here.
# Anyway, if that filter-method is not perfect: This is not any
# random user-input thats evaled. This is (hopefully still when you
# read this) not a webtool, where you need to filter everything 100%
# properly.
disalloweds = condition
heuristics = self.__get_enabled_heuristics(url)
for allowed in self.__condition_allowed:
disalloweds = disalloweds.replace(allowed, " ")
for heuristic, _ in heuristics.items():
disalloweds = re.sub(r"\b%s\b" % heuristic, " ", disalloweds)
disalloweds = disalloweds.split(" ")
for disallowed in disalloweds:
if disallowed != "":
self.log.error("Misconfiguration: In the condition,"
" an unknown heuristic was found and"
" will be ignored: %s", disallowed)
condition = re.sub(r"\b%s\b" % disallowed, "True", condition)
self.__heuristics_condition = condition
# Now condition should just consits of not, and, or, (, ), and all
# enabled heuristics.
return condition |
def setrange(self, key, offset, value):
"""Overwrite part of a string at key starting at the specified offset.
:raises TypeError: if offset is not int
:raises ValueError: if offset less than 0
"""
if not isinstance(offset, int):
raise TypeError("offset argument must be int")
if offset < 0:
raise ValueError("offset must be greater equal 0")
return self.execute(b'SETRANGE', key, offset, value) | Overwrite part of a string at key starting at the specified offset.
:raises TypeError: if offset is not int
:raises ValueError: if offset less than 0 | Below is the instruction that describes the task:
### Input:
Overwrite part of a string at key starting at the specified offset.
:raises TypeError: if offset is not int
:raises ValueError: if offset less than 0
### Response:
def setrange(self, key, offset, value):
    """Overwrite part of a string at key starting at the specified offset.

    :raises TypeError: if offset is not int
    :raises ValueError: if offset less than 0
    """
    if isinstance(offset, int):
        if offset < 0:
            raise ValueError("offset must be greater equal 0")
    else:
        raise TypeError("offset argument must be int")
    return self.execute(b'SETRANGE', key, offset, value)
def satisfies_constraint(self, site):
"""
Checks if a periodic site satisfies the constraint.
"""
if not site.is_ordered:
return False
if self.species_constraints \
and str(site.specie) in self.species_constraints:
satisfies_constraints = True
else:
satisfies_constraints = False
if self.site_constraint_name \
and self.site_constraint_name in site.properties:
prop = site.properties[self.site_constraint_name]
if prop in self.site_constraints:
satisfies_constraints = True
else:
satisfies_constraints = False
return satisfies_constraints | Checks if a periodic site satisfies the constraint. | Below is the the instruction that describes the task:
### Input:
Checks if a periodic site satisfies the constraint.
### Response:
def satisfies_constraint(self, site):
    """
    Checks if a periodic site satisfies the constraint.
    """
    # Sites that are not ordered never satisfy any constraint.
    if not site.is_ordered:
        return False

    # Species filter: passes when a species list is configured and the
    # site's species is in it.
    result = bool(self.species_constraints) and \
        str(site.specie) in self.species_constraints

    # Site-property filter: when configured and the property is present
    # on the site, its verdict replaces the species result entirely.
    if self.site_constraint_name and \
            self.site_constraint_name in site.properties:
        prop = site.properties[self.site_constraint_name]
        result = prop in self.site_constraints

    return result
def patch_namespaced_deployment_scale(self, name, namespace, body, **kwargs): # noqa: E501
"""patch_namespaced_deployment_scale # noqa: E501
partially update scale of the specified Deployment # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_deployment_scale(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Scale (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param UNKNOWN_BASE_TYPE body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: ExtensionsV1beta1Scale
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_namespaced_deployment_scale_with_http_info(name, namespace, body, **kwargs) # noqa: E501
else:
(data) = self.patch_namespaced_deployment_scale_with_http_info(name, namespace, body, **kwargs) # noqa: E501
return data | patch_namespaced_deployment_scale # noqa: E501
partially update scale of the specified Deployment # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_deployment_scale(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Scale (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param UNKNOWN_BASE_TYPE body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: ExtensionsV1beta1Scale
If the method is called asynchronously,
returns the request thread. | Below is the instruction that describes the task:
### Input:
patch_namespaced_deployment_scale # noqa: E501
partially update scale of the specified Deployment # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_deployment_scale(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Scale (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param UNKNOWN_BASE_TYPE body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: ExtensionsV1beta1Scale
If the method is called asynchronously,
returns the request thread.
### Response:
def patch_namespaced_deployment_scale(self, name, namespace, body, **kwargs):  # noqa: E501
    """patch_namespaced_deployment_scale  # noqa: E501

    partially update scale of the specified Deployment  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.patch_namespaced_deployment_scale(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str name: name of the Scale (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param UNKNOWN_BASE_TYPE body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :return: ExtensionsV1beta1Scale
        If the method is called asynchronously,
        returns the request thread.
    """
    # Callers of this convenience wrapper only ever want the payload, not
    # the (data, status, headers) triple.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        # Asynchronous call: the _with_http_info variant returns a thread.
        return self.patch_namespaced_deployment_scale_with_http_info(name, namespace, body, **kwargs)  # noqa: E501
    # Synchronous call: return the response data directly.
    data = self.patch_namespaced_deployment_scale_with_http_info(name, namespace, body, **kwargs)  # noqa: E501
    return data
async def stop_tasks(self, address):
"""Clear all tasks pertaining to a tile.
This coroutine will synchronously cancel all running tasks that were
attached to the given tile and wait for them to stop before returning.
Args:
address (int): The address of the tile we should stop.
"""
tasks = self._tasks.get(address, [])
for task in tasks:
task.cancel()
asyncio.gather(*tasks, return_exceptions=True)
self._tasks[address] = [] | Clear all tasks pertaining to a tile.
This coroutine will synchronously cancel all running tasks that were
attached to the given tile and wait for them to stop before returning.
Args:
address (int): The address of the tile we should stop. | Below is the the instruction that describes the task:
### Input:
Clear all tasks pertaining to a tile.
This coroutine will synchronously cancel all running tasks that were
attached to the given tile and wait for them to stop before returning.
Args:
address (int): The address of the tile we should stop.
### Response:
async def stop_tasks(self, address):
    """Clear all tasks pertaining to a tile.

    This coroutine will synchronously cancel all running tasks that were
    attached to the given tile and wait for them to stop before returning.

    Args:
        address (int): The address of the tile we should stop.
    """
    tasks = self._tasks.get(address, [])
    for task in tasks:
        task.cancel()

    # BUG FIX: the gather() was previously not awaited, so this coroutine
    # could return before the cancelled tasks had actually finished,
    # contradicting the contract above. Awaiting it ensures cancellation
    # is fully processed; return_exceptions=True swallows the resulting
    # CancelledError from each task instead of propagating it.
    await asyncio.gather(*tasks, return_exceptions=True)
    self._tasks[address] = []
def _gevent_patch():
"""Patch the modules with gevent
:return: Default is GEVENT. If it not supports gevent then return MULTITHREAD
:rtype: int
"""
try:
assert gevent
assert grequests
except NameError:
logger.warn('gevent not exist, fallback to multiprocess...')
return MULTITHREAD
else:
monkey.patch_all() # Must patch before get_photos_info
return GEVENT | Patch the modules with gevent
:return: Default is GEVENT. If it does not support gevent then return MULTITHREAD
:rtype: int | Below is the the instruction that describes the task:
### Input:
Patch the modules with gevent
:return: Default is GEVENT. If it not supports gevent then return MULTITHREAD
:rtype: int
### Response:
def _gevent_patch():
    """Patch the modules with gevent

    :return: Default is GEVENT. If gevent is not available then return
        MULTITHREAD
    :rtype: int
    """
    try:
        # Both names are absent (NameError) when the optional imports
        # failed at module load time.
        assert gevent
        assert grequests
    except NameError:
        # NOTE(review): logger.warn is a deprecated alias of
        # logger.warning — kept as-is to preserve behavior byte-for-byte.
        logger.warn('gevent not exist, fallback to multiprocess...')
        return MULTITHREAD
    monkey.patch_all()  # Must patch before get_photos_info
    return GEVENT
return GEVENT |
async def proposal(self):
"""Get the proposal in question.
Actually just the first proposal with the same name, but the
chance of a collision is tiny.
Returns
-------
awaitable of :class:`aionationstates.Proposal`
The proposal submitted.
Raises
------
aionationstates.NotFound
If the proposal has since been withdrawn or promoted.
"""
proposals = await aionationstates.wa.proposals()
for proposal in proposals:
if (proposal.name == self.proposal_name):
return proposal
raise aionationstates.NotFound | Get the proposal in question.
Actually just the first proposal with the same name, but the
chance of a collision is tiny.
Returns
-------
awaitable of :class:`aionationstates.Proposal`
The proposal submitted.
Raises
------
aionationstates.NotFound
If the proposal has since been withdrawn or promoted. | Below is the the instruction that describes the task:
### Input:
Get the proposal in question.
Actually just the first proposal with the same name, but the
chance of a collision is tiny.
Returns
-------
awaitable of :class:`aionationstates.Proposal`
The proposal submitted.
Raises
------
aionationstates.NotFound
If the proposal has since been withdrawn or promoted.
### Response:
async def proposal(self):
    """Get the proposal in question.

    Actually just the first proposal with the same name, but the
    chance of a collision is tiny.

    Returns
    -------
    awaitable of :class:`aionationstates.Proposal`
        The proposal submitted.

    Raises
    ------
    aionationstates.NotFound
        If the proposal has since been withdrawn or promoted.
    """
    wanted_name = self.proposal_name
    for candidate in await aionationstates.wa.proposals():
        if candidate.name == wanted_name:
            return candidate
    raise aionationstates.NotFound
raise aionationstates.NotFound |
def _get_image(structure, site):
"""Private convenience method for get_nn_info,
gives lattice image from provided PeriodicSite and Structure.
Image is defined as displacement from original site in structure to a given site.
i.e. if structure has a site at (-0.1, 1.0, 0.3), then (0.9, 0, 2.3) -> jimage = (1, -1, 2).
Note that this method takes O(number of sites) due to searching an original site.
Args:
structure: Structure Object
site: PeriodicSite Object
Returns:
image: ((int)*3) Lattice image
"""
original_site = structure[
NearNeighbors._get_original_site(structure, site)]
image = np.around(
np.subtract(site.frac_coords, original_site.frac_coords))
image = tuple(image.astype(int))
return image | Private convenience method for get_nn_info,
gives lattice image from provided PeriodicSite and Structure.
Image is defined as displacement from original site in structure to a given site.
i.e. if structure has a site at (-0.1, 1.0, 0.3), then (0.9, 0, 2.3) -> jimage = (1, -1, 2).
Note that this method takes O(number of sites) due to searching an original site.
Args:
structure: Structure Object
site: PeriodicSite Object
Returns:
image: ((int)*3) Lattice image | Below is the the instruction that describes the task:
### Input:
Private convenience method for get_nn_info,
gives lattice image from provided PeriodicSite and Structure.
Image is defined as displacement from original site in structure to a given site.
i.e. if structure has a site at (-0.1, 1.0, 0.3), then (0.9, 0, 2.3) -> jimage = (1, -1, 2).
Note that this method takes O(number of sites) due to searching an original site.
Args:
structure: Structure Object
site: PeriodicSite Object
Returns:
image: ((int)*3) Lattice image
### Response:
def _get_image(structure, site):
    """Private convenience method for get_nn_info,
    gives lattice image from provided PeriodicSite and Structure.

    Image is defined as displacement from original site in structure to a given site.
    i.e. if structure has a site at (-0.1, 1.0, 0.3), then (0.9, 0, 2.3) -> jimage = (1, -1, 2).
    Note that this method takes O(number of sites) due to searching an original site.

    Args:
        structure: Structure Object
        site: PeriodicSite Object

    Returns:
        image: ((int)*3) Lattice image
    """
    # Locate the corresponding site inside the unit cell, then round the
    # fractional-coordinate displacement to the nearest lattice vector.
    anchor = structure[NearNeighbors._get_original_site(structure, site)]
    displacement = np.around(site.frac_coords - anchor.frac_coords)
    return tuple(displacement.astype(int))
def print_attention_text(attention_matrix: np.ndarray, source_tokens: List[str], target_tokens: List[str],
threshold: float):
"""
Prints the attention matrix to standard out.
:param attention_matrix: The attention matrix.
:param source_tokens: A list of source tokens.
:param target_tokens: A list of target tokens.
:param threshold: The threshold for including an alignment link in the result.
"""
sys.stdout.write(" ")
for _ in target_tokens:
sys.stdout.write("---")
sys.stdout.write("\n")
for i, f_i in enumerate(source_tokens): # type: ignore
sys.stdout.write(" |")
for j in range(len(target_tokens)):
align_prob = attention_matrix[j, i]
if align_prob > threshold:
sys.stdout.write("(*)")
elif align_prob > 0.4:
sys.stdout.write("(?)")
else:
sys.stdout.write(" ")
sys.stdout.write(" | %s\n" % f_i)
sys.stdout.write(" ")
for _ in target_tokens:
sys.stdout.write("---")
sys.stdout.write("\n")
for k in range(max(map(len, target_tokens))):
sys.stdout.write(" ")
for word in target_tokens:
letter = word[k] if len(word) > k else " "
sys.stdout.write(" %s " % letter)
sys.stdout.write("\n")
sys.stdout.write("\n") | Prints the attention matrix to standard out.
:param attention_matrix: The attention matrix.
:param source_tokens: A list of source tokens.
:param target_tokens: A list of target tokens.
:param threshold: The threshold for including an alignment link in the result. | Below is the the instruction that describes the task:
### Input:
Prints the attention matrix to standard out.
:param attention_matrix: The attention matrix.
:param source_tokens: A list of source tokens.
:param target_tokens: A list of target tokens.
:param threshold: The threshold for including an alignment link in the result.
### Response:
def print_attention_text(attention_matrix: np.ndarray, source_tokens: List[str], target_tokens: List[str],
                         threshold: float):
    """
    Prints the attention matrix to standard out.

    :param attention_matrix: The attention matrix.
    :param source_tokens: A list of source tokens.
    :param target_tokens: A list of target tokens.
    :param threshold: The threshold for including an alignment link in the result.
    """
    out = sys.stdout.write
    # Horizontal ruler: one 3-char cell per target token.
    ruler = " " + "---" * len(target_tokens) + "\n"

    out(ruler)
    for src_idx, src_token in enumerate(source_tokens):  # type: ignore
        out(" |")
        for tgt_idx in range(len(target_tokens)):
            align_prob = attention_matrix[tgt_idx, src_idx]
            if align_prob > threshold:
                cell = "(*)"
            elif align_prob > 0.4:
                # Borderline link: above 0.4 but below the main threshold.
                cell = "(?)"
            else:
                cell = " "
            out(cell)
        out(" | %s\n" % src_token)
    out(ruler)

    # Print the target tokens vertically, one character row at a time.
    for char_row in range(max(map(len, target_tokens))):
        out(" ")
        for word in target_tokens:
            letter = word[char_row] if len(word) > char_row else " "
            out(" %s " % letter)
        out("\n")
    out("\n")
def setup_endpoints(provider):
"""Setup the OpenID Connect Provider endpoints."""
app_routing = {}
endpoints = [
AuthorizationEndpoint(
pyoidcMiddleware(provider.authorization_endpoint)),
TokenEndpoint(
pyoidcMiddleware(provider.token_endpoint)),
UserinfoEndpoint(
pyoidcMiddleware(provider.userinfo_endpoint)),
RegistrationEndpoint(
pyoidcMiddleware(provider.registration_endpoint)),
EndSessionEndpoint(
pyoidcMiddleware(provider.endsession_endpoint))
]
for ep in endpoints:
app_routing["/{}".format(ep.etype)] = ep
return app_routing | Setup the OpenID Connect Provider endpoints. | Below is the the instruction that describes the task:
### Input:
Setup the OpenID Connect Provider endpoints.
### Response:
def setup_endpoints(provider):
    """Setup the OpenID Connect Provider endpoints.

    Wraps each provider handler in pyoidcMiddleware and routes it under
    "/<endpoint type>".
    """
    endpoints = [
        AuthorizationEndpoint(
            pyoidcMiddleware(provider.authorization_endpoint)),
        TokenEndpoint(
            pyoidcMiddleware(provider.token_endpoint)),
        UserinfoEndpoint(
            pyoidcMiddleware(provider.userinfo_endpoint)),
        RegistrationEndpoint(
            pyoidcMiddleware(provider.registration_endpoint)),
        EndSessionEndpoint(
            pyoidcMiddleware(provider.endsession_endpoint)),
    ]
    return {"/{}".format(ep.etype): ep for ep in endpoints}
def hash_(self, keys: Index, salt: int = 0) -> pd.Series:
"""Hashes the given index into an integer index in the range [0, self.stride]
Parameters
----------
keys :
The new index to hash.
salt :
An integer used to perturb the hash in a deterministic way. Useful
in dealing with collisions.
Returns
-------
pd.Series
A pandas series indexed by the given keys and whose values take on integers in
the range [0, self.stride]. Duplicates may appear and should be dealt with
by the calling code.
"""
key_frame = keys.to_frame()
new_map = pd.Series(0, index=keys)
salt = self.convert_to_ten_digit_int(pd.Series(salt, index=keys))
for i, column_name in enumerate(key_frame.columns):
column = self.convert_to_ten_digit_int(key_frame[column_name])
primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 27]
out = pd.Series(1, index=column.index)
for idx, p in enumerate(primes):
# numpy will almost always overflow here, but it is equivalent to modding
# out by 2**64. Since it's much much larger than our map size
# the amount of additional periodicity this introduces is pretty trivial.
out *= np.power(p, self.digit(column, idx))
new_map += out + salt
return new_map % self.map_size | Hashes the given index into an integer index in the range [0, self.stride]
Parameters
----------
keys :
The new index to hash.
salt :
An integer used to perturb the hash in a deterministic way. Useful
in dealing with collisions.
Returns
-------
pd.Series
A pandas series indexed by the given keys and whose values take on integers in
the range [0, self.stride]. Duplicates may appear and should be dealt with
by the calling code. | Below is the the instruction that describes the task:
### Input:
Hashes the given index into an integer index in the range [0, self.stride]
Parameters
----------
keys :
The new index to hash.
salt :
An integer used to perturb the hash in a deterministic way. Useful
in dealing with collisions.
Returns
-------
pd.Series
A pandas series indexed by the given keys and whose values take on integers in
the range [0, self.stride]. Duplicates may appear and should be dealt with
by the calling code.
### Response:
def hash_(self, keys: Index, salt: int = 0) -> pd.Series:
    """Hashes the given index into an integer index in the range [0, self.map_size)

    Parameters
    ----------
    keys :
        The new index to hash.
    salt :
        An integer used to perturb the hash in a deterministic way. Useful
        in dealing with collisions.

    Returns
    -------
    pd.Series
        A pandas series indexed by the given keys and whose values take on integers in
        the range [0, self.map_size). Duplicates may appear and should be dealt with
        by the calling code.
    """
    # NOTE(review): 27 is not prime (27 == 3**3); it is preserved here so
    # existing hash values do not change, but confirm whether 29 was
    # intended. Hoisted out of the loop — it is a constant.
    primes = (2, 3, 5, 7, 11, 13, 17, 19, 23, 27)

    key_frame = keys.to_frame()
    new_map = pd.Series(0, index=keys)
    salt = self.convert_to_ten_digit_int(pd.Series(salt, index=keys))

    for column_name in key_frame.columns:
        column = self.convert_to_ten_digit_int(key_frame[column_name])
        out = pd.Series(1, index=column.index)
        for idx, p in enumerate(primes):
            # numpy will almost always overflow here, but it is equivalent to modding
            # out by 2**64. Since it's much much larger than our map size
            # the amount of additional periodicity this introduces is pretty trivial.
            out *= np.power(p, self.digit(column, idx))
        new_map += out + salt
    return new_map % self.map_size
def _escape_arg(self, arg):
'''
Properly escape argument to protect special characters from shell
interpretation. This avoids having to do tricky argument quoting.
Effectively just escape all characters in the argument that are not
alphanumeric!
'''
if self.winrm:
return arg
return ''.join(['\\' + char if re.match(r'\W', char) else char for char in arg]) | Properly escape argument to protect special characters from shell
interpretation. This avoids having to do tricky argument quoting.
Effectively just escape all characters in the argument that are not
alphanumeric! | Below is the instruction that describes the task:
### Input:
Properly escape argument to protect special characters from shell
interpretation. This avoids having to do tricky argument quoting.
Effectively just escape all characters in the argument that are not
alphanumeric!
### Response:
def _escape_arg(self, arg):
    '''
    Properly escape argument to protect special characters from shell
    interpretation. This avoids having to do tricky argument quoting.
    Effectively just escape all characters in the argument that are not
    alphanumeric!
    '''
    if self.winrm:
        # WinRM targets receive the argument verbatim.
        return arg
    # Backslash-escape every non-word character in a single pass;
    # equivalent to prefixing each \W character with a backslash.
    return re.sub(r'(\W)', r'\\\1', arg)
def static(
self,
uri,
file_or_directory,
pattern=r"/?.+",
use_modified_since=True,
use_content_range=False,
stream_large_files=False,
name="static",
host=None,
strict_slashes=None,
content_type=None,
):
"""
Register a root to serve files from. The input can either be a
file or a directory. This method will enable an easy and simple way
to setup the :class:`Route` necessary to serve the static files.
:param uri: URL path to be used for serving static content
:param file_or_directory: Path for the Static file/directory with
static files
:param pattern: Regex Pattern identifying the valid static files
:param use_modified_since: If true, send file modified time, and return
not modified if the browser's matches the server's
:param use_content_range: If true, process header for range requests
and sends the file part that is requested
:param stream_large_files: If true, use the
:func:`StreamingHTTPResponse.file_stream` handler rather
than the :func:`HTTPResponse.file` handler to send the file.
If this is an integer, this represents the threshold size to
switch to :func:`StreamingHTTPResponse.file_stream`
:param name: user defined name used for url_for
:param host: Host IP or FQDN for the service to use
:param strict_slashes: Instruct :class:`Sanic` to check if the request
URLs need to terminate with a */*
:param content_type: user defined content type for header
:return: None
"""
static_register(
self,
uri,
file_or_directory,
pattern,
use_modified_since,
use_content_range,
stream_large_files,
name,
host,
strict_slashes,
content_type,
) | Register a root to serve files from. The input can either be a
file or a directory. This method will enable an easy and simple way
to setup the :class:`Route` necessary to serve the static files.
:param uri: URL path to be used for serving static content
:param file_or_directory: Path for the Static file/directory with
static files
:param pattern: Regex Pattern identifying the valid static files
:param use_modified_since: If true, send file modified time, and return
not modified if the browser's matches the server's
:param use_content_range: If true, process header for range requests
and sends the file part that is requested
:param stream_large_files: If true, use the
:func:`StreamingHTTPResponse.file_stream` handler rather
than the :func:`HTTPResponse.file` handler to send the file.
If this is an integer, this represents the threshold size to
switch to :func:`StreamingHTTPResponse.file_stream`
:param name: user defined name used for url_for
:param host: Host IP or FQDN for the service to use
:param strict_slashes: Instruct :class:`Sanic` to check if the request
URLs need to terminate with a */*
:param content_type: user defined content type for header
:return: None | Below is the the instruction that describes the task:
### Input:
Register a root to serve files from. The input can either be a
file or a directory. This method will enable an easy and simple way
to setup the :class:`Route` necessary to serve the static files.
:param uri: URL path to be used for serving static content
:param file_or_directory: Path for the Static file/directory with
static files
:param pattern: Regex Pattern identifying the valid static files
:param use_modified_since: If true, send file modified time, and return
not modified if the browser's matches the server's
:param use_content_range: If true, process header for range requests
and sends the file part that is requested
:param stream_large_files: If true, use the
:func:`StreamingHTTPResponse.file_stream` handler rather
than the :func:`HTTPResponse.file` handler to send the file.
If this is an integer, this represents the threshold size to
switch to :func:`StreamingHTTPResponse.file_stream`
:param name: user defined name used for url_for
:param host: Host IP or FQDN for the service to use
:param strict_slashes: Instruct :class:`Sanic` to check if the request
URLs need to terminate with a */*
:param content_type: user defined content type for header
:return: None
### Response:
def static(
    self,
    uri,
    file_or_directory,
    pattern=r"/?.+",
    use_modified_since=True,
    use_content_range=False,
    stream_large_files=False,
    name="static",
    host=None,
    strict_slashes=None,
    content_type=None,
):
    """
    Register a root to serve files from. The input can either be a
    file or a directory. This method will enable an easy and simple way
    to setup the :class:`Route` necessary to serve the static files.

    :param uri: URL path to be used for serving static content
    :param file_or_directory: Path for the Static file/directory with
        static files
    :param pattern: Regex Pattern identifying the valid static files
    :param use_modified_since: If true, send file modified time, and return
        not modified if the browser's matches the server's
    :param use_content_range: If true, process header for range requests
        and sends the file part that is requested
    :param stream_large_files: If true, use the
        :func:`StreamingHTTPResponse.file_stream` handler rather
        than the :func:`HTTPResponse.file` handler to send the file.
        If this is an integer, this represents the threshold size to
        switch to :func:`StreamingHTTPResponse.file_stream`
    :param name: user defined name used for url_for
    :param host: Host IP or FQDN for the service to use
    :param strict_slashes: Instruct :class:`Sanic` to check if the request
        URLs need to terminate with a */*
    :param content_type: user defined content type for header
    :return: None
    """
    # Thin wrapper: all route construction is delegated to the shared
    # static_register helper, preserving the argument order.
    static_register(
        self,
        uri,
        file_or_directory,
        pattern,
        use_modified_since,
        use_content_range,
        stream_large_files,
        name,
        host,
        strict_slashes,
        content_type,
    )
def remove(self, timer):
"""Remove a timer from the heap, return True if already run"""
with self.lock:
# This is somewhat expensive as we have to heapify.
if timer in self.timers:
self._remove(timer)
return False
else:
return True | Remove a timer from the heap, return True if already run | Below is the the instruction that describes the task:
### Input:
Remove a timer from the heap, return True if already run
### Response:
def remove(self, timer):
    """Remove a timer from the heap, return True if already run"""
    with self.lock:
        if timer not in self.timers:
            # Not scheduled anymore, so it has already run.
            return True
        # This is somewhat expensive as we have to heapify.
        self._remove(timer)
        return False
def flow_actual(self, Row_Index_Submerged, N_LFOM_Orifices):
"""Calculates the flow for a given number of submerged rows of orifices
harray is the distance from the water level to the center of the
orifices when the water is at the max level.
Parameters
----------
Row_Index_Submerged: int
The index of the submerged row. All rows below and including this
index are submerged.
N_LFOM_Orifices: [int]
The number of orifices at each row.
Returns
--------
The flow through all of the orifices that are submerged.
"""
flow = 0
for i in range(Row_Index_Submerged + 1):
flow = flow + (N_LFOM_Orifices[i] * (
pc.flow_orifice_vert(self.orifice_diameter,
self.b_rows*(Row_Index_Submerged + 1)
- self.height_orifices[i],
con.VC_ORIFICE_RATIO)))
return flow | Calculates the flow for a given number of submerged rows of orifices
harray is the distance from the water level to the center of the
orifices when the water is at the max level.
Parameters
----------
Row_Index_Submerged: int
The index of the submerged row. All rows below and including this
index are submerged.
N_LFOM_Orifices: [int]
The number of orifices at each row.
Returns
--------
The flow through all of the orifices that are submerged. | Below is the the instruction that describes the task:
### Input:
Calculates the flow for a given number of submerged rows of orifices
harray is the distance from the water level to the center of the
orifices when the water is at the max level.
Parameters
----------
Row_Index_Submerged: int
The index of the submerged row. All rows below and including this
index are submerged.
N_LFOM_Orifices: [int]
The number of orifices at each row.
Returns
--------
The flow through all of the orifices that are submerged.
### Response:
def flow_actual(self, Row_Index_Submerged, N_LFOM_Orifices):
    """Calculates the flow for a given number of submerged rows of orifices
    harray is the distance from the water level to the center of the
    orifices when the water is at the max level.

    Parameters
    ----------
    Row_Index_Submerged: int
        The index of the submerged row. All rows below and including this
        index are submerged.
    N_LFOM_Orifices: [int]
        The number of orifices at each row.

    Returns
    --------
    The flow through all of the orifices that are submerged.
    """
    # Water depth above the bottom row; invariant across the loop.
    water_level = self.b_rows * (Row_Index_Submerged + 1)
    total_flow = 0
    for row in range(Row_Index_Submerged + 1):
        total_flow = total_flow + N_LFOM_Orifices[row] * (
            pc.flow_orifice_vert(self.orifice_diameter,
                                 water_level - self.height_orifices[row],
                                 con.VC_ORIFICE_RATIO))
    return total_flow
def initialize_ray():
    """Initializes ray based on environment variables and internal defaults.

    Reads MODIN_MEMORY (explicit plasma store size in bytes) and
    MODIN_OUT_OF_CORE (spill the object store to a temp directory and
    default to 8x system memory). Only acts on the main thread; worker
    threads return immediately without touching ray.
    """
    # Ray must only be initialized from the main thread.
    if threading.current_thread().name != "MainThread":
        return

    plasma_directory = None
    object_store_memory = os.environ.get("MODIN_MEMORY", None)

    if os.environ.get("MODIN_OUT_OF_CORE", "False").title() == "True":
        from tempfile import gettempdir

        plasma_directory = gettempdir()
        # Respect an explicit MODIN_MEMORY setting; otherwise default the
        # out-of-core store to 8x system memory, rounded down to a GB.
        if object_store_memory is None:
            mem_bytes = ray.utils.get_system_memory() // 10 ** 9 * 10 ** 9
            object_store_memory = 8 * mem_bytes

    # Not out of core and no explicit setting: use 60% of system memory,
    # rounded down to the nearest gigabyte.
    if object_store_memory is None:
        object_store_memory = int(
            0.6 * ray.utils.get_system_memory() // 10 ** 9 * 10 ** 9
        )

    # A zero-sized pool means "let ray pick its own default".
    object_store_memory = (
        None if object_store_memory == 0 else int(object_store_memory)
    )

    ray.init(
        include_webui=False,
        ignore_reinit_error=True,
        plasma_directory=plasma_directory,
        object_store_memory=object_store_memory,
    )
    # Register custom serializer for method objects to avoid warning message.
    # We serialize `MethodType` objects when we use AxisPartition operations.
    ray.register_custom_serializer(types.MethodType, use_pickle=True)
### Input:
Initializes ray based on environment variables and internal defaults.
### Response:
def initialize_ray():
    """Initializes ray based on environment variables and internal defaults."""
    if threading.current_thread().name == "MainThread":
        out_of_core = (
            os.environ.get("MODIN_OUT_OF_CORE", "False").title() == "True"
        )
        store_bytes = os.environ.get("MODIN_MEMORY", None)
        plasma_dir = None
        gigabyte = 10 ** 9

        if out_of_core:
            from tempfile import gettempdir

            # Spill the plasma store to disk via a temp directory.
            plasma_dir = gettempdir()
            # Keep any explicit MODIN_MEMORY value; otherwise default the
            # out-of-core pool to 8x system memory (GB-aligned).
            if store_bytes is None:
                store_bytes = 8 * (
                    ray.utils.get_system_memory() // gigabyte * gigabyte
                )

        # No env override and not out of core: 60% of system memory,
        # rounded down to the nearest gigabyte.
        if store_bytes is None:
            store_bytes = int(
                0.6 * ray.utils.get_system_memory() // gigabyte * gigabyte
            )

        # A pool of 0 bytes means "fall back to ray's own default size".
        if store_bytes == 0:
            store_bytes = None
        else:
            store_bytes = int(store_bytes)

        ray.init(
            include_webui=False,
            ignore_reinit_error=True,
            plasma_directory=plasma_dir,
            object_store_memory=store_bytes,
        )
        # Register custom serializer for method objects to avoid warning message.
        # We serialize `MethodType` objects when we use AxisPartition operations.
        ray.register_custom_serializer(types.MethodType, use_pickle=True)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.