code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def create_or_update(sender, **kwargs):
"""
Create or update an Activity Monitor item from some instance.
"""
now = datetime.datetime.now()
# I can't explain why this import fails unless it's here.
from activity_monitor.models import Activity
instance = kwargs['instance']
# Find this object's content type and model class.
instance_content_type = ContentType.objects.get_for_model(sender)
instance_model = sender
content_object = instance_model.objects.get(id=instance.id)
# check to see if the activity already exists. Will need later.
try:
activity = Activity.objects.get(content_type=instance_content_type, object_id=content_object.id)
except:
activity = None
# We now know the content type, the model (sender), content type and content object.
# We need to loop through ACTIVITY_MONITOR_MODELS in settings for other fields
for activity_setting in settings.ACTIVITY_MONITOR_MODELS:
this_app_label = activity_setting['model'].split('.')[0]
this_model_label = activity_setting['model'].split('.')[1]
this_content_type = ContentType.objects.get(app_label=this_app_label, model=this_model_label)
if this_content_type == instance_content_type:
# first, check to see if we even WANT to register this activity.
# use the boolean 'check' field. Also, delete if needed.
if 'check' in activity_setting:
if getattr(instance, activity_setting['check']) is False:
if activity:
activity.delete()
return
# does it use the default manager (objects) or a custom manager?
try:
manager = activity_setting['manager']
except:
manager = 'objects'
# what field denotes the activity time? created is default
try:
timestamp = getattr(instance, activity_setting['date_field'])
except:
timestamp = getattr(instance, 'created')
# if the given time stamp is a daterather than datetime type,
# normalize it out to a datetime
if type(timestamp) == type(now):
clean_timestamp = timestamp
else:
clean_timestamp = datetime.datetime.combine(timestamp, datetime.time())
# Find a valid user object
if 'user_field' in activity_setting: # pull the user object from instance using user_field
user = getattr(instance, activity_setting['user_field'])
elif this_model_label == 'user' or this_model_label == 'profile': # this IS auth.user or a Django 1.5 custom user
user = instance
else: # we didn't specify a user, so it must be instance.user
user = instance.user
# BAIL-OUT CHECKS
# Determine all the reasons we would want to bail out.
# Make sure it's not a future item, like a future-published blog entry.
if clean_timestamp > now:
return
# or some really old content that was just re-saved for some reason
if clean_timestamp < (now - datetime.timedelta(days=3)):
return
# or there's not a user object
if not user:
return
# or the user is god or staff, and we're filtering out, don't add to monitor
if user.is_superuser and 'filter_superuser' in activity_setting:
return
if user.is_staff and 'filter_staff' in activity_setting:
return
# build a default string representation
# note that each activity can get back to the object via get_absolute_url()
verb = activity_setting.get('verb', None)
override_string = activity_setting.get('override_string', None)
# MANAGER CHECK
# Make sure the item "should" be registered, based on the manager argument.
# If InstanceModel.manager.all() includes this item, then register. Otherwise, return.
# Also, check to see if it should be deleted.
try:
getattr(instance_model, manager).get(pk=instance.pk)
except instance_model.DoesNotExist:
try:
activity.delete()
return
except Activity.DoesNotExist:
return
if user and clean_timestamp and instance:
if not activity: # If the activity didn't already exist, create it.
activity = Activity(
actor = user,
content_type = instance_content_type,
object_id = content_object.id,
content_object = content_object,
timestamp = clean_timestamp,
verb = verb,
override_string = override_string,
)
activity.save()
return activity | Create or update an Activity Monitor item from some instance. | Below is the the instruction that describes the task:
### Input:
Create or update an Activity Monitor item from some instance.
### Response:
def create_or_update(sender, **kwargs):
"""
Create or update an Activity Monitor item from some instance.
"""
now = datetime.datetime.now()
# I can't explain why this import fails unless it's here.
from activity_monitor.models import Activity
instance = kwargs['instance']
# Find this object's content type and model class.
instance_content_type = ContentType.objects.get_for_model(sender)
instance_model = sender
content_object = instance_model.objects.get(id=instance.id)
# check to see if the activity already exists. Will need later.
try:
activity = Activity.objects.get(content_type=instance_content_type, object_id=content_object.id)
except:
activity = None
# We now know the content type, the model (sender), content type and content object.
# We need to loop through ACTIVITY_MONITOR_MODELS in settings for other fields
for activity_setting in settings.ACTIVITY_MONITOR_MODELS:
this_app_label = activity_setting['model'].split('.')[0]
this_model_label = activity_setting['model'].split('.')[1]
this_content_type = ContentType.objects.get(app_label=this_app_label, model=this_model_label)
if this_content_type == instance_content_type:
# first, check to see if we even WANT to register this activity.
# use the boolean 'check' field. Also, delete if needed.
if 'check' in activity_setting:
if getattr(instance, activity_setting['check']) is False:
if activity:
activity.delete()
return
# does it use the default manager (objects) or a custom manager?
try:
manager = activity_setting['manager']
except:
manager = 'objects'
# what field denotes the activity time? created is default
try:
timestamp = getattr(instance, activity_setting['date_field'])
except:
timestamp = getattr(instance, 'created')
# if the given time stamp is a daterather than datetime type,
# normalize it out to a datetime
if type(timestamp) == type(now):
clean_timestamp = timestamp
else:
clean_timestamp = datetime.datetime.combine(timestamp, datetime.time())
# Find a valid user object
if 'user_field' in activity_setting: # pull the user object from instance using user_field
user = getattr(instance, activity_setting['user_field'])
elif this_model_label == 'user' or this_model_label == 'profile': # this IS auth.user or a Django 1.5 custom user
user = instance
else: # we didn't specify a user, so it must be instance.user
user = instance.user
# BAIL-OUT CHECKS
# Determine all the reasons we would want to bail out.
# Make sure it's not a future item, like a future-published blog entry.
if clean_timestamp > now:
return
# or some really old content that was just re-saved for some reason
if clean_timestamp < (now - datetime.timedelta(days=3)):
return
# or there's not a user object
if not user:
return
# or the user is god or staff, and we're filtering out, don't add to monitor
if user.is_superuser and 'filter_superuser' in activity_setting:
return
if user.is_staff and 'filter_staff' in activity_setting:
return
# build a default string representation
# note that each activity can get back to the object via get_absolute_url()
verb = activity_setting.get('verb', None)
override_string = activity_setting.get('override_string', None)
# MANAGER CHECK
# Make sure the item "should" be registered, based on the manager argument.
# If InstanceModel.manager.all() includes this item, then register. Otherwise, return.
# Also, check to see if it should be deleted.
try:
getattr(instance_model, manager).get(pk=instance.pk)
except instance_model.DoesNotExist:
try:
activity.delete()
return
except Activity.DoesNotExist:
return
if user and clean_timestamp and instance:
if not activity: # If the activity didn't already exist, create it.
activity = Activity(
actor = user,
content_type = instance_content_type,
object_id = content_object.id,
content_object = content_object,
timestamp = clean_timestamp,
verb = verb,
override_string = override_string,
)
activity.save()
return activity |
def extract_locals(trcback):
"""
Extracts the frames locals of given traceback.
:param trcback: Traceback.
:type trcback: Traceback
:return: Frames locals.
:rtype: list
"""
output = []
stack = extract_stack(get_inner_most_frame(trcback))
for frame, file_name, line_number, name, context, index in stack:
args_names, nameless, keyword = extract_arguments(frame)
arguments, nameless_args, keyword_args, locals = OrderedDict(), [], {}, {}
for key, data in frame.f_locals.iteritems():
if key == nameless:
nameless_args = map(repr, frame.f_locals.get(nameless, ()))
elif key == keyword:
keyword_args = dict((arg, repr(value)) for arg, value in frame.f_locals.get(keyword, {}).iteritems())
elif key in args_names:
arguments[key] = repr(data)
else:
locals[key] = repr(data)
output.append(((name, file_name, line_number), (arguments, nameless_args, keyword_args, locals)))
return output | Extracts the frames locals of given traceback.
:param trcback: Traceback.
:type trcback: Traceback
:return: Frames locals.
:rtype: list | Below is the the instruction that describes the task:
### Input:
Extracts the frames locals of given traceback.
:param trcback: Traceback.
:type trcback: Traceback
:return: Frames locals.
:rtype: list
### Response:
def extract_locals(trcback):
"""
Extracts the frames locals of given traceback.
:param trcback: Traceback.
:type trcback: Traceback
:return: Frames locals.
:rtype: list
"""
output = []
stack = extract_stack(get_inner_most_frame(trcback))
for frame, file_name, line_number, name, context, index in stack:
args_names, nameless, keyword = extract_arguments(frame)
arguments, nameless_args, keyword_args, locals = OrderedDict(), [], {}, {}
for key, data in frame.f_locals.iteritems():
if key == nameless:
nameless_args = map(repr, frame.f_locals.get(nameless, ()))
elif key == keyword:
keyword_args = dict((arg, repr(value)) for arg, value in frame.f_locals.get(keyword, {}).iteritems())
elif key in args_names:
arguments[key] = repr(data)
else:
locals[key] = repr(data)
output.append(((name, file_name, line_number), (arguments, nameless_args, keyword_args, locals)))
return output |
def _cleanPictures(self):
"""
Delete unused images
"""
# Project have been deleted
if not os.path.exists(self.path):
return
try:
pictures = set(os.listdir(self.pictures_directory))
for drawing in self._drawings.values():
try:
pictures.remove(drawing.ressource_filename)
except KeyError:
pass
for pict in pictures:
os.remove(os.path.join(self.pictures_directory, pict))
except OSError as e:
log.warning(str(e)) | Delete unused images | Below is the the instruction that describes the task:
### Input:
Delete unused images
### Response:
def _cleanPictures(self):
"""
Delete unused images
"""
# Project have been deleted
if not os.path.exists(self.path):
return
try:
pictures = set(os.listdir(self.pictures_directory))
for drawing in self._drawings.values():
try:
pictures.remove(drawing.ressource_filename)
except KeyError:
pass
for pict in pictures:
os.remove(os.path.join(self.pictures_directory, pict))
except OSError as e:
log.warning(str(e)) |
def active_serving_watcher(backend, kitchen, period):
"""
Watches all cooking Recipes in a Kitchen
Provide the kitchen name as an argument or be in a Kitchen folder.
"""
err_str, use_kitchen = Backend.get_kitchen_from_user(kitchen)
if use_kitchen is None:
raise click.ClickException(err_str)
click.secho('%s - Watching Active OrderRun Changes in Kitchen %s' % (get_datetime(), use_kitchen), fg='green')
DKCloudCommandRunner.watch_active_servings(backend.dki, use_kitchen, period)
while True:
try:
DKCloudCommandRunner.join_active_serving_watcher_thread_join()
if not DKCloudCommandRunner.watcher_running():
break
except KeyboardInterrupt:
print 'KeyboardInterrupt'
exit_gracefully(None, None)
exit(0) | Watches all cooking Recipes in a Kitchen
Provide the kitchen name as an argument or be in a Kitchen folder. | Below is the the instruction that describes the task:
### Input:
Watches all cooking Recipes in a Kitchen
Provide the kitchen name as an argument or be in a Kitchen folder.
### Response:
def active_serving_watcher(backend, kitchen, period):
"""
Watches all cooking Recipes in a Kitchen
Provide the kitchen name as an argument or be in a Kitchen folder.
"""
err_str, use_kitchen = Backend.get_kitchen_from_user(kitchen)
if use_kitchen is None:
raise click.ClickException(err_str)
click.secho('%s - Watching Active OrderRun Changes in Kitchen %s' % (get_datetime(), use_kitchen), fg='green')
DKCloudCommandRunner.watch_active_servings(backend.dki, use_kitchen, period)
while True:
try:
DKCloudCommandRunner.join_active_serving_watcher_thread_join()
if not DKCloudCommandRunner.watcher_running():
break
except KeyboardInterrupt:
print 'KeyboardInterrupt'
exit_gracefully(None, None)
exit(0) |
def path(string):
"""
Define the 'path' data type that can be used by apps.
"""
if not os.path.exists(string):
msg = "Path %s not found!" % string
raise ArgumentTypeError(msg)
return string | Define the 'path' data type that can be used by apps. | Below is the the instruction that describes the task:
### Input:
Define the 'path' data type that can be used by apps.
### Response:
def path(string):
"""
Define the 'path' data type that can be used by apps.
"""
if not os.path.exists(string):
msg = "Path %s not found!" % string
raise ArgumentTypeError(msg)
return string |
def enum(option, *options):
"""
Construct a new enum object.
Parameters
----------
*options : iterable of str
The names of the fields for the enum.
Returns
-------
enum
A new enum collection.
Examples
--------
>>> e = enum('a', 'b', 'c')
>>> e
<enum: ('a', 'b', 'c')>
>>> e.a
0
>>> e.b
1
>>> e.a in e
True
>>> tuple(e)
(0, 1, 2)
Notes
-----
Identity checking is not guaranteed to work with enum members, instead
equality checks should be used. From CPython's documentation:
"The current implementation keeps an array of integer objects for all
integers between -5 and 256, when you create an int in that range you
actually just get back a reference to the existing object. So it should be
possible to change the value of 1. I suspect the behaviour of Python in
this case is undefined. :-)"
"""
options = (option,) + options
rangeob = range(len(options))
try:
inttype = _inttypes[int(np.log2(len(options) - 1)) // 8]
except IndexError:
raise OverflowError(
'Cannot store enums with more than sys.maxsize elements, got %d' %
len(options),
)
class _enum(Structure):
_fields_ = [(o, inttype) for o in options]
def __iter__(self):
return iter(rangeob)
def __contains__(self, value):
return 0 <= value < len(options)
def __repr__(self):
return '<enum: %s>' % (
('%d fields' % len(options))
if len(options) > 10 else
repr(options)
)
return _enum(*rangeob) | Construct a new enum object.
Parameters
----------
*options : iterable of str
The names of the fields for the enum.
Returns
-------
enum
A new enum collection.
Examples
--------
>>> e = enum('a', 'b', 'c')
>>> e
<enum: ('a', 'b', 'c')>
>>> e.a
0
>>> e.b
1
>>> e.a in e
True
>>> tuple(e)
(0, 1, 2)
Notes
-----
Identity checking is not guaranteed to work with enum members, instead
equality checks should be used. From CPython's documentation:
"The current implementation keeps an array of integer objects for all
integers between -5 and 256, when you create an int in that range you
actually just get back a reference to the existing object. So it should be
possible to change the value of 1. I suspect the behaviour of Python in
this case is undefined. :-)" | Below is the the instruction that describes the task:
### Input:
Construct a new enum object.
Parameters
----------
*options : iterable of str
The names of the fields for the enum.
Returns
-------
enum
A new enum collection.
Examples
--------
>>> e = enum('a', 'b', 'c')
>>> e
<enum: ('a', 'b', 'c')>
>>> e.a
0
>>> e.b
1
>>> e.a in e
True
>>> tuple(e)
(0, 1, 2)
Notes
-----
Identity checking is not guaranteed to work with enum members, instead
equality checks should be used. From CPython's documentation:
"The current implementation keeps an array of integer objects for all
integers between -5 and 256, when you create an int in that range you
actually just get back a reference to the existing object. So it should be
possible to change the value of 1. I suspect the behaviour of Python in
this case is undefined. :-)"
### Response:
def enum(option, *options):
"""
Construct a new enum object.
Parameters
----------
*options : iterable of str
The names of the fields for the enum.
Returns
-------
enum
A new enum collection.
Examples
--------
>>> e = enum('a', 'b', 'c')
>>> e
<enum: ('a', 'b', 'c')>
>>> e.a
0
>>> e.b
1
>>> e.a in e
True
>>> tuple(e)
(0, 1, 2)
Notes
-----
Identity checking is not guaranteed to work with enum members, instead
equality checks should be used. From CPython's documentation:
"The current implementation keeps an array of integer objects for all
integers between -5 and 256, when you create an int in that range you
actually just get back a reference to the existing object. So it should be
possible to change the value of 1. I suspect the behaviour of Python in
this case is undefined. :-)"
"""
options = (option,) + options
rangeob = range(len(options))
try:
inttype = _inttypes[int(np.log2(len(options) - 1)) // 8]
except IndexError:
raise OverflowError(
'Cannot store enums with more than sys.maxsize elements, got %d' %
len(options),
)
class _enum(Structure):
_fields_ = [(o, inttype) for o in options]
def __iter__(self):
return iter(rangeob)
def __contains__(self, value):
return 0 <= value < len(options)
def __repr__(self):
return '<enum: %s>' % (
('%d fields' % len(options))
if len(options) > 10 else
repr(options)
)
return _enum(*rangeob) |
def chi_eff_from_spherical(mass1, mass2, spin1_a, spin1_polar,
spin2_a, spin2_polar):
"""Returns the effective spin using spins in spherical coordinates."""
spin1z = spin1_a * numpy.cos(spin1_polar)
spin2z = spin2_a * numpy.cos(spin2_polar)
return chi_eff(mass1, mass2, spin1z, spin2z) | Returns the effective spin using spins in spherical coordinates. | Below is the the instruction that describes the task:
### Input:
Returns the effective spin using spins in spherical coordinates.
### Response:
def chi_eff_from_spherical(mass1, mass2, spin1_a, spin1_polar,
spin2_a, spin2_polar):
"""Returns the effective spin using spins in spherical coordinates."""
spin1z = spin1_a * numpy.cos(spin1_polar)
spin2z = spin2_a * numpy.cos(spin2_polar)
return chi_eff(mass1, mass2, spin1z, spin2z) |
def end_effector(self):
""" Returns the cartesian position of the end of the chain (in meters). """
angles = self.convert_to_ik_angles(self.joints_position)
return self.forward_kinematics(angles)[:3, 3] | Returns the cartesian position of the end of the chain (in meters). | Below is the the instruction that describes the task:
### Input:
Returns the cartesian position of the end of the chain (in meters).
### Response:
def end_effector(self):
""" Returns the cartesian position of the end of the chain (in meters). """
angles = self.convert_to_ik_angles(self.joints_position)
return self.forward_kinematics(angles)[:3, 3] |
def assign_objective_to_objective_bank(self, objective_id, objective_bank_id):
"""Adds an existing ``Objective`` to an ``ObjectiveBank``.
arg: objective_id (osid.id.Id): the ``Id`` of the
``Objective``
arg: objective_bank_id (osid.id.Id): the ``Id`` of the
``ObjectiveBank``
raise: AlreadyExists - ``objective_id`` already mapped to
``objective_bank_id``
raise: NotFound - ``objective_id`` or ``objective_bank_id`` not
found
raise: NullArgument - ``objective_id`` or ``objective_bank_id``
is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceBinAssignmentSession.assign_resource_to_bin
mgr = self._get_provider_manager('LEARNING', local=True)
lookup_session = mgr.get_objective_bank_lookup_session(proxy=self._proxy)
lookup_session.get_objective_bank(objective_bank_id) # to raise NotFound
self._assign_object_to_catalog(objective_id, objective_bank_id) | Adds an existing ``Objective`` to an ``ObjectiveBank``.
arg: objective_id (osid.id.Id): the ``Id`` of the
``Objective``
arg: objective_bank_id (osid.id.Id): the ``Id`` of the
``ObjectiveBank``
raise: AlreadyExists - ``objective_id`` already mapped to
``objective_bank_id``
raise: NotFound - ``objective_id`` or ``objective_bank_id`` not
found
raise: NullArgument - ``objective_id`` or ``objective_bank_id``
is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.* | Below is the the instruction that describes the task:
### Input:
Adds an existing ``Objective`` to an ``ObjectiveBank``.
arg: objective_id (osid.id.Id): the ``Id`` of the
``Objective``
arg: objective_bank_id (osid.id.Id): the ``Id`` of the
``ObjectiveBank``
raise: AlreadyExists - ``objective_id`` already mapped to
``objective_bank_id``
raise: NotFound - ``objective_id`` or ``objective_bank_id`` not
found
raise: NullArgument - ``objective_id`` or ``objective_bank_id``
is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
### Response:
def assign_objective_to_objective_bank(self, objective_id, objective_bank_id):
"""Adds an existing ``Objective`` to an ``ObjectiveBank``.
arg: objective_id (osid.id.Id): the ``Id`` of the
``Objective``
arg: objective_bank_id (osid.id.Id): the ``Id`` of the
``ObjectiveBank``
raise: AlreadyExists - ``objective_id`` already mapped to
``objective_bank_id``
raise: NotFound - ``objective_id`` or ``objective_bank_id`` not
found
raise: NullArgument - ``objective_id`` or ``objective_bank_id``
is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceBinAssignmentSession.assign_resource_to_bin
mgr = self._get_provider_manager('LEARNING', local=True)
lookup_session = mgr.get_objective_bank_lookup_session(proxy=self._proxy)
lookup_session.get_objective_bank(objective_bank_id) # to raise NotFound
self._assign_object_to_catalog(objective_id, objective_bank_id) |
def format_file(filename, args, standard_out):
"""Run format_code() on a file.
Returns `True` if any changes are needed and they are not being done
in-place.
"""
encoding = detect_encoding(filename)
with open_with_encoding(filename, encoding=encoding) as input_file:
source = input_file.read()
formatted_source = format_code(
source,
preferred_quote=args.quote)
if source != formatted_source:
if args.in_place:
with open_with_encoding(filename, mode='w',
encoding=encoding) as output_file:
output_file.write(formatted_source)
else:
import difflib
diff = difflib.unified_diff(
source.splitlines(),
formatted_source.splitlines(),
'before/' + filename,
'after/' + filename,
lineterm='')
standard_out.write('\n'.join(list(diff) + ['']))
return True | Run format_code() on a file.
Returns `True` if any changes are needed and they are not being done
in-place. | Below is the the instruction that describes the task:
### Input:
Run format_code() on a file.
Returns `True` if any changes are needed and they are not being done
in-place.
### Response:
def format_file(filename, args, standard_out):
"""Run format_code() on a file.
Returns `True` if any changes are needed and they are not being done
in-place.
"""
encoding = detect_encoding(filename)
with open_with_encoding(filename, encoding=encoding) as input_file:
source = input_file.read()
formatted_source = format_code(
source,
preferred_quote=args.quote)
if source != formatted_source:
if args.in_place:
with open_with_encoding(filename, mode='w',
encoding=encoding) as output_file:
output_file.write(formatted_source)
else:
import difflib
diff = difflib.unified_diff(
source.splitlines(),
formatted_source.splitlines(),
'before/' + filename,
'after/' + filename,
lineterm='')
standard_out.write('\n'.join(list(diff) + ['']))
return True |
def all_month_events(self, year, month, category=None, tag=None,
loc=False, cncl=False):
"""
Returns all events that have an occurrence within the given
month & year.
"""
kwargs = self._get_kwargs(category, tag)
ym_first, ym_last = self.get_first_and_last(year, month)
pref = []
if loc:
pref.append("location")
if cncl:
pref.append("cancellations")
# for yearly repeat, we need to check the start and end date months
# b/c yearly events should occur every year in the same month
r = Q(repeat="YEARLY")
dstart_mo = Q(start_date__month=month)
dend_mo = Q(end_date__month=month)
dstart_yr = Q(start_date__year=year)
dend_yr = Q(end_date__year=year)
return self.model.objects.filter(
# only events that are still repeating
r & (dstart_mo | dend_mo) | # yearly repeat
(~Q(repeat="NEVER")) | # all other repeats
((dstart_yr | dend_yr) & (dstart_mo | dend_yr)), # non-repeating
Q(end_repeat=None) | Q(end_repeat__gte=ym_first),
start_date__lte=ym_last # no events that haven't started yet
).filter(**kwargs).prefetch_related(*pref).order_by('start_date').distinct() | Returns all events that have an occurrence within the given
month & year. | Below is the the instruction that describes the task:
### Input:
Returns all events that have an occurrence within the given
month & year.
### Response:
def all_month_events(self, year, month, category=None, tag=None,
loc=False, cncl=False):
"""
Returns all events that have an occurrence within the given
month & year.
"""
kwargs = self._get_kwargs(category, tag)
ym_first, ym_last = self.get_first_and_last(year, month)
pref = []
if loc:
pref.append("location")
if cncl:
pref.append("cancellations")
# for yearly repeat, we need to check the start and end date months
# b/c yearly events should occur every year in the same month
r = Q(repeat="YEARLY")
dstart_mo = Q(start_date__month=month)
dend_mo = Q(end_date__month=month)
dstart_yr = Q(start_date__year=year)
dend_yr = Q(end_date__year=year)
return self.model.objects.filter(
# only events that are still repeating
r & (dstart_mo | dend_mo) | # yearly repeat
(~Q(repeat="NEVER")) | # all other repeats
((dstart_yr | dend_yr) & (dstart_mo | dend_yr)), # non-repeating
Q(end_repeat=None) | Q(end_repeat__gte=ym_first),
start_date__lte=ym_last # no events that haven't started yet
).filter(**kwargs).prefetch_related(*pref).order_by('start_date').distinct() |
def _exit_gracefully(self, signum, frame):
"""
Helper method to clean up DAG file processors to avoid leaving orphan processes.
"""
self.log.info("Exiting gracefully upon receiving signal %s", signum)
self.terminate()
self.end()
self.log.debug("Finished terminating DAG processors.")
sys.exit(os.EX_OK) | Helper method to clean up DAG file processors to avoid leaving orphan processes. | Below is the the instruction that describes the task:
### Input:
Helper method to clean up DAG file processors to avoid leaving orphan processes.
### Response:
def _exit_gracefully(self, signum, frame):
"""
Helper method to clean up DAG file processors to avoid leaving orphan processes.
"""
self.log.info("Exiting gracefully upon receiving signal %s", signum)
self.terminate()
self.end()
self.log.debug("Finished terminating DAG processors.")
sys.exit(os.EX_OK) |
def parseinput(inputlist,outputname=None, atfile=None):
"""
Recursively parse user input based upon the irafglob
program and construct a list of files that need to be processed.
This program addresses the following deficiencies of the irafglob program::
parseinput can extract filenames from association tables
Returns
-------
This program will return a list of input files that will need to
be processed in addition to the name of any outfiles specified in
an association table.
Parameters
----------
inputlist - string
specification of input files using either wild-cards, @-file or
comma-separated list of filenames
outputname - string
desired name for output product to be created from the input files
atfile - object
function to use in interpreting the @-file columns that gets passed to irafglob
Returns
-------
files - list of strings
names of output files to be processed
newoutputname - string
name of output file to be created.
See Also
--------
stsci.tools.irafglob
"""
# Initalize some variables
files = [] # list used to store names of input files
newoutputname = outputname # Outputname returned to calling program.
# The value of outputname is only changed
# if it had a value of 'None' on input.
# We can use irafglob to parse the input. If the input wasn't
# an association table, it needs to be either a wildcard, '@' file,
# or comma seperated list.
files = irafglob(inputlist, atfile=atfile)
# Now that we have expanded the inputlist into a python list
# containing the list of input files, it is necessary to examine
# each of the files to make sure none of them are association tables.
#
# If an association table is found, the entries should be read
# Determine if the input is an association table
for file in files:
if (checkASN(file) == True):
# Create a list to store the files extracted from the
# association tiable
assoclist = []
# The input is an association table
try:
# Open the association table
assocdict = readASNTable(file, None, prodonly=False)
except:
errorstr = "###################################\n"
errorstr += "# #\n"
errorstr += "# UNABLE TO READ ASSOCIATION FILE,#\n"
errorstr += str(file)+'\n'
errorstr += "# DURING FILE PARSING. #\n"
errorstr += "# #\n"
errorstr += "# Please determine if the file is #\n"
errorstr += "# in the current directory and #\n"
errorstr += "# that it has been properly #\n"
errorstr += "# formatted. #\n"
errorstr += "# #\n"
errorstr += "# This error message is being #\n"
errorstr += "# generated from within the #\n"
errorstr += "# parseinput.py module. #\n"
errorstr += "# #\n"
errorstr += "###################################\n"
raise ValueError(errorstr)
# Extract the output name from the association table if None
# was provided on input.
if outputname is None:
newoutputname = assocdict['output']
# Loop over the association dictionary to extract the input
# file names.
for f in assocdict['order']:
assoclist.append(fileutil.buildRootname(f))
# Remove the name of the association table from the list of files
files.remove(file)
# Append the list of filenames generated from the association table
# to the master list of input files.
files.extend(assoclist)
# Return the list of the input files and the output name if provided in an association.
return files, newoutputname | Recursively parse user input based upon the irafglob
program and construct a list of files that need to be processed.
This program addresses the following deficiencies of the irafglob program::
parseinput can extract filenames from association tables
Returns
-------
This program will return a list of input files that will need to
be processed in addition to the name of any outfiles specified in
an association table.
Parameters
----------
inputlist - string
specification of input files using either wild-cards, @-file or
comma-separated list of filenames
outputname - string
desired name for output product to be created from the input files
atfile - object
function to use in interpreting the @-file columns that gets passed to irafglob
Returns
-------
files - list of strings
names of output files to be processed
newoutputname - string
name of output file to be created.
See Also
--------
stsci.tools.irafglob | Below is the the instruction that describes the task:
### Input:
Recursively parse user input based upon the irafglob
program and construct a list of files that need to be processed.
This program addresses the following deficiencies of the irafglob program::
parseinput can extract filenames from association tables
Returns
-------
This program will return a list of input files that will need to
be processed in addition to the name of any outfiles specified in
an association table.
Parameters
----------
inputlist - string
specification of input files using either wild-cards, @-file or
comma-separated list of filenames
outputname - string
desired name for output product to be created from the input files
atfile - object
function to use in interpreting the @-file columns that gets passed to irafglob
Returns
-------
files - list of strings
names of output files to be processed
newoutputname - string
name of output file to be created.
See Also
--------
stsci.tools.irafglob
### Response:
def parseinput(inputlist, outputname=None, atfile=None):
    """
    Recursively parse user input based upon the irafglob
    program and construct a list of files that need to be processed.

    This program addresses the following deficiencies of the irafglob
    program::

        parseinput can extract filenames from association tables

    Parameters
    ----------
    inputlist - string
        specification of input files using either wild-cards, @-file or
        comma-separated list of filenames
    outputname - string
        desired name for output product to be created from the input files
    atfile - object
        function to use in interpreting the @-file columns that gets passed
        to irafglob

    Returns
    -------
    files - list of strings
        names of output files to be processed
    newoutputname - string
        name of output file to be created; only differs from the
        ``outputname`` argument when that argument was None and an
        association table supplied an output name.

    See Also
    --------
    stsci.tools.irafglob
    """
    # Outputname returned to the calling program.  The value of
    # outputname is only changed if it had a value of 'None' on input
    # and an association table provides one.
    newoutputname = outputname

    # irafglob expands wildcards, '@' files and comma-separated lists
    # into a flat python list of input filenames.
    files = irafglob(inputlist, atfile=atfile)

    # Any entry may itself be an association table; if so, replace the
    # table entry with the rootnames it references.
    #
    # BUGFIX: iterate over a snapshot of the list.  The original code
    # called files.remove()/files.extend() on the very list being
    # iterated, which can silently skip entries that follow a removed
    # association table.
    for fname in list(files):
        if (checkASN(fname) == True):
            # Filenames extracted from this association table.
            assoclist = []
            try:
                # Open the association table
                assocdict = readASNTable(fname, None, prodonly=False)
            except Exception:
                # Narrowed from a bare `except:` so that
                # KeyboardInterrupt/SystemExit still propagate.
                errorstr = "###################################\n"
                errorstr += "# #\n"
                errorstr += "# UNABLE TO READ ASSOCIATION FILE,#\n"
                errorstr += str(fname)+'\n'
                errorstr += "# DURING FILE PARSING. #\n"
                errorstr += "# #\n"
                errorstr += "# Please determine if the file is #\n"
                errorstr += "# in the current directory and #\n"
                errorstr += "# that it has been properly #\n"
                errorstr += "# formatted. #\n"
                errorstr += "# #\n"
                errorstr += "# This error message is being #\n"
                errorstr += "# generated from within the #\n"
                errorstr += "# parseinput.py module. #\n"
                errorstr += "# #\n"
                errorstr += "###################################\n"
                raise ValueError(errorstr)
            # Extract the output name from the association table if None
            # was provided on input.
            if outputname is None:
                newoutputname = assocdict['output']
            # Loop over the association dictionary to extract the input
            # file names.
            for f in assocdict['order']:
                assoclist.append(fileutil.buildRootname(f))
            # Replace the association table itself with the filenames it
            # references: remove the table, append its members.
            files.remove(fname)
            files.extend(assoclist)
    # Return the list of the input files and the output name if provided
    # in an association.
    return files, newoutputname
def random_lattice_box(mol_list, mol_number, size,
spacing=np.array([0.3, 0.3, 0.3])):
'''Make a box by placing the molecules specified in *mol_list* on
random points of an evenly spaced lattice.
Using a lattice automatically ensures that no two molecules are
overlapping.
**Parameters**
mol_list: list of Molecule instances
A list of each kind of molecules to add to the system.
mol_number: list of int
The number of molecules to place for each kind.
size: np.ndarray((3,), float)
The box size in nm
spacing: np.ndarray((3,), float), [0.3 0.3 0.3]
The lattice spacing in nm.
**Returns**
A System instance.
**Example**
Typical box with 1000 water molecules randomly placed in a box of size
``[2.0 2.0 2.0]``::
from chemlab.db import ChemlabDB
# Example water molecule
water = ChemlabDB().get('molecule', 'example.water')
s = random_water_box([water], [1000], [2.0, 2.0, 2.0])
'''
# Generate the coordinates
positions = spaced_lattice(size, spacing)
# Randomize them
np.random.shuffle(positions)
n_mol = sum(mol_number)
n_atoms = sum(nmol*mol.n_atoms for mol, nmol in zip(mol_list, mol_number))
# Assert that we have enough space
assert len(positions) >= n_mol, "Can't fit {} molecules in {} spaces".format(n_mol,
len(positions))
box_vectors = np.zeros((3, 3))
box_vectors[0,0] = size[0]
box_vectors[1,1] = size[1]
box_vectors[2,2] = size[2]
# Initialize a system
s = System.empty()
with s.batch() as b:
mol_list = [m.copy() for m in mol_list]
# Add the molecules
pi = 0
for i, mol in enumerate(mol_list):
for j in range(mol_number[i]):
mol.move_to(positions[pi])
b.append(mol.copy())
pi += 1
return s | Make a box by placing the molecules specified in *mol_list* on
random points of an evenly spaced lattice.
Using a lattice automatically ensures that no two molecules are
overlapping.
**Parameters**
mol_list: list of Molecule instances
A list of each kind of molecules to add to the system.
mol_number: list of int
The number of molecules to place for each kind.
size: np.ndarray((3,), float)
The box size in nm
spacing: np.ndarray((3,), float), [0.3 0.3 0.3]
The lattice spacing in nm.
**Returns**
A System instance.
**Example**
Typical box with 1000 water molecules randomly placed in a box of size
``[2.0 2.0 2.0]``::
from chemlab.db import ChemlabDB
# Example water molecule
water = ChemlabDB().get('molecule', 'example.water')
s = random_water_box([water], [1000], [2.0, 2.0, 2.0]) | Below is the instruction that describes the task:
### Input:
Make a box by placing the molecules specified in *mol_list* on
random points of an evenly spaced lattice.
Using a lattice automatically ensures that no two molecules are
overlapping.
**Parameters**
mol_list: list of Molecule instances
A list of each kind of molecules to add to the system.
mol_number: list of int
The number of molecules to place for each kind.
size: np.ndarray((3,), float)
The box size in nm
spacing: np.ndarray((3,), float), [0.3 0.3 0.3]
The lattice spacing in nm.
**Returns**
A System instance.
**Example**
Typical box with 1000 water molecules randomly placed in a box of size
``[2.0 2.0 2.0]``::
from chemlab.db import ChemlabDB
# Example water molecule
water = ChemlabDB().get('molecule', 'example.water')
s = random_water_box([water], [1000], [2.0, 2.0, 2.0])
### Response:
def random_lattice_box(mol_list, mol_number, size,
                       spacing=np.array([0.3, 0.3, 0.3])):
    '''Make a box by placing the molecules specified in *mol_list* on
    random points of an evenly spaced lattice.

    Using a lattice automatically ensures that no two molecules are
    overlapping.

    **Parameters**

    mol_list: list of Molecule instances
        A list of each kind of molecules to add to the system.
    mol_number: list of int
        The number of molecules to place for each kind.
    size: np.ndarray((3,), float)
        The box size in nm
    spacing: np.ndarray((3,), float), [0.3 0.3 0.3]
        The lattice spacing in nm.

    **Returns**

    A System instance.

    **Example**

    Typical box with 1000 water molecules randomly placed in a box of size
    ``[2.0 2.0 2.0]``::

        from chemlab.db import ChemlabDB
        # Example water molecule
        water = ChemlabDB().get('molecule', 'example.water')
        s = random_lattice_box([water], [1000], [2.0, 2.0, 2.0])

    '''
    # Generate evenly spaced lattice points, then shuffle them so that
    # molecule placement is random while overlap remains impossible.
    positions = spaced_lattice(size, spacing)
    np.random.shuffle(positions)

    # Make sure the lattice provides enough sites for every molecule.
    n_mol = sum(mol_number)
    assert len(positions) >= n_mol, "Can't fit {} molecules in {} spaces".format(n_mol,
                                                                                len(positions))

    # NOTE(review): the original also computed an atom count and an
    # orthorhombic box_vectors matrix from `size`, but neither was ever
    # used (box_vectors was never attached to the returned System) --
    # confirm whether s.box_vectors should be set from `size`.

    # Initialize a system and place each molecule on its own lattice site.
    s = System.empty()
    with s.batch() as b:
        # Copy so that moving molecules does not mutate the caller's input.
        mol_list = [m.copy() for m in mol_list]
        pi = 0
        for i, mol in enumerate(mol_list):
            for j in range(mol_number[i]):
                mol.move_to(positions[pi])
                b.append(mol.copy())
                pi += 1
    return s
def get_vcf_handle(fsock=None, infile=None):
"""Open the vcf file and return a handle"""
vcf = None
if (fsock or infile):
if fsock:
# if not infile and hasattr(fsock, 'name'):
logger.info("Reading vcf form stdin")
if sys.version_info < (3, 0):
logger.info("Using codecs to read stdin")
sys.stdin = getreader('utf-8')(fsock)
vcf = sys.stdin
else:
logger.info("Reading vcf from file {0}".format(infile))
file_name, file_extension = os.path.splitext(infile)
if file_extension == '.gz':
logger.debug("Vcf is zipped")
vcf = getreader('utf-8')(gzip.open(infile), errors='replace')
elif file_extension == '.vcf':
vcf = open(infile, mode='r', encoding='utf-8', errors='replace')
else:
raise IOError("File is not in a supported format!\n"
" Or use correct ending(.vcf or .vcf.gz)")
else:
raise IOError("Please provide a fsock or infile")
return vcf | Open the vcf file and return a handle | Below is the instruction that describes the task:
### Input:
Open the vcf file and return a handle
### Response:
def get_vcf_handle(fsock=None, infile=None):
    """Open the vcf file and return a handle.

    Either `fsock` (an already-open stream, typically stdin) or `infile`
    (a path ending in .vcf or .vcf.gz) must be supplied; `fsock` wins
    when both are given.
    """
    # Guard clause: at least one source is mandatory.
    if not (fsock or infile):
        raise IOError("Please provide a fsock or infile")

    if fsock:
        logger.info("Reading vcf form stdin")
        # On Python 2, wrap stdin in a utf-8 codec reader first.
        if sys.version_info < (3, 0):
            logger.info("Using codecs to read stdin")
            sys.stdin = getreader('utf-8')(fsock)
        return sys.stdin

    logger.info("Reading vcf from file {0}".format(infile))
    _, extension = os.path.splitext(infile)
    if extension == '.gz':
        logger.debug("Vcf is zipped")
        return getreader('utf-8')(gzip.open(infile), errors='replace')
    if extension == '.vcf':
        return open(infile, mode='r', encoding='utf-8', errors='replace')
    raise IOError("File is not in a supported format!\n"
                  " Or use correct ending(.vcf or .vcf.gz)")
def to_dict(self):
"""
This method converts the DictCell into a python `dict`. This is useful
for JSON serialization.
"""
output = {}
for key, value in self.__dict__['p'].iteritems():
if value is None or isinstance(value, SIMPLE_TYPES):
output[key] = value
elif hasattr(value, 'to_dot'):
output[key] = value.to_dot()
elif hasattr(value, 'to_dict'):
output[key] = value.to_dict()
elif isinstance(value, datetime.date):
# Convert date/datetime to ms-since-epoch ("new Date()").
ms = time.mktime(value.utctimetuple()) * 1000
ms += getattr(value, 'microseconds', 0) / 1000
output[key] = int(ms)
elif isinstance(value, dict):
output[key] = []
else:
raise ValueError('cannot encode ' + repr(key))
return output | This method converts the DictCell into a python `dict`. This is useful
for JSON serialization. | Below is the instruction that describes the task:
### Input:
This method converts the DictCell into a python `dict`. This is useful
for JSON serialization.
### Response:
def to_dict(self):
    """
    This method converts the DictCell into a python `dict`. This is useful
    for JSON serialization.
    """
    output = {}
    for key, value in self.__dict__['p'].iteritems():
        if value is None or isinstance(value, SIMPLE_TYPES):
            output[key] = value
        elif hasattr(value, 'to_dot'):
            output[key] = value.to_dot()
        elif hasattr(value, 'to_dict'):
            output[key] = value.to_dict()
        elif isinstance(value, datetime.date):
            # Convert date/datetime to ms-since-epoch ("new Date()").
            # NOTE(review): plain `date` objects have no utctimetuple();
            # this branch effectively expects datetime instances -- confirm.
            ms = time.mktime(value.utctimetuple()) * 1000
            # BUGFIX: the datetime attribute is `microsecond` (singular).
            # The original looked up `microseconds`, which never exists on
            # datetime, so the getattr default of 0 was always used and
            # sub-second precision was silently dropped.
            ms += getattr(value, 'microsecond', 0) / 1000
            output[key] = int(ms)
        elif isinstance(value, dict):
            # NOTE(review): nested plain dicts are flattened to an empty
            # list here -- looks deliberate but surprising; confirm.
            output[key] = []
        else:
            raise ValueError('cannot encode ' + repr(key))
    return output
def btc_tx_output_parse_script( scriptpubkey ):
"""
Given the hex representation of a script,
turn it into a nice, easy-to-read dict.
The dict will have:
* asm: the disassembled script as a string
* hex: the raw hex (given as an argument)
* type: the type of script
Optionally, it will have:
* addresses: a list of addresses the script represents (if applicable)
* reqSigs: the number of required signatures (if applicable)
"""
script_type = None
reqSigs = None
addresses = []
script_type = btc_script_classify(scriptpubkey)
script_tokens = btc_script_deserialize(scriptpubkey)
if script_type in ['p2pkh']:
script_type = "pubkeyhash"
reqSigs = 1
addr = btc_script_hex_to_address(scriptpubkey)
if not addr:
raise ValueError("Failed to parse scriptpubkey address")
addresses = [addr]
elif script_type in ['p2sh', 'p2sh-p2wpkh', 'p2sh-p2wsh']:
script_type = "scripthash"
reqSigs = 1
addr = btc_script_hex_to_address(scriptpubkey)
if not addr:
raise ValueError("Failed to parse scriptpubkey address")
addresses = [addr]
elif script_type == 'p2pk':
script_type = "pubkey"
reqSigs = 1
elif script_type is None:
script_type = "nonstandard"
ret = {
"asm": btc_tx_script_to_asm(scriptpubkey),
"hex": scriptpubkey,
"type": script_type
}
if addresses is not None:
ret['addresses'] = addresses
if reqSigs is not None:
ret['reqSigs'] = reqSigs
# print 'parse script {}: {}'.format(scriptpubkey, ret)
return ret | Given the hex representation of a script,
turn it into a nice, easy-to-read dict.
The dict will have:
* asm: the disassembled script as a string
* hex: the raw hex (given as an argument)
* type: the type of script
Optionally, it will have:
* addresses: a list of addresses the script represents (if applicable)
* reqSigs: the number of required signatures (if applicable) | Below is the instruction that describes the task:
### Input:
Given the hex representation of a script,
turn it into a nice, easy-to-read dict.
The dict will have:
* asm: the disassembled script as a string
* hex: the raw hex (given as an argument)
* type: the type of script
Optionally, it will have:
* addresses: a list of addresses the script represents (if applicable)
* reqSigs: the number of required signatures (if applicable)
### Response:
def btc_tx_output_parse_script( scriptpubkey ):
    """
    Turn the hex representation of a script into an easy-to-read dict.

    The dict always carries:
    * asm: the disassembled script as a string
    * hex: the raw hex (given as an argument)
    * type: the type of script

    and may additionally carry:
    * addresses: a list of addresses the script represents (if applicable)
    * reqSigs: the number of required signatures (if applicable)
    """
    type_tag = btc_script_classify(scriptpubkey)
    # Deserialized for its side effects only; the tokens are never used.
    tokens = btc_script_deserialize(scriptpubkey)

    req_sigs = None
    addrs = []

    # Map the classifier's short tags onto bitcoind-style type names,
    # recovering the encoded address where the script carries one.
    if type_tag == 'p2pkh' or type_tag in ('p2sh', 'p2sh-p2wpkh', 'p2sh-p2wsh'):
        type_name = "pubkeyhash" if type_tag == 'p2pkh' else "scripthash"
        req_sigs = 1
        decoded = btc_script_hex_to_address(scriptpubkey)
        if not decoded:
            raise ValueError("Failed to parse scriptpubkey address")
        addrs = [decoded]
    elif type_tag == 'p2pk':
        type_name = "pubkey"
        req_sigs = 1
    elif type_tag is None:
        type_name = "nonstandard"
    else:
        # Any other classified script keeps the classifier's own tag.
        type_name = type_tag

    ret = {
        "asm": btc_tx_script_to_asm(scriptpubkey),
        "hex": scriptpubkey,
        "type": type_name
    }
    # NOTE(review): `addrs` starts out as a list, so this check always
    # succeeds and pubkey/nonstandard outputs carry an empty 'addresses'
    # list -- mirrors the original; confirm whether omission was intended.
    if addrs is not None:
        ret['addresses'] = addrs
    if req_sigs is not None:
        ret['reqSigs'] = req_sigs
    return ret
def GetRootFileEntry(self):
"""Retrieves the root file entry.
Returns:
TARFileEntry: file entry.
"""
path_spec = tar_path_spec.TARPathSpec(
location=self.LOCATION_ROOT, parent=self._path_spec.parent)
return self.GetFileEntryByPathSpec(path_spec) | Retrieves the root file entry.
Returns:
TARFileEntry: file entry. | Below is the instruction that describes the task:
### Input:
Retrieves the root file entry.
Returns:
TARFileEntry: file entry.
### Response:
def GetRootFileEntry(self):
    """Retrieves the root file entry.

    Returns:
      TARFileEntry: file entry.
    """
    # Build a path spec pointing at the archive root and resolve it.
    return self.GetFileEntryByPathSpec(
        tar_path_spec.TARPathSpec(
            location=self.LOCATION_ROOT, parent=self._path_spec.parent))
def fit(
self,
durations,
event_observed=None,
timeline=None,
label=None,
alpha=None,
ci_labels=None,
show_progress=False,
entry=None,
weights=None,
left_censorship=False,
): # pylint: disable=too-many-arguments
"""
Parameters
----------
durations: an array, or pd.Series
length n, duration subject was observed for
event_observed: numpy array or pd.Series, optional
length n, True if the the death was observed, False if the event was lost (right-censored). Defaults all True if event_observed==None
timeline: list, optional
return the estimate at the values in timeline (positively increasing)
label: string, optional
a string to name the column of the estimate.
alpha: float, optional
the alpha value in the confidence intervals. Overrides the initializing
alpha for this call to fit only.
ci_labels: list, optional
add custom column names to the generated confidence intervals as a length-2 list: [<lower-bound name>, <upper-bound name>]. Default: <label>_lower_<alpha>
show_progress: boolean, optional
since this is an iterative fitting algorithm, switching this to True will display some iteration details.
entry: an array, or pd.Series, of length n
relative time when a subject entered the study. This is useful for left-truncated (not left-censored) observations. If None, all members of the population
entered study when they were "born": time zero.
weights: an array, or pd.Series, of length n
integer weights per observation
Returns
-------
self
self with new properties like ``cumulative_hazard_``, ``survival_function_``
"""
if left_censorship:
warnings.warn(
"kwarg left_censorship is deprecated and will be removed in a future release. Please use ``.fit_left_censoring`` instead.",
DeprecationWarning,
)
return self.fit_left_censoring(
durations, event_observed, timeline, label, alpha, ci_labels, show_progress, entry, weights
)
self.durations = np.asarray(pass_for_numeric_dtypes_or_raise_array(durations))
check_nans_or_infs(self.durations)
check_positivity(self.durations)
self._censoring_type = CensoringType.RIGHT
return self._fit(
(self.durations, None),
event_observed=event_observed,
timeline=timeline,
label=label,
alpha=alpha,
ci_labels=ci_labels,
show_progress=show_progress,
entry=entry,
weights=weights,
) | Parameters
----------
durations: an array, or pd.Series
length n, duration subject was observed for
event_observed: numpy array or pd.Series, optional
length n, True if the the death was observed, False if the event was lost (right-censored). Defaults all True if event_observed==None
timeline: list, optional
return the estimate at the values in timeline (positively increasing)
label: string, optional
a string to name the column of the estimate.
alpha: float, optional
the alpha value in the confidence intervals. Overrides the initializing
alpha for this call to fit only.
ci_labels: list, optional
add custom column names to the generated confidence intervals as a length-2 list: [<lower-bound name>, <upper-bound name>]. Default: <label>_lower_<alpha>
show_progress: boolean, optional
since this is an iterative fitting algorithm, switching this to True will display some iteration details.
entry: an array, or pd.Series, of length n
relative time when a subject entered the study. This is useful for left-truncated (not left-censored) observations. If None, all members of the population
entered study when they were "born": time zero.
weights: an array, or pd.Series, of length n
integer weights per observation
Returns
-------
self
self with new properties like ``cumulative_hazard_``, ``survival_function_`` | Below is the instruction that describes the task:
### Input:
Parameters
----------
durations: an array, or pd.Series
length n, duration subject was observed for
event_observed: numpy array or pd.Series, optional
length n, True if the the death was observed, False if the event was lost (right-censored). Defaults all True if event_observed==None
timeline: list, optional
return the estimate at the values in timeline (positively increasing)
label: string, optional
a string to name the column of the estimate.
alpha: float, optional
the alpha value in the confidence intervals. Overrides the initializing
alpha for this call to fit only.
ci_labels: list, optional
add custom column names to the generated confidence intervals as a length-2 list: [<lower-bound name>, <upper-bound name>]. Default: <label>_lower_<alpha>
show_progress: boolean, optional
since this is an iterative fitting algorithm, switching this to True will display some iteration details.
entry: an array, or pd.Series, of length n
relative time when a subject entered the study. This is useful for left-truncated (not left-censored) observations. If None, all members of the population
entered study when they were "born": time zero.
weights: an array, or pd.Series, of length n
integer weights per observation
Returns
-------
self
self with new properties like ``cumulative_hazard_``, ``survival_function_``
### Response:
def fit(
    self,
    durations,
    event_observed=None,
    timeline=None,
    label=None,
    alpha=None,
    ci_labels=None,
    show_progress=False,
    entry=None,
    weights=None,
    left_censorship=False,
):  # pylint: disable=too-many-arguments
    """
    Fit the estimator to right-censored data.

    Parameters
    ----------
    durations: an array, or pd.Series
        length n -- duration each subject was observed for.
    event_observed: numpy array or pd.Series, optional
        length n -- True where the death/event was observed, False where the
        subject was lost (right-censored). Defaults to all True when None.
    timeline: list, optional
        return the estimate at these (positively increasing) time points.
    label: string, optional
        a string to name the column of the estimate.
    alpha: float, optional
        confidence-interval alpha; overrides the value given at
        initialization for this call to fit only.
    ci_labels: list, optional
        custom column names for the generated confidence intervals, as a
        length-2 list: [<lower-bound name>, <upper-bound name>].
        Default: <label>_lower_<alpha>.
    show_progress: boolean, optional
        since this is an iterative fitting algorithm, switching this to True
        will display some iteration details.
    entry: an array, or pd.Series, of length n
        relative time each subject entered the study; useful for
        left-truncated (not left-censored) observations. When None, every
        subject is assumed to enter at time zero.
    weights: an array, or pd.Series, of length n
        integer weights per observation.

    Returns
    -------
    self
        self, with new properties like ``cumulative_hazard_`` and
        ``survival_function_``.
    """
    # Deprecated escape hatch: delegate left-censored fitting wholesale.
    if left_censorship:
        warnings.warn(
            "kwarg left_censorship is deprecated and will be removed in a future release. Please use ``.fit_left_censoring`` instead.",
            DeprecationWarning,
        )
        return self.fit_left_censoring(
            durations, event_observed, timeline, label, alpha, ci_labels, show_progress, entry, weights
        )

    # Validate the durations: numeric, finite, and strictly positive.
    self.durations = np.asarray(pass_for_numeric_dtypes_or_raise_array(durations))
    check_nans_or_infs(self.durations)
    check_positivity(self.durations)
    self._censoring_type = CensoringType.RIGHT

    # Everything else is forwarded untouched to the shared fitting routine.
    forwarded = dict(
        event_observed=event_observed,
        timeline=timeline,
        label=label,
        alpha=alpha,
        ci_labels=ci_labels,
        show_progress=show_progress,
        entry=entry,
        weights=weights,
    )
    return self._fit((self.durations, None), **forwarded)
def _patch_argument_parser(self):
'''
Since argparse doesn't support much introspection, we monkey-patch it to replace the parse_known_args method and
all actions with hooks that tell us which action was last taken or about to be taken, and let us have the parser
figure out which subparsers need to be activated (then recursively monkey-patch those).
We save all active ArgumentParsers to extract all their possible option names later.
'''
active_parsers = [self._parser]
parsed_args = argparse.Namespace()
visited_actions = []
def patch(parser):
parser.__class__ = IntrospectiveArgumentParser
for action in parser._actions:
# TODO: accomplish this with super
class IntrospectAction(action.__class__):
def __call__(self, parser, namespace, values, option_string=None):
debug('Action stub called on', self)
debug('\targs:', parser, namespace, values, option_string)
debug('\torig class:', self._orig_class)
debug('\torig callable:', self._orig_callable)
visited_actions.append(self)
if self._orig_class == argparse._SubParsersAction:
debug('orig class is a subparsers action: patching and running it')
active_subparser = self._name_parser_map[values[0]]
patch(active_subparser)
active_parsers.append(active_subparser)
self._orig_callable(parser, namespace, values, option_string=option_string)
elif self._orig_class in safe_actions:
self._orig_callable(parser, namespace, values, option_string=option_string)
if getattr(action, "_orig_class", None):
debug("Action", action, "already patched")
action._orig_class = action.__class__
action._orig_callable = action.__call__
action.__class__ = IntrospectAction
patch(self._parser)
debug("Active parsers:", active_parsers)
debug("Visited actions:", visited_actions)
debug("Parse result namespace:", parsed_args)
return active_parsers, parsed_args | Since argparse doesn't support much introspection, we monkey-patch it to replace the parse_known_args method and
all actions with hooks that tell us which action was last taken or about to be taken, and let us have the parser
figure out which subparsers need to be activated (then recursively monkey-patch those).
We save all active ArgumentParsers to extract all their possible option names later. | Below is the instruction that describes the task:
### Input:
Since argparse doesn't support much introspection, we monkey-patch it to replace the parse_known_args method and
all actions with hooks that tell us which action was last taken or about to be taken, and let us have the parser
figure out which subparsers need to be activated (then recursively monkey-patch those).
We save all active ArgumentParsers to extract all their possible option names later.
### Response:
def _patch_argument_parser(self):
    '''
    Since argparse doesn't support much introspection, we monkey-patch it to replace the parse_known_args method and
    all actions with hooks that tell us which action was last taken or about to be taken, and let us have the parser
    figure out which subparsers need to be activated (then recursively monkey-patch those).
    We save all active ArgumentParsers to extract all their possible option names later.
    '''
    # Parsers that participate in the (partial) parse; starts with the
    # root parser, subparsers are appended as they become active.
    active_parsers = [self._parser]
    # Namespace handed to the parse; populated by the patched actions.
    parsed_args = argparse.Namespace()
    # Every Action instance whose __call__ fired during parsing.
    visited_actions = []
    def patch(parser):
        # Swap in the introspective subclass (assumed defined at module
        # level) so the parse itself can be observed, then wrap each of
        # this parser's actions with a recording stub.
        parser.__class__ = IntrospectiveArgumentParser
        for action in parser._actions:
            # TODO: accomplish this with super
            # Per-action subclass: records the call, and only forwards to
            # the original callable for subparser actions or actions
            # whitelisted in `safe_actions` (to avoid side effects).
            class IntrospectAction(action.__class__):
                def __call__(self, parser, namespace, values, option_string=None):
                    debug('Action stub called on', self)
                    debug('\targs:', parser, namespace, values, option_string)
                    debug('\torig class:', self._orig_class)
                    debug('\torig callable:', self._orig_callable)
                    visited_actions.append(self)
                    if self._orig_class == argparse._SubParsersAction:
                        debug('orig class is a subparsers action: patching and running it')
                        # Recurse into the chosen subparser so its own
                        # actions get the same instrumentation.
                        active_subparser = self._name_parser_map[values[0]]
                        patch(active_subparser)
                        active_parsers.append(active_subparser)
                        self._orig_callable(parser, namespace, values, option_string=option_string)
                    elif self._orig_class in safe_actions:
                        self._orig_callable(parser, namespace, values, option_string=option_string)
            # NOTE(review): this only logs when an action was already
            # patched -- it still re-wraps it below, stacking another
            # IntrospectAction layer; confirm that is intentional.
            if getattr(action, "_orig_class", None):
                debug("Action", action, "already patched")
            # Remember the pristine class/callable, then install the stub.
            action._orig_class = action.__class__
            action._orig_callable = action.__call__
            action.__class__ = IntrospectAction
    patch(self._parser)
    debug("Active parsers:", active_parsers)
    debug("Visited actions:", visited_actions)
    debug("Parse result namespace:", parsed_args)
    return active_parsers, parsed_args
def get_args(cls, dist, header=None):
"""Overrides easy_install.ScriptWriter.get_args
This method avoids using pkg_resources to map a named entry_point
to a callable at invocation time.
"""
if header is None:
header = cls.get_header()
spec = str(dist.as_requirement())
for type_ in 'console', 'gui':
group = type_ + '_scripts'
for name, ep in dist.get_entry_map(group).items():
# ensure_safe_name
if re.search(r'[\\/]', name):
raise ValueError("Path separators not allowed in script names")
script_text = TEMPLATE.format(
ep.module_name,
ep.attrs[0],
'.'.join(ep.attrs),
spec,
group,
name,
)
args = cls._get_script_args(type_, name, header, script_text)
for res in args:
yield res | Overrides easy_install.ScriptWriter.get_args
This method avoids using pkg_resources to map a named entry_point
to a callable at invocation time. | Below is the instruction that describes the task:
### Input:
Overrides easy_install.ScriptWriter.get_args
This method avoids using pkg_resources to map a named entry_point
to a callable at invocation time.
### Response:
def get_args(cls, dist, header=None):
    """Overrides easy_install.ScriptWriter.get_args

    This method avoids using pkg_resources to map a named entry_point
    to a callable at invocation time.
    """
    if header is None:
        header = cls.get_header()
    spec = str(dist.as_requirement())
    # One pass per script flavour: console entry points, then GUI ones.
    for kind in ('console', 'gui'):
        group = kind + '_scripts'
        for name, ep in dist.get_entry_map(group).items():
            # Reject names that would escape the scripts directory.
            if re.search(r'[\\/]', name):
                raise ValueError("Path separators not allowed in script names")
            # Render the launcher source from the module-level template.
            script_text = TEMPLATE.format(
                ep.module_name, ep.attrs[0], '.'.join(ep.attrs),
                spec, group, name)
            for res in cls._get_script_args(kind, name, header, script_text):
                yield res
def import_vmesh(file):
""" Imports NURBS volume(s) from volume mesh (vmesh) file(s).
:param file: path to a directory containing mesh files or a single mesh file
:type file: str
:return: list of NURBS volumes
:rtype: list
:raises GeomdlException: an error occurred reading the file
"""
imported_elements = []
if os.path.isfile(file):
imported_elements.append(exch.import_vol_mesh(file))
elif os.path.isdir(file):
files = sorted([os.path.join(file, f) for f in os.listdir(file)])
for f in files:
imported_elements.append(exch.import_vol_mesh(f))
else:
raise exch.GeomdlException("Input is not a file or a directory")
return imported_elements | Imports NURBS volume(s) from volume mesh (vmesh) file(s).
:param file: path to a directory containing mesh files or a single mesh file
:type file: str
:return: list of NURBS volumes
:rtype: list
:raises GeomdlException: an error occurred reading the file | Below is the instruction that describes the task:
### Input:
Imports NURBS volume(s) from volume mesh (vmesh) file(s).
:param file: path to a directory containing mesh files or a single mesh file
:type file: str
:return: list of NURBS volumes
:rtype: list
:raises GeomdlException: an error occurred reading the file
### Response:
def import_vmesh(file):
    """ Imports NURBS volume(s) from volume mesh (vmesh) file(s).

    :param file: path to a directory containing mesh files or a single mesh file
    :type file: str
    :return: list of NURBS volumes
    :rtype: list
    :raises GeomdlException: an error occurred reading the file
    """
    # A single mesh file imports as a one-element list.
    if os.path.isfile(file):
        return [exch.import_vol_mesh(file)]
    # A directory imports every contained file, in sorted name order.
    if os.path.isdir(file):
        mesh_paths = sorted(os.path.join(file, name) for name in os.listdir(file))
        return [exch.import_vol_mesh(path) for path in mesh_paths]
    raise exch.GeomdlException("Input is not a file or a directory")
def _mk_uninit_array(self, bounds):
""" given a list of bounds for the N dimensions of an array,
_mk_uninit_array() creates and returns an N-dimensional array of
the size specified by the bounds with each element set to the value
None."""
if len(bounds) == 0:
raise For2PyError("Zero-length arrays current not handled!.")
this_dim = bounds[0]
lo,hi = this_dim[0],this_dim[1]
sz = hi-lo+1
if len(bounds) == 1:
return [None] * sz
sub_array = self._mk_uninit_array(bounds[1:])
this_array = [copy.deepcopy(sub_array) for i in range(sz)]
return this_array | given a list of bounds for the N dimensions of an array,
_mk_uninit_array() creates and returns an N-dimensional array of
the size specified by the bounds with each element set to the value
None. | Below is the instruction that describes the task:
### Input:
given a list of bounds for the N dimensions of an array,
_mk_uninit_array() creates and returns an N-dimensional array of
the size specified by the bounds with each element set to the value
None.
### Response:
def _mk_uninit_array(self, bounds):
""" given a list of bounds for the N dimensions of an array,
_mk_uninit_array() creates and returns an N-dimensional array of
the size specified by the bounds with each element set to the value
None."""
if len(bounds) == 0:
raise For2PyError("Zero-length arrays current not handled!.")
this_dim = bounds[0]
lo,hi = this_dim[0],this_dim[1]
sz = hi-lo+1
if len(bounds) == 1:
return [None] * sz
sub_array = self._mk_uninit_array(bounds[1:])
this_array = [copy.deepcopy(sub_array) for i in range(sz)]
return this_array |
def getLocalID(self):
"""Return the identifier that should be sent as the
openid.identity parameter to the server."""
# I looked at this conditional and thought "ah-hah! there's the bug!"
# but Python actually makes that one big expression somehow, i.e.
# "x is x is x" is not the same thing as "(x is x) is x".
# That's pretty weird, dude. -- kmt, 1/07
if (self.local_id is self.canonicalID is None):
return self.claimed_id
else:
return self.local_id or self.canonicalID | Return the identifier that should be sent as the
openid.identity parameter to the server. | Below is the instruction that describes the task:
### Input:
Return the identifier that should be sent as the
openid.identity parameter to the server.
### Response:
def getLocalID(self):
"""Return the identifier that should be sent as the
openid.identity parameter to the server."""
# I looked at this conditional and thought "ah-hah! there's the bug!"
# but Python actually makes that one big expression somehow, i.e.
# "x is x is x" is not the same thing as "(x is x) is x".
# That's pretty weird, dude. -- kmt, 1/07
if (self.local_id is self.canonicalID is None):
return self.claimed_id
else:
return self.local_id or self.canonicalID |
def send_event(self, instance, message, event_type=None, time=None,
severity='info', source=None, sequence_number=None):
"""
Post a new event.
:param str instance: A Yamcs instance name.
:param str message: Event message.
:param Optional[str] event_type: Type of event.
:param severity: The severity level of the event. One of ``info``,
``watch``, ``warning``, ``critical`` or ``severe``.
Defaults to ``info``.
:type severity: Optional[str]
:param time: Time of the event. If unspecified, defaults to mission time.
:type time: Optional[~datetime.datetime]
:param source: Source of the event. Useful for grouping events in the
archive. When unset this defaults to ``User``.
:type source: Optional[str]
:param sequence_number: Sequence number of this event. This is primarily
used to determine unicity of events coming from
the same source. If not set Yamcs will
automatically assign a sequential number as if
every submitted event is unique.
:type sequence_number: Optional[int]
"""
req = rest_pb2.CreateEventRequest()
req.message = message
req.severity = severity
if event_type:
req.type = event_type
if time:
req.time = to_isostring(time)
if source:
req.source = source
if sequence_number is not None:
req.sequence_number = sequence_number
url = '/archive/{}/events'.format(instance)
self.post_proto(url, data=req.SerializeToString()) | Post a new event.
:param str instance: A Yamcs instance name.
:param str message: Event message.
:param Optional[str] event_type: Type of event.
:param severity: The severity level of the event. One of ``info``,
``watch``, ``warning``, ``critical`` or ``severe``.
Defaults to ``info``.
:type severity: Optional[str]
:param time: Time of the event. If unspecified, defaults to mission time.
:type time: Optional[~datetime.datetime]
:param source: Source of the event. Useful for grouping events in the
archive. When unset this defaults to ``User``.
:type source: Optional[str]
:param sequence_number: Sequence number of this event. This is primarily
used to determine unicity of events coming from
the same source. If not set Yamcs will
automatically assign a sequential number as if
every submitted event is unique.
:type sequence_number: Optional[int] | Below is the instruction that describes the task:
### Input:
Post a new event.
:param str instance: A Yamcs instance name.
:param str message: Event message.
:param Optional[str] event_type: Type of event.
:param severity: The severity level of the event. One of ``info``,
``watch``, ``warning``, ``critical`` or ``severe``.
Defaults to ``info``.
:type severity: Optional[str]
:param time: Time of the event. If unspecified, defaults to mission time.
:type time: Optional[~datetime.datetime]
:param source: Source of the event. Useful for grouping events in the
archive. When unset this defaults to ``User``.
:type source: Optional[str]
:param sequence_number: Sequence number of this event. This is primarily
used to determine unicity of events coming from
the same source. If not set Yamcs will
automatically assign a sequential number as if
every submitted event is unique.
:type sequence_number: Optional[int]
### Response:
def send_event(self, instance, message, event_type=None, time=None,
severity='info', source=None, sequence_number=None):
"""
Post a new event.
:param str instance: A Yamcs instance name.
:param str message: Event message.
:param Optional[str] event_type: Type of event.
:param severity: The severity level of the event. One of ``info``,
``watch``, ``warning``, ``critical`` or ``severe``.
Defaults to ``info``.
:type severity: Optional[str]
:param time: Time of the event. If unspecified, defaults to mission time.
:type time: Optional[~datetime.datetime]
:param source: Source of the event. Useful for grouping events in the
archive. When unset this defaults to ``User``.
:type source: Optional[str]
:param sequence_number: Sequence number of this event. This is primarily
used to determine unicity of events coming from
the same source. If not set Yamcs will
automatically assign a sequential number as if
every submitted event is unique.
:type sequence_number: Optional[int]
"""
req = rest_pb2.CreateEventRequest()
req.message = message
req.severity = severity
if event_type:
req.type = event_type
if time:
req.time = to_isostring(time)
if source:
req.source = source
if sequence_number is not None:
req.sequence_number = sequence_number
url = '/archive/{}/events'.format(instance)
self.post_proto(url, data=req.SerializeToString()) |
def update_customer_by_id(cls, customer_id, customer, **kwargs):
"""Update Customer
Update attributes of Customer
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.update_customer_by_id(customer_id, customer, async=True)
>>> result = thread.get()
:param async bool
:param str customer_id: ID of customer to update. (required)
:param Customer customer: Attributes of customer to update. (required)
:return: Customer
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._update_customer_by_id_with_http_info(customer_id, customer, **kwargs)
else:
(data) = cls._update_customer_by_id_with_http_info(customer_id, customer, **kwargs)
return data | Update Customer
Update attributes of Customer
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.update_customer_by_id(customer_id, customer, async=True)
>>> result = thread.get()
:param async bool
:param str customer_id: ID of customer to update. (required)
:param Customer customer: Attributes of customer to update. (required)
:return: Customer
If the method is called asynchronously,
returns the request thread. | Below is the instruction that describes the task:
### Input:
Update Customer
Update attributes of Customer
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.update_customer_by_id(customer_id, customer, async=True)
>>> result = thread.get()
:param async bool
:param str customer_id: ID of customer to update. (required)
:param Customer customer: Attributes of customer to update. (required)
:return: Customer
If the method is called asynchronously,
returns the request thread.
### Response:
def update_customer_by_id(cls, customer_id, customer, **kwargs):
"""Update Customer
Update attributes of Customer
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.update_customer_by_id(customer_id, customer, async=True)
>>> result = thread.get()
:param async bool
:param str customer_id: ID of customer to update. (required)
:param Customer customer: Attributes of customer to update. (required)
:return: Customer
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._update_customer_by_id_with_http_info(customer_id, customer, **kwargs)
else:
(data) = cls._update_customer_by_id_with_http_info(customer_id, customer, **kwargs)
return data |
def _spectrum(self, photon_energy):
"""Compute differential IC spectrum for energies in ``photon_energy``.
Compute IC spectrum using IC cross-section for isotropic interaction
with a blackbody photon spectrum following Khangulyan, Aharonian, and
Kelner 2014, ApJ 783, 100 (`arXiv:1310.7971
<http://www.arxiv.org/abs/1310.7971>`_).
Parameters
----------
photon_energy : :class:`~astropy.units.Quantity` instance
Photon energy array.
"""
outspecene = _validate_ene(photon_energy)
self.specic = []
for seed in self.seed_photon_fields:
# Call actual computation, detached to allow changes in subclasses
self.specic.append(
self._calc_specic(seed, outspecene).to("1/(s eV)")
)
return np.sum(u.Quantity(self.specic), axis=0) | Compute differential IC spectrum for energies in ``photon_energy``.
Compute IC spectrum using IC cross-section for isotropic interaction
with a blackbody photon spectrum following Khangulyan, Aharonian, and
Kelner 2014, ApJ 783, 100 (`arXiv:1310.7971
<http://www.arxiv.org/abs/1310.7971>`_).
Parameters
----------
photon_energy : :class:`~astropy.units.Quantity` instance
Photon energy array. | Below is the instruction that describes the task:
### Input:
Compute differential IC spectrum for energies in ``photon_energy``.
Compute IC spectrum using IC cross-section for isotropic interaction
with a blackbody photon spectrum following Khangulyan, Aharonian, and
Kelner 2014, ApJ 783, 100 (`arXiv:1310.7971
<http://www.arxiv.org/abs/1310.7971>`_).
Parameters
----------
photon_energy : :class:`~astropy.units.Quantity` instance
Photon energy array.
### Response:
def _spectrum(self, photon_energy):
"""Compute differential IC spectrum for energies in ``photon_energy``.
Compute IC spectrum using IC cross-section for isotropic interaction
with a blackbody photon spectrum following Khangulyan, Aharonian, and
Kelner 2014, ApJ 783, 100 (`arXiv:1310.7971
<http://www.arxiv.org/abs/1310.7971>`_).
Parameters
----------
photon_energy : :class:`~astropy.units.Quantity` instance
Photon energy array.
"""
outspecene = _validate_ene(photon_energy)
self.specic = []
for seed in self.seed_photon_fields:
# Call actual computation, detached to allow changes in subclasses
self.specic.append(
self._calc_specic(seed, outspecene).to("1/(s eV)")
)
return np.sum(u.Quantity(self.specic), axis=0) |
def count_funs(self) -> int:
""" Count function define by this scope """
n = 0
for s in self._hsig.values():
if hasattr(s, 'is_fun') and s.is_fun:
n += 1
return n | Count function define by this scope | Below is the instruction that describes the task:
### Input:
Count function define by this scope
### Response:
def count_funs(self) -> int:
""" Count function define by this scope """
n = 0
for s in self._hsig.values():
if hasattr(s, 'is_fun') and s.is_fun:
n += 1
return n |
def mgmt_root(opt_bigip, opt_username, opt_password, opt_port, opt_token):
'''bigip fixture'''
try:
from pytest import symbols
except ImportError:
m = ManagementRoot(opt_bigip, opt_username, opt_password,
port=opt_port, token=opt_token)
else:
if symbols is not None:
m = ManagementRoot(symbols.bigip_mgmt_ip_public,
symbols.bigip_username,
symbols.bigip_password,
port=opt_port, token=opt_token)
else:
m = ManagementRoot(opt_bigip, opt_username, opt_password,
port=opt_port, token=opt_token)
return m | bigip fixture | Below is the instruction that describes the task:
### Input:
bigip fixture
### Response:
def mgmt_root(opt_bigip, opt_username, opt_password, opt_port, opt_token):
'''bigip fixture'''
try:
from pytest import symbols
except ImportError:
m = ManagementRoot(opt_bigip, opt_username, opt_password,
port=opt_port, token=opt_token)
else:
if symbols is not None:
m = ManagementRoot(symbols.bigip_mgmt_ip_public,
symbols.bigip_username,
symbols.bigip_password,
port=opt_port, token=opt_token)
else:
m = ManagementRoot(opt_bigip, opt_username, opt_password,
port=opt_port, token=opt_token)
return m |
def reformat_record(record):
"""Repack a record into a cleaner structure for consumption."""
return {
"key": record["dynamodb"].get("Keys", None),
"new": record["dynamodb"].get("NewImage", None),
"old": record["dynamodb"].get("OldImage", None),
"meta": {
"created_at": record["dynamodb"]["ApproximateCreationDateTime"],
"event": {
"id": record["eventID"],
"type": record["eventName"].lower(),
"version": record["eventVersion"]
},
"sequence_number": record["dynamodb"]["SequenceNumber"],
}
} | Repack a record into a cleaner structure for consumption. | Below is the instruction that describes the task:
### Input:
Repack a record into a cleaner structure for consumption.
### Response:
def reformat_record(record):
"""Repack a record into a cleaner structure for consumption."""
return {
"key": record["dynamodb"].get("Keys", None),
"new": record["dynamodb"].get("NewImage", None),
"old": record["dynamodb"].get("OldImage", None),
"meta": {
"created_at": record["dynamodb"]["ApproximateCreationDateTime"],
"event": {
"id": record["eventID"],
"type": record["eventName"].lower(),
"version": record["eventVersion"]
},
"sequence_number": record["dynamodb"]["SequenceNumber"],
}
} |
def flatten(items):
"""
Yield items from any nested iterable. Used by ``QuadTree.flatten`` to one-dimensionalize a list of sublists.
cf. http://stackoverflow.com/questions/952914/making-a-flat-list-out-of-list-of-lists-in-python
"""
for x in items:
if isinstance(x, Iterable):
yield from flatten(x)
else:
yield x | Yield items from any nested iterable. Used by ``QuadTree.flatten`` to one-dimensionalize a list of sublists.
cf. http://stackoverflow.com/questions/952914/making-a-flat-list-out-of-list-of-lists-in-python | Below is the instruction that describes the task:
### Input:
Yield items from any nested iterable. Used by ``QuadTree.flatten`` to one-dimensionalize a list of sublists.
cf. http://stackoverflow.com/questions/952914/making-a-flat-list-out-of-list-of-lists-in-python
### Response:
def flatten(items):
"""
Yield items from any nested iterable. Used by ``QuadTree.flatten`` to one-dimensionalize a list of sublists.
cf. http://stackoverflow.com/questions/952914/making-a-flat-list-out-of-list-of-lists-in-python
"""
for x in items:
if isinstance(x, Iterable):
yield from flatten(x)
else:
yield x |
def func(command, description, link, params_string, returns="On success, the sent Message is returned.", return_type="Message"):
"""
Live template for pycharm:
y = func(command="$cmd$", description="$desc$", link="$lnk$", params_string="$first_param$", returns="$returns$", return_type="$returntype$")
"""
variables_needed = []
variables_optional = []
imports = set()
if params_string: # WHITELISTED_FUNCS have no params
for param in params_string.split("\n"):
variable = parse_param_types(param)
# any variable.types has always_is_value => lenght must be 1.
assert (not any([type_.always_is_value is not None for type_ in variable.types]) or len(variable.types) == 1)
if variable.optional:
variables_optional.append(variable)
else:
variables_needed.append(variable)
# end if
imports.update(variable.all_imports)
# end for
# end if
imports = list(imports)
imports.sort()
returns = Variable(types=as_types(return_type, variable_name="return type"), description=returns)
func_object = Function(
imports=imports, api_name=command, link=link, description=description, returns=returns,
parameters=variables_needed, keywords=variables_optional
)
return func_object | Live template for pycharm:
y = func(command="$cmd$", description="$desc$", link="$lnk$", params_string="$first_param$", returns="$returns$", return_type="$returntype$") | Below is the instruction that describes the task:
### Input:
Live template for pycharm:
y = func(command="$cmd$", description="$desc$", link="$lnk$", params_string="$first_param$", returns="$returns$", return_type="$returntype$")
### Response:
def func(command, description, link, params_string, returns="On success, the sent Message is returned.", return_type="Message"):
"""
Live template for pycharm:
y = func(command="$cmd$", description="$desc$", link="$lnk$", params_string="$first_param$", returns="$returns$", return_type="$returntype$")
"""
variables_needed = []
variables_optional = []
imports = set()
if params_string: # WHITELISTED_FUNCS have no params
for param in params_string.split("\n"):
variable = parse_param_types(param)
# any variable.types has always_is_value => lenght must be 1.
assert (not any([type_.always_is_value is not None for type_ in variable.types]) or len(variable.types) == 1)
if variable.optional:
variables_optional.append(variable)
else:
variables_needed.append(variable)
# end if
imports.update(variable.all_imports)
# end for
# end if
imports = list(imports)
imports.sort()
returns = Variable(types=as_types(return_type, variable_name="return type"), description=returns)
func_object = Function(
imports=imports, api_name=command, link=link, description=description, returns=returns,
parameters=variables_needed, keywords=variables_optional
)
return func_object |
def sort_filter(l, mirror=False, splice=0, splice_random=False):
"""
Rearranges an interval of pixels.
:param l: The interval, as a list of pixels
:param mirror: Whether to put each element in the list alternatively at the start or end of the list, effectively
mirroring a sorted list.
This is particularly useful with pixel paths that are looped, so that the beginning and end will not be
discontinuous.
:param splice: A value in the range [0,1] that picks a point in the list and makes it the start of the interval
pixels before this element are moved to the end. A value of 0 uses the existing first element of the list as the
starting point, and a value of 1 makes the last element in the list be the start.
:param splice_random: Splices the list at a random point.
:return: A modified copy of the list of pixels.
"""
if len(l) == 0:
return l
nl = list(l)
if mirror:
get_index = put_index = 0
while get_index < len(l):
nl[put_index] = l[get_index]
get_index += 1
if put_index >= 0:
put_index += 1
put_index *= -1
if splice_random:
splice_start = randrange(len(l))
elif splice > 0:
splice_start = int((len(l) - 1) * splice)
else:
splice_start = None
if splice_start is not None:
nl = nl[splice_start:] + nl[:splice_start]
return nl | Rearranges an interval of pixels.
:param l: The interval, as a list of pixels
:param mirror: Whether to put each element in the list alternatively at the start or end of the list, effectively
mirroring a sorted list.
This is particularly useful with pixel paths that are looped, so that the beginning and end will not be
discontinuous.
:param splice: A value in the range [0,1] that picks a point in the list and makes it the start of the interval
pixels before this element are moved to the end. A value of 0 uses the existing first element of the list as the
starting point, and a value of 1 makes the last element in the list be the start.
:param splice_random: Splices the list at a random point.
:return: A modified copy of the list of pixels. | Below is the instruction that describes the task:
### Input:
Rearranges an interval of pixels.
:param l: The interval, as a list of pixels
:param mirror: Whether to put each element in the list alternatively at the start or end of the list, effectively
mirroring a sorted list.
This is particularly useful with pixel paths that are looped, so that the beginning and end will not be
discontinuous.
:param splice: A value in the range [0,1] that picks a point in the list and makes it the start of the interval
pixels before this element are moved to the end. A value of 0 uses the existing first element of the list as the
starting point, and a value of 1 makes the last element in the list be the start.
:param splice_random: Splices the list at a random point.
:return: A modified copy of the list of pixels.
### Response:
def sort_filter(l, mirror=False, splice=0, splice_random=False):
"""
Rearranges an interval of pixels.
:param l: The interval, as a list of pixels
:param mirror: Whether to put each element in the list alternatively at the start or end of the list, effectively
mirroring a sorted list.
This is particularly useful with pixel paths that are looped, so that the beginning and end will not be
discontinuous.
:param splice: A value in the range [0,1] that picks a point in the list and makes it the start of the interval
pixels before this element are moved to the end. A value of 0 uses the existing first element of the list as the
starting point, and a value of 1 makes the last element in the list be the start.
:param splice_random: Splices the list at a random point.
:return: A modified copy of the list of pixels.
"""
if len(l) == 0:
return l
nl = list(l)
if mirror:
get_index = put_index = 0
while get_index < len(l):
nl[put_index] = l[get_index]
get_index += 1
if put_index >= 0:
put_index += 1
put_index *= -1
if splice_random:
splice_start = randrange(len(l))
elif splice > 0:
splice_start = int((len(l) - 1) * splice)
else:
splice_start = None
if splice_start is not None:
nl = nl[splice_start:] + nl[:splice_start]
return nl |
def _sanitize_config(custom_config):
"""Checks whether ``custom_config`` is sane and returns a sanitized dict <str -> (str|object)>
It checks if keys are all strings and sanitizes values of a given dictionary as follows:
- If string, number or boolean is given as a value, it is converted to string.
For string and number (int, float), it is converted to string by a built-in ``str()`` method.
For a boolean value, ``True`` is converted to "true" instead of "True", and ``False`` is
converted to "false" instead of "False", in order to keep the consistency with
Java configuration.
- If neither of the above is given as a value, it is inserted into the sanitized dict as it is.
These values will need to be serialized before adding to a protobuf message.
"""
if not isinstance(custom_config, dict):
raise TypeError("Component-specific configuration must be given as a dict type, given: %s"
% str(type(custom_config)))
sanitized = {}
for key, value in custom_config.items():
if not isinstance(key, str):
raise TypeError("Key for component-specific configuration must be string, given: %s:%s"
% (str(type(key)), str(key)))
if isinstance(value, bool):
sanitized[key] = "true" if value else "false"
elif isinstance(value, (str, int, float)):
sanitized[key] = str(value)
else:
sanitized[key] = value
return sanitized | Checks whether ``custom_config`` is sane and returns a sanitized dict <str -> (str|object)>
It checks if keys are all strings and sanitizes values of a given dictionary as follows:
- If string, number or boolean is given as a value, it is converted to string.
For string and number (int, float), it is converted to string by a built-in ``str()`` method.
For a boolean value, ``True`` is converted to "true" instead of "True", and ``False`` is
converted to "false" instead of "False", in order to keep the consistency with
Java configuration.
- If neither of the above is given as a value, it is inserted into the sanitized dict as it is.
These values will need to be serialized before adding to a protobuf message. | Below is the instruction that describes the task:
### Input:
Checks whether ``custom_config`` is sane and returns a sanitized dict <str -> (str|object)>
It checks if keys are all strings and sanitizes values of a given dictionary as follows:
- If string, number or boolean is given as a value, it is converted to string.
For string and number (int, float), it is converted to string by a built-in ``str()`` method.
For a boolean value, ``True`` is converted to "true" instead of "True", and ``False`` is
converted to "false" instead of "False", in order to keep the consistency with
Java configuration.
- If neither of the above is given as a value, it is inserted into the sanitized dict as it is.
These values will need to be serialized before adding to a protobuf message.
### Response:
def _sanitize_config(custom_config):
"""Checks whether ``custom_config`` is sane and returns a sanitized dict <str -> (str|object)>
It checks if keys are all strings and sanitizes values of a given dictionary as follows:
- If string, number or boolean is given as a value, it is converted to string.
For string and number (int, float), it is converted to string by a built-in ``str()`` method.
For a boolean value, ``True`` is converted to "true" instead of "True", and ``False`` is
converted to "false" instead of "False", in order to keep the consistency with
Java configuration.
- If neither of the above is given as a value, it is inserted into the sanitized dict as it is.
These values will need to be serialized before adding to a protobuf message.
"""
if not isinstance(custom_config, dict):
raise TypeError("Component-specific configuration must be given as a dict type, given: %s"
% str(type(custom_config)))
sanitized = {}
for key, value in custom_config.items():
if not isinstance(key, str):
raise TypeError("Key for component-specific configuration must be string, given: %s:%s"
% (str(type(key)), str(key)))
if isinstance(value, bool):
sanitized[key] = "true" if value else "false"
elif isinstance(value, (str, int, float)):
sanitized[key] = str(value)
else:
sanitized[key] = value
return sanitized |
def output_forward(gandi, domain, forward, justify=14):
""" Helper to output a mail forward information."""
for dest in forward['destinations']:
output_line(gandi, forward['source'], dest, justify) | Helper to output a mail forward information. | Below is the instruction that describes the task:
### Input:
Helper to output a mail forward information.
### Response:
def output_forward(gandi, domain, forward, justify=14):
""" Helper to output a mail forward information."""
for dest in forward['destinations']:
output_line(gandi, forward['source'], dest, justify) |
def InitPathInfos(self, client_id, path_infos):
"""Initializes a collection of path info records for a client.
Unlike `WritePathInfo`, this method clears stat and hash histories of paths
associated with path info records. This method is intended to be used only
in the data migration scripts.
Args:
client_id: A client identifier for which the paths are to be initialized.
path_infos: A list of `rdf_objects.PathInfo` objects to write.
"""
self.ClearPathHistory(client_id, path_infos)
self.WritePathInfos(client_id, path_infos) | Initializes a collection of path info records for a client.
Unlike `WritePathInfo`, this method clears stat and hash histories of paths
associated with path info records. This method is intended to be used only
in the data migration scripts.
Args:
client_id: A client identifier for which the paths are to be initialized.
path_infos: A list of `rdf_objects.PathInfo` objects to write. | Below is the instruction that describes the task:
### Input:
Initializes a collection of path info records for a client.
Unlike `WritePathInfo`, this method clears stat and hash histories of paths
associated with path info records. This method is intended to be used only
in the data migration scripts.
Args:
client_id: A client identifier for which the paths are to be initialized.
path_infos: A list of `rdf_objects.PathInfo` objects to write.
### Response:
def InitPathInfos(self, client_id, path_infos):
"""Initializes a collection of path info records for a client.
Unlike `WritePathInfo`, this method clears stat and hash histories of paths
associated with path info records. This method is intended to be used only
in the data migration scripts.
Args:
client_id: A client identifier for which the paths are to be initialized.
path_infos: A list of `rdf_objects.PathInfo` objects to write.
"""
self.ClearPathHistory(client_id, path_infos)
self.WritePathInfos(client_id, path_infos) |
def _lexical_chains(self, doc, term_concept_map):
"""
Builds lexical chains, as an adjacency matrix,
using a disambiguated term-concept map.
"""
concepts = list({c for c in term_concept_map.values()})
# Build an adjacency matrix for the graph
# Using the encoding:
# 1 = identity/synonymy, 2 = hypernymy/hyponymy, 3 = meronymy, 0 = no edge
n_cons = len(concepts)
adj_mat = np.zeros((n_cons, n_cons))
for i, c in enumerate(concepts):
# TO DO can only do i >= j since the graph is undirected
for j, c_ in enumerate(concepts):
edge = 0
if c == c_:
edge = 1
# TO DO when should simulate root be True?
elif c_ in c._shortest_hypernym_paths(simulate_root=False).keys():
edge = 2
elif c in c_._shortest_hypernym_paths(simulate_root=False).keys():
edge = 2
elif c_ in c.member_meronyms() + c.part_meronyms() + c.substance_meronyms():
edge = 3
elif c in c_.member_meronyms() + c_.part_meronyms() + c_.substance_meronyms():
edge = 3
adj_mat[i,j] = edge
# Group connected concepts by labels
concept_labels = connected_components(adj_mat, directed=False)[1]
lexical_chains = [([], []) for i in range(max(concept_labels) + 1)]
for i, concept in enumerate(concepts):
label = concept_labels[i]
lexical_chains[label][0].append(concept)
lexical_chains[label][1].append(i)
# Return the lexical chains as (concept list, adjacency sub-matrix) tuples
return [(chain, adj_mat[indices][:,indices]) for chain, indices in lexical_chains] | Builds lexical chains, as an adjacency matrix,
using a disambiguated term-concept map. | Below is the instruction that describes the task:
### Input:
Builds lexical chains, as an adjacency matrix,
using a disambiguated term-concept map.
### Response:
def _lexical_chains(self, doc, term_concept_map):
"""
Builds lexical chains, as an adjacency matrix,
using a disambiguated term-concept map.
"""
concepts = list({c for c in term_concept_map.values()})
# Build an adjacency matrix for the graph
# Using the encoding:
# 1 = identity/synonymy, 2 = hypernymy/hyponymy, 3 = meronymy, 0 = no edge
n_cons = len(concepts)
adj_mat = np.zeros((n_cons, n_cons))
for i, c in enumerate(concepts):
# TO DO can only do i >= j since the graph is undirected
for j, c_ in enumerate(concepts):
edge = 0
if c == c_:
edge = 1
# TO DO when should simulate root be True?
elif c_ in c._shortest_hypernym_paths(simulate_root=False).keys():
edge = 2
elif c in c_._shortest_hypernym_paths(simulate_root=False).keys():
edge = 2
elif c_ in c.member_meronyms() + c.part_meronyms() + c.substance_meronyms():
edge = 3
elif c in c_.member_meronyms() + c_.part_meronyms() + c_.substance_meronyms():
edge = 3
adj_mat[i,j] = edge
# Group connected concepts by labels
concept_labels = connected_components(adj_mat, directed=False)[1]
lexical_chains = [([], []) for i in range(max(concept_labels) + 1)]
for i, concept in enumerate(concepts):
label = concept_labels[i]
lexical_chains[label][0].append(concept)
lexical_chains[label][1].append(i)
# Return the lexical chains as (concept list, adjacency sub-matrix) tuples
return [(chain, adj_mat[indices][:,indices]) for chain, indices in lexical_chains] |
def configure_printing(**kwargs):
"""Context manager for temporarily changing the printing system.
This takes the same parameters as :func:`init_printing`
Example:
>>> A = OperatorSymbol('A', hs=1); B = OperatorSymbol('B', hs=1)
>>> with configure_printing(show_hs_label=False):
... print(ascii(A + B))
A + B
>>> print(ascii(A + B))
A^(1) + B^(1)
"""
freeze = init_printing(_freeze=True, **kwargs)
try:
yield
finally:
for obj, attr_map in freeze.items():
for attr, val in attr_map.items():
setattr(obj, attr, val) | Context manager for temporarily changing the printing system.
This takes the same parameters as :func:`init_printing`
Example:
>>> A = OperatorSymbol('A', hs=1); B = OperatorSymbol('B', hs=1)
>>> with configure_printing(show_hs_label=False):
... print(ascii(A + B))
A + B
>>> print(ascii(A + B))
A^(1) + B^(1) | Below is the the instruction that describes the task:
### Input:
Context manager for temporarily changing the printing system.
This takes the same parameters as :func:`init_printing`
Example:
>>> A = OperatorSymbol('A', hs=1); B = OperatorSymbol('B', hs=1)
>>> with configure_printing(show_hs_label=False):
... print(ascii(A + B))
A + B
>>> print(ascii(A + B))
A^(1) + B^(1)
### Response:
def configure_printing(**kwargs):
    """Context manager for temporarily changing the printing system.
    This takes the same parameters as :func:`init_printing`
    Example:
    >>> A = OperatorSymbol('A', hs=1); B = OperatorSymbol('B', hs=1)
    >>> with configure_printing(show_hs_label=False):
    ...     print(ascii(A + B))
    A + B
    >>> print(ascii(A + B))
    A^(1) + B^(1)
    """
    # NOTE(review): this is a generator function used via `with`, so it
    # presumably needs a @contextlib.contextmanager decorator at the
    # definition site -- confirm it was not lost upstream.
    # `_freeze=True` makes init_printing return a snapshot of the settings it
    # overrides: a mapping of {holder object: {attribute: previous value}}.
    freeze = init_printing(_freeze=True, **kwargs)
    try:
        yield
    finally:
        # Restore every overridden attribute, even if the body raised.
        for obj, attr_map in freeze.items():
            for attr, val in attr_map.items():
                setattr(obj, attr, val)
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'tones') and self.tones is not None:
_dict['tones'] = [x._to_dict() for x in self.tones]
if hasattr(self, 'category_id') and self.category_id is not None:
_dict['category_id'] = self.category_id
if hasattr(self, 'category_name') and self.category_name is not None:
_dict['category_name'] = self.category_name
return _dict | Return a json dictionary representing this model. | Below is the the instruction that describes the task:
### Input:
Return a json dictionary representing this model.
### Response:
def _to_dict(self):
    """Return a json dictionary representing this model.

    Attributes that are unset or ``None`` are omitted from the result.
    """
    result = {}
    tones = getattr(self, 'tones', None)
    if tones is not None:
        # Nested models serialize themselves.
        result['tones'] = [tone._to_dict() for tone in tones]
    for attr in ('category_id', 'category_name'):
        value = getattr(self, attr, None)
        if value is not None:
            result[attr] = value
    return result
def init_train_args(self, parser):
'''Only invoked conditionally if subcommand is 'train' '''
parser.add_argument('-f', '--configuration', dest='config', default=DEFAULT_USER_CONFIG_PATH,
help='the path to the configuration file to use -- ./config.yaml by default')
    parser.add_argument('-c', '--corpus-name', help='the name of the corpus or saved dataset to train on') | Below is the instruction that describes the task:
### Input:
Only invoked conditionally if subcommand is 'train'
### Response:
def init_train_args(self, parser):
    '''Only invoked conditionally if subcommand is 'train' '''
    # Config file location, stored under args.config.
    parser.add_argument(
        '-f', '--configuration',
        dest='config',
        default=DEFAULT_USER_CONFIG_PATH,
        help='the path to the configuration file to use -- ./config.yaml by default',
    )
    # Which corpus / saved dataset to train on.
    parser.add_argument(
        '-c', '--corpus-name',
        help='the name of the corpus or saved dataset to train on',
    )
def _Rforce(self,R,z,phi=0.,t=0.):
"""
NAME:
_Rforce
PURPOSE:
evaluate the radial force for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the radial force
HISTORY:
2015-06-15 - Written - Bovy (IAS)
"""
dPhidrr= -(R**2.+z**2.+self._b2)**-1.5
return dPhidrr*R | NAME:
_Rforce
PURPOSE:
evaluate the radial force for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the radial force
HISTORY:
2015-06-15 - Written - Bovy (IAS) | Below is the the instruction that describes the task:
### Input:
NAME:
_Rforce
PURPOSE:
evaluate the radial force for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the radial force
HISTORY:
2015-06-15 - Written - Bovy (IAS)
### Response:
def _Rforce(self,R,z,phi=0.,t=0.):
    """
    NAME:
       _Rforce
    PURPOSE:
       evaluate the radial force for this potential
    INPUT:
       R - Galactocentric cylindrical radius
       z - vertical height
       phi - azimuth
       t - time
    OUTPUT:
       the radial force
    HISTORY:
       2015-06-15 - Written - Bovy (IAS)
    """
    # F_R = -R * (R^2 + z^2 + b^2)^(-3/2); phi and t are unused (static,
    # axisymmetric potential).
    r2_plus_b2 = R**2. + z**2. + self._b2
    return -R * r2_plus_b2**-1.5
def backward_char(event):
" Move back a character. "
buff = event.current_buffer
    buff.cursor_position += buff.document.get_cursor_left_position(count=event.arg) | Move back a character. | Below is the instruction that describes the task:
### Input:
Move back a character.
### Response:
def backward_char(event):
    " Move back a character. "
    buffer = event.current_buffer
    # The document computes a (negative) offset for `event.arg` characters.
    offset = buffer.document.get_cursor_left_position(count=event.arg)
    buffer.cursor_position += offset
def _quoted_text_handler_factory(delimiter, assertion, before, after, append_first=True,
on_close=lambda ctx: None):
"""Generates handlers for quoted text tokens (either short strings or quoted symbols).
Args:
delimiter (int): Ordinal of the quoted text's delimiter.
assertion (callable): Accepts the first character's ordinal, returning True if that character is a legal
beginning to the token.
before (callable): Called upon initialization. Accepts the first character's ordinal, the current context, True
if the token is a field name, and True if the token is a clob; returns the token's current value and True
if ``on_close`` should be called upon termination of the token.
after (callable): Called after termination of the token. Accepts the final character's ordinal, the current
context, and True if the token is a field name; returns a Transition.
append_first (Optional[bool]): True if the first character the coroutine receives is part of the text data, and
should therefore be appended to the value; otherwise, False (in which case, the first character must be
the delimiter).
on_close (Optional[callable]): Called upon termination of the token (before ``after``), if ``before`` indicated
that ``on_close`` should be called. Accepts the current context and returns a Transition. This is useful
for yielding a different kind of Transition based on initialization parameters given to ``before`` (e.g.
string vs. clob).
"""
@coroutine
def quoted_text_handler(c, ctx, is_field_name=False):
assert assertion(c)
def append():
if not _is_escaped_newline(c):
val.append(c)
is_clob = ctx.ion_type is IonType.CLOB
max_char = _MAX_CLOB_CHAR if is_clob else _MAX_TEXT_CHAR
ctx.set_unicode(quoted_text=True)
val, event_on_close = before(c, ctx, is_field_name, is_clob)
if append_first:
append()
c, self = yield
trans = ctx.immediate_transition(self)
done = False
while not done:
if c == delimiter and not _is_escaped(c):
done = True
if event_on_close:
trans = on_close(ctx)
else:
break
else:
_validate_short_quoted_text(c, ctx, max_char)
append()
c, _ = yield trans
yield after(c, ctx, is_field_name)
return quoted_text_handler | Generates handlers for quoted text tokens (either short strings or quoted symbols).
Args:
delimiter (int): Ordinal of the quoted text's delimiter.
assertion (callable): Accepts the first character's ordinal, returning True if that character is a legal
beginning to the token.
before (callable): Called upon initialization. Accepts the first character's ordinal, the current context, True
if the token is a field name, and True if the token is a clob; returns the token's current value and True
if ``on_close`` should be called upon termination of the token.
after (callable): Called after termination of the token. Accepts the final character's ordinal, the current
context, and True if the token is a field name; returns a Transition.
append_first (Optional[bool]): True if the first character the coroutine receives is part of the text data, and
should therefore be appended to the value; otherwise, False (in which case, the first character must be
the delimiter).
on_close (Optional[callable]): Called upon termination of the token (before ``after``), if ``before`` indicated
that ``on_close`` should be called. Accepts the current context and returns a Transition. This is useful
for yielding a different kind of Transition based on initialization parameters given to ``before`` (e.g.
string vs. clob). | Below is the the instruction that describes the task:
### Input:
Generates handlers for quoted text tokens (either short strings or quoted symbols).
Args:
delimiter (int): Ordinal of the quoted text's delimiter.
assertion (callable): Accepts the first character's ordinal, returning True if that character is a legal
beginning to the token.
before (callable): Called upon initialization. Accepts the first character's ordinal, the current context, True
if the token is a field name, and True if the token is a clob; returns the token's current value and True
if ``on_close`` should be called upon termination of the token.
after (callable): Called after termination of the token. Accepts the final character's ordinal, the current
context, and True if the token is a field name; returns a Transition.
append_first (Optional[bool]): True if the first character the coroutine receives is part of the text data, and
should therefore be appended to the value; otherwise, False (in which case, the first character must be
the delimiter).
on_close (Optional[callable]): Called upon termination of the token (before ``after``), if ``before`` indicated
that ``on_close`` should be called. Accepts the current context and returns a Transition. This is useful
for yielding a different kind of Transition based on initialization parameters given to ``before`` (e.g.
string vs. clob).
### Response:
def _quoted_text_handler_factory(delimiter, assertion, before, after, append_first=True,
                                 on_close=lambda ctx: None):
    """Generates handlers for quoted text tokens (either short strings or quoted symbols).
    Args:
        delimiter (int): Ordinal of the quoted text's delimiter.
        assertion (callable): Accepts the first character's ordinal, returning True if that character is a legal
            beginning to the token.
        before (callable): Called upon initialization. Accepts the first character's ordinal, the current context, True
            if the token is a field name, and True if the token is a clob; returns the token's current value and True
            if ``on_close`` should be called upon termination of the token.
        after (callable): Called after termination of the token. Accepts the final character's ordinal, the current
            context, and True if the token is a field name; returns a Transition.
        append_first (Optional[bool]): True if the first character the coroutine receives is part of the text data, and
            should therefore be appended to the value; otherwise, False (in which case, the first character must be
            the delimiter).
        on_close (Optional[callable]): Called upon termination of the token (before ``after``), if ``before`` indicated
            that ``on_close`` should be called. Accepts the current context and returns a Transition. This is useful
            for yielding a different kind of Transition based on initialization parameters given to ``before`` (e.g.
            string vs. clob).
    """
    @coroutine
    def quoted_text_handler(c, ctx, is_field_name=False):
        # `c` is the first character ordinal; must satisfy the factory's assertion.
        assert assertion(c)
        def append():
            # Escaped newlines are line continuations: they contribute no character.
            if not _is_escaped_newline(c):
                val.append(c)
        is_clob = ctx.ion_type is IonType.CLOB
        # Clobs restrict the legal character range more tightly than text.
        max_char = _MAX_CLOB_CHAR if is_clob else _MAX_TEXT_CHAR
        ctx.set_unicode(quoted_text=True)
        val, event_on_close = before(c, ctx, is_field_name, is_clob)
        if append_first:
            append()
        # Coroutine protocol: each iteration receives the next character and
        # the handler itself, and yields the transition to take.
        c, self = yield
        trans = ctx.immediate_transition(self)
        done = False
        while not done:
            # An unescaped delimiter terminates the token.
            if c == delimiter and not _is_escaped(c):
                done = True
                if event_on_close:
                    trans = on_close(ctx)
                else:
                    break
            else:
                _validate_short_quoted_text(c, ctx, max_char)
                append()
            c, _ = yield trans
        yield after(c, ctx, is_field_name)
    return quoted_text_handler
def display_dataset(self):
"""Update the widget with information about the dataset."""
header = self.dataset.header
self.parent.setWindowTitle(basename(self.filename))
short_filename = short_strings(basename(self.filename))
self.idx_filename.setText(short_filename)
self.idx_s_freq.setText(str(header['s_freq']))
self.idx_n_chan.setText(str(len(header['chan_name'])))
start_time = header['start_time'].strftime('%b-%d %H:%M:%S')
self.idx_start_time.setText(start_time)
end_time = (header['start_time'] +
timedelta(seconds=header['n_samples'] / header['s_freq']))
self.idx_end_time.setText(end_time.strftime('%b-%d %H:%M:%S')) | Update the widget with information about the dataset. | Below is the the instruction that describes the task:
### Input:
Update the widget with information about the dataset.
### Response:
def display_dataset(self):
    """Update the widget with information about the dataset."""
    header = self.dataset.header
    self.parent.setWindowTitle(basename(self.filename))
    # Use the abbreviated filename inside the widget itself.
    self.idx_filename.setText(short_strings(basename(self.filename)))
    self.idx_s_freq.setText(str(header['s_freq']))
    self.idx_n_chan.setText(str(len(header['chan_name'])))
    self.idx_start_time.setText(header['start_time'].strftime('%b-%d %H:%M:%S'))
    # End time = start time plus the recording duration in seconds.
    duration = timedelta(seconds=header['n_samples'] / header['s_freq'])
    end_time = header['start_time'] + duration
    self.idx_end_time.setText(end_time.strftime('%b-%d %H:%M:%S'))
def getUserInfo(self):
"""
Returns a dictionary of user info that google stores.
"""
userJson = self.httpGet(ReaderUrl.USER_INFO_URL)
result = json.loads(userJson, strict=False)
self.userId = result['userId']
return result | Returns a dictionary of user info that google stores. | Below is the the instruction that describes the task:
### Input:
Returns a dictionary of user info that google stores.
### Response:
def getUserInfo(self):
    """
    Returns a dictionary of user info that google stores.

    Also caches the user's id on ``self.userId`` as a side effect.
    """
    raw = self.httpGet(ReaderUrl.USER_INFO_URL)
    # strict=False tolerates control characters inside strings.
    info = json.loads(raw, strict=False)
    self.userId = info['userId']
    return info
def set( string, target_level, indent_string=" ", indent_empty_lines=False ):
""" Sets indentation of a single/multi-line string. """
lines = string.splitlines()
set_lines( lines, target_level, indent_string=indent_string, indent_empty_lines=indent_empty_lines )
result = "\n".join(lines)
    return result | Sets indentation of a single/multi-line string. | Below is the instruction that describes the task:
### Input:
Sets indentation of a single/multi-line string.
### Response:
def set( string, target_level, indent_string="    ", indent_empty_lines=False ):
    """ Sets indentation of a single/multi-line string. """
    lines = string.splitlines()
    # set_lines mutates `lines` in place to the requested indent level.
    set_lines( lines, target_level, indent_string=indent_string, indent_empty_lines=indent_empty_lines )
    return "\n".join(lines)
def _create_date_slug(self):
"""Prefixes the slug with the ``published_on`` date."""
if not self.pk:
# haven't saved this yet, so use today's date
d = utc_now()
elif self.published and self.published_on:
# use the actual published on date
d = self.published_on
elif self.updated_on:
# default to the last-updated date
d = self.updated_on
self.date_slug = u"{0}/{1}".format(d.strftime("%Y/%m/%d"), self.slug) | Prefixes the slug with the ``published_on`` date. | Below is the the instruction that describes the task:
### Input:
Prefixes the slug with the ``published_on`` date.
### Response:
def _create_date_slug(self):
    """Prefixes the slug with a date, storing the result on ``self.date_slug``.

    The date used is, in order of preference: today's date (for unsaved
    instances), ``published_on`` (for published instances), then
    ``updated_on``, falling back to now.
    """
    if not self.pk:
        # haven't saved this yet, so use today's date
        d = utc_now()
    elif self.published and self.published_on:
        # use the actual published on date
        d = self.published_on
    elif self.updated_on:
        # default to the last-updated date
        d = self.updated_on
    else:
        # BUGFIX: previously `d` was left unbound here (saved, unpublished,
        # and no updated_on), raising NameError below; fall back to now.
        d = utc_now()
    self.date_slug = u"{0}/{1}".format(d.strftime("%Y/%m/%d"), self.slug)
def boolean_arg(ctx, obj):
'''
Handles LiteralObjects as well as computable arguments
'''
if hasattr(obj, 'compute'):
obj = next(obj.compute(ctx), False)
return to_boolean(obj) | Handles LiteralObjects as well as computable arguments | Below is the the instruction that describes the task:
### Input:
Handles LiteralObjects as well as computable arguments
### Response:
def boolean_arg(ctx, obj):
    '''
    Coerce an argument to boolean; handles LiteralObjects as well as
    computable arguments (objects exposing a ``compute(ctx)`` generator).
    '''
    value = obj
    if hasattr(value, 'compute'):
        # The first computed item wins; an exhausted stream coerces from False.
        value = next(value.compute(ctx), False)
    return to_boolean(value)
def create_essay_set(text, score, prompt_string, generate_additional=True):
"""
Creates an essay set from given data.
Text should be a list of strings corresponding to essay text.
Score should be a list of scores where score[n] corresponds to text[n]
Prompt string is just a string containing the essay prompt.
Generate_additional indicates whether to generate additional essays at the minimum score point or not.
"""
x = EssaySet()
for i in xrange(0, len(text)):
x.add_essay(text[i], score[i])
if score[i] == min(score) and generate_additional == True:
x.generate_additional_essays(x._clean_text[len(x._clean_text) - 1], score[i])
x.update_prompt(prompt_string)
return x | Creates an essay set from given data.
Text should be a list of strings corresponding to essay text.
Score should be a list of scores where score[n] corresponds to text[n]
Prompt string is just a string containing the essay prompt.
Generate_additional indicates whether to generate additional essays at the minimum score point or not. | Below is the the instruction that describes the task:
### Input:
Creates an essay set from given data.
Text should be a list of strings corresponding to essay text.
Score should be a list of scores where score[n] corresponds to text[n]
Prompt string is just a string containing the essay prompt.
Generate_additional indicates whether to generate additional essays at the minimum score point or not.
### Response:
def create_essay_set(text, score, prompt_string, generate_additional=True):
    """
    Creates an essay set from given data.
    Text should be a list of strings corresponding to essay text.
    Score should be a list of scores where score[n] corresponds to text[n]
    Prompt string is just a string containing the essay prompt.
    Generate_additional indicates whether to generate additional essays at the minimum score point or not.
    """
    x = EssaySet()
    # Hoist min() out of the loop (was recomputed per essay, O(n^2) overall);
    # guard against an empty score list.
    min_score = min(score) if score else None
    for i in xrange(0, len(text)):
        x.add_essay(text[i], score[i])
        # Pad out the minimum score point with synthetic essays if requested.
        if generate_additional and score[i] == min_score:
            x.generate_additional_essays(x._clean_text[-1], score[i])
    x.update_prompt(prompt_string)
    return x
def create_project(self):
"""Create project."""
packages = ['python={0}'.format(self.combo_python_version.currentText())]
self.sig_project_creation_requested.emit(
self.text_location.text(),
self.combo_project_type.currentText(),
packages)
self.accept() | Create project. | Below is the the instruction that describes the task:
### Input:
Create project.
### Response:
def create_project(self):
    """Create project."""
    # Pin the interpreter version chosen in the combo box.
    python_spec = 'python={0}'.format(self.combo_python_version.currentText())
    self.sig_project_creation_requested.emit(
        self.text_location.text(),
        self.combo_project_type.currentText(),
        [python_spec],
    )
    self.accept()
def __assembleURL(self, url, groupId):
"""private function that assembles the URL for the community.Group
class"""
from ..packages.six.moves.urllib_parse import urlparse
parsed = urlparse(url)
communityURL = "%s://%s%s/sharing/rest/community/groups/%s" % (parsed.scheme, parsed.netloc,
parsed.path.lower().split('/sharing/rest/')[0],
groupId)
return communityURL | private function that assembles the URL for the community.Group
class | Below is the the instruction that describes the task:
### Input:
private function that assembles the URL for the community.Group
class
### Response:
def __assembleURL(self, url, groupId):
    """private function that assembles the URL for the community.Group
    class"""
    from ..packages.six.moves.urllib_parse import urlparse
    parts = urlparse(url)
    # Everything before '/sharing/rest/' is the portal's base path.
    base_path = parts.path.lower().split('/sharing/rest/')[0]
    return "%s://%s%s/sharing/rest/community/groups/%s" % (parts.scheme, parts.netloc,
                                                           base_path, groupId)
def _raise_duplicate_msg_id(symbol, msgid, other_msgid):
"""Raise an error when a msgid is duplicated.
:param str symbol: The symbol corresponding to the msgids
:param str msgid: Offending msgid
:param str other_msgid: Other offending msgid
:raises InvalidMessageError: when a msgid is duplicated.
"""
msgids = [msgid, other_msgid]
msgids.sort()
error_message = "Message symbol '{symbol}' cannot be used for ".format(
symbol=symbol
)
error_message += "'{other_msgid}' and '{msgid}' at the same time.".format(
other_msgid=msgids[0], msgid=msgids[1]
)
raise InvalidMessageError(error_message) | Raise an error when a msgid is duplicated.
:param str symbol: The symbol corresponding to the msgids
:param str msgid: Offending msgid
:param str other_msgid: Other offending msgid
:raises InvalidMessageError: when a msgid is duplicated. | Below is the the instruction that describes the task:
### Input:
Raise an error when a msgid is duplicated.
:param str symbol: The symbol corresponding to the msgids
:param str msgid: Offending msgid
:param str other_msgid: Other offending msgid
:raises InvalidMessageError: when a msgid is duplicated.
### Response:
def _raise_duplicate_msg_id(symbol, msgid, other_msgid):
    """Raise an error when a msgid is duplicated.
    :param str symbol: The symbol corresponding to the msgids
    :param str msgid: Offending msgid
    :param str other_msgid: Other offending msgid
    :raises InvalidMessageError: when a msgid is duplicated.
    """
    # Sort so the message is deterministic regardless of argument order.
    first, second = sorted([msgid, other_msgid])
    error_message = (
        "Message symbol '{symbol}' cannot be used for ".format(symbol=symbol)
        + "'{other_msgid}' and '{msgid}' at the same time.".format(
            other_msgid=first, msgid=second
        )
    )
    raise InvalidMessageError(error_message)
def _len_list(obj):
'''Length of list (estimate).
'''
n = len(obj)
# estimate over-allocation
if n > 8:
n += 6 + (n >> 3)
elif n:
n += 4
return n | Length of list (estimate). | Below is the the instruction that describes the task:
### Input:
Length of list (estimate).
### Response:
def _len_list(obj):
    '''Length of list (estimate).
    '''
    n = len(obj)
    if n > 8:
        # larger lists over-allocate roughly n/8 extra slots plus a constant
        return n + 6 + (n >> 3)
    if n:
        # small non-empty lists get a fixed over-allocation
        return n + 4
    return n
def ppf(self, u):
"""
Note: if dim(u) < self.dim, the remaining columns are filled with 0
Useful in case the distribution is partly degenerate
"""
N, du = u.shape
if du < self.dim:
z = np.zeros((N, self.dim))
z[:, :du] = stats.norm.ppf(u)
else:
z = stats.norm.ppf(u)
return self.linear_transform(z) | Note: if dim(u) < self.dim, the remaining columns are filled with 0
Useful in case the distribution is partly degenerate | Below is the the instruction that describes the task:
### Input:
Note: if dim(u) < self.dim, the remaining columns are filled with 0
Useful in case the distribution is partly degenerate
### Response:
def ppf(self, u):
    """
    Note: if dim(u) < self.dim, the remaining columns are filled with 0
    Useful in case the distribution is partly degenerate
    """
    n_rows, n_cols = u.shape
    if n_cols < self.dim:
        # Pad the missing (degenerate) dimensions with zeros.
        z = np.zeros((n_rows, self.dim))
        z[:, :n_cols] = stats.norm.ppf(u)
    else:
        z = stats.norm.ppf(u)
    return self.linear_transform(z)
def rrs(ax, b):
r"""
Compute relative residual :math:`\|\mathbf{b} - A \mathbf{x}\|_2 /
\|\mathbf{b}\|_2` of the solution to a linear equation :math:`A \mathbf{x}
= \mathbf{b}`. Returns 1.0 if :math:`\mathbf{b} = 0`.
Parameters
----------
ax : array_like
Linear component :math:`A \mathbf{x}` of equation
b : array_like
Constant component :math:`\mathbf{b}` of equation
Returns
-------
x : float
Relative residual
"""
nrm = np.linalg.norm(b.ravel())
if nrm == 0.0:
return 1.0
else:
return np.linalg.norm((ax - b).ravel()) / nrm | r"""
Compute relative residual :math:`\|\mathbf{b} - A \mathbf{x}\|_2 /
\|\mathbf{b}\|_2` of the solution to a linear equation :math:`A \mathbf{x}
= \mathbf{b}`. Returns 1.0 if :math:`\mathbf{b} = 0`.
Parameters
----------
ax : array_like
Linear component :math:`A \mathbf{x}` of equation
b : array_like
Constant component :math:`\mathbf{b}` of equation
Returns
-------
x : float
Relative residual | Below is the the instruction that describes the task:
### Input:
r"""
Compute relative residual :math:`\|\mathbf{b} - A \mathbf{x}\|_2 /
\|\mathbf{b}\|_2` of the solution to a linear equation :math:`A \mathbf{x}
= \mathbf{b}`. Returns 1.0 if :math:`\mathbf{b} = 0`.
Parameters
----------
ax : array_like
Linear component :math:`A \mathbf{x}` of equation
b : array_like
Constant component :math:`\mathbf{b}` of equation
Returns
-------
x : float
Relative residual
### Response:
def rrs(ax, b):
    r"""
    Compute relative residual :math:`\|\mathbf{b} - A \mathbf{x}\|_2 /
    \|\mathbf{b}\|_2` of the solution to a linear equation :math:`A \mathbf{x}
    = \mathbf{b}`. Returns 1.0 if :math:`\mathbf{b} = 0`.
    Parameters
    ----------
    ax : array_like
      Linear component :math:`A \mathbf{x}` of equation
    b : array_like
      Constant component :math:`\mathbf{b}` of equation
    Returns
    -------
    x : float
      Relative residual
    """
    b_norm = np.linalg.norm(b.ravel())
    if b_norm == 0.0:
        # Avoid division by zero for a zero right-hand side.
        return 1.0
    residual_norm = np.linalg.norm((ax - b).ravel())
    return residual_norm / b_norm
def trim_Ns(self):
'''Removes any leading or trailing N or n characters from the sequence'''
# get index of first base that is not an N
i = 0
while i < len(self) and self.seq[i] in 'nN':
i += 1
# strip off start of sequence and quality
self.seq = self.seq[i:]
self.qual = self.qual[i:]
# strip the ends
self.seq = self.seq.rstrip('Nn')
self.qual = self.qual[:len(self.seq)] | Removes any leading or trailing N or n characters from the sequence | Below is the the instruction that describes the task:
### Input:
Removes any leading or trailing N or n characters from the sequence
### Response:
def trim_Ns(self):
    '''Removes any leading or trailing N or n characters from the sequence'''
    # Count leading N/n characters, bounded by this record's length.
    first_real = 0
    limit = len(self)
    while first_real < limit and self.seq[first_real] in 'nN':
        first_real += 1
    # Drop the leading run from both sequence and quality.
    self.seq = self.seq[first_real:]
    self.qual = self.qual[first_real:]
    # Strip the trailing run, then truncate quality to match.
    self.seq = self.seq.rstrip('Nn')
    self.qual = self.qual[:len(self.seq)]
def bump(self, level='patch', label=None):
""" Bump version following semantic versioning rules. """
bump = self._bump_pre if level == 'pre' else self._bump
bump(level, label) | Bump version following semantic versioning rules. | Below is the the instruction that describes the task:
### Input:
Bump version following semantic versioning rules.
### Response:
def bump(self, level='patch', label=None):
    """ Bump version following semantic versioning rules. """
    # Pre-release bumps use a dedicated code path.
    if level == 'pre':
        self._bump_pre(level, label)
    else:
        self._bump(level, label)
def getValue(self):
"""
Returns PromisedRequirement value
"""
func = dill.loads(self._func)
return func(*self._args) | Returns PromisedRequirement value | Below is the the instruction that describes the task:
### Input:
Returns PromisedRequirement value
### Response:
def getValue(self):
    """
    Returns PromisedRequirement value
    """
    # Deserialize the pickled function, then apply it to the stored args.
    unpickled_func = dill.loads(self._func)
    return unpickled_func(*self._args)
def get_child(parent, tag_name, root_or_cache, namespace):
# type: (_Element, str, _DocRoot, str) -> typing.Optional[_Element]
"""Get first sub-child or referenced sub-child with given name."""
# logger.debug("get_child: " + tag_name)
if parent is None:
return None
ret = parent.find('.//' + namespace + tag_name)
if ret is None: # no direct element - try reference
reference = parent.find('.//' + namespace + tag_name + '-REF')
if reference is not None:
if isinstance(root_or_cache, ArTree):
ret = get_cached_element_by_path(root_or_cache, reference.text)
else:
ret = get_element_by_path(root_or_cache, reference.text, namespace)
return ret | Get first sub-child or referenced sub-child with given name. | Below is the the instruction that describes the task:
### Input:
Get first sub-child or referenced sub-child with given name.
### Response:
def get_child(parent, tag_name, root_or_cache, namespace):
    # type: (_Element, str, _DocRoot, str) -> typing.Optional[_Element]
    """Get first sub-child or referenced sub-child with given name."""
    if parent is None:
        return None
    found = parent.find('.//' + namespace + tag_name)
    if found is not None:
        return found
    # No direct element - fall back to resolving a *-REF reference.
    reference = parent.find('.//' + namespace + tag_name + '-REF')
    if reference is None:
        return None
    if isinstance(root_or_cache, ArTree):
        return get_cached_element_by_path(root_or_cache, reference.text)
    return get_element_by_path(root_or_cache, reference.text, namespace)
def sample_u(self, q):
r"""Extract a sample from random variates uniform on :math:`[0, 1]`.
For a univariate distribution, this is simply evaluating the inverse
CDF. To facilitate efficient sampling, this function returns a *vector*
of PPF values, one value for each variable. Basically, the idea is that,
given a vector :math:`q` of `num_params` values each of which is
distributed uniformly on :math:`[0, 1]`, this function will return
corresponding samples for each variable.
Parameters
----------
q : array of float
Values between 0 and 1 to evaluate inverse CDF at.
"""
q = scipy.atleast_1d(q)
if len(q) != self.num_var:
raise ValueError("length of q must equal the number of parameters!")
if q.ndim != 1:
raise ValueError("q must be one-dimensional!")
if (q < 0).any() or (q > 1).any():
raise ValueError("q must be within [0, 1]!")
# Old way, not quite correct:
# q = scipy.sort(q)
# return scipy.asarray([(self.ub - self.lb) * v + self.lb for v in q])
# New way, based on conditional marginals:
out = scipy.zeros_like(q, dtype=float)
out[0] = self.lb
for d in xrange(0, len(out)):
out[d] = (
(1.0 - (1.0 - q[d])**(1.0 / (self.num_var - d))) *
(self.ub - out[max(d - 1, 0)]) + out[max(d - 1, 0)]
)
return out | r"""Extract a sample from random variates uniform on :math:`[0, 1]`.
For a univariate distribution, this is simply evaluating the inverse
CDF. To facilitate efficient sampling, this function returns a *vector*
of PPF values, one value for each variable. Basically, the idea is that,
given a vector :math:`q` of `num_params` values each of which is
distributed uniformly on :math:`[0, 1]`, this function will return
corresponding samples for each variable.
Parameters
----------
q : array of float
Values between 0 and 1 to evaluate inverse CDF at. | Below is the the instruction that describes the task:
### Input:
r"""Extract a sample from random variates uniform on :math:`[0, 1]`.
For a univariate distribution, this is simply evaluating the inverse
CDF. To facilitate efficient sampling, this function returns a *vector*
of PPF values, one value for each variable. Basically, the idea is that,
given a vector :math:`q` of `num_params` values each of which is
distributed uniformly on :math:`[0, 1]`, this function will return
corresponding samples for each variable.
Parameters
----------
q : array of float
Values between 0 and 1 to evaluate inverse CDF at.
### Response:
def sample_u(self, q):
    r"""Extract a sample from random variates uniform on :math:`[0, 1]`.
    For a univariate distribution, this is simply evaluating the inverse
    CDF. To facilitate efficient sampling, this function returns a *vector*
    of PPF values, one value for each variable. Basically, the idea is that,
    given a vector :math:`q` of `num_params` values each of which is
    distributed uniformly on :math:`[0, 1]`, this function will return
    corresponding samples for each variable.
    Parameters
    ----------
    q : array of float
        Values between 0 and 1 to evaluate inverse CDF at.
    """
    # Validate: q must be a 1-d vector of length num_var with entries in [0, 1].
    q = scipy.atleast_1d(q)
    if len(q) != self.num_var:
        raise ValueError("length of q must equal the number of parameters!")
    if q.ndim != 1:
        raise ValueError("q must be one-dimensional!")
    if (q < 0).any() or (q > 1).any():
        raise ValueError("q must be within [0, 1]!")
    # Old way, not quite correct:
    # q = scipy.sort(q)
    # return scipy.asarray([(self.ub - self.lb) * v + self.lb for v in q])
    # New way, based on conditional marginals:
    out = scipy.zeros_like(q, dtype=float)
    # Seed the recurrence with the lower bound; out[0] is overwritten below
    # using this value as its conditioning point.
    out[0] = self.lb
    # NOTE: `xrange` marks this as Python 2 code.
    for d in xrange(0, len(out)):
        # Each out[d] is conditioned on the previous value out[d-1]
        # (presumably the order-statistic transform for uniform variates on
        # [lb, ub] -- TODO confirm), so this loop is order-dependent.
        out[d] = (
            (1.0 - (1.0 - q[d])**(1.0 / (self.num_var - d))) *
            (self.ub - out[max(d - 1, 0)]) + out[max(d - 1, 0)]
        )
    return out
def turn_physical_on(self,ro=None,vo=None):
"""
NAME:
turn_physical_on
PURPOSE:
turn on automatic returning of outputs in physical units
INPUT:
ro= reference distance (kpc)
vo= reference velocity (km/s)
OUTPUT:
(none)
HISTORY:
2016-01-19 - Written - Bovy (UofT)
"""
self._roSet= True
self._voSet= True
if not ro is None:
self._ro= ro
if not vo is None:
self._vo= vo
return None | NAME:
turn_physical_on
PURPOSE:
turn on automatic returning of outputs in physical units
INPUT:
ro= reference distance (kpc)
vo= reference velocity (km/s)
OUTPUT:
(none)
HISTORY:
2016-01-19 - Written - Bovy (UofT) | Below is the the instruction that describes the task:
### Input:
NAME:
turn_physical_on
PURPOSE:
turn on automatic returning of outputs in physical units
INPUT:
ro= reference distance (kpc)
vo= reference velocity (km/s)
OUTPUT:
(none)
HISTORY:
2016-01-19 - Written - Bovy (UofT)
### Response:
def turn_physical_on(self,ro=None,vo=None):
"""
NAME:
turn_physical_on
PURPOSE:
turn on automatic returning of outputs in physical units
INPUT:
ro= reference distance (kpc)
vo= reference velocity (km/s)
OUTPUT:
(none)
HISTORY:
2016-01-19 - Written - Bovy (UofT)
"""
self._roSet= True
self._voSet= True
if not ro is None:
self._ro= ro
if not vo is None:
self._vo= vo
return None |
def getValue(self):
"""
Get the value of the previously POSTed Tropo action.
"""
actions = self._actions
if (type (actions) is list):
dict = actions[0]
else:
dict = actions
return dict.get('value', 'NoValue') | Get the value of the previously POSTed Tropo action. | Below is the the instruction that describes the task:
### Input:
Get the value of the previously POSTed Tropo action.
### Response:
def getValue(self):
"""
Get the value of the previously POSTed Tropo action.
"""
actions = self._actions
if (type (actions) is list):
dict = actions[0]
else:
dict = actions
return dict.get('value', 'NoValue') |
def setup_options():
"""Sets Streamlink options."""
if args.hls_live_edge:
streamlink.set_option("hls-live-edge", args.hls_live_edge)
if args.hls_segment_attempts:
streamlink.set_option("hls-segment-attempts", args.hls_segment_attempts)
if args.hls_playlist_reload_attempts:
streamlink.set_option("hls-playlist-reload-attempts", args.hls_playlist_reload_attempts)
if args.hls_segment_threads:
streamlink.set_option("hls-segment-threads", args.hls_segment_threads)
if args.hls_segment_timeout:
streamlink.set_option("hls-segment-timeout", args.hls_segment_timeout)
if args.hls_segment_ignore_names:
streamlink.set_option("hls-segment-ignore-names", args.hls_segment_ignore_names)
if args.hls_segment_key_uri:
streamlink.set_option("hls-segment-key-uri", args.hls_segment_key_uri)
if args.hls_timeout:
streamlink.set_option("hls-timeout", args.hls_timeout)
if args.hls_audio_select:
streamlink.set_option("hls-audio-select", args.hls_audio_select)
if args.hls_start_offset:
streamlink.set_option("hls-start-offset", args.hls_start_offset)
if args.hls_duration:
streamlink.set_option("hls-duration", args.hls_duration)
if args.hls_live_restart:
streamlink.set_option("hls-live-restart", args.hls_live_restart)
if args.hds_live_edge:
streamlink.set_option("hds-live-edge", args.hds_live_edge)
if args.hds_segment_attempts:
streamlink.set_option("hds-segment-attempts", args.hds_segment_attempts)
if args.hds_segment_threads:
streamlink.set_option("hds-segment-threads", args.hds_segment_threads)
if args.hds_segment_timeout:
streamlink.set_option("hds-segment-timeout", args.hds_segment_timeout)
if args.hds_timeout:
streamlink.set_option("hds-timeout", args.hds_timeout)
if args.http_stream_timeout:
streamlink.set_option("http-stream-timeout", args.http_stream_timeout)
if args.ringbuffer_size:
streamlink.set_option("ringbuffer-size", args.ringbuffer_size)
if args.rtmp_proxy:
streamlink.set_option("rtmp-proxy", args.rtmp_proxy)
if args.rtmp_rtmpdump:
streamlink.set_option("rtmp-rtmpdump", args.rtmp_rtmpdump)
if args.rtmp_timeout:
streamlink.set_option("rtmp-timeout", args.rtmp_timeout)
if args.stream_segment_attempts:
streamlink.set_option("stream-segment-attempts", args.stream_segment_attempts)
if args.stream_segment_threads:
streamlink.set_option("stream-segment-threads", args.stream_segment_threads)
if args.stream_segment_timeout:
streamlink.set_option("stream-segment-timeout", args.stream_segment_timeout)
if args.stream_timeout:
streamlink.set_option("stream-timeout", args.stream_timeout)
if args.ffmpeg_ffmpeg:
streamlink.set_option("ffmpeg-ffmpeg", args.ffmpeg_ffmpeg)
if args.ffmpeg_verbose:
streamlink.set_option("ffmpeg-verbose", args.ffmpeg_verbose)
if args.ffmpeg_verbose_path:
streamlink.set_option("ffmpeg-verbose-path", args.ffmpeg_verbose_path)
if args.ffmpeg_video_transcode:
streamlink.set_option("ffmpeg-video-transcode", args.ffmpeg_video_transcode)
if args.ffmpeg_audio_transcode:
streamlink.set_option("ffmpeg-audio-transcode", args.ffmpeg_audio_transcode)
streamlink.set_option("subprocess-errorlog", args.subprocess_errorlog)
streamlink.set_option("subprocess-errorlog-path", args.subprocess_errorlog_path)
streamlink.set_option("locale", args.locale) | Sets Streamlink options. | Below is the the instruction that describes the task:
### Input:
Sets Streamlink options.
### Response:
def setup_options():
"""Sets Streamlink options."""
if args.hls_live_edge:
streamlink.set_option("hls-live-edge", args.hls_live_edge)
if args.hls_segment_attempts:
streamlink.set_option("hls-segment-attempts", args.hls_segment_attempts)
if args.hls_playlist_reload_attempts:
streamlink.set_option("hls-playlist-reload-attempts", args.hls_playlist_reload_attempts)
if args.hls_segment_threads:
streamlink.set_option("hls-segment-threads", args.hls_segment_threads)
if args.hls_segment_timeout:
streamlink.set_option("hls-segment-timeout", args.hls_segment_timeout)
if args.hls_segment_ignore_names:
streamlink.set_option("hls-segment-ignore-names", args.hls_segment_ignore_names)
if args.hls_segment_key_uri:
streamlink.set_option("hls-segment-key-uri", args.hls_segment_key_uri)
if args.hls_timeout:
streamlink.set_option("hls-timeout", args.hls_timeout)
if args.hls_audio_select:
streamlink.set_option("hls-audio-select", args.hls_audio_select)
if args.hls_start_offset:
streamlink.set_option("hls-start-offset", args.hls_start_offset)
if args.hls_duration:
streamlink.set_option("hls-duration", args.hls_duration)
if args.hls_live_restart:
streamlink.set_option("hls-live-restart", args.hls_live_restart)
if args.hds_live_edge:
streamlink.set_option("hds-live-edge", args.hds_live_edge)
if args.hds_segment_attempts:
streamlink.set_option("hds-segment-attempts", args.hds_segment_attempts)
if args.hds_segment_threads:
streamlink.set_option("hds-segment-threads", args.hds_segment_threads)
if args.hds_segment_timeout:
streamlink.set_option("hds-segment-timeout", args.hds_segment_timeout)
if args.hds_timeout:
streamlink.set_option("hds-timeout", args.hds_timeout)
if args.http_stream_timeout:
streamlink.set_option("http-stream-timeout", args.http_stream_timeout)
if args.ringbuffer_size:
streamlink.set_option("ringbuffer-size", args.ringbuffer_size)
if args.rtmp_proxy:
streamlink.set_option("rtmp-proxy", args.rtmp_proxy)
if args.rtmp_rtmpdump:
streamlink.set_option("rtmp-rtmpdump", args.rtmp_rtmpdump)
if args.rtmp_timeout:
streamlink.set_option("rtmp-timeout", args.rtmp_timeout)
if args.stream_segment_attempts:
streamlink.set_option("stream-segment-attempts", args.stream_segment_attempts)
if args.stream_segment_threads:
streamlink.set_option("stream-segment-threads", args.stream_segment_threads)
if args.stream_segment_timeout:
streamlink.set_option("stream-segment-timeout", args.stream_segment_timeout)
if args.stream_timeout:
streamlink.set_option("stream-timeout", args.stream_timeout)
if args.ffmpeg_ffmpeg:
streamlink.set_option("ffmpeg-ffmpeg", args.ffmpeg_ffmpeg)
if args.ffmpeg_verbose:
streamlink.set_option("ffmpeg-verbose", args.ffmpeg_verbose)
if args.ffmpeg_verbose_path:
streamlink.set_option("ffmpeg-verbose-path", args.ffmpeg_verbose_path)
if args.ffmpeg_video_transcode:
streamlink.set_option("ffmpeg-video-transcode", args.ffmpeg_video_transcode)
if args.ffmpeg_audio_transcode:
streamlink.set_option("ffmpeg-audio-transcode", args.ffmpeg_audio_transcode)
streamlink.set_option("subprocess-errorlog", args.subprocess_errorlog)
streamlink.set_option("subprocess-errorlog-path", args.subprocess_errorlog_path)
streamlink.set_option("locale", args.locale) |
def print_file(self, f=sys.stdout, file_format="cif", tw=0):
"""Print :class:`~nmrstarlib.nmrstarlib.CIFFile` into a file or stdout.
:param io.StringIO f: writable file-like stream.
:param str file_format: Format to use: `cif` or `json`.
:param int tw: Tab width.
:return: None
:rtype: :py:obj:`None`
"""
if file_format == "cif":
for key in self.keys():
if key == u"data":
print(u"{}_{}".format(key, self[key]), file=f)
elif key.startswith(u"comment"):
print(u"{}".format(self[key].strip()), file=f)
elif key.startswith(u"loop_"):
print(u"{}loop_".format(tw * u" "), file=f)
self.print_loop(key, f, file_format, tw)
else:
# handle the NMR-Star "multiline string"
if self[key].endswith(u"\n"):
print(u"{}_{}".format(tw * u" ", key), file=f)
print(u";{};".format(self[key]), file=f)
# need to escape value with quotes (i.e. u"'{}'".format()) if value consists of two or more words
elif len(self[key].split()) > 1:
print(u"{}_{}\t {}".format(tw * u" ", key, u"'{}'".format(self[key])), file=f)
else:
print(u"{}_{}\t {}".format(tw * u" ", key, self[key]), file=f)
elif file_format == "json":
print(self._to_json(), file=f) | Print :class:`~nmrstarlib.nmrstarlib.CIFFile` into a file or stdout.
:param io.StringIO f: writable file-like stream.
:param str file_format: Format to use: `cif` or `json`.
:param int tw: Tab width.
:return: None
:rtype: :py:obj:`None` | Below is the the instruction that describes the task:
### Input:
Print :class:`~nmrstarlib.nmrstarlib.CIFFile` into a file or stdout.
:param io.StringIO f: writable file-like stream.
:param str file_format: Format to use: `cif` or `json`.
:param int tw: Tab width.
:return: None
:rtype: :py:obj:`None`
### Response:
def print_file(self, f=sys.stdout, file_format="cif", tw=0):
"""Print :class:`~nmrstarlib.nmrstarlib.CIFFile` into a file or stdout.
:param io.StringIO f: writable file-like stream.
:param str file_format: Format to use: `cif` or `json`.
:param int tw: Tab width.
:return: None
:rtype: :py:obj:`None`
"""
if file_format == "cif":
for key in self.keys():
if key == u"data":
print(u"{}_{}".format(key, self[key]), file=f)
elif key.startswith(u"comment"):
print(u"{}".format(self[key].strip()), file=f)
elif key.startswith(u"loop_"):
print(u"{}loop_".format(tw * u" "), file=f)
self.print_loop(key, f, file_format, tw)
else:
# handle the NMR-Star "multiline string"
if self[key].endswith(u"\n"):
print(u"{}_{}".format(tw * u" ", key), file=f)
print(u";{};".format(self[key]), file=f)
# need to escape value with quotes (i.e. u"'{}'".format()) if value consists of two or more words
elif len(self[key].split()) > 1:
print(u"{}_{}\t {}".format(tw * u" ", key, u"'{}'".format(self[key])), file=f)
else:
print(u"{}_{}\t {}".format(tw * u" ", key, self[key]), file=f)
elif file_format == "json":
print(self._to_json(), file=f) |
def extract_graphs_from_tweets(tweet_generator):
"""
Given a tweet python generator, we encode the information into mention and retweet graphs.
We assume that the tweets are given in increasing timestamp.
Inputs: - tweet_generator: A python generator of tweets in python dictionary (json) format.
Outputs: - mention_graph: The mention graph as a SciPy sparse matrix.
- user_id_set: A python set containing the Twitter ids for all the dataset users.
- node_to_id: A python dictionary that maps from node anonymized ids, to twitter user ids.
"""
####################################################################################################################
# Prepare for iterating over tweets.
####################################################################################################################
# These are initialized as lists for incremental extension.
tweet_id_set = set()
user_id_set = list()
twitter_to_reveal_user_id = dict()
add_tweet_id = tweet_id_set.add
append_user_id = user_id_set.append
# Initialize sparse matrix arrays.
mention_graph_row = list()
mention_graph_col = list()
retweet_graph_row = list()
retweet_graph_col = list()
append_mention_graph_row = mention_graph_row.append
append_mention_graph_col = mention_graph_col.append
append_retweet_graph_row = retweet_graph_row.append
append_retweet_graph_col = retweet_graph_col.append
# Initialize dictionaries.
id_to_node = dict()
id_to_name = dict()
id_to_username = dict()
id_to_listedcount = dict()
####################################################################################################################
# Iterate over tweets.
####################################################################################################################
counter = 0
for tweet in tweet_generator:
# Increment tweet counter.
counter += 1
if counter % 10000 == 0:
print(counter)
# print(counter)
# Extract base tweet's values.
try:
tweet_id = tweet["id"]
user_id = tweet["user"]["id"]
user_screen_name = tweet["user"]["screen_name"]
user_name = tweet["user"]["name"]
listed_count_raw = tweet["user"]["listed_count"]
tweet_in_reply_to_user_id = tweet["in_reply_to_user_id"]
tweet_in_reply_to_screen_name = tweet["in_reply_to_screen_name"]
tweet_entities_user_mentions = tweet["entities"]["user_mentions"]
except KeyError:
continue
# Map users to distinct integer numbers.
graph_size = len(id_to_node)
source_node = id_to_node.setdefault(user_id, graph_size)
if listed_count_raw is None:
id_to_listedcount[user_id] = 0
else:
id_to_listedcount[user_id] = int(listed_count_raw)
# Update sets, lists and dictionaries.
add_tweet_id(tweet_id)
id_to_name[user_id] = user_screen_name
id_to_username[user_id] = user_name
append_user_id(user_id)
# twitter_to_user_id
################################################################################################################
# We are dealing with an original tweet.
################################################################################################################
if "retweeted_status" not in tweet.keys():
############################################################################################################
# Update mention matrix.
############################################################################################################
# Get mentioned user ids.
mentioned_user_id_set = list()
if tweet_in_reply_to_user_id is not None:
mentioned_user_id_set.append(tweet_in_reply_to_user_id)
id_to_name[tweet_in_reply_to_user_id] = tweet_in_reply_to_screen_name
for user_mention in tweet_entities_user_mentions:
mentioned_user_id = user_mention["id"] # TODO: Perhaps safe extract as well.
mentioned_user_id_set.append(mentioned_user_id)
id_to_name[mentioned_user_id] = user_mention["screen_name"] # TODO: Perhaps safe extract as well.
# We remove duplicates.
mentioned_user_id_set = set(mentioned_user_id_set)
# Update the mention graph one-by-one.
for mentioned_user_id in mentioned_user_id_set:
# Map users to distinct integer numbers.
graph_size = len(id_to_node)
mention_target_node = id_to_node.setdefault(mentioned_user_id, graph_size)
append_user_id(mentioned_user_id)
# Add values to the sparse matrix arrays.
append_mention_graph_row(source_node)
append_mention_graph_col(mention_target_node)
################################################################################################################
# We are dealing with a retweet.
################################################################################################################
else:
# Extract base tweet's values.
original_tweet = tweet["retweeted_status"]
try:
original_tweet_id = original_tweet["id"]
original_tweet_user_id = original_tweet["user"]["id"]
original_tweet_user_screen_name = original_tweet["user"]["screen_name"]
original_tweet_user_name = original_tweet["user"]["name"]
listed_count_raw = original_tweet["user"]["listed_count"]
original_tweet_in_reply_to_user_id = original_tweet["in_reply_to_user_id"]
original_tweet_in_reply_to_screen_name = original_tweet["in_reply_to_screen_name"]
original_tweet_entities_user_mentions = original_tweet["entities"]["user_mentions"]
except KeyError:
continue
# Map users to distinct integer numbers.
graph_size = len(id_to_node)
original_tweet_node = id_to_node.setdefault(original_tweet_user_id, graph_size)
if listed_count_raw is None:
id_to_listedcount[original_tweet_user_id] = 0
else:
id_to_listedcount[original_tweet_user_id] = int(listed_count_raw)
# Update retweet graph.
append_retweet_graph_row(source_node)
append_retweet_graph_col(original_tweet_node)
# Get mentioned user ids.
mentioned_user_id_set = list()
if original_tweet_in_reply_to_user_id is not None:
mentioned_user_id_set.append(original_tweet_in_reply_to_user_id)
id_to_name[original_tweet_in_reply_to_user_id] = original_tweet_in_reply_to_screen_name
for user_mention in original_tweet_entities_user_mentions:
mentioned_user_id = user_mention["id"] # TODO: Perhaps safe extract as well.
mentioned_user_id_set.append(mentioned_user_id)
id_to_name[mentioned_user_id] = user_mention["screen_name"] # TODO: Perhaps safe extract as well.
# We remove duplicates.
mentioned_user_id_set = set(mentioned_user_id_set)
# Get mentioned user ids.
retweet_mentioned_user_id_set = list()
if original_tweet_in_reply_to_user_id is not None:
retweet_mentioned_user_id_set.append(original_tweet_in_reply_to_user_id)
id_to_name[original_tweet_in_reply_to_user_id] = original_tweet_in_reply_to_screen_name
for user_mention in original_tweet_entities_user_mentions:
mentioned_user_id = user_mention["id"] # TODO: Perhaps safe extract as well.
retweet_mentioned_user_id_set.append(mentioned_user_id)
id_to_name[mentioned_user_id] = user_mention["screen_name"] # TODO: Perhaps safe extract as well.
# We remove duplicates.
retweet_mentioned_user_id_set = set(retweet_mentioned_user_id_set)
mentioned_user_id_set.update(retweet_mentioned_user_id_set)
# Update the mention graph one-by-one.
for mentioned_user_id in mentioned_user_id_set:
# Map users to distinct integer numbers.
graph_size = len(id_to_node)
mention_target_node = id_to_node.setdefault(mentioned_user_id, graph_size)
append_user_id(mentioned_user_id)
# Add values to the sparse matrix arrays.
append_mention_graph_row(source_node)
append_mention_graph_col(mention_target_node)
# This is the first time we deal with this tweet.
if original_tweet_id not in tweet_id_set:
# Update sets, lists and dictionaries.
add_tweet_id(original_tweet_id)
id_to_name[original_tweet_user_id] = original_tweet_user_screen_name
id_to_username[original_tweet_user_id] = original_tweet_user_name
append_user_id(original_tweet_user_id)
########################################################################################################
# Update mention matrix.
########################################################################################################
# Update the mention graph one-by-one.
for mentioned_user_id in retweet_mentioned_user_id_set:
# Map users to distinct integer numbers.
graph_size = len(id_to_node)
mention_target_node = id_to_node.setdefault(mentioned_user_id, graph_size)
append_user_id(mentioned_user_id)
# Add values to the sparse matrix arrays.
append_mention_graph_row(original_tweet_node)
append_mention_graph_col(mention_target_node)
else:
pass
####################################################################################################################
# Final steps of preprocessing tweets.
####################################################################################################################
# Discard any duplicates.
user_id_set = set(user_id_set)
number_of_users = len(user_id_set)
# min_number_of_users = max(user_id_set) + 1
# Form mention graph adjacency matrix.
mention_graph_row = np.array(mention_graph_row, dtype=np.int64)
mention_graph_col = np.array(mention_graph_col, dtype=np.int64)
mention_graph_data = np.ones_like(mention_graph_row, dtype=np.float64)
mention_graph = spsp.coo_matrix((mention_graph_data, (mention_graph_row, mention_graph_col)),
shape=(number_of_users, number_of_users))
mention_graph = spsp.coo_matrix(spsp.csr_matrix(mention_graph))
# Form retweet graph adjacency matrix.
retweet_graph_row = np.array(retweet_graph_row, dtype=np.int64)
retweet_graph_col = np.array(retweet_graph_col, dtype=np.int64)
retweet_graph_data = np.ones_like(retweet_graph_row, dtype=np.float64)
retweet_graph = spsp.coo_matrix((retweet_graph_data, (retweet_graph_row, retweet_graph_col)),
shape=(number_of_users, number_of_users))
retweet_graph = spsp.coo_matrix(spsp.csr_matrix(retweet_graph))
node_to_id = dict(zip(id_to_node.values(), id_to_node.keys()))
return mention_graph, retweet_graph, tweet_id_set, user_id_set, node_to_id, id_to_name, id_to_username, id_to_listedcount | Given a tweet python generator, we encode the information into mention and retweet graphs.
We assume that the tweets are given in increasing timestamp.
Inputs: - tweet_generator: A python generator of tweets in python dictionary (json) format.
Outputs: - mention_graph: The mention graph as a SciPy sparse matrix.
- user_id_set: A python set containing the Twitter ids for all the dataset users.
- node_to_id: A python dictionary that maps from node anonymized ids, to twitter user ids. | Below is the the instruction that describes the task:
### Input:
Given a tweet python generator, we encode the information into mention and retweet graphs.
We assume that the tweets are given in increasing timestamp.
Inputs: - tweet_generator: A python generator of tweets in python dictionary (json) format.
Outputs: - mention_graph: The mention graph as a SciPy sparse matrix.
- user_id_set: A python set containing the Twitter ids for all the dataset users.
- node_to_id: A python dictionary that maps from node anonymized ids, to twitter user ids.
### Response:
def extract_graphs_from_tweets(tweet_generator):
"""
Given a tweet python generator, we encode the information into mention and retweet graphs.
We assume that the tweets are given in increasing timestamp.
Inputs: - tweet_generator: A python generator of tweets in python dictionary (json) format.
Outputs: - mention_graph: The mention graph as a SciPy sparse matrix.
- user_id_set: A python set containing the Twitter ids for all the dataset users.
- node_to_id: A python dictionary that maps from node anonymized ids, to twitter user ids.
"""
####################################################################################################################
# Prepare for iterating over tweets.
####################################################################################################################
# These are initialized as lists for incremental extension.
tweet_id_set = set()
user_id_set = list()
twitter_to_reveal_user_id = dict()
add_tweet_id = tweet_id_set.add
append_user_id = user_id_set.append
# Initialize sparse matrix arrays.
mention_graph_row = list()
mention_graph_col = list()
retweet_graph_row = list()
retweet_graph_col = list()
append_mention_graph_row = mention_graph_row.append
append_mention_graph_col = mention_graph_col.append
append_retweet_graph_row = retweet_graph_row.append
append_retweet_graph_col = retweet_graph_col.append
# Initialize dictionaries.
id_to_node = dict()
id_to_name = dict()
id_to_username = dict()
id_to_listedcount = dict()
####################################################################################################################
# Iterate over tweets.
####################################################################################################################
counter = 0
for tweet in tweet_generator:
# Increment tweet counter.
counter += 1
if counter % 10000 == 0:
print(counter)
# print(counter)
# Extract base tweet's values.
try:
tweet_id = tweet["id"]
user_id = tweet["user"]["id"]
user_screen_name = tweet["user"]["screen_name"]
user_name = tweet["user"]["name"]
listed_count_raw = tweet["user"]["listed_count"]
tweet_in_reply_to_user_id = tweet["in_reply_to_user_id"]
tweet_in_reply_to_screen_name = tweet["in_reply_to_screen_name"]
tweet_entities_user_mentions = tweet["entities"]["user_mentions"]
except KeyError:
continue
# Map users to distinct integer numbers.
graph_size = len(id_to_node)
source_node = id_to_node.setdefault(user_id, graph_size)
if listed_count_raw is None:
id_to_listedcount[user_id] = 0
else:
id_to_listedcount[user_id] = int(listed_count_raw)
# Update sets, lists and dictionaries.
add_tweet_id(tweet_id)
id_to_name[user_id] = user_screen_name
id_to_username[user_id] = user_name
append_user_id(user_id)
# twitter_to_user_id
################################################################################################################
# We are dealing with an original tweet.
################################################################################################################
if "retweeted_status" not in tweet.keys():
############################################################################################################
# Update mention matrix.
############################################################################################################
# Get mentioned user ids.
mentioned_user_id_set = list()
if tweet_in_reply_to_user_id is not None:
mentioned_user_id_set.append(tweet_in_reply_to_user_id)
id_to_name[tweet_in_reply_to_user_id] = tweet_in_reply_to_screen_name
for user_mention in tweet_entities_user_mentions:
mentioned_user_id = user_mention["id"] # TODO: Perhaps safe extract as well.
mentioned_user_id_set.append(mentioned_user_id)
id_to_name[mentioned_user_id] = user_mention["screen_name"] # TODO: Perhaps safe extract as well.
# We remove duplicates.
mentioned_user_id_set = set(mentioned_user_id_set)
# Update the mention graph one-by-one.
for mentioned_user_id in mentioned_user_id_set:
# Map users to distinct integer numbers.
graph_size = len(id_to_node)
mention_target_node = id_to_node.setdefault(mentioned_user_id, graph_size)
append_user_id(mentioned_user_id)
# Add values to the sparse matrix arrays.
append_mention_graph_row(source_node)
append_mention_graph_col(mention_target_node)
################################################################################################################
# We are dealing with a retweet.
################################################################################################################
else:
# Extract base tweet's values.
original_tweet = tweet["retweeted_status"]
try:
original_tweet_id = original_tweet["id"]
original_tweet_user_id = original_tweet["user"]["id"]
original_tweet_user_screen_name = original_tweet["user"]["screen_name"]
original_tweet_user_name = original_tweet["user"]["name"]
listed_count_raw = original_tweet["user"]["listed_count"]
original_tweet_in_reply_to_user_id = original_tweet["in_reply_to_user_id"]
original_tweet_in_reply_to_screen_name = original_tweet["in_reply_to_screen_name"]
original_tweet_entities_user_mentions = original_tweet["entities"]["user_mentions"]
except KeyError:
continue
# Map users to distinct integer numbers.
graph_size = len(id_to_node)
original_tweet_node = id_to_node.setdefault(original_tweet_user_id, graph_size)
if listed_count_raw is None:
id_to_listedcount[original_tweet_user_id] = 0
else:
id_to_listedcount[original_tweet_user_id] = int(listed_count_raw)
# Update retweet graph.
append_retweet_graph_row(source_node)
append_retweet_graph_col(original_tweet_node)
# Get mentioned user ids.
mentioned_user_id_set = list()
if original_tweet_in_reply_to_user_id is not None:
mentioned_user_id_set.append(original_tweet_in_reply_to_user_id)
id_to_name[original_tweet_in_reply_to_user_id] = original_tweet_in_reply_to_screen_name
for user_mention in original_tweet_entities_user_mentions:
mentioned_user_id = user_mention["id"] # TODO: Perhaps safe extract as well.
mentioned_user_id_set.append(mentioned_user_id)
id_to_name[mentioned_user_id] = user_mention["screen_name"] # TODO: Perhaps safe extract as well.
# We remove duplicates.
mentioned_user_id_set = set(mentioned_user_id_set)
# Get mentioned user ids.
retweet_mentioned_user_id_set = list()
if original_tweet_in_reply_to_user_id is not None:
retweet_mentioned_user_id_set.append(original_tweet_in_reply_to_user_id)
id_to_name[original_tweet_in_reply_to_user_id] = original_tweet_in_reply_to_screen_name
for user_mention in original_tweet_entities_user_mentions:
mentioned_user_id = user_mention["id"] # TODO: Perhaps safe extract as well.
retweet_mentioned_user_id_set.append(mentioned_user_id)
id_to_name[mentioned_user_id] = user_mention["screen_name"] # TODO: Perhaps safe extract as well.
# We remove duplicates.
retweet_mentioned_user_id_set = set(retweet_mentioned_user_id_set)
mentioned_user_id_set.update(retweet_mentioned_user_id_set)
# Update the mention graph one-by-one.
for mentioned_user_id in mentioned_user_id_set:
# Map users to distinct integer numbers.
graph_size = len(id_to_node)
mention_target_node = id_to_node.setdefault(mentioned_user_id, graph_size)
append_user_id(mentioned_user_id)
# Add values to the sparse matrix arrays.
append_mention_graph_row(source_node)
append_mention_graph_col(mention_target_node)
# This is the first time we deal with this tweet.
if original_tweet_id not in tweet_id_set:
# Update sets, lists and dictionaries.
add_tweet_id(original_tweet_id)
id_to_name[original_tweet_user_id] = original_tweet_user_screen_name
id_to_username[original_tweet_user_id] = original_tweet_user_name
append_user_id(original_tweet_user_id)
########################################################################################################
# Update mention matrix.
########################################################################################################
# Update the mention graph one-by-one.
for mentioned_user_id in retweet_mentioned_user_id_set:
# Map users to distinct integer numbers.
graph_size = len(id_to_node)
mention_target_node = id_to_node.setdefault(mentioned_user_id, graph_size)
append_user_id(mentioned_user_id)
# Add values to the sparse matrix arrays.
append_mention_graph_row(original_tweet_node)
append_mention_graph_col(mention_target_node)
else:
pass
####################################################################################################################
# Final steps of preprocessing tweets.
####################################################################################################################
# Discard any duplicates.
user_id_set = set(user_id_set)
number_of_users = len(user_id_set)
# min_number_of_users = max(user_id_set) + 1
# Form mention graph adjacency matrix.
mention_graph_row = np.array(mention_graph_row, dtype=np.int64)
mention_graph_col = np.array(mention_graph_col, dtype=np.int64)
mention_graph_data = np.ones_like(mention_graph_row, dtype=np.float64)
mention_graph = spsp.coo_matrix((mention_graph_data, (mention_graph_row, mention_graph_col)),
shape=(number_of_users, number_of_users))
mention_graph = spsp.coo_matrix(spsp.csr_matrix(mention_graph))
# Form retweet graph adjacency matrix.
retweet_graph_row = np.array(retweet_graph_row, dtype=np.int64)
retweet_graph_col = np.array(retweet_graph_col, dtype=np.int64)
retweet_graph_data = np.ones_like(retweet_graph_row, dtype=np.float64)
retweet_graph = spsp.coo_matrix((retweet_graph_data, (retweet_graph_row, retweet_graph_col)),
shape=(number_of_users, number_of_users))
retweet_graph = spsp.coo_matrix(spsp.csr_matrix(retweet_graph))
node_to_id = dict(zip(id_to_node.values(), id_to_node.keys()))
return mention_graph, retweet_graph, tweet_id_set, user_id_set, node_to_id, id_to_name, id_to_username, id_to_listedcount |
def parse_header(head):
"""
Parses the header part of packet
Returns a dict
"""
try:
(fromcall, path) = head.split('>', 1)
except:
raise ParseError("invalid packet header")
if (not 1 <= len(fromcall) <= 9 or
not re.findall(r"^[a-z0-9]{0,9}(\-[a-z0-9]{1,8})?$", fromcall, re.I)):
raise ParseError("fromcallsign is invalid")
path = path.split(',')
if len(path[0]) == 0:
raise ParseError("no tocallsign in header")
tocall = path[0]
path = path[1:]
validate_callsign(tocall, "tocallsign")
for digi in path:
if not re.findall(r"^[A-Z0-9\-]{1,9}\*?$", digi, re.I):
raise ParseError("invalid callsign in path")
parsed = {
'from': fromcall,
'to': tocall,
'path': path,
}
viacall = ""
if len(path) >= 2 and re.match(r"^q..$", path[-2]):
viacall = path[-1]
parsed.update({'via': viacall})
return parsed | Parses the header part of packet
Returns a dict | Below is the the instruction that describes the task:
### Input:
Parses the header part of packet
Returns a dict
### Response:
def parse_header(head):
"""
Parses the header part of packet
Returns a dict
"""
try:
(fromcall, path) = head.split('>', 1)
except:
raise ParseError("invalid packet header")
if (not 1 <= len(fromcall) <= 9 or
not re.findall(r"^[a-z0-9]{0,9}(\-[a-z0-9]{1,8})?$", fromcall, re.I)):
raise ParseError("fromcallsign is invalid")
path = path.split(',')
if len(path[0]) == 0:
raise ParseError("no tocallsign in header")
tocall = path[0]
path = path[1:]
validate_callsign(tocall, "tocallsign")
for digi in path:
if not re.findall(r"^[A-Z0-9\-]{1,9}\*?$", digi, re.I):
raise ParseError("invalid callsign in path")
parsed = {
'from': fromcall,
'to': tocall,
'path': path,
}
viacall = ""
if len(path) >= 2 and re.match(r"^q..$", path[-2]):
viacall = path[-1]
parsed.update({'via': viacall})
return parsed |
def createAssociation(self, dumb=True, assoc_type='HMAC-SHA1'):
"""Make a new association.
@param dumb: Is this association for a dumb-mode transaction?
@type dumb: bool
@param assoc_type: The type of association to create. Currently
there is only one type defined, C{HMAC-SHA1}.
@type assoc_type: str
@returns: the new association.
@returntype: L{openid.association.Association}
"""
secret = cryptutil.getBytes(getSecretSize(assoc_type))
uniq = oidutil.toBase64(cryptutil.getBytes(4))
handle = '{%s}{%x}{%s}' % (assoc_type, int(time.time()), uniq)
assoc = Association.fromExpiresIn(
self.SECRET_LIFETIME, handle, secret, assoc_type)
if dumb:
key = self._dumb_key
else:
key = self._normal_key
self.store.storeAssociation(key, assoc)
return assoc | Make a new association.
@param dumb: Is this association for a dumb-mode transaction?
@type dumb: bool
@param assoc_type: The type of association to create. Currently
there is only one type defined, C{HMAC-SHA1}.
@type assoc_type: str
@returns: the new association.
@returntype: L{openid.association.Association} | Below is the the instruction that describes the task:
### Input:
Make a new association.
@param dumb: Is this association for a dumb-mode transaction?
@type dumb: bool
@param assoc_type: The type of association to create. Currently
there is only one type defined, C{HMAC-SHA1}.
@type assoc_type: str
@returns: the new association.
@returntype: L{openid.association.Association}
### Response:
def createAssociation(self, dumb=True, assoc_type='HMAC-SHA1'):
"""Make a new association.
@param dumb: Is this association for a dumb-mode transaction?
@type dumb: bool
@param assoc_type: The type of association to create. Currently
there is only one type defined, C{HMAC-SHA1}.
@type assoc_type: str
@returns: the new association.
@returntype: L{openid.association.Association}
"""
secret = cryptutil.getBytes(getSecretSize(assoc_type))
uniq = oidutil.toBase64(cryptutil.getBytes(4))
handle = '{%s}{%x}{%s}' % (assoc_type, int(time.time()), uniq)
assoc = Association.fromExpiresIn(
self.SECRET_LIFETIME, handle, secret, assoc_type)
if dumb:
key = self._dumb_key
else:
key = self._normal_key
self.store.storeAssociation(key, assoc)
return assoc |
def do_remove(config, config_dir):
"""
CLI action "remove configuration".
"""
if not os.path.exists(config_dir):
print "Configuration '{}' does not exist.".format(config)
exit(1)
if confirm("Confirm removal of the configuration '{}'".format(config)):
shutil.rmtree(config_dir)
print "Configuration '{}' has been removed.".format(config)
else:
print "Removal cancelled." | CLI action "remove configuration". | Below is the the instruction that describes the task:
### Input:
CLI action "remove configuration".
### Response:
def do_remove(config, config_dir):
"""
CLI action "remove configuration".
"""
if not os.path.exists(config_dir):
print "Configuration '{}' does not exist.".format(config)
exit(1)
if confirm("Confirm removal of the configuration '{}'".format(config)):
shutil.rmtree(config_dir)
print "Configuration '{}' has been removed.".format(config)
else:
print "Removal cancelled." |
def rm_watch(self, wd, rec=False, quiet=True):
"""
Removes watch(s).
@param wd: Watch Descriptor of the file or directory to unwatch.
Also accepts a list of WDs.
@type wd: int or list of int.
@param rec: Recursively removes watches on every already watched
subdirectories and subfiles.
@type rec: bool
@param quiet: If False raises a WatchManagerError exception on
error. See example not_quiet.py
@type quiet: bool
@return: dict of watch descriptors associated to booleans values.
True if the corresponding wd has been successfully
removed, False otherwise.
@rtype: dict of {int: bool}
"""
lwd = self.__format_param(wd)
if rec:
lwd = self.__get_sub_rec(lwd)
ret_ = {} # return {wd: bool, ...}
for awd in lwd:
# remove watch
wd_ = self._inotify_wrapper.inotify_rm_watch(self._fd, awd)
if wd_ < 0:
ret_[awd] = False
err = ('rm_watch: cannot remove WD=%d, %s' % \
(awd, self._inotify_wrapper.str_errno()))
if quiet:
log.error(err)
continue
raise WatchManagerError(err, ret_)
# Remove watch from our dictionary
if awd in self._wmd:
del self._wmd[awd]
ret_[awd] = True
log.debug('Watch WD=%d (%s) removed', awd, self.get_path(awd))
return ret_ | Removes watch(s).
@param wd: Watch Descriptor of the file or directory to unwatch.
Also accepts a list of WDs.
@type wd: int or list of int.
@param rec: Recursively removes watches on every already watched
subdirectories and subfiles.
@type rec: bool
@param quiet: If False raises a WatchManagerError exception on
error. See example not_quiet.py
@type quiet: bool
@return: dict of watch descriptors associated to booleans values.
True if the corresponding wd has been successfully
removed, False otherwise.
@rtype: dict of {int: bool} | Below is the the instruction that describes the task:
### Input:
Removes watch(s).
@param wd: Watch Descriptor of the file or directory to unwatch.
Also accepts a list of WDs.
@type wd: int or list of int.
@param rec: Recursively removes watches on every already watched
subdirectories and subfiles.
@type rec: bool
@param quiet: If False raises a WatchManagerError exception on
error. See example not_quiet.py
@type quiet: bool
@return: dict of watch descriptors associated to booleans values.
True if the corresponding wd has been successfully
removed, False otherwise.
@rtype: dict of {int: bool}
### Response:
def rm_watch(self, wd, rec=False, quiet=True):
"""
Removes watch(s).
@param wd: Watch Descriptor of the file or directory to unwatch.
Also accepts a list of WDs.
@type wd: int or list of int.
@param rec: Recursively removes watches on every already watched
subdirectories and subfiles.
@type rec: bool
@param quiet: If False raises a WatchManagerError exception on
error. See example not_quiet.py
@type quiet: bool
@return: dict of watch descriptors associated to booleans values.
True if the corresponding wd has been successfully
removed, False otherwise.
@rtype: dict of {int: bool}
"""
lwd = self.__format_param(wd)
if rec:
lwd = self.__get_sub_rec(lwd)
ret_ = {} # return {wd: bool, ...}
for awd in lwd:
# remove watch
wd_ = self._inotify_wrapper.inotify_rm_watch(self._fd, awd)
if wd_ < 0:
ret_[awd] = False
err = ('rm_watch: cannot remove WD=%d, %s' % \
(awd, self._inotify_wrapper.str_errno()))
if quiet:
log.error(err)
continue
raise WatchManagerError(err, ret_)
# Remove watch from our dictionary
if awd in self._wmd:
del self._wmd[awd]
ret_[awd] = True
log.debug('Watch WD=%d (%s) removed', awd, self.get_path(awd))
return ret_ |
def local_1d_halo_exchange(k, v, num_w_blocks, w_dim, mask_right):
"""Halo exchange for keys and values for Local 1D attention."""
if num_w_blocks is not None:
if mask_right:
k = mtf.left_halo_exchange(k, num_w_blocks, w_dim, w_dim.size)
v = mtf.left_halo_exchange(v, num_w_blocks, w_dim, w_dim.size)
else:
k = mtf.halo_exchange(k, num_w_blocks, w_dim, w_dim.size)
v = mtf.halo_exchange(v, num_w_blocks, w_dim, w_dim.size)
else:
if mask_right:
k = mtf.pad(k, [w_dim, None], w_dim.name)
v = mtf.pad(v, [w_dim, None], w_dim.name)
else:
k = mtf.pad(k, [w_dim, w_dim], w_dim.name)
v = mtf.pad(v, [w_dim, w_dim], w_dim.name)
return k, v | Halo exchange for keys and values for Local 1D attention. | Below is the the instruction that describes the task:
### Input:
Halo exchange for keys and values for Local 1D attention.
### Response:
def local_1d_halo_exchange(k, v, num_w_blocks, w_dim, mask_right):
"""Halo exchange for keys and values for Local 1D attention."""
if num_w_blocks is not None:
if mask_right:
k = mtf.left_halo_exchange(k, num_w_blocks, w_dim, w_dim.size)
v = mtf.left_halo_exchange(v, num_w_blocks, w_dim, w_dim.size)
else:
k = mtf.halo_exchange(k, num_w_blocks, w_dim, w_dim.size)
v = mtf.halo_exchange(v, num_w_blocks, w_dim, w_dim.size)
else:
if mask_right:
k = mtf.pad(k, [w_dim, None], w_dim.name)
v = mtf.pad(v, [w_dim, None], w_dim.name)
else:
k = mtf.pad(k, [w_dim, w_dim], w_dim.name)
v = mtf.pad(v, [w_dim, w_dim], w_dim.name)
return k, v |
def _find_usage_dynamodb(self):
"""calculates current usage for all DynamoDB limits"""
table_count = 0
region_read_capacity = 0
region_write_capacity = 0
logger.debug("Getting usage for DynamoDB tables")
for table in self.resource_conn.tables.all():
table_count += 1
gsi_write = 0
gsi_read = 0
gsi_count = 0
if table.global_secondary_indexes is not None:
for gsi in table.global_secondary_indexes:
gsi_count += 1
gsi_read += gsi['ProvisionedThroughput'][
'ReadCapacityUnits']
gsi_write += gsi['ProvisionedThroughput'][
'WriteCapacityUnits']
table_write_capacity = table.provisioned_throughput[
'WriteCapacityUnits'] + gsi_write
table_read_capacity = table.provisioned_throughput[
'ReadCapacityUnits'] + gsi_read
region_write_capacity += table_write_capacity
region_read_capacity += table_read_capacity
self.limits['Global Secondary Indexes']._add_current_usage(
gsi_count,
resource_id=table.name,
aws_type='AWS::DynamoDB::Table'
)
self.limits['Local Secondary Indexes']._add_current_usage(
len(table.local_secondary_indexes)
if table.local_secondary_indexes is not None else 0,
resource_id=table.name,
aws_type='AWS::DynamoDB::Table'
)
self.limits['Table Max Write Capacity Units']._add_current_usage(
table_write_capacity,
resource_id=table.name,
aws_type='AWS::DynamoDB::Table'
)
self.limits['Table Max Read Capacity Units']._add_current_usage(
table_read_capacity,
resource_id=table.name,
aws_type='AWS::DynamoDB::Table'
)
self.limits['Tables Per Region']._add_current_usage(
table_count,
aws_type='AWS::DynamoDB::Table'
)
self.limits['Account Max Write Capacity Units']._add_current_usage(
region_write_capacity,
aws_type='AWS::DynamoDB::Table'
)
self.limits['Account Max Read Capacity Units']._add_current_usage(
region_read_capacity,
aws_type='AWS::DynamoDB::Table'
) | calculates current usage for all DynamoDB limits | Below is the the instruction that describes the task:
### Input:
calculates current usage for all DynamoDB limits
### Response:
def _find_usage_dynamodb(self):
"""calculates current usage for all DynamoDB limits"""
table_count = 0
region_read_capacity = 0
region_write_capacity = 0
logger.debug("Getting usage for DynamoDB tables")
for table in self.resource_conn.tables.all():
table_count += 1
gsi_write = 0
gsi_read = 0
gsi_count = 0
if table.global_secondary_indexes is not None:
for gsi in table.global_secondary_indexes:
gsi_count += 1
gsi_read += gsi['ProvisionedThroughput'][
'ReadCapacityUnits']
gsi_write += gsi['ProvisionedThroughput'][
'WriteCapacityUnits']
table_write_capacity = table.provisioned_throughput[
'WriteCapacityUnits'] + gsi_write
table_read_capacity = table.provisioned_throughput[
'ReadCapacityUnits'] + gsi_read
region_write_capacity += table_write_capacity
region_read_capacity += table_read_capacity
self.limits['Global Secondary Indexes']._add_current_usage(
gsi_count,
resource_id=table.name,
aws_type='AWS::DynamoDB::Table'
)
self.limits['Local Secondary Indexes']._add_current_usage(
len(table.local_secondary_indexes)
if table.local_secondary_indexes is not None else 0,
resource_id=table.name,
aws_type='AWS::DynamoDB::Table'
)
self.limits['Table Max Write Capacity Units']._add_current_usage(
table_write_capacity,
resource_id=table.name,
aws_type='AWS::DynamoDB::Table'
)
self.limits['Table Max Read Capacity Units']._add_current_usage(
table_read_capacity,
resource_id=table.name,
aws_type='AWS::DynamoDB::Table'
)
self.limits['Tables Per Region']._add_current_usage(
table_count,
aws_type='AWS::DynamoDB::Table'
)
self.limits['Account Max Write Capacity Units']._add_current_usage(
region_write_capacity,
aws_type='AWS::DynamoDB::Table'
)
self.limits['Account Max Read Capacity Units']._add_current_usage(
region_read_capacity,
aws_type='AWS::DynamoDB::Table'
) |
def _parse_w_comma_h(self, whstr, param):
"""Utility to parse "w,h" "w," or ",h" values.
Returns (w,h) where w,h are either None or ineteger. Will
throw a ValueError if there is a problem with one or both.
"""
try:
(wstr, hstr) = whstr.split(',', 2)
w = self._parse_non_negative_int(wstr, 'w')
h = self._parse_non_negative_int(hstr, 'h')
except ValueError as e:
raise IIIFRequestError(
code=400, parameter=param,
text="Illegal %s value (%s)." % (param, str(e)))
if (w is None and h is None):
raise IIIFRequestError(
code=400, parameter=param,
text="Must specify at least one of w,h for %s." % (param))
return(w, h) | Utility to parse "w,h" "w," or ",h" values.
Returns (w,h) where w,h are either None or ineteger. Will
throw a ValueError if there is a problem with one or both. | Below is the the instruction that describes the task:
### Input:
Utility to parse "w,h" "w," or ",h" values.
Returns (w,h) where w,h are either None or ineteger. Will
throw a ValueError if there is a problem with one or both.
### Response:
def _parse_w_comma_h(self, whstr, param):
"""Utility to parse "w,h" "w," or ",h" values.
Returns (w,h) where w,h are either None or ineteger. Will
throw a ValueError if there is a problem with one or both.
"""
try:
(wstr, hstr) = whstr.split(',', 2)
w = self._parse_non_negative_int(wstr, 'w')
h = self._parse_non_negative_int(hstr, 'h')
except ValueError as e:
raise IIIFRequestError(
code=400, parameter=param,
text="Illegal %s value (%s)." % (param, str(e)))
if (w is None and h is None):
raise IIIFRequestError(
code=400, parameter=param,
text="Must specify at least one of w,h for %s." % (param))
return(w, h) |
def _merge_update_item(self, model_item, data):
"""
Merge a model with a python data structure
This is useful to turn PUT method into a PATCH also
:param model_item: SQLA Model
:param data: python data structure
:return: python data structure
"""
data_item = self.edit_model_schema.dump(model_item, many=False).data
for _col in self.edit_columns:
if _col not in data.keys():
data[_col] = data_item[_col]
return data | Merge a model with a python data structure
This is useful to turn PUT method into a PATCH also
:param model_item: SQLA Model
:param data: python data structure
:return: python data structure | Below is the the instruction that describes the task:
### Input:
Merge a model with a python data structure
This is useful to turn PUT method into a PATCH also
:param model_item: SQLA Model
:param data: python data structure
:return: python data structure
### Response:
def _merge_update_item(self, model_item, data):
"""
Merge a model with a python data structure
This is useful to turn PUT method into a PATCH also
:param model_item: SQLA Model
:param data: python data structure
:return: python data structure
"""
data_item = self.edit_model_schema.dump(model_item, many=False).data
for _col in self.edit_columns:
if _col not in data.keys():
data[_col] = data_item[_col]
return data |
def check_alert(step, text):
"""
Check the alert text
"""
try:
alert = Alert(world.browser)
assert_equals(alert.text, text)
except WebDriverException:
# PhantomJS is kinda poor
pass | Check the alert text | Below is the the instruction that describes the task:
### Input:
Check the alert text
### Response:
def check_alert(step, text):
"""
Check the alert text
"""
try:
alert = Alert(world.browser)
assert_equals(alert.text, text)
except WebDriverException:
# PhantomJS is kinda poor
pass |
def wait_for_job(self):
"""
Use a redis blocking list call to wait for a job, and return it.
"""
blpop_result = self.connection.blpop(self.keys, self.timeout)
if blpop_result is None:
return None
queue_redis_key, job_ident = blpop_result
self.set_status('running')
return self.get_queue(queue_redis_key), self.get_job(job_ident) | Use a redis blocking list call to wait for a job, and return it. | Below is the the instruction that describes the task:
### Input:
Use a redis blocking list call to wait for a job, and return it.
### Response:
def wait_for_job(self):
"""
Use a redis blocking list call to wait for a job, and return it.
"""
blpop_result = self.connection.blpop(self.keys, self.timeout)
if blpop_result is None:
return None
queue_redis_key, job_ident = blpop_result
self.set_status('running')
return self.get_queue(queue_redis_key), self.get_job(job_ident) |
def get(self, path, params=None, headers=None):
"""Perform a GET request, optionally providing query-string params.
Args:
path (str): A path that gets appended to ``base_url``.
params (dict, optional): Dictionary of param names to values.
Example:
api_client.get('/users', params={'active': True})
Returns:
A requests ``Response`` object.
"""
response = requests.get(
self._url_for(path),
params=params,
headers=self._headers(headers)
)
self._handle_errors(response)
return response | Perform a GET request, optionally providing query-string params.
Args:
path (str): A path that gets appended to ``base_url``.
params (dict, optional): Dictionary of param names to values.
Example:
api_client.get('/users', params={'active': True})
Returns:
A requests ``Response`` object. | Below is the the instruction that describes the task:
### Input:
Perform a GET request, optionally providing query-string params.
Args:
path (str): A path that gets appended to ``base_url``.
params (dict, optional): Dictionary of param names to values.
Example:
api_client.get('/users', params={'active': True})
Returns:
A requests ``Response`` object.
### Response:
def get(self, path, params=None, headers=None):
"""Perform a GET request, optionally providing query-string params.
Args:
path (str): A path that gets appended to ``base_url``.
params (dict, optional): Dictionary of param names to values.
Example:
api_client.get('/users', params={'active': True})
Returns:
A requests ``Response`` object.
"""
response = requests.get(
self._url_for(path),
params=params,
headers=self._headers(headers)
)
self._handle_errors(response)
return response |
def append_id(self, id_col_name='append_id', cols=None):
"""
Append an ID column to current DataFrame.
:param str id_col_name: name of appended ID field.
:param str cols: fields contained in output. All fields by default.
:return: DataFrame with ID field
:rtype: DataFrame
"""
from .. import preprocess
if id_col_name in self.schema:
raise ValueError('ID column collides with existing columns.')
append_id_obj = getattr(preprocess, '_AppendID')(id_col=id_col_name, selected_cols=cols)
return append_id_obj.transform(self) | Append an ID column to current DataFrame.
:param str id_col_name: name of appended ID field.
:param str cols: fields contained in output. All fields by default.
:return: DataFrame with ID field
:rtype: DataFrame | Below is the the instruction that describes the task:
### Input:
Append an ID column to current DataFrame.
:param str id_col_name: name of appended ID field.
:param str cols: fields contained in output. All fields by default.
:return: DataFrame with ID field
:rtype: DataFrame
### Response:
def append_id(self, id_col_name='append_id', cols=None):
"""
Append an ID column to current DataFrame.
:param str id_col_name: name of appended ID field.
:param str cols: fields contained in output. All fields by default.
:return: DataFrame with ID field
:rtype: DataFrame
"""
from .. import preprocess
if id_col_name in self.schema:
raise ValueError('ID column collides with existing columns.')
append_id_obj = getattr(preprocess, '_AppendID')(id_col=id_col_name, selected_cols=cols)
return append_id_obj.transform(self) |
def remove_absolute_mask__r2(X, y, model_generator, method_name, num_fcounts=11):
""" Remove Absolute (mask)
xlabel = "Max fraction of features removed"
ylabel = "1 - R^2"
transform = "one_minus"
sort_order = 9
"""
return __run_measure(measures.remove_mask, X, y, model_generator, method_name, 0, num_fcounts, sklearn.metrics.r2_score) | Remove Absolute (mask)
xlabel = "Max fraction of features removed"
ylabel = "1 - R^2"
transform = "one_minus"
sort_order = 9 | Below is the the instruction that describes the task:
### Input:
Remove Absolute (mask)
xlabel = "Max fraction of features removed"
ylabel = "1 - R^2"
transform = "one_minus"
sort_order = 9
### Response:
def remove_absolute_mask__r2(X, y, model_generator, method_name, num_fcounts=11):
""" Remove Absolute (mask)
xlabel = "Max fraction of features removed"
ylabel = "1 - R^2"
transform = "one_minus"
sort_order = 9
"""
return __run_measure(measures.remove_mask, X, y, model_generator, method_name, 0, num_fcounts, sklearn.metrics.r2_score) |
def sas_logical_interconnect_groups(self):
"""
Gets the SasLogicalInterconnectGroups API client.
Returns:
SasLogicalInterconnectGroups:
"""
if not self.__sas_logical_interconnect_groups:
self.__sas_logical_interconnect_groups = SasLogicalInterconnectGroups(self.__connection)
return self.__sas_logical_interconnect_groups | Gets the SasLogicalInterconnectGroups API client.
Returns:
SasLogicalInterconnectGroups: | Below is the the instruction that describes the task:
### Input:
Gets the SasLogicalInterconnectGroups API client.
Returns:
SasLogicalInterconnectGroups:
### Response:
def sas_logical_interconnect_groups(self):
"""
Gets the SasLogicalInterconnectGroups API client.
Returns:
SasLogicalInterconnectGroups:
"""
if not self.__sas_logical_interconnect_groups:
self.__sas_logical_interconnect_groups = SasLogicalInterconnectGroups(self.__connection)
return self.__sas_logical_interconnect_groups |
def jump(self, selected_number: int) -> None:
"""Jump to a specific trace frame in a trace.
Parameters:
selected_number: int the trace frame number from trace output
"""
self._verify_entrypoint_selected()
if selected_number < 1 or selected_number > len(self.trace_tuples):
raise UserError(
"Trace frame number out of bounds "
f"(expected 1-{len(self.trace_tuples)} but got {selected_number})."
)
self.current_trace_frame_index = selected_number - 1
self.trace() | Jump to a specific trace frame in a trace.
Parameters:
selected_number: int the trace frame number from trace output | Below is the the instruction that describes the task:
### Input:
Jump to a specific trace frame in a trace.
Parameters:
selected_number: int the trace frame number from trace output
### Response:
def jump(self, selected_number: int) -> None:
"""Jump to a specific trace frame in a trace.
Parameters:
selected_number: int the trace frame number from trace output
"""
self._verify_entrypoint_selected()
if selected_number < 1 or selected_number > len(self.trace_tuples):
raise UserError(
"Trace frame number out of bounds "
f"(expected 1-{len(self.trace_tuples)} but got {selected_number})."
)
self.current_trace_frame_index = selected_number - 1
self.trace() |
def save(self, filename):
"""Save the configuration to a file"""
filename = pathlib.Path(filename)
out = []
keys = sorted(list(self.keys()))
for key in keys:
out.append("[{}]".format(key))
section = self[key]
ikeys = list(section.keys())
ikeys.sort()
for ikey in ikeys:
var, val = keyval_typ2str(ikey, section[ikey])
out.append("{} = {}".format(var, val))
out.append("")
with filename.open("w") as f:
for i in range(len(out)):
# win-like line endings
out[i] = out[i]+"\n"
f.writelines(out) | Save the configuration to a file | Below is the the instruction that describes the task:
### Input:
Save the configuration to a file
### Response:
def save(self, filename):
"""Save the configuration to a file"""
filename = pathlib.Path(filename)
out = []
keys = sorted(list(self.keys()))
for key in keys:
out.append("[{}]".format(key))
section = self[key]
ikeys = list(section.keys())
ikeys.sort()
for ikey in ikeys:
var, val = keyval_typ2str(ikey, section[ikey])
out.append("{} = {}".format(var, val))
out.append("")
with filename.open("w") as f:
for i in range(len(out)):
# win-like line endings
out[i] = out[i]+"\n"
f.writelines(out) |
def _get_initial_states(self,
batch_size: int,
num_valid: int,
sorting_indices: torch.LongTensor) -> Optional[RnnState]:
"""
Returns an initial state for use in an RNN. Additionally, this method handles
the batch size changing across calls by mutating the state to append initial states
for new elements in the batch. Finally, it also handles sorting the states
with respect to the sequence lengths of elements in the batch and removing rows
which are completely padded. Importantly, this `mutates` the state if the
current batch size is larger than when it was previously called.
Parameters
----------
batch_size : ``int``, required.
The batch size can change size across calls to stateful RNNs, so we need
to know if we need to expand or shrink the states before returning them.
Expanded states will be set to zero.
num_valid : ``int``, required.
The batch may contain completely padded sequences which get removed before
the sequence is passed through the encoder. We also need to clip these off
of the state too.
sorting_indices ``torch.LongTensor``, required.
Pytorch RNNs take sequences sorted by length. When we return the states to be
used for a given call to ``module.forward``, we need the states to match up to
the sorted sequences, so before returning them, we sort the states using the
same indices used to sort the sequences.
Returns
-------
This method has a complex return type because it has to deal with the first time it
is called, when it has no state, and the fact that types of RNN have heterogeneous
states.
If it is the first time the module has been called, it returns ``None``, regardless
of the type of the ``Module``.
Otherwise, for LSTMs, it returns a tuple of ``torch.Tensors`` with shape
``(num_layers, num_valid, state_size)`` and ``(num_layers, num_valid, memory_size)``
respectively, or for GRUs, it returns a single ``torch.Tensor`` of shape
``(num_layers, num_valid, state_size)``.
"""
# We don't know the state sizes the first time calling forward,
# so we let the module define what it's initial hidden state looks like.
if self._states is None:
return None
# Otherwise, we have some previous states.
if batch_size > self._states[0].size(1):
# This batch is larger than the all previous states.
# If so, resize the states.
num_states_to_concat = batch_size - self._states[0].size(1)
resized_states = []
# state has shape (num_layers, batch_size, hidden_size)
for state in self._states:
# This _must_ be inside the loop because some
# RNNs have states with different last dimension sizes.
zeros = state.new_zeros(state.size(0),
num_states_to_concat,
state.size(2))
resized_states.append(torch.cat([state, zeros], 1))
self._states = tuple(resized_states)
correctly_shaped_states = self._states
elif batch_size < self._states[0].size(1):
# This batch is smaller than the previous one.
correctly_shaped_states = tuple(state[:, :batch_size, :] for state in self._states)
else:
correctly_shaped_states = self._states
# At this point, our states are of shape (num_layers, batch_size, hidden_size).
# However, the encoder uses sorted sequences and additionally removes elements
# of the batch which are fully padded. We need the states to match up to these
# sorted and filtered sequences, so we do that in the next two blocks before
# returning the state/s.
if len(self._states) == 1:
# GRUs only have a single state. This `unpacks` it from the
# tuple and returns the tensor directly.
correctly_shaped_state = correctly_shaped_states[0]
sorted_state = correctly_shaped_state.index_select(1, sorting_indices)
return sorted_state[:, :num_valid, :].contiguous()
else:
# LSTMs have a state tuple of (state, memory).
sorted_states = [state.index_select(1, sorting_indices)
for state in correctly_shaped_states]
return tuple(state[:, :num_valid, :].contiguous() for state in sorted_states) | Returns an initial state for use in an RNN. Additionally, this method handles
the batch size changing across calls by mutating the state to append initial states
for new elements in the batch. Finally, it also handles sorting the states
with respect to the sequence lengths of elements in the batch and removing rows
which are completely padded. Importantly, this `mutates` the state if the
current batch size is larger than when it was previously called.
Parameters
----------
batch_size : ``int``, required.
The batch size can change size across calls to stateful RNNs, so we need
to know if we need to expand or shrink the states before returning them.
Expanded states will be set to zero.
num_valid : ``int``, required.
The batch may contain completely padded sequences which get removed before
the sequence is passed through the encoder. We also need to clip these off
of the state too.
sorting_indices ``torch.LongTensor``, required.
Pytorch RNNs take sequences sorted by length. When we return the states to be
used for a given call to ``module.forward``, we need the states to match up to
the sorted sequences, so before returning them, we sort the states using the
same indices used to sort the sequences.
Returns
-------
This method has a complex return type because it has to deal with the first time it
is called, when it has no state, and the fact that types of RNN have heterogeneous
states.
If it is the first time the module has been called, it returns ``None``, regardless
of the type of the ``Module``.
Otherwise, for LSTMs, it returns a tuple of ``torch.Tensors`` with shape
``(num_layers, num_valid, state_size)`` and ``(num_layers, num_valid, memory_size)``
respectively, or for GRUs, it returns a single ``torch.Tensor`` of shape
``(num_layers, num_valid, state_size)``. | Below is the the instruction that describes the task:
### Input:
Returns an initial state for use in an RNN. Additionally, this method handles
the batch size changing across calls by mutating the state to append initial states
for new elements in the batch. Finally, it also handles sorting the states
with respect to the sequence lengths of elements in the batch and removing rows
which are completely padded. Importantly, this `mutates` the state if the
current batch size is larger than when it was previously called.
Parameters
----------
batch_size : ``int``, required.
The batch size can change size across calls to stateful RNNs, so we need
to know if we need to expand or shrink the states before returning them.
Expanded states will be set to zero.
num_valid : ``int``, required.
The batch may contain completely padded sequences which get removed before
the sequence is passed through the encoder. We also need to clip these off
of the state too.
sorting_indices ``torch.LongTensor``, required.
Pytorch RNNs take sequences sorted by length. When we return the states to be
used for a given call to ``module.forward``, we need the states to match up to
the sorted sequences, so before returning them, we sort the states using the
same indices used to sort the sequences.
Returns
-------
This method has a complex return type because it has to deal with the first time it
is called, when it has no state, and the fact that types of RNN have heterogeneous
states.
If it is the first time the module has been called, it returns ``None``, regardless
of the type of the ``Module``.
Otherwise, for LSTMs, it returns a tuple of ``torch.Tensors`` with shape
``(num_layers, num_valid, state_size)`` and ``(num_layers, num_valid, memory_size)``
respectively, or for GRUs, it returns a single ``torch.Tensor`` of shape
``(num_layers, num_valid, state_size)``.
### Response:
def _get_initial_states(self,
batch_size: int,
num_valid: int,
sorting_indices: torch.LongTensor) -> Optional[RnnState]:
"""
Returns an initial state for use in an RNN. Additionally, this method handles
the batch size changing across calls by mutating the state to append initial states
for new elements in the batch. Finally, it also handles sorting the states
with respect to the sequence lengths of elements in the batch and removing rows
which are completely padded. Importantly, this `mutates` the state if the
current batch size is larger than when it was previously called.
Parameters
----------
batch_size : ``int``, required.
The batch size can change size across calls to stateful RNNs, so we need
to know if we need to expand or shrink the states before returning them.
Expanded states will be set to zero.
num_valid : ``int``, required.
The batch may contain completely padded sequences which get removed before
the sequence is passed through the encoder. We also need to clip these off
of the state too.
sorting_indices ``torch.LongTensor``, required.
Pytorch RNNs take sequences sorted by length. When we return the states to be
used for a given call to ``module.forward``, we need the states to match up to
the sorted sequences, so before returning them, we sort the states using the
same indices used to sort the sequences.
Returns
-------
This method has a complex return type because it has to deal with the first time it
is called, when it has no state, and the fact that types of RNN have heterogeneous
states.
If it is the first time the module has been called, it returns ``None``, regardless
of the type of the ``Module``.
Otherwise, for LSTMs, it returns a tuple of ``torch.Tensors`` with shape
``(num_layers, num_valid, state_size)`` and ``(num_layers, num_valid, memory_size)``
respectively, or for GRUs, it returns a single ``torch.Tensor`` of shape
``(num_layers, num_valid, state_size)``.
"""
# We don't know the state sizes the first time calling forward,
# so we let the module define what it's initial hidden state looks like.
if self._states is None:
return None
# Otherwise, we have some previous states.
if batch_size > self._states[0].size(1):
# This batch is larger than the all previous states.
# If so, resize the states.
num_states_to_concat = batch_size - self._states[0].size(1)
resized_states = []
# state has shape (num_layers, batch_size, hidden_size)
for state in self._states:
# This _must_ be inside the loop because some
# RNNs have states with different last dimension sizes.
zeros = state.new_zeros(state.size(0),
num_states_to_concat,
state.size(2))
resized_states.append(torch.cat([state, zeros], 1))
self._states = tuple(resized_states)
correctly_shaped_states = self._states
elif batch_size < self._states[0].size(1):
# This batch is smaller than the previous one.
correctly_shaped_states = tuple(state[:, :batch_size, :] for state in self._states)
else:
correctly_shaped_states = self._states
# At this point, our states are of shape (num_layers, batch_size, hidden_size).
# However, the encoder uses sorted sequences and additionally removes elements
# of the batch which are fully padded. We need the states to match up to these
# sorted and filtered sequences, so we do that in the next two blocks before
# returning the state/s.
if len(self._states) == 1:
# GRUs only have a single state. This `unpacks` it from the
# tuple and returns the tensor directly.
correctly_shaped_state = correctly_shaped_states[0]
sorted_state = correctly_shaped_state.index_select(1, sorting_indices)
return sorted_state[:, :num_valid, :].contiguous()
else:
# LSTMs have a state tuple of (state, memory).
sorted_states = [state.index_select(1, sorting_indices)
for state in correctly_shaped_states]
return tuple(state[:, :num_valid, :].contiguous() for state in sorted_states) |
def setSnapToGrid( self, state = True ):
"""
Sets the snap to grid property for both the x and y directions \
based on the inputted value.
:param state <bool>
"""
self._xSnapToGrid = state
self._ySnapToGrid = state | Sets the snap to grid property for both the x and y directions \
based on the inputted value.
:param state <bool> | Below is the instruction that describes the task:
### Input:
Sets the snap to grid property for both the x and y directions \
based on the inputted value.
:param state <bool>
### Response:
def setSnapToGrid( self, state = True ):
"""
Sets the snap to grid property for both the x and y directions \
based on the inputted value.
:param state <bool>
"""
self._xSnapToGrid = state
self._ySnapToGrid = state |
def smartparse(cls, toparse, tzinfo=None):
"""Method which uses dateutil.parse and extras to try and parse the string.
Valid dates are found at:
http://labix.org/python-dateutil#head-1443e0f14ad5dff07efd465e080d1110920673d8-2
Other valid formats include:
"now" or "today"
"yesterday"
"tomorrow"
"5 minutes ago"
"10 hours ago"
"10h5m ago"
"start of yesterday"
"end of tomorrow"
"end of 3rd of March"
Args:
toparse: The string to parse.
tzinfo: Timezone for the resultant datetime_tz object should be in.
(Defaults to your local timezone.)
Returns:
New datetime_tz object.
Raises:
ValueError: If unable to make sense of the input.
"""
# Default for empty fields are:
# year/month/day == now
# hour/minute/second/microsecond == 0
toparse = toparse.strip()
if tzinfo is None:
dt = cls.now()
else:
dt = cls.now(tzinfo)
default = dt.replace(hour=0, minute=0, second=0, microsecond=0)
# Remove "start of " and "end of " prefix in the string
if toparse.lower().startswith("end of "):
toparse = toparse[7:].strip()
dt += datetime.timedelta(days=1)
dt = dt.replace(hour=0, minute=0, second=0, microsecond=0)
dt -= datetime.timedelta(microseconds=1)
default = dt
elif toparse.lower().startswith("start of "):
toparse = toparse[9:].strip()
dt = dt.replace(hour=0, minute=0, second=0, microsecond=0)
default = dt
# Handle strings with "now", "today", "yesterday", "tomorrow" and "ago".
# Need to use lowercase
toparselower = toparse.lower()
if toparselower in ["now", "today"]:
pass
elif toparselower == "yesterday":
dt -= datetime.timedelta(days=1)
elif toparselower in ("tomorrow", "tommorrow"):
# tommorrow is spelled wrong, but code out there might be depending on it
# working
dt += datetime.timedelta(days=1)
elif "ago" in toparselower:
# Remove the "ago" bit
toparselower = toparselower[:-3]
# Replace all "a day and an hour" with "1 day 1 hour"
toparselower = toparselower.replace("a ", "1 ")
toparselower = toparselower.replace("an ", "1 ")
toparselower = toparselower.replace(" and ", " ")
# Match the following
# 1 hour ago
# 1h ago
# 1 h ago
# 1 hour ago
# 2 hours ago
# Same with minutes, seconds, etc.
tocheck = ("seconds", "minutes", "hours", "days", "weeks", "months",
"years")
result = {}
for match in re.finditer("([0-9]+)([^0-9]*)", toparselower):
amount = int(match.group(1))
unit = match.group(2).strip()
for bit in tocheck:
regex = "^([%s]|((%s)s?))$" % (
bit[0], bit[:-1])
bitmatch = re.search(regex, unit)
if bitmatch:
result[bit] = amount
break
else:
raise ValueError("Was not able to parse date unit %r!" % unit)
delta = dateutil.relativedelta.relativedelta(**result)
dt -= delta
else:
# Handle strings with normal datetime format, use original case.
dt = dateutil.parser.parse(toparse, default=default.asdatetime(),
tzinfos=pytz_abbr.tzinfos)
if dt is None:
raise ValueError("Was not able to parse date!")
if dt.tzinfo is pytz_abbr.unknown:
dt = dt.replace(tzinfo=None)
if dt.tzinfo is None:
if tzinfo is None:
tzinfo = localtz()
dt = cls(dt, tzinfo)
else:
if isinstance(dt.tzinfo, pytz_abbr.tzabbr):
abbr = dt.tzinfo
dt = dt.replace(tzinfo=None)
dt = cls(dt, abbr.zone, is_dst=abbr.dst)
dt = cls(dt)
return dt | Method which uses dateutil.parse and extras to try and parse the string.
Valid dates are found at:
http://labix.org/python-dateutil#head-1443e0f14ad5dff07efd465e080d1110920673d8-2
Other valid formats include:
"now" or "today"
"yesterday"
"tomorrow"
"5 minutes ago"
"10 hours ago"
"10h5m ago"
"start of yesterday"
"end of tomorrow"
"end of 3rd of March"
Args:
toparse: The string to parse.
tzinfo: Timezone for the resultant datetime_tz object should be in.
(Defaults to your local timezone.)
Returns:
New datetime_tz object.
Raises:
ValueError: If unable to make sense of the input. | Below is the instruction that describes the task:
### Input:
Method which uses dateutil.parse and extras to try and parse the string.
Valid dates are found at:
http://labix.org/python-dateutil#head-1443e0f14ad5dff07efd465e080d1110920673d8-2
Other valid formats include:
"now" or "today"
"yesterday"
"tomorrow"
"5 minutes ago"
"10 hours ago"
"10h5m ago"
"start of yesterday"
"end of tomorrow"
"end of 3rd of March"
Args:
toparse: The string to parse.
tzinfo: Timezone for the resultant datetime_tz object should be in.
(Defaults to your local timezone.)
Returns:
New datetime_tz object.
Raises:
ValueError: If unable to make sense of the input.
### Response:
def smartparse(cls, toparse, tzinfo=None):
"""Method which uses dateutil.parse and extras to try and parse the string.
Valid dates are found at:
http://labix.org/python-dateutil#head-1443e0f14ad5dff07efd465e080d1110920673d8-2
Other valid formats include:
"now" or "today"
"yesterday"
"tomorrow"
"5 minutes ago"
"10 hours ago"
"10h5m ago"
"start of yesterday"
"end of tomorrow"
"end of 3rd of March"
Args:
toparse: The string to parse.
tzinfo: Timezone for the resultant datetime_tz object should be in.
(Defaults to your local timezone.)
Returns:
New datetime_tz object.
Raises:
ValueError: If unable to make sense of the input.
"""
# Default for empty fields are:
# year/month/day == now
# hour/minute/second/microsecond == 0
toparse = toparse.strip()
if tzinfo is None:
dt = cls.now()
else:
dt = cls.now(tzinfo)
default = dt.replace(hour=0, minute=0, second=0, microsecond=0)
# Remove "start of " and "end of " prefix in the string
if toparse.lower().startswith("end of "):
toparse = toparse[7:].strip()
dt += datetime.timedelta(days=1)
dt = dt.replace(hour=0, minute=0, second=0, microsecond=0)
dt -= datetime.timedelta(microseconds=1)
default = dt
elif toparse.lower().startswith("start of "):
toparse = toparse[9:].strip()
dt = dt.replace(hour=0, minute=0, second=0, microsecond=0)
default = dt
# Handle strings with "now", "today", "yesterday", "tomorrow" and "ago".
# Need to use lowercase
toparselower = toparse.lower()
if toparselower in ["now", "today"]:
pass
elif toparselower == "yesterday":
dt -= datetime.timedelta(days=1)
elif toparselower in ("tomorrow", "tommorrow"):
# tommorrow is spelled wrong, but code out there might be depending on it
# working
dt += datetime.timedelta(days=1)
elif "ago" in toparselower:
# Remove the "ago" bit
toparselower = toparselower[:-3]
# Replace all "a day and an hour" with "1 day 1 hour"
toparselower = toparselower.replace("a ", "1 ")
toparselower = toparselower.replace("an ", "1 ")
toparselower = toparselower.replace(" and ", " ")
# Match the following
# 1 hour ago
# 1h ago
# 1 h ago
# 1 hour ago
# 2 hours ago
# Same with minutes, seconds, etc.
tocheck = ("seconds", "minutes", "hours", "days", "weeks", "months",
"years")
result = {}
for match in re.finditer("([0-9]+)([^0-9]*)", toparselower):
amount = int(match.group(1))
unit = match.group(2).strip()
for bit in tocheck:
regex = "^([%s]|((%s)s?))$" % (
bit[0], bit[:-1])
bitmatch = re.search(regex, unit)
if bitmatch:
result[bit] = amount
break
else:
raise ValueError("Was not able to parse date unit %r!" % unit)
delta = dateutil.relativedelta.relativedelta(**result)
dt -= delta
else:
# Handle strings with normal datetime format, use original case.
dt = dateutil.parser.parse(toparse, default=default.asdatetime(),
tzinfos=pytz_abbr.tzinfos)
if dt is None:
raise ValueError("Was not able to parse date!")
if dt.tzinfo is pytz_abbr.unknown:
dt = dt.replace(tzinfo=None)
if dt.tzinfo is None:
if tzinfo is None:
tzinfo = localtz()
dt = cls(dt, tzinfo)
else:
if isinstance(dt.tzinfo, pytz_abbr.tzabbr):
abbr = dt.tzinfo
dt = dt.replace(tzinfo=None)
dt = cls(dt, abbr.zone, is_dst=abbr.dst)
dt = cls(dt)
return dt |
def _request_api(self, **kwargs):
"""Wrap the calls the url, with the given arguments.
:param str url: Url to call with the given arguments
:param str method: [POST | GET] Method to use on the request
:param int status: Expected status code
"""
_url = kwargs.get('url')
_method = kwargs.get('method', 'GET')
_status = kwargs.get('status', 200)
counter = 0
if _method not in ['GET', 'POST']:
raise ValueError('Method is not GET or POST')
while True:
try:
res = REQ[_method](_url, cookies=self._cookie)
if res.status_code == _status:
break
else:
raise BadStatusException(res.content)
except requests.exceptions.BaseHTTPError:
if counter < self._retries:
counter += 1
continue
raise MaxRetryError
self._last_result = res
return res | Wrap the calls the url, with the given arguments.
:param str url: Url to call with the given arguments
:param str method: [POST | GET] Method to use on the request
:param int status: Expected status code | Below is the instruction that describes the task:
### Input:
Wrap the calls the url, with the given arguments.
:param str url: Url to call with the given arguments
:param str method: [POST | GET] Method to use on the request
:param int status: Expected status code
### Response:
def _request_api(self, **kwargs):
"""Wrap the calls the url, with the given arguments.
:param str url: Url to call with the given arguments
:param str method: [POST | GET] Method to use on the request
:param int status: Expected status code
"""
_url = kwargs.get('url')
_method = kwargs.get('method', 'GET')
_status = kwargs.get('status', 200)
counter = 0
if _method not in ['GET', 'POST']:
raise ValueError('Method is not GET or POST')
while True:
try:
res = REQ[_method](_url, cookies=self._cookie)
if res.status_code == _status:
break
else:
raise BadStatusException(res.content)
except requests.exceptions.BaseHTTPError:
if counter < self._retries:
counter += 1
continue
raise MaxRetryError
self._last_result = res
return res |
def startswith(self, other):
# type: (UrlPath) -> bool
"""
Return True if this path starts with the other path.
"""
try:
other = UrlPath.from_object(other)
except ValueError:
raise TypeError('startswith first arg must be UrlPath, str, PathParam, not {}'.format(type(other)))
else:
return self._nodes[:len(other._nodes)] == other._nodes | Below is the instruction that describes the task:
### Input:
Return True if this path starts with the other path.
### Response:
def startswith(self, other):
# type: (UrlPath) -> bool
"""
Return True if this path starts with the other path.
"""
try:
other = UrlPath.from_object(other)
except ValueError:
raise TypeError('startswith first arg must be UrlPath, str, PathParam, not {}'.format(type(other)))
else:
return self._nodes[:len(other._nodes)] == other._nodes |
def int_input(message, low, high, show_range = True):
'''
Ask a user for a int input between two values
args:
message (str): Prompt for user
low (int): Low value, user entered value must be > this value to be accepted
high (int): High value, user entered value must be < this value to be accepted
show_range (boolean, Default True): Print hint to user the range
returns:
int_in (int): Input integer
'''
int_in = low - 1
while (int_in < low) or (int_in > high):
if show_range:
suffix = ' (integer between ' + str(low) + ' and ' + str(high) + ')'
else:
suffix = ''
inp = input('Enter a ' + message + suffix + ': ')
if re.match('^-?[0-9]+$', inp) is not None:
int_in = int(inp)
else:
print(colored('Must be an integer, try again!', 'red'))
return int_in | Ask a user for a int input between two values
args:
message (str): Prompt for user
low (int): Low value, user entered value must be > this value to be accepted
high (int): High value, user entered value must be < this value to be accepted
show_range (boolean, Default True): Print hint to user the range
returns:
int_in (int): Input integer | Below is the instruction that describes the task:
### Input:
Ask a user for a int input between two values
args:
message (str): Prompt for user
low (int): Low value, user entered value must be > this value to be accepted
high (int): High value, user entered value must be < this value to be accepted
show_range (boolean, Default True): Print hint to user the range
returns:
int_in (int): Input integer
### Response:
def int_input(message, low, high, show_range = True):
'''
Ask a user for a int input between two values
args:
message (str): Prompt for user
low (int): Low value, user entered value must be > this value to be accepted
high (int): High value, user entered value must be < this value to be accepted
show_range (boolean, Default True): Print hint to user the range
returns:
int_in (int): Input integer
'''
int_in = low - 1
while (int_in < low) or (int_in > high):
if show_range:
suffix = ' (integer between ' + str(low) + ' and ' + str(high) + ')'
else:
suffix = ''
inp = input('Enter a ' + message + suffix + ': ')
if re.match('^-?[0-9]+$', inp) is not None:
int_in = int(inp)
else:
print(colored('Must be an integer, try again!', 'red'))
return int_in |
def get_user_settings_module(project_root: str):
"""Return project-specific user settings module, if it exists.
:param project_root: Absolute path to project root directory.
A project settings file is a file named `YSETTINGS_FILE` found at the top
level of the project root dir.
Return `None` if project root dir is not specified,
or if no such file is found.
Raise an exception if a file is found, but not importable.
The YSettings file can define 2 special module-level functions that
interact with the YABT CLI & config system:
1. `extend_cli`, if defined, takes the YABT `parser` object and may extend
it, to add custom command-line flags for the project.
(careful not to collide with YABT flags...)
2. `extend_config`, if defined, takes the YABT `config` object and the
parsed `args` object (returned by the the parser), and may extend the
config - should be used to reflect custom project CLI flags in the
config object.
Beyond that, the settings module is available in YBuild's under
`conf.settings` (except for the 2 special functions that are removed).
"""
if project_root:
project_settings_file = os.path.join(project_root, YSETTINGS_FILE)
if os.path.isfile(project_settings_file):
settings_loader = SourceFileLoader(
'settings', project_settings_file)
return settings_loader.load_module() | Return project-specific user settings module, if it exists.
:param project_root: Absolute path to project root directory.
A project settings file is a file named `YSETTINGS_FILE` found at the top
level of the project root dir.
Return `None` if project root dir is not specified,
or if no such file is found.
Raise an exception if a file is found, but not importable.
The YSettings file can define 2 special module-level functions that
interact with the YABT CLI & config system:
1. `extend_cli`, if defined, takes the YABT `parser` object and may extend
it, to add custom command-line flags for the project.
(careful not to collide with YABT flags...)
2. `extend_config`, if defined, takes the YABT `config` object and the
parsed `args` object (returned by the parser), and may extend the
config - should be used to reflect custom project CLI flags in the
config object.
Beyond that, the settings module is available in YBuild's under
`conf.settings` (except for the 2 special functions that are removed). | Below is the instruction that describes the task:
### Input:
Return project-specific user settings module, if it exists.
:param project_root: Absolute path to project root directory.
A project settings file is a file named `YSETTINGS_FILE` found at the top
level of the project root dir.
Return `None` if project root dir is not specified,
or if no such file is found.
Raise an exception if a file is found, but not importable.
The YSettings file can define 2 special module-level functions that
interact with the YABT CLI & config system:
1. `extend_cli`, if defined, takes the YABT `parser` object and may extend
it, to add custom command-line flags for the project.
(careful not to collide with YABT flags...)
2. `extend_config`, if defined, takes the YABT `config` object and the
parsed `args` object (returned by the the parser), and may extend the
config - should be used to reflect custom project CLI flags in the
config object.
Beyond that, the settings module is available in YBuild's under
`conf.settings` (except for the 2 special functions that are removed).
### Response:
def get_user_settings_module(project_root: str):
"""Return project-specific user settings module, if it exists.
:param project_root: Absolute path to project root directory.
A project settings file is a file named `YSETTINGS_FILE` found at the top
level of the project root dir.
Return `None` if project root dir is not specified,
or if no such file is found.
Raise an exception if a file is found, but not importable.
The YSettings file can define 2 special module-level functions that
interact with the YABT CLI & config system:
1. `extend_cli`, if defined, takes the YABT `parser` object and may extend
it, to add custom command-line flags for the project.
(careful not to collide with YABT flags...)
2. `extend_config`, if defined, takes the YABT `config` object and the
parsed `args` object (returned by the the parser), and may extend the
config - should be used to reflect custom project CLI flags in the
config object.
Beyond that, the settings module is available in YBuild's under
`conf.settings` (except for the 2 special functions that are removed).
"""
if project_root:
project_settings_file = os.path.join(project_root, YSETTINGS_FILE)
if os.path.isfile(project_settings_file):
settings_loader = SourceFileLoader(
'settings', project_settings_file)
return settings_loader.load_module() |
def estimate_frequency_for_zero(self, sample_rate: float, nbits=42) -> float:
"""
Calculates the frequency of at most nbits logical zeros and returns the mean of these frequencies
:param nbits:
:return:
"""
return self.__estimate_frequency_for_bit(False, sample_rate, nbits) | Calculates the frequency of at most nbits logical zeros and returns the mean of these frequencies
:param nbits:
:return: | Below is the instruction that describes the task:
### Input:
Calculates the frequency of at most nbits logical zeros and returns the mean of these frequencies
:param nbits:
:return:
### Response:
def estimate_frequency_for_zero(self, sample_rate: float, nbits=42) -> float:
"""
Calculates the frequency of at most nbits logical zeros and returns the mean of these frequencies
:param nbits:
:return:
"""
return self.__estimate_frequency_for_bit(False, sample_rate, nbits) |
def set_gss_host(self, gss_host, trust_dns=True, gssapi_requested=True):
"""
Normalize/canonicalize ``self.gss_host`` depending on various factors.
:param str gss_host:
The explicitly requested GSS-oriented hostname to connect to (i.e.
what the host's name is in the Kerberos database.) Defaults to
``self.hostname`` (which will be the 'real' target hostname and/or
host portion of given socket object.)
:param bool trust_dns:
Indicates whether or not DNS is trusted; if true, DNS will be used
to canonicalize the GSS hostname (which again will either be
``gss_host`` or the transport's default hostname.)
(Defaults to True due to backwards compatibility.)
:param bool gssapi_requested:
Whether GSSAPI key exchange or authentication was even requested.
If not, this is a no-op and nothing happens
(and ``self.gss_host`` is not set.)
(Defaults to True due to backwards compatibility.)
:returns: ``None``.
"""
# No GSSAPI in play == nothing to do
if not gssapi_requested:
return
# Obtain the correct host first - did user request a GSS-specific name
# to use that is distinct from the actual SSH target hostname?
if gss_host is None:
gss_host = self.hostname
# Finally, canonicalize via DNS if DNS is trusted.
if trust_dns and gss_host is not None:
gss_host = socket.getfqdn(gss_host)
# And set attribute for reference later.
self.gss_host = gss_host | Normalize/canonicalize ``self.gss_host`` depending on various factors.
:param str gss_host:
The explicitly requested GSS-oriented hostname to connect to (i.e.
what the host's name is in the Kerberos database.) Defaults to
``self.hostname`` (which will be the 'real' target hostname and/or
host portion of given socket object.)
:param bool trust_dns:
Indicates whether or not DNS is trusted; if true, DNS will be used
to canonicalize the GSS hostname (which again will either be
``gss_host`` or the transport's default hostname.)
(Defaults to True due to backwards compatibility.)
:param bool gssapi_requested:
Whether GSSAPI key exchange or authentication was even requested.
If not, this is a no-op and nothing happens
(and ``self.gss_host`` is not set.)
(Defaults to True due to backwards compatibility.)
:returns: ``None``. | Below is the instruction that describes the task:
### Input:
Normalize/canonicalize ``self.gss_host`` depending on various factors.
:param str gss_host:
The explicitly requested GSS-oriented hostname to connect to (i.e.
what the host's name is in the Kerberos database.) Defaults to
``self.hostname`` (which will be the 'real' target hostname and/or
host portion of given socket object.)
:param bool trust_dns:
Indicates whether or not DNS is trusted; if true, DNS will be used
to canonicalize the GSS hostname (which again will either be
``gss_host`` or the transport's default hostname.)
(Defaults to True due to backwards compatibility.)
:param bool gssapi_requested:
Whether GSSAPI key exchange or authentication was even requested.
If not, this is a no-op and nothing happens
(and ``self.gss_host`` is not set.)
(Defaults to True due to backwards compatibility.)
:returns: ``None``.
### Response:
def set_gss_host(self, gss_host, trust_dns=True, gssapi_requested=True):
"""
Normalize/canonicalize ``self.gss_host`` depending on various factors.
:param str gss_host:
The explicitly requested GSS-oriented hostname to connect to (i.e.
what the host's name is in the Kerberos database.) Defaults to
``self.hostname`` (which will be the 'real' target hostname and/or
host portion of given socket object.)
:param bool trust_dns:
Indicates whether or not DNS is trusted; if true, DNS will be used
to canonicalize the GSS hostname (which again will either be
``gss_host`` or the transport's default hostname.)
(Defaults to True due to backwards compatibility.)
:param bool gssapi_requested:
Whether GSSAPI key exchange or authentication was even requested.
If not, this is a no-op and nothing happens
(and ``self.gss_host`` is not set.)
(Defaults to True due to backwards compatibility.)
:returns: ``None``.
"""
# No GSSAPI in play == nothing to do
if not gssapi_requested:
return
# Obtain the correct host first - did user request a GSS-specific name
# to use that is distinct from the actual SSH target hostname?
if gss_host is None:
gss_host = self.hostname
# Finally, canonicalize via DNS if DNS is trusted.
if trust_dns and gss_host is not None:
gss_host = socket.getfqdn(gss_host)
# And set attribute for reference later.
self.gss_host = gss_host |
def get_docstring(obj):
"""Extract the docstring from an object as individual lines.
Parameters
----------
obj : object
The Python object (class, function or method) to extract docstrings
from.
Returns
-------
lines : `list` of `str`
Individual docstring lines with common indentation removed, and
newline characters stripped.
Notes
-----
If the object does not have a docstring, a docstring with the content
``"Undocumented."`` is created.
"""
docstring = getdoc(obj, allow_inherited=True)
if docstring is None:
logger = getLogger(__name__)
logger.warning("Object %s doesn't have a docstring.", obj)
docstring = 'Undocumented'
# ignore is simply the number of initial lines to ignore when determining
# the docstring's baseline indent level. We really want "1" here.
return prepare_docstring(docstring, ignore=1) | Extract the docstring from an object as individual lines.
Parameters
----------
obj : object
The Python object (class, function or method) to extract docstrings
from.
Returns
-------
lines : `list` of `str`
Individual docstring lines with common indentation removed, and
newline characters stripped.
Notes
-----
If the object does not have a docstring, a docstring with the content
``"Undocumented."`` is created. | Below is the instruction that describes the task:
### Input:
Extract the docstring from an object as individual lines.
Parameters
----------
obj : object
The Python object (class, function or method) to extract docstrings
from.
Returns
-------
lines : `list` of `str`
Individual docstring lines with common indentation removed, and
newline characters stripped.
Notes
-----
If the object does not have a docstring, a docstring with the content
``"Undocumented."`` is created.
### Response:
def get_docstring(obj):
"""Extract the docstring from an object as individual lines.
Parameters
----------
obj : object
The Python object (class, function or method) to extract docstrings
from.
Returns
-------
lines : `list` of `str`
Individual docstring lines with common indentation removed, and
newline characters stripped.
Notes
-----
If the object does not have a docstring, a docstring with the content
``"Undocumented."`` is created.
"""
docstring = getdoc(obj, allow_inherited=True)
if docstring is None:
logger = getLogger(__name__)
logger.warning("Object %s doesn't have a docstring.", obj)
docstring = 'Undocumented'
# ignore is simply the number of initial lines to ignore when determining
# the docstring's baseline indent level. We really want "1" here.
return prepare_docstring(docstring, ignore=1) |
def get_number_of_app_ports(app):
"""
Get the number of ports for the given app JSON. This roughly follows the
logic in marathon-lb for finding app IPs/ports, although we are only
interested in the quantity of ports an app should have and don't consider
the specific IPs/ports of individual tasks:
https://github.com/mesosphere/marathon-lb/blob/v1.10.3/utils.py#L393-L415
:param app: The app JSON from the Marathon API.
:return: The number of ports for the app.
"""
mode = _get_networking_mode(app)
ports_list = None
if mode == 'host':
ports_list = _get_port_definitions(app)
elif mode == 'container/bridge':
ports_list = _get_port_definitions(app)
if ports_list is None:
ports_list = _get_container_port_mappings(app)
elif mode == 'container':
ports_list = _get_ip_address_discovery_ports(app)
# Marathon 1.5+: the ipAddress field is missing -> ports_list is None
# Marathon <1.5: the ipAddress field can be present, but ports_list can
# still be empty while the container port mapping is not :-/
if not ports_list:
ports_list = _get_container_port_mappings(app)
else:
raise RuntimeError(
"Unknown Marathon networking mode '{}'".format(mode))
return len(ports_list) | Get the number of ports for the given app JSON. This roughly follows the
logic in marathon-lb for finding app IPs/ports, although we are only
interested in the quantity of ports an app should have and don't consider
the specific IPs/ports of individual tasks:
https://github.com/mesosphere/marathon-lb/blob/v1.10.3/utils.py#L393-L415
:param app: The app JSON from the Marathon API.
:return: The number of ports for the app. | Below is the instruction that describes the task:
### Input:
Get the number of ports for the given app JSON. This roughly follows the
logic in marathon-lb for finding app IPs/ports, although we are only
interested in the quantity of ports an app should have and don't consider
the specific IPs/ports of individual tasks:
https://github.com/mesosphere/marathon-lb/blob/v1.10.3/utils.py#L393-L415
:param app: The app JSON from the Marathon API.
:return: The number of ports for the app.
### Response:
def get_number_of_app_ports(app):
"""
Get the number of ports for the given app JSON. This roughly follows the
logic in marathon-lb for finding app IPs/ports, although we are only
interested in the quantity of ports an app should have and don't consider
the specific IPs/ports of individual tasks:
https://github.com/mesosphere/marathon-lb/blob/v1.10.3/utils.py#L393-L415
:param app: The app JSON from the Marathon API.
:return: The number of ports for the app.
"""
mode = _get_networking_mode(app)
ports_list = None
if mode == 'host':
ports_list = _get_port_definitions(app)
elif mode == 'container/bridge':
ports_list = _get_port_definitions(app)
if ports_list is None:
ports_list = _get_container_port_mappings(app)
elif mode == 'container':
ports_list = _get_ip_address_discovery_ports(app)
# Marathon 1.5+: the ipAddress field is missing -> ports_list is None
# Marathon <1.5: the ipAddress field can be present, but ports_list can
# still be empty while the container port mapping is not :-/
if not ports_list:
ports_list = _get_container_port_mappings(app)
else:
raise RuntimeError(
"Unknown Marathon networking mode '{}'".format(mode))
return len(ports_list) |
def is_element_in_an_iframe(self, selector, by=By.CSS_SELECTOR):
""" Returns True if the selector's element is located in an iframe.
Otherwise returns False. """
selector, by = self.__recalculate_selector(selector, by)
if self.is_element_present(selector, by=by):
return False
soup = self.get_beautiful_soup()
iframe_list = soup.select('iframe')
for iframe in iframe_list:
iframe_identifier = None
if iframe.has_attr('name') and len(iframe['name']) > 0:
iframe_identifier = iframe['name']
elif iframe.has_attr('id') and len(iframe['id']) > 0:
iframe_identifier = iframe['id']
else:
continue
self.switch_to_frame(iframe_identifier)
if self.is_element_present(selector, by=by):
self.switch_to_default_content()
return True
self.switch_to_default_content()
return False | Returns True if the selector's element is located in an iframe.
Otherwise returns False. | Below is the the instruction that describes the task:
### Input:
Returns True if the selector's element is located in an iframe.
Otherwise returns False.
### Response:
def is_element_in_an_iframe(self, selector, by=By.CSS_SELECTOR):
""" Returns True if the selector's element is located in an iframe.
Otherwise returns False. """
selector, by = self.__recalculate_selector(selector, by)
if self.is_element_present(selector, by=by):
return False
soup = self.get_beautiful_soup()
iframe_list = soup.select('iframe')
for iframe in iframe_list:
iframe_identifier = None
if iframe.has_attr('name') and len(iframe['name']) > 0:
iframe_identifier = iframe['name']
elif iframe.has_attr('id') and len(iframe['id']) > 0:
iframe_identifier = iframe['id']
else:
continue
self.switch_to_frame(iframe_identifier)
if self.is_element_present(selector, by=by):
self.switch_to_default_content()
return True
self.switch_to_default_content()
return False |
def filter(self, *args, **kwargs):
'''filter
High-level api: Filter the config using xpath method. If namespaces is
not given, self.ns is used by default.
Returns
-------
Config
A new Config instance which has less content according to your
filter xpath expression.
'''
ancestors = set()
filtrates = set()
config = type(self)(self.device, deepcopy(self.ele))
results = config.xpath(*args, **kwargs)
if isinstance(results, list):
for node in results:
if etree.iselement(node):
ancestors |= set(list(node.iterancestors()))
filtrates.add(node)
if filtrates:
config._node_filter(config.ele, ancestors, filtrates)
else:
config.ele = etree.Element(config_tag, nsmap={'nc': nc_url})
return config | filter
High-level api: Filter the config using xpath method. If namespaces is
not given, self.ns is used by default.
Returns
-------
Config
A new Config instance which has less content according to your
filter xpath expression. | Below is the the instruction that describes the task:
### Input:
filter
High-level api: Filter the config using xpath method. If namespaces is
not given, self.ns is used by default.
Returns
-------
Config
A new Config instance which has less content according to your
filter xpath expression.
### Response:
def filter(self, *args, **kwargs):
'''filter
High-level api: Filter the config using xpath method. If namespaces is
not given, self.ns is used by default.
Returns
-------
Config
A new Config instance which has less content according to your
filter xpath expression.
'''
ancestors = set()
filtrates = set()
config = type(self)(self.device, deepcopy(self.ele))
results = config.xpath(*args, **kwargs)
if isinstance(results, list):
for node in results:
if etree.iselement(node):
ancestors |= set(list(node.iterancestors()))
filtrates.add(node)
if filtrates:
config._node_filter(config.ele, ancestors, filtrates)
else:
config.ele = etree.Element(config_tag, nsmap={'nc': nc_url})
return config |
def count(self, query=None, distinct=False, distinct_fields=None):
"""
Returns the number of rows satisying a criteria, if provided.
:Parameters:
- query: specify the WHERE clause
- distinct : boolean, whether to use DISTINCT()
- distinct_fields : list or tuple or strings. Each string is
a column name used inside COUNT(). If none, '*' will be
used.
:Return: int, the number of rows
"""
if distinct_fields is None:
self.__ensure_columns()
if distinct:
field = ','.join(self.columns)
else:
field = '*'
else:
field = ','.join(distinct_fields)
if distinct:
count_str = 'DISTINCT(%s)' %(field)
else:
count_str = field
count_str = 'COUNT(%s)' %(count_str)
sql = 'SELECT %s FROM %s' %(count_str, self.name)
if query is not None:
query_str = build_query(query)
if query_str:
sql = sql + ' WHERE ' + query_str
self.cursor.execute(sql)
count = self.cursor.fetchone()[0]
return count | Returns the number of rows satisying a criteria, if provided.
:Parameters:
- query: specify the WHERE clause
- distinct : boolean, whether to use DISTINCT()
- distinct_fields : list or tuple or strings. Each string is
a column name used inside COUNT(). If none, '*' will be
used.
:Return: int, the number of rows | Below is the the instruction that describes the task:
### Input:
Returns the number of rows satisying a criteria, if provided.
:Parameters:
- query: specify the WHERE clause
- distinct : boolean, whether to use DISTINCT()
- distinct_fields : list or tuple or strings. Each string is
a column name used inside COUNT(). If none, '*' will be
used.
:Return: int, the number of rows
### Response:
def count(self, query=None, distinct=False, distinct_fields=None):
"""
Returns the number of rows satisying a criteria, if provided.
:Parameters:
- query: specify the WHERE clause
- distinct : boolean, whether to use DISTINCT()
- distinct_fields : list or tuple or strings. Each string is
a column name used inside COUNT(). If none, '*' will be
used.
:Return: int, the number of rows
"""
if distinct_fields is None:
self.__ensure_columns()
if distinct:
field = ','.join(self.columns)
else:
field = '*'
else:
field = ','.join(distinct_fields)
if distinct:
count_str = 'DISTINCT(%s)' %(field)
else:
count_str = field
count_str = 'COUNT(%s)' %(count_str)
sql = 'SELECT %s FROM %s' %(count_str, self.name)
if query is not None:
query_str = build_query(query)
if query_str:
sql = sql + ' WHERE ' + query_str
self.cursor.execute(sql)
count = self.cursor.fetchone()[0]
return count |
def absolute_name(self):
"""Get the absolute name of ``self``.
Returns:
str: the absolute name.
"""
if self.is_root() or self.parent.is_root():
return utils.slugify(self.name)
return ':'.join([self.parent.absolute_name, utils.slugify(self.name)]) | Get the absolute name of ``self``.
Returns:
str: the absolute name. | Below is the the instruction that describes the task:
### Input:
Get the absolute name of ``self``.
Returns:
str: the absolute name.
### Response:
def absolute_name(self):
"""Get the absolute name of ``self``.
Returns:
str: the absolute name.
"""
if self.is_root() or self.parent.is_root():
return utils.slugify(self.name)
return ':'.join([self.parent.absolute_name, utils.slugify(self.name)]) |
def send_single_text(self, sender, receiver, content):
"""
发送单聊文本消息
:param sender: 发送人
:param receiver: 接收人成员 ID
:param content: 消息内容
:return: 返回的 JSON 数据包
"""
return self.send_text(sender, 'single', receiver, content) | 发送单聊文本消息
:param sender: 发送人
:param receiver: 接收人成员 ID
:param content: 消息内容
:return: 返回的 JSON 数据包 | Below is the the instruction that describes the task:
### Input:
发送单聊文本消息
:param sender: 发送人
:param receiver: 接收人成员 ID
:param content: 消息内容
:return: 返回的 JSON 数据包
### Response:
def send_single_text(self, sender, receiver, content):
"""
发送单聊文本消息
:param sender: 发送人
:param receiver: 接收人成员 ID
:param content: 消息内容
:return: 返回的 JSON 数据包
"""
return self.send_text(sender, 'single', receiver, content) |
def enable_save_reply_handlers(self, delay=120, filename="./.handler-saves/reply.save"):
"""
Enable saving reply handlers (by default saving disable)
:param delay: Delay between changes in handlers and saving
:param filename: Filename of save file
"""
self.reply_saver = Saver(self.reply_handlers, filename, delay) | Enable saving reply handlers (by default saving disable)
:param delay: Delay between changes in handlers and saving
:param filename: Filename of save file | Below is the the instruction that describes the task:
### Input:
Enable saving reply handlers (by default saving disable)
:param delay: Delay between changes in handlers and saving
:param filename: Filename of save file
### Response:
def enable_save_reply_handlers(self, delay=120, filename="./.handler-saves/reply.save"):
"""
Enable saving reply handlers (by default saving disable)
:param delay: Delay between changes in handlers and saving
:param filename: Filename of save file
"""
self.reply_saver = Saver(self.reply_handlers, filename, delay) |
def get_lines_from_file(filename, lineno, context_lines):
"""
Returns context_lines before and after lineno from file.
Returns (pre_context_lineno, pre_context, context_line, post_context).
"""
def get_lines(start, end):
return [linecache.getline(filename, l).rstrip() for l in range(start, end)]
lower_bound = max(1, lineno - context_lines)
upper_bound = lineno + context_lines
linecache.checkcache(filename)
pre_context = get_lines(lower_bound, lineno)
context_line = linecache.getline(filename, lineno).rstrip()
post_context = get_lines(lineno + 1, upper_bound)
return lower_bound, pre_context, context_line, post_context | Returns context_lines before and after lineno from file.
Returns (pre_context_lineno, pre_context, context_line, post_context). | Below is the the instruction that describes the task:
### Input:
Returns context_lines before and after lineno from file.
Returns (pre_context_lineno, pre_context, context_line, post_context).
### Response:
def get_lines_from_file(filename, lineno, context_lines):
"""
Returns context_lines before and after lineno from file.
Returns (pre_context_lineno, pre_context, context_line, post_context).
"""
def get_lines(start, end):
return [linecache.getline(filename, l).rstrip() for l in range(start, end)]
lower_bound = max(1, lineno - context_lines)
upper_bound = lineno + context_lines
linecache.checkcache(filename)
pre_context = get_lines(lower_bound, lineno)
context_line = linecache.getline(filename, lineno).rstrip()
post_context = get_lines(lineno + 1, upper_bound)
return lower_bound, pre_context, context_line, post_context |
def import_list(
self,
listName,
pathToTaskpaperDoc
):
"""
*import tasks from a reminder.app list into a given taskpaper document*
**Key Arguments:**
- ``listName`` -- the name of the reminders list
- ``pathToTaskpaperDoc`` -- the path to the taskpaper document to import the tasks into
**Usage:**
The following will import tasks from a Reminder.app list into a taskpaper document. Tasks are added to any existing content in the taskpaper document, or if the docuement doesn't yet exist it will be created for you. Tasks are deleted from the remainds list once import is complete.
.. code-block:: python
r.import_list(
listName="listname",
pathToTaskpaperDoc="/path/to/my/doc.taskpaper"
)
"""
self.log.info('starting the ``import_list`` method')
newTasks = self._get_tasks_from_reminder_list(listName)
self._add_tasks_to_taskpaper(
pathToTaskpaperDoc=pathToTaskpaperDoc,
taskString=newTasks
)
self._delete_reminders_from_list(
listName=listName
)
self.log.info('completed the ``import_list`` method')
return newTasks | *import tasks from a reminder.app list into a given taskpaper document*
**Key Arguments:**
- ``listName`` -- the name of the reminders list
- ``pathToTaskpaperDoc`` -- the path to the taskpaper document to import the tasks into
**Usage:**
The following will import tasks from a Reminder.app list into a taskpaper document. Tasks are added to any existing content in the taskpaper document, or if the docuement doesn't yet exist it will be created for you. Tasks are deleted from the remainds list once import is complete.
.. code-block:: python
r.import_list(
listName="listname",
pathToTaskpaperDoc="/path/to/my/doc.taskpaper"
) | Below is the the instruction that describes the task:
### Input:
*import tasks from a reminder.app list into a given taskpaper document*
**Key Arguments:**
- ``listName`` -- the name of the reminders list
- ``pathToTaskpaperDoc`` -- the path to the taskpaper document to import the tasks into
**Usage:**
The following will import tasks from a Reminder.app list into a taskpaper document. Tasks are added to any existing content in the taskpaper document, or if the docuement doesn't yet exist it will be created for you. Tasks are deleted from the remainds list once import is complete.
.. code-block:: python
r.import_list(
listName="listname",
pathToTaskpaperDoc="/path/to/my/doc.taskpaper"
)
### Response:
def import_list(
self,
listName,
pathToTaskpaperDoc
):
"""
*import tasks from a reminder.app list into a given taskpaper document*
**Key Arguments:**
- ``listName`` -- the name of the reminders list
- ``pathToTaskpaperDoc`` -- the path to the taskpaper document to import the tasks into
**Usage:**
The following will import tasks from a Reminder.app list into a taskpaper document. Tasks are added to any existing content in the taskpaper document, or if the docuement doesn't yet exist it will be created for you. Tasks are deleted from the remainds list once import is complete.
.. code-block:: python
r.import_list(
listName="listname",
pathToTaskpaperDoc="/path/to/my/doc.taskpaper"
)
"""
self.log.info('starting the ``import_list`` method')
newTasks = self._get_tasks_from_reminder_list(listName)
self._add_tasks_to_taskpaper(
pathToTaskpaperDoc=pathToTaskpaperDoc,
taskString=newTasks
)
self._delete_reminders_from_list(
listName=listName
)
self.log.info('completed the ``import_list`` method')
return newTasks |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.