text_prompt stringlengths 157 13.1k | code_prompt stringlengths 7 19.8k ⌀ |
|---|---|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_memory(s):
    """Converts bytes expression to number of mebibytes.

    If no unit is specified, ``MiB`` is used.
    """
    if isinstance(s, integer):
        # Integers are taken to already be in MiB.
        out = s
    elif isinstance(s, float):
        # Fractional MiB round up to a whole mebibyte.
        out = math_ceil(s)
    elif isinstance(s, string):
        cleaned = s.replace(' ', '')
        if not cleaned:
            raise context.ValueError("Could not interpret %r as a byte unit"
                                     % cleaned)
        if cleaned[0].isdigit():
            # Count the alphabetic unit suffix by scanning backwards.
            n_alpha = 0
            for ch in reversed(cleaned):
                if not ch.isalpha():
                    break
                n_alpha += 1
            split_at = len(cleaned) - n_alpha
            number_part = cleaned[:split_at]
            unit_part = cleaned[split_at:]
            try:
                n = float(number_part)
            except ValueError:
                raise context.ValueError("Could not interpret %r as a number"
                                         % number_part)
        else:
            # No leading digit: the whole string is a unit, quantity 1.
            n = 1
            unit_part = cleaned
        try:
            multiplier = _byte_sizes[unit_part.lower()]
        except KeyError:
            raise context.ValueError("Could not interpret %r as a byte unit"
                                     % unit_part)
        # Convert bytes -> MiB, rounding up to a whole mebibyte.
        out = math_ceil(n * multiplier / (2 ** 20))
    else:
        raise context.TypeError("memory must be an integer, got %r"
                                % type(s).__name__)
    if out < 0:
        raise context.ValueError("memory must be positive")
    return out
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def from_default(cls):
    """The default security configuration.

    Usually this loads the credentials stored in the configuration
    directory (``~/.skein`` by default). If these credentials don't
    already exist, new ones will be created.

    When run in a YARN container started by Skein, this loads the same
    security credentials as used for the current application.
    """
    # Local import — presumably to avoid a circular import with .core;
    # confirm before hoisting to module level.
    from .core import properties
    # Are we in a container started by skein?
    if properties.application_id is not None:
        if properties.container_dir is not None:
            cert_path = os.path.join(properties.container_dir, '.skein.crt')
            key_path = os.path.join(properties.container_dir, '.skein.pem')
            if os.path.exists(cert_path) and os.path.exists(key_path):
                return Security(cert_file=cert_path, key_file=key_path)
        # In a container but the expected files are missing: fail rather
        # than silently falling back to the global credentials.
        raise context.FileNotFoundError(
            "Failed to resolve .skein.{crt,pem} in 'LOCAL_DIRS'")
    # Try to load from config_dir, and fallback to minting new credentials
    try:
        return cls.from_directory(properties.config_dir)
    except FileNotFoundError:
        pass
    new = cls.new_credentials()
    try:
        out = new.to_directory(properties.config_dir)
        context.warn("Skein global security credentials not found, "
                     "writing now to %r." % properties.config_dir)
    except FileExistsError:
        # Race condition between competing processes, use the credentials
        # written by the other process.
        out = cls.from_directory(properties.config_dir)
    return out
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def from_directory(cls, directory):
    """Create a security object from a directory.

    Relies on standard names for each file (``skein.crt`` and
    ``skein.pem``).
    """
    paths = {'cert': os.path.join(directory, 'skein.crt'),
             'key': os.path.join(directory, 'skein.pem')}
    # Validate both files exist before constructing the Security object.
    for name in ('cert', 'key'):
        if not os.path.exists(paths[name]):
            raise context.FileNotFoundError(
                "Security %s file not found at %r" % (name, paths[name])
            )
    return Security(cert_file=paths['cert'], key_file=paths['key'])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def to_directory(self, directory, force=False):
    """Write this security object to a directory.

    Parameters
    ----------
    directory : str
        The directory to write the configuration to.
    force : bool, optional
        If security credentials already exist at this location, an error
        will be raised by default. Set to True to overwrite existing files.

    Returns
    -------
    security : Security
        A new security object backed by the written files.
    """
    self._validate()
    # Create directory if it doesn't exist
    makedirs(directory, exist_ok=True)
    cert_path = os.path.join(directory, 'skein.crt')
    key_path = os.path.join(directory, 'skein.pem')
    cert_bytes = self._get_bytes('cert')
    key_bytes = self._get_bytes('key')
    # Hold a file lock around the check-then-write sequence so concurrent
    # processes cannot interleave between the existence check and creation.
    lock_path = os.path.join(directory, 'skein.lock')
    with lock_file(lock_path):
        for path, name in [(cert_path, 'skein.crt'), (key_path, 'skein.pem')]:
            if os.path.exists(path):
                if force:
                    os.unlink(path)
                else:
                    msg = ("%r file already exists, use `%s` to overwrite" %
                           (name, '--force' if context.is_cli else 'force'))
                    raise context.FileExistsError(msg)
        # O_EXCL fails if the file reappeared after the check above;
        # 0o600 keeps the private key readable by the owner only.
        flags = os.O_WRONLY | os.O_CREAT | os.O_EXCL
        for path, data in [(cert_path, cert_bytes), (key_path, key_bytes)]:
            with os.fdopen(os.open(path, flags, 0o600), 'wb') as fil:
                fil.write(data)
    return Security(cert_file=cert_path, key_file=key_path)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _from_any(cls, spec):
    """Generic creation method for all types accepted as ``spec``.

    Accepts a file path (str), a dict, or an existing instance of ``cls``.
    """
    if isinstance(spec, str):
        return cls.from_file(spec)
    if isinstance(spec, dict):
        return cls.from_dict(spec)
    if isinstance(spec, cls):
        return spec
    raise context.TypeError("spec must be either an ApplicationSpec, "
                            "path, or dict, got "
                            "%s" % type(spec).__name__)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def from_file(cls, path, format='infer'):
    """Create an instance from a json or yaml file.

    Parameters
    ----------
    path : str
        The path to the file to load.
    format : {'infer', 'json', 'yaml'}, optional
        The file format. By default the format is inferred from the file
        extension.
    """
    format = _infer_format(path, format=format)
    # Remember where the file lived so relative paths can be resolved.
    origin = os.path.abspath(os.path.dirname(path))
    with open(path) as f:
        raw = f.read()
    parser = json.loads if format == 'json' else yaml.safe_load
    return cls.from_dict(parser(raw), _origin=origin)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def to_file(self, path, format='infer', skip_nulls=True):
    """Write object to a file.

    Parameters
    ----------
    path : str
        The path of the file to write.
    format : {'infer', 'json', 'yaml'}, optional
        The file format. By default the format is inferred from the file
        extension.
    skip_nulls : bool, optional
        By default null values are skipped in the output. Set to False to
        output all fields.
    """
    format = _infer_format(path, format=format)
    # Dispatch to the matching serializer (to_json / to_yaml).
    serializer = getattr(self, 'to_' + format)
    with open(path, mode='w') as f:
        f.write(serializer(skip_nulls=skip_nulls))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def lock_file(path):
    """File based lock on ``path``.

    Creates a file based lock. When acquired, other processes or threads
    are prevented from acquiring the same lock until it is released.
    """
    # _paths_lock serializes access to the process-wide registry so two
    # threads asking for the same path always share one _FileLock object.
    with _paths_lock:
        lock = _paths_to_locks.get(path)
        if lock is None:
            _paths_to_locks[path] = lock = _FileLock(path)
    return lock
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def grpc_fork_support_disabled():
    """Temporarily disable fork support in gRPC.

    Fork + exec has always been supported, but the recent fork handling
    code in gRPC (>= 1.15) results in extraneous error logs currently.
    For now we explicitly disable fork support for gRPC clients we create.

    Yields inside a context where ``GRPC_ENABLE_FORK_SUPPORT=0``; any
    pre-existing value of the variable is restored on exit (the previous
    implementation unconditionally deleted it, clobbering user settings).
    """
    if LooseVersion(GRPC_VERSION) < '1.18.0':
        key = 'GRPC_ENABLE_FORK_SUPPORT'
        # Save the caller's value (None if unset) so we can restore it.
        original = os.environ.get(key)
        try:
            os.environ[key] = '0'
            yield
        finally:
            if original is None:
                # pop() tolerates the key having been removed meanwhile.
                os.environ.pop(key, None)
            else:
                os.environ[key] = original
    else:
        yield
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def humanize_timedelta(td):
    """Pretty-print a timedelta in a human readable format."""
    total = int(td.total_seconds())
    hours = total // 3600
    remainder = total % 3600
    mins = remainder // 60
    secs = remainder % 60
    # Show the two most significant nonzero units only.
    if hours:
        return '%dh %dm' % (hours, mins)
    if mins:
        return '%dm' % mins
    return '%ds' % secs
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def datetime_to_millis(x):
    """Convert a `datetime.datetime` to milliseconds since the epoch.

    Returns None when ``x`` is None.
    """
    if x is None:
        return None
    if hasattr(x, 'timestamp'):
        # Python >= 3.3
        seconds = x.timestamp()
    elif x.tzinfo is None:
        # Timezone naive: interpret in the local timezone via mktime.
        time_tuple = (x.year, x.month, x.day,
                      x.hour, x.minute, x.second,
                      -1, -1, -1)
        seconds = time.mktime(time_tuple) + x.microsecond / 1e6
    else:
        # Timezone aware
        seconds = (x - _EPOCH).total_seconds()
    return int(seconds * 1000)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def format_table(columns, rows):
    """Formats an ascii table for given columns and rows.

    Parameters
    ----------
    columns : list
        The column names.
    rows : list of tuples
        The rows in the table. Each tuple must be the same length as
        ``columns``.
    """
    str_rows = [tuple(str(cell) for cell in row) for row in rows]
    headers = tuple(str(col).upper() for col in columns)
    # Each column is as wide as its widest cell (header included).
    if str_rows:
        widths = tuple(max(max(len(cell) for cell in col_cells), len(h))
                       for col_cells, h in zip(zip(*str_rows), headers))
    else:
        widths = tuple(len(h) for h in headers)
    template = (' '.join('%%-%ds' for _ in headers)) % widths
    lines = [(template % headers).strip()]
    lines.extend((template % row).strip() for row in str_rows)
    return '\n'.join(lines)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def build_attrs(self, *args, **kwargs):
    """Add select2 data attributes."""
    attrs = super(Select2Mixin, self).build_attrs(*args, **kwargs)
    # Clearing is only offered for optional fields.
    allow_clear = 'false' if self.is_required else 'true'
    attrs.setdefault('data-allow-clear', allow_clear)
    attrs.setdefault('data-placeholder', '')
    attrs.setdefault('data-minimum-input-length', 0)
    # Append our CSS hook without clobbering existing classes.
    if 'class' in attrs:
        attrs['class'] = attrs['class'] + ' django-select2'
    else:
        attrs['class'] = 'django-select2'
    return attrs
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def optgroups(self, name, value, attrs=None):
    """Add empty option for clearable selects."""
    clearable = not self.is_required and not self.allow_multiple_selected
    if clearable:
        # Prepend a blank choice so select2 can render the placeholder.
        self.choices = [('', '')] + list(self.choices)
    return super(Select2Mixin, self).optgroups(name, value, attrs=attrs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_media(self):
    """Construct Media as a dynamic property.

    .. Note:: For more information visit
        https://docs.djangoproject.com/en/stable/topics/forms/media/#media-as-a-dynamic-property
    """
    lang = get_language()
    i18n_name = SELECT2_TRANSLATIONS.get(lang)
    # Only ship a translation file that is actually available.
    if i18n_name not in settings.SELECT2_I18N_AVAILABLE_LANGUAGES:
        i18n_name = None
    js = ()
    if settings.SELECT2_JS:
        js += (settings.SELECT2_JS,)
    if i18n_name:
        js += ('%s/%s.js' % (settings.SELECT2_I18N_PATH, i18n_name),)
    js += ('django_select2/django_select2.js',)
    css = (settings.SELECT2_CSS,) if settings.SELECT2_CSS else ()
    return forms.Media(
        js=js,
        css={'screen': css}
    )
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def build_attrs(self, *args, **kwargs):
    """Add select2's tag attributes."""
    defaults = (
        ('data-minimum-input-length', 1),
        ('data-tags', 'true'),
        ('data-token-separators', '[",", " "]'),
    )
    for attr, value in defaults:
        self.attrs.setdefault(attr, value)
    return super(Select2TagMixin, self).build_attrs(*args, **kwargs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def build_attrs(self, *args, **kwargs):
    """Set select2's AJAX attributes."""
    attrs = super(HeavySelect2Mixin, self).build_attrs(*args, **kwargs)
    # encrypt instance Id
    # NOTE(review): signing.dumps signs rather than encrypts, and id(self)
    # is only unique for the object's lifetime — confirm key reuse across
    # instances is acceptable here.
    self.widget_id = signing.dumps(id(self))
    attrs['data-field_id'] = self.widget_id
    attrs.setdefault('data-ajax--url', self.get_url())
    attrs.setdefault('data-ajax--cache', "true")
    attrs.setdefault('data-ajax--type', "GET")
    attrs.setdefault('data-minimum-input-length', 2)
    if self.dependent_fields:
        attrs.setdefault('data-select2-dependent-fields', " ".join(self.dependent_fields))
    # Assumes a base mixin already set a 'class' entry (KeyError otherwise).
    attrs['class'] += ' django-select2-heavy'
    return attrs
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def render(self, *args, **kwargs):
    """Render widget and register it in Django's cache."""
    markup = super(HeavySelect2Mixin, self).render(*args, **kwargs)
    # Cache after rendering so the AJAX view can later recover this widget.
    self.set_to_cache()
    return markup
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_to_cache(self):
    """Add widget object to Django's cache.

    You may need to overwrite this method, to pickle all information
    that is required to serve your JSON response view.
    """
    try:
        payload = {
            'widget': self,
            'url': self.get_url(),
        }
        cache.set(self._get_cache_key(), payload)
    except (PicklingError, AttributeError):
        # The cache backend could not serialize this widget.
        msg = "You need to overwrite \"set_to_cache\" or ensure that %s is serialisable."
        raise NotImplementedError(msg % self.__class__.__name__)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_to_cache(self):
    """Add widget's attributes to Django's cache.

    Split the QuerySet, to not pickle the result set.
    """
    queryset = self.get_queryset()
    # Store an empty clone plus the query object instead of the QuerySet
    # itself: pickling a QuerySet would also pickle its result cache.
    cache.set(self._get_cache_key(), {
        'queryset':
        [
            queryset.none(),
            queryset.query,
        ],
        'cls': self.__class__,
        'search_fields': tuple(self.search_fields),
        'max_results': int(self.max_results),
        'url': str(self.get_url()),
        'dependent_fields': dict(self.dependent_fields),
    })
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def filter_queryset(self, request, term, queryset=None, **dependent_fields):
    """Return QuerySet filtered by search_fields matching the passed term.

    Args:
        request (django.http.request.HttpRequest): Passed from the JSON
            view; can be used to dynamically alter the response queryset.
        term (str): Search term.
        queryset (django.db.models.query.QuerySet): QuerySet to select
            choices from.
        **dependent_fields: Dependent fields and their values. If you want
            to inherit from ModelSelect2Mixin and later call this method,
            be sure to pop everything from keyword arguments that is not a
            dependent field.

    Returns:
        QuerySet: Filtered QuerySet.
    """
    if queryset is None:
        queryset = self.get_queryset()
    search_fields = self.get_search_fields()
    select = Q()
    # Treat any whitespace as a token separator.
    term = term.replace('\t', ' ')
    term = term.replace('\n', ' ')
    # Every token must match (AND) at least one search field (OR).
    # NOTE: the reduce seed repeats search_fields[0], so the first field
    # appears twice in the OR clause — redundant but harmless.
    for t in [t for t in term.split(' ') if not t == '']:
        select &= reduce(lambda x, y: x | Q(**{y: t}), search_fields,
                         Q(**{search_fields[0]: t}))
    if dependent_fields:
        select &= Q(**dependent_fields)
    return queryset.filter(select).distinct()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_search_fields(self):
    """Return the configured list of model lookup names."""
    if not self.search_fields:
        # Subclasses must declare the fields the search term applies to.
        raise NotImplementedError('%s, must implement "search_fields".' % self.__class__.__name__)
    return self.search_fields
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def optgroups(self, name, value, attrs=None):
    """Return only selected options and set QuerySet from `ModelChoicesIterator`.

    Only currently-selected options are rendered; all other choices are
    loaded lazily via AJAX.
    """
    default = (None, [], 0)
    groups = [default]
    has_selected = False
    selected_choices = {str(v) for v in value}
    if not self.is_required and not self.allow_multiple_selected:
        default[1].append(self.create_option(name, '', '', False, 0))
    if not isinstance(self.choices, ModelChoiceIterator):
        return super(ModelSelect2Mixin, self).optgroups(name, value, attrs=attrs)
    # Drop empty values ('' / None) before querying the database.
    selected_choices = {
        c for c in selected_choices
        if c not in self.choices.field.empty_values
    }
    field_name = self.choices.field.to_field_name or 'pk'
    query = Q(**{'%s__in' % field_name: selected_choices})
    for obj in self.choices.queryset.filter(query):
        option_value = self.choices.choice(obj)[0]
        option_label = self.label_from_instance(obj)
        selected = (
            str(option_value) in value and
            (has_selected is False or self.allow_multiple_selected)
        )
        if selected is True and has_selected is False:
            has_selected = True
        index = len(default[1])
        subgroup = default[1]
        # FIX: pass the per-option ``selected`` flag. The previous code
        # passed ``selected_choices`` (a set), which marked every rendered
        # option as selected whenever any selection existed.
        subgroup.append(self.create_option(name, option_value, option_label, selected, index))
    return groups
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_queryset(self):
    """Get QuerySet from cached widget."""
    params = self.request.GET
    # Collect non-empty dependent-field values from the query string,
    # mapped from form field names to model field names.
    dependent = {}
    for form_field, model_field in self.widget.dependent_fields.items():
        if form_field in params and params.get(form_field, '') != '':
            dependent[model_field] = params.get(form_field)
    return self.widget.filter_queryset(self.request, self.term, self.queryset, **dependent)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_widget_or_404(self):
    """Get and return widget from cache.

    Raises:
        Http404: If the widget can not be found or no id is provided.

    Returns:
        ModelSelect2Mixin: Widget from cache.
    """
    field_id = self.kwargs.get('field_id', self.request.GET.get('field_id', None))
    if not field_id:
        raise Http404('No "field_id" provided.')
    try:
        # field_id is a signed token produced by the widget; reject tampering.
        key = signing.loads(field_id)
    except BadSignature:
        raise Http404('Invalid "field_id".')
    else:
        cache_key = '%s%s' % (settings.SELECT2_CACHE_PREFIX, key)
        widget_dict = cache.get(cache_key)
        if widget_dict is None:
            raise Http404('field_id not found')
        # Refuse tokens issued for a different view's URL.
        if widget_dict.pop('url') != self.request.path:
            raise Http404('field_id was issued for the view.')
        # Rebuild the QuerySet from the cached (empty clone, query) pair.
        qs, qs.query = widget_dict.pop('queryset')
        self.queryset = qs.all()
        widget_dict['queryset'] = self.queryset
        widget_cls = widget_dict.pop('cls')
        return widget_cls(**widget_dict)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load_widget(path):
    """Load custom widget for the form field.

    ``path`` is a dotted path; everything before the final dot is the
    module, the final component is the attribute to fetch.
    """
    dot = path.rfind('.')
    module_path, widget_name = path[:dot], path[dot + 1:]
    try:
        module = import_module(module_path)
    except (ImportError, ValueError) as e:
        error_message = 'Error importing widget for BleachField %s: "%s"'
        raise ImproperlyConfigured(error_message % (path, e))
    try:
        return getattr(module, widget_name)
    except AttributeError:
        raise ImproperlyConfigured(
            'Module "%s" does not define a "%s" widget' % (module_path, widget_name)
        )
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_default_widget():
    """Get the default widget or the widget defined in settings."""
    if hasattr(settings, 'BLEACH_DEFAULT_WIDGET'):
        # A project-level override takes precedence over the Textarea.
        return load_widget(settings.BLEACH_DEFAULT_WIDGET)
    return forms.Textarea
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def to_python(self, value):
    """Strips any dodgy HTML tags from the input."""
    if value not in self.empty_values:
        return bleach.clean(value, **self.bleach_options)
    try:
        return self.empty_value
    except AttributeError:
        # CharField.empty_value was introduced in Django 1.11; in prior
        # versions a unicode string was returned for empty values in
        # all cases.
        return u''
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def build_dynamic_field(self, group, field_meta):
    """Builds a form-field description based on JIRA's meta field information.

    Returns a dict of field kwargs (label, required, type, choices, ...)
    or None for field types that are not supported.
    """
    schema = field_meta['schema']
    # set up some defaults for form fields
    fieldtype = 'text'
    fkwargs = {
        'label': field_meta['name'],
        'required': field_meta['required'],
    }
    # override defaults based on field configuration
    if (schema['type'] in ['securitylevel', 'priority']
            or schema.get('custom') == JIRA_CUSTOM_FIELD_TYPES['select']):
        fieldtype = 'select'
        fkwargs['choices'] = self.make_choices(field_meta.get('allowedValues'))
    elif field_meta.get('autoCompleteUrl') and \
            (schema.get('items') == 'user' or schema['type'] == 'user'):
        # User pickers are served through a Sentry autocomplete proxy
        # endpoint that forwards to JIRA's autoCompleteUrl.
        fieldtype = 'select'
        sentry_url = '/api/0/issues/%s/plugins/%s/autocomplete' % (group.id, self.slug)
        fkwargs['url'] = '%s?jira_url=%s' % (
            sentry_url, quote_plus(field_meta['autoCompleteUrl']),
        )
        fkwargs['has_autocomplete'] = True
        fkwargs['placeholder'] = 'Start typing to search for a user'
    elif schema['type'] in ['timetracking']:
        # TODO: Implement timetracking (currently unsupported altogether)
        return None
    elif schema.get('items') in ['worklog', 'attachment']:
        # TODO: Implement worklogs and attachments someday
        return None
    elif schema['type'] == 'array' and schema['items'] != 'string':
        # Multi-valued fields (other than plain strings) become
        # multi-selects over the allowed values.
        fieldtype = 'select'
        fkwargs.update(
            {
                'multiple': True,
                'choices': self.make_choices(field_meta.get('allowedValues')),
                'default': []
            }
        )
    # break this out, since multiple field types could additionally
    # be configured to use a custom property instead of a default.
    if schema.get('custom'):
        if schema['custom'] == JIRA_CUSTOM_FIELD_TYPES['textarea']:
            fieldtype = 'textarea'
    fkwargs['type'] = fieldtype
    return fkwargs
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create_issue(self, request, group, form_data, **kwargs):
    """Creates the issue on the remote service and returns an issue ID."""
    instance = self.get_option('instance', group.project)
    # Fall back to the project's configured default when the form omits it.
    project = (
        form_data.get('project') or
        self.get_option('default_project', group.project)
    )
    client = self.get_client(request.user)
    title = form_data['title']
    description = form_data['description']
    link = absolute_uri(group.get_absolute_url(params={'referrer': 'vsts_plugin'}))
    try:
        created_item = client.create_work_item(
            instance=instance,
            project=project,
            title=title,
            # The remote comment field accepts HTML; render the markdown.
            comment=markdown(description),
            link=link,
        )
    except Exception as e:
        # raise_error re-raises as a plugin-specific error type.
        self.raise_error(e, identity=client.auth)
    return {
        'id': created_item['id'],
        'url': created_item['_links']['html']['href'],
        'title': title,
    }
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _format_date_param(params, key, format="%Y-%m-%d %H:%M:%S"):
""" Utility function to convert datetime values to strings. If the value is already a str, or is not in the dict, no change is made. :param params: A `dict` of params that may contain a `datetime` value. :param key: The datetime value to be converted to a `str` :param format: The `strftime` format to be used to format the date. The default value is '%Y-%m-%d %H:%M:%S' """ |
if key in params:
param = params[key]
if hasattr(param, "strftime"):
params[key] = param.strftime(format) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def submit_sms_conversion(self, message_id, delivered=True, timestamp=None):
    """Notify Nexmo that an SMS was successfully received.

    :param message_id: The `message-id` str returned by the send_message call.
    :param delivered: A `bool` indicating that the message was or was not
        successfully delivered.
    :param timestamp: A `datetime` object containing the time the SMS arrived.
    :return: The parsed response from the server. On success, the
        bytestring b'OK'.
    """
    # Default to the current UTC time when no timestamp was supplied.
    when = timestamp or datetime.now(pytz.utc)
    params = {
        "message-id": message_id,
        "delivered": delivered,
        "timestamp": when,
    }
    # Ensure timestamp is a string:
    _format_date_param(params, "timestamp")
    return self.post(self.api_host, "/conversions/sms", params)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_stripped_file_lines(filename):
    """Return lines of a file with whitespace removed.

    Calls ``fatal`` (expected not to return) if the file cannot be opened.
    """
    try:
        # Context manager guarantees the handle is closed; the previous
        # implementation left the file open until garbage collection.
        with open(filename) as f:
            lines = f.readlines()
    except FileNotFoundError:
        fatal("Could not open file: {!r}".format(filename))
    return [line.strip() for line in lines]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create_settings(from_environment=False, locustfile=None,
                    classes=None, host=None, num_clients=None,
                    hatch_rate=None, reset_stats=False, run_time="3m"):
    '''
    Returns a settings object to be used by a LocalLocustRunner.
    Arguments
    from_environment: get settings from environment variables
    locustfile: locustfile to use for loadtest
    classes: locust classes to use for load test
    host: host for load testing
    num_clients: number of clients to simulate in load test
    hatch_rate: number of clients per second to start
    reset_stats: Whether to reset stats after all clients are hatched
    run_time: The length of time to run the test for. Cannot exceed the duration limit set by lambda
    If from_environment is set to True then this function will attempt to set
    the attributes from environment variables. The environment variables are
    named LOCUST_ + attribute name in upper case.
    '''
    # Anonymous attribute bag; locust expects a plain options namespace.
    settings = type('', (), {})()
    settings.from_environment = from_environment
    settings.locustfile = locustfile
    settings.classes = classes
    settings.host = host
    settings.num_clients = num_clients
    settings.hatch_rate = hatch_rate
    settings.reset_stats = reset_stats
    settings.run_time = run_time
    # Default settings that are not to be changed
    settings.no_web = True
    settings.master = False
    settings.show_task_ratio_json = False
    settings.list_commands = False
    settings.loglevel = 'INFO'
    settings.slave = False
    settings.only_summary = True
    settings.logfile = None
    settings.show_task_ratio = False
    settings.print_stats = False
    if from_environment:
        # Environment variables override any explicitly passed values.
        for attribute in ['locustfile', 'classes', 'host', 'run_time', 'num_clients', 'hatch_rate']:
            var_name = 'LOCUST_{0}'.format(attribute.upper())
            var_value = os.environ.get(var_name)
            if var_value:
                setattr(settings, attribute, var_value)
    # Exactly one of locustfile / classes must be provided.
    if settings.locustfile is None and settings.classes is None:
        raise Exception('One of locustfile or classes must be specified')
    if settings.locustfile and settings.classes:
        raise Exception('Only one of locustfile or classes can be specified')
    if settings.locustfile:
        # Load every locust class defined in the given locustfile.
        docstring, classes = load_locustfile(settings.locustfile)
        settings.classes = [classes[n] for n in classes]
    else:
        if isinstance(settings.classes, str):
            settings.classes = settings.classes.split(',')
        for idx, val in enumerate(settings.classes):
            # This needs fixing
            # SECURITY NOTE(review): eval on a configuration-supplied class
            # name executes arbitrary code; replace with an import-based
            # lookup when possible.
            settings.classes[idx] = eval(val)
    # Validate required attributes and coerce numeric strings to ints.
    for attribute in ['classes', 'host', 'num_clients', 'hatch_rate']:
        val = getattr(settings, attribute, None)
        if not val:
            raise Exception('configuration error, attribute not set: {0}'.format(attribute))
        if isinstance(val, str) and val.isdigit():
            setattr(settings, attribute, int(val))
    return settings
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_lambda_runtime_info(context):
    '''
    Returns a dictionary of information about the AWS Lambda function
    invocation.
    Arguments
    context: The context object from AWS Lambda.
    '''
    # Straight mapping from the Lambda context object's documented fields.
    return {
        'remaining_time': context.get_remaining_time_in_millis(),
        'function_name': context.function_name,
        'function_version': context.function_version,
        'invoked_function_arn': context.invoked_function_arn,
        'memory_limit': context.memory_limit_in_mb,
        'aws_request_id': context.aws_request_id,
        'log_group_name': context.log_group_name,
        'log_stream_name': context.log_stream_name,
    }
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def solvedbi_sm(ah, rho, b, c=None, axis=4):
    r"""Solve a diagonal block linear system with a scaled identity term
    using the Sherman-Morrison equation.

    The solution is obtained by independently solving a set of linear
    systems of the form (see :cite:`wohlberg-2016-efficient`)

    .. math::
      (\rho I + \mathbf{a} \mathbf{a}^H ) \; \mathbf{x} = \mathbf{b} \;\;.

    In this equation inner products and matrix products are taken along
    the specified axis of the corresponding multi-dimensional arrays; the
    solutions are independent over the other axes.

    Parameters
    ----------
    ah : array_like
      Linear system component :math:`\mathbf{a}^H`
    rho : float
      Linear system parameter :math:`\rho`
    b : array_like
      Linear system component :math:`\mathbf{b}`
    c : array_like, optional (default None)
      Solution component :math:`\mathbf{c}` that may be pre-computed using
      :func:`solvedbi_sm_c` and cached for re-use.
    axis : int, optional (default 4)
      Axis along which to solve the linear system

    Returns
    -------
    x : ndarray
      Linear system solution :math:`\mathbf{x}`
    """
    a = np.conj(ah)
    if c is None:
        c = solvedbi_sm_c(ah, a, rho, axis)
    if have_numexpr:
        # numexpr evaluates the elementwise expression in a single pass,
        # avoiding intermediate temporaries.
        cb = inner(c, b, axis=axis)
        return ne.evaluate('(b - (a * cb)) / rho')
    else:
        return (b - (a * inner(c, b, axis=axis))) / rho
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def solvedbd_sm(ah, d, b, c=None, axis=4):
    r"""Solve a diagonal block linear system with a diagonal term using the
    Sherman-Morrison equation.

    The solution is obtained by independently solving a set of linear
    systems of the form (see :cite:`wohlberg-2016-efficient`)

    .. math::
      (\mathbf{d} + \mathbf{a} \mathbf{a}^H ) \; \mathbf{x} = \mathbf{b} \;\;.

    In this equation inner products and matrix products are taken along
    the specified axis of the corresponding multi-dimensional arrays; the
    solutions are independent over the other axes.

    Parameters
    ----------
    ah : array_like
      Linear system component :math:`\mathbf{a}^H`
    d : array_like
      Linear system parameter :math:`\mathbf{d}`
    b : array_like
      Linear system component :math:`\mathbf{b}`
    c : array_like, optional (default None)
      Solution component :math:`\mathbf{c}` that may be pre-computed using
      :func:`solvedbd_sm_c` and cached for re-use.
    axis : int, optional (default 4)
      Axis along which to solve the linear system

    Returns
    -------
    x : ndarray
      Linear system solution :math:`\mathbf{x}`
    """
    a = np.conj(ah)
    if c is None:
        c = solvedbd_sm_c(ah, a, d, axis)
    if have_numexpr:
        # numexpr evaluates the elementwise expression in a single pass,
        # avoiding intermediate temporaries.
        cb = inner(c, b, axis=axis)
        return ne.evaluate('(b - (a * cb)) / d')
    else:
        return (b - (a * inner(c, b, axis=axis))) / d
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def Gax(x, ax):
    """Compute gradient of `x` along axis `ax`.

    Forward difference with a zero boundary at the end of the axis.

    Parameters
    ----------
    x : array_like
      Input array
    ax : int
      Axis on which gradient is to be computed

    Returns
    -------
    xg : ndarray
      Output array
    """
    grad = np.roll(x, -1, axis=ax) - x
    # Zero out the wrapped-around last entry along the differencing axis.
    last = tuple(slice(None) for _ in range(ax)) + (slice(-1, None),)
    grad[last] = 0.0
    return grad
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def GTax(x, ax):
    """Compute the transpose (adjoint) of the gradient of `x` along axis
    `ax`.

    Parameters
    ----------
    x : array_like
      Input array
    ax : int
      Axis on which the gradient transpose is to be computed

    Returns
    -------
    xg : ndarray
      Output array
    """

    pre = (slice(None),) * ax
    first = pre + (slice(0, 1),)
    last = pre + (slice(-1, None),)
    out = np.roll(x, 1, axis=ax) - x
    # Boundary corrections for the adjoint of the forward difference
    out[first] = -x[first]
    out[last] = x[pre + (slice(-2, -1),)]
    return out
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def GradientFilters(ndim, axes, axshp, dtype=None):
    r"""Construct a set of filters for computing gradients in the
    frequency domain.

    Parameters
    ----------
    ndim : int
      Total number of dimensions in the array in which gradients are to
      be computed
    axes : tuple of int
      Axes on which gradients are to be computed
    axshp : tuple of int
      Shape of the axes on which gradients are to be computed
    dtype : dtype, optional (default np.float32)
      Data type of output arrays

    Returns
    -------
    Gf : ndarray
      Frequency domain gradient operators :math:`\hat{G}_i`
    GHGf : ndarray
      Sum of products :math:`\sum_i \hat{G}_i^H \hat{G}_i`
    """

    if dtype is None:
        dtype = np.float32
    # Spatial-domain difference kernels, one per gradient axis, stacked
    # along a trailing axis of length len(axes)
    shape = [2 if k in axes else 1 for k in range(ndim)] + [len(axes)]
    g = np.zeros(shape, dtype)
    for k in axes:
        idx = (0,) * k + (slice(None),) + (0,) * (g.ndim - 2 - k) + (k,)
        g[idx] = np.array([1, -1])
    # Transform the difference kernels into the frequency domain
    Gf = rfftn(g, axshp, axes=axes)
    GHGf = np.sum(np.conj(Gf) * Gf, axis=-1).real
    return Gf, GHGf
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def promote16(u, fn=None, *args, **kwargs):
    r"""Utility wrapper for functions without ``np.float16`` support.

    If called with only `u`, return `u` promoted to ``np.float32`` when
    it is of dtype ``np.float16``, and `u` unchanged otherwise. If `fn`
    is specified, `u` is conditionally promoted as above, passed as the
    first argument to `fn` (together with ``*args`` and ``**kwargs``),
    and the returned value(s) are converted back to the dtype of `u`.

    Parameters
    ----------
    u : array_like
      Array to be promoted to np.float32 if it is of dtype np.float16
    fn : function or None, optional (default None)
      Function to be called with the promoted `u` as first parameter
    *args
      Additional positional arguments for `fn`
    **kwargs
      Keyword arguments for `fn`

    Returns
    -------
    up : ndarray
      Conditionally promoted `u` if `fn` is None, otherwise the value(s)
      returned by `fn` converted back to the dtype of `u`
    """

    target = np.float32 if u.dtype == np.float16 else u.dtype
    promoted = np.asarray(u, dtype=target)
    if fn is None:
        return promoted
    result = fn(promoted, *args, **kwargs)
    # Demote result(s) back to the caller's dtype, handling functions
    # with multiple return values
    if isinstance(result, tuple):
        return tuple(np.asarray(r, dtype=u.dtype) for r in result)
    return np.asarray(result, dtype=u.dtype)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def LaplaceCentreWeight(self):
    """Centre weighting matrix for the TV Laplacian.

    Returns an array, broadcastable to the shape of ``self.S``, whose
    entries are ``2 * len(self.axes)`` in the interior, reduced by one
    for each axis boundary on which an entry lies.
    """

    # Singleton shape on non-TV axes so the result broadcasts over them
    shape = [self.S.shape[ax] if ax in self.axes else 1
             for ax in range(self.S.ndim)]
    wt = 2 * len(self.axes) * np.ones(shape, dtype=self.dtype)
    # Boundary samples have one fewer neighbour per axis
    for ax in self.axes:
        wt[(slice(None),) * ax + ([0, -1],)] -= 1.0
    return wt
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def GaussSeidelStep(self, S, X, ATYU, rho, lcw, W2):
    """Gauss-Seidel step for the linear system in the TV problem."""

    # Sum of neighbouring samples of X along each TV axis, obtained by
    # zero-padded shifts in both directions
    nbrsum = np.zeros_like(S, dtype=self.dtype)
    for ax in self.axes:
        sel = (slice(None),) * ax
        nbrsum += sl.zpad(X[sel + (slice(0, -1),)], (1, 0), ax)
        nbrsum += sl.zpad(X[sel + (slice(1, None),)], (0, 1), ax)
    # Weighted combination of the neighbour sum, data term and weights
    return (rho * (nbrsum + ATYU) + W2 * S) / (W2 + rho * lcw)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def runtime(self):
    """Transitional property providing access to the new timer
    mechanism. This will be removed in the future.
    """

    warnings.warn("admm.ADMM.runtime attribute has been replaced by "
                  "an upgraded timer class: please see the documentation "
                  "for admm.ADMM.solve method and util.Timer class",
                  PendingDeprecationWarning)
    # Total runtime is initialisation time plus solve time
    total = self.timer.elapsed('init') + self.timer.elapsed('solve')
    return total
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ustep(self):
"""Dual variable update: add the primal residual to the scaled dual variable.""" |
# Standard scaled-dual ADMM ascent step; the in-place += preserves any
# external references to self.U
self.U += self.rsdl_r(self.AX, self.Y)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def update_rho(self, k, r, s):
    """Automatic rho adjustment by residual balancing.

    Parameters
    ----------
    k : int
      Iteration number
    r : float
      Primal residual
    s : float
      Dual residual
    """

    if not self.opt['AutoRho', 'Enabled']:
        return
    tau = self.rho_tau
    mu = self.rho_mu
    xi = self.rho_xi
    # Only adapt every 'Period' iterations, and never on iteration zero
    if k == 0 or np.mod(k + 1, self.opt['AutoRho', 'Period']) != 0:
        return
    if self.opt['AutoRho', 'AutoScaling']:
        # Scale the multiplier by the residual ratio, capped at tau
        if s == 0.0 or r == 0.0:
            mult = tau
        else:
            mult = np.sqrt(r / (s * xi) if r > s * xi else (s * xi) / r)
            if mult > tau:
                mult = tau
    else:
        mult = tau
    scale = 1.0
    if r > xi * mu * s:
        # Primal residual dominates: increase rho
        scale = mult
    elif s > (mu / xi) * r:
        # Dual residual dominates: decrease rho
        scale = 1.0 / mult
    self.rho *= self.dtype.type(scale)
    # Rescale the scaled dual variable to match the new rho
    self.U /= scale
    if scale != 1.0:
        self.rhochange()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def display_status(self, fmtstr, itst):
    """Display the current iteration status as a selection of fields
    from the iteration stats tuple `itst`, formatted via `fmtstr`.
    """

    if not self.opt['Verbose']:
        return
    cls = type(self)
    hdrtxt = cls.hdrtxt()
    hdrval = cls.hdrval()
    values = tuple(getattr(itst, hdrval[col]) for col in hdrtxt)
    # Drop the trailing rho field when automatic rho is disabled
    if not self.opt['AutoRho', 'Enabled']:
        values = values[0:-1]
    print(fmtstr % values)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def rsdl_sn(self, U):
    r"""Compute the dual residual normalisation term
    :math:`\rho \| A^T U \|_2`.

    Overriding this method is required if methods :meth:`cnst_A`,
    :meth:`cnst_AT`, :meth:`cnst_B`, and :meth:`cnst_c` are not
    overridden.
    """

    atu = self.cnst_AT(U)
    return self.rho * np.linalg.norm(atu)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def getmin(self):
    """Get the minimiser following optimisation.

    Returns the variable selected by the 'ReturnVar' option ('X',
    'Y0', or 'Y1').

    Raises
    ------
    ValueError
      If option 'ReturnVar' has an unrecognised value
    """

    rvar = self.opt['ReturnVar']
    if rvar == 'X':
        return self.var_x()
    elif rvar == 'Y0':
        return self.var_y0()
    elif rvar == 'Y1':
        return self.var_y1()
    else:
        # Fixed: the original message was missing the space between
        # 'value' and 'for' ("...valid valuefor option...")
        raise ValueError(rvar + ' is not a valid value for option '
                         'ReturnVar')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cbpdnmsk_class_label_lookup(label):
    """Get a ConvBPDNMask solver class from a label string.

    Raises
    ------
    ValueError
      If `label` is not a recognised solver method name
    """

    classes = {'admm': admm_cbpdn.ConvBPDNMaskDcpl,
               'fista': fista_cbpdn.ConvBPDNMask}
    # Guard clause: unknown labels are an error
    if label not in classes:
        raise ValueError('Unknown ConvBPDNMask solver method %s' % label)
    return classes[label]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ConvBPDNMaskOptionsDefaults(method='admm'):
    """Get a defaults dict for the ConvBPDNMask class selected by the
    ``method`` parameter ('admm' or 'fista').
    """

    # Deep copy so that callers may mutate the returned dict freely
    defaults = copy.deepcopy(
        cbpdnmsk_class_label_lookup(method).Options.defaults)
    if method == 'admm':
        extra = {'MaxMainIter': 1,
                 'AutoRho': {'Period': 10, 'AutoScaling': False,
                             'RsdlRatio': 10.0, 'Scaling': 2.0,
                             'RsdlTarget': 1.0}}
    else:
        extra = {'MaxMainIter': 1,
                 'BackTrack': {'gamma_u': 1.2, 'MaxIter': 50}}
    defaults.update(extra)
    return defaults
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ccmodmsk_class_label_lookup(label):
    """Get a ConvCnstrMODMask solver class from a label string.

    Raises
    ------
    ValueError
      If `label` is not a recognised solver method name
    """

    classes = {'ism': admm_ccmod.ConvCnstrMODMaskDcpl_IterSM,
               'cg': admm_ccmod.ConvCnstrMODMaskDcpl_CG,
               'cns': admm_ccmod.ConvCnstrMODMaskDcpl_Consensus,
               'fista': fista_ccmod.ConvCnstrMODMask}
    # Guard clause: unknown labels are an error
    if label not in classes:
        raise ValueError('Unknown ConvCnstrMODMask solver method %s' % label)
    return classes[label]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ConvCnstrMODMaskOptionsDefaults(method='fista'):
    """Get a defaults dict for the ConvCnstrMODMask class selected by
    the ``method`` parameter ('ism', 'cg', 'cns', or 'fista').
    """

    # Deep copy so that callers may mutate the returned dict freely
    defaults = copy.deepcopy(
        ccmodmsk_class_label_lookup(method).Options.defaults)
    if method == 'fista':
        extra = {'MaxMainIter': 1,
                 'BackTrack': {'gamma_u': 1.2, 'MaxIter': 50}}
    else:
        extra = {'MaxMainIter': 1,
                 'AutoRho': {'Period': 10, 'AutoScaling': False,
                             'RsdlRatio': 10.0, 'Scaling': 2.0,
                             'RsdlTarget': 1.0}}
    defaults.update(extra)
    return defaults
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def pathsplit(pth, dropext=True):
    """Split a path into a tuple of all of its components.

    Parameters
    ----------
    pth : string
      Path to split
    dropext : bool, optional (default True)
      If True, drop the extension from the final component

    Returns
    -------
    parts : tuple
      Tuple of path components
    """

    if dropext:
        pth = os.path.splitext(pth)[0]
    head, tail = os.path.split(pth)
    if head == '':
        # No directory component remains
        return (tail,)
    if len(head) == 1:
        # Single-character head (e.g. the filesystem root '/'); stop
        # recursing to avoid looping forever on '/'
        return (head, tail)
    return pathsplit(head, dropext=False) + (tail,)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def update_required(srcpth, dstpth):
    """Determine whether the file at `dstpth`, generated from the file
    at `srcpth`, needs regenerating.

    Returns True if `dstpth` does not exist, or if `srcpth` has been
    modified more recently than `dstpth`.
    """

    if not os.path.exists(dstpth):
        return True
    return os.stat(srcpth).st_mtime > os.stat(dstpth).st_mtime
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def read_sphinx_environment(pth):
    """Read and return the sphinx environment.pickle object at path
    `pth`.
    """

    with open(pth, 'rb') as fh:
        return pickle.load(fh)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_rst_index(rstpth):
    """Parse the top-level RST index file, at `rstpth`, for the example
    python scripts.

    Returns a list of subdirectories in order of appearance in the
    index file, and a dict mapping subdirectory name to a description.
    """

    pthlst = []
    pthidx = {}
    with open(rstpth) as fd:
        lines = fd.readlines()
    # A subdirectory entry is a name line (starting with a word
    # character) immediately followed by an indented description line
    for i in range(1, len(lines)):
        if re.match(r'^ \w+', lines[i]) and \
                re.match(r'^\w+', lines[i - 1]):
            name = lines[i - 1][:-1]
            # List of subdirectories in order of appearance
            pthlst.append(name)
            # Map subdirectory name to its description text
            pthidx[name] = lines[i][2:-1]
    return pthlst, pthidx
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def preprocess_script_string(str):
    """Process a python script, represented as the string `str`, in
    preparation for conversion to a notebook.

    The processing removes the header comment, modifies the plotting
    configuration, and strips the final "wait for enter" prompt.
    """

    # Remove the five-line header comment at the top of the script
    txt = re.sub(r'^(#[^#\n]+\n){5}\n*', r'', str)
    # Insert the notebook plotting configuration call after the plot
    # module import
    txt = re.sub(r'from sporco import plot', r'from sporco import plot'
                 '\nplot.config_notebook_plotting()',
                 txt, flags=re.MULTILINE)
    # Remove the final input statement and its preceding comment
    txt = re.sub(r'\n*# Wait for enter on keyboard.*\ninput().*\n*',
                 r'', txt, flags=re.MULTILINE)
    return txt
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def script_string_to_notebook(str, pth):
    """Convert a python script represented as string `str` to a
    notebook, written to filename `pth`.
    """

    notebook = py2jn.py_string_to_notebook(str)
    py2jn.write_notebook(notebook, pth)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def script_to_notebook(spth, npth, cr):
    """Convert the script at `spth` to a notebook at `npth`. Parameter
    `cr` is a CrossReferenceLookup object.
    """

    # Read and pre-process the full text of the example script
    with open(spth) as f:
        stxt = preprocess_script_string(f.read())
    # If the notebook file does not exist or has not been executed,
    # simply regenerate it from the script text
    if not (os.path.exists(npth) and notebook_executed(npth)):
        script_string_to_notebook_with_links(stxt, npth, cr)
        return
    # The notebook exists and has been executed: try to update markdown
    # cells without deleting output cells. Read the current notebook
    nbold = nbformat.read(npth, as_version=4)
    # Construct the updated notebook
    nbnew = script_string_to_notebook_object(stxt)
    if cr is not None:
        notebook_substitute_ref_with_url(nbnew, cr)
    # Only merge when the code cells are unchanged; otherwise the
    # notebook must be regenerated
    if same_notebook_code(nbnew, nbold):
        try:
            replace_markdown_cells(nbnew, nbold)
        except Exception:
            script_string_to_notebook_with_links(stxt, npth, cr)
        else:
            with open(npth, 'wt') as f:
                nbformat.write(nbold, f)
    else:
        script_string_to_notebook_with_links(stxt, npth, cr)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def script_string_to_notebook_with_links(str, pth, cr=None):
    """Convert a python script represented as string `str` to a
    notebook with filename `pth`, replacing sphinx cross-references
    with links to the online docs. Parameter `cr` is a
    CrossReferenceLookup object.
    """

    if cr is None:
        # No cross-reference lookup available: plain conversion
        script_string_to_notebook(str, pth)
        return
    ntbk = script_string_to_notebook_object(str)
    notebook_substitute_ref_with_url(ntbk, cr)
    with open(pth, 'wt') as f:
        nbformat.write(ntbk, f)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def rst_to_notebook(infile, outfile):
    """Convert an rst file at path `infile` to a notebook file at path
    `outfile`.
    """

    # Read infile into a string
    with open(infile, 'r') as fin:
        rststr = fin.read()
    # Convert string from rst to markdown
    mdfmt = 'markdown_github+tex_math_dollars+fenced_code_attributes'
    mdstr = pypandoc.convert_text(rststr, mdfmt, format='rst',
                                  extra_args=['--atx-headers'])
    # In links, replace .py extensions with .ipynb. NB: the dot is
    # escaped (the original pattern '(...).py' allowed any character,
    # so e.g. '(abcXpy)' would be erroneously rewritten)
    mdstr = re.sub(r'\(([^\)]+)\.py\)', r'(\1.ipynb)', mdstr)
    # Enclose the markdown within triple quotes and convert from
    # python to notebook
    nb = py2jn.py_string_to_notebook('"""' + mdstr + '"""')
    py2jn.tools.write_notebook(nb, outfile, nbver=4)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def markdown_to_notebook(infile, outfile):
    """Convert a markdown file at path `infile` to a notebook file at
    path `outfile`.
    """

    # Read the markdown source (local renamed to avoid shadowing the
    # builtin 'str')
    with open(infile, 'r') as fin:
        mdtxt = fin.read()
    # Enclose the markdown within triple quotes and convert from
    # python to notebook
    nb = py2jn.py_string_to_notebook('"""' + mdtxt + '"""')
    py2jn.tools.write_notebook(nb, outfile, nbver=4)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def rst_to_docs_rst(infile, outfile):
    """Convert the rst file at `infile` to a sphinx docs rst file at
    `outfile`, expanding ``.. toc-start`` / ``.. toc-end`` sections
    into toctree directives.
    """

    # Read infile into a list of lines
    with open(infile, 'r') as fin:
        rst = fin.readlines()
    # Inspect outfile path components to determine whether outfile is
    # in the root of the examples directory or in a subdirectory
    # thereof
    ps = pathsplit(outfile)[-3:]
    if ps[-2] == 'examples':
        # Root index: toc entries link to per-subdirectory index files
        ps = ps[-2:]
        idx = 'index'
    else:
        # Subdirectory index: toc entries link directly to scripts
        idx = ''
    # Output string starts with a cross-reference anchor constructed
    # from the file name and path
    out = '.. _' + '_'.join(ps) + ':\n\n'
    # Iterate over input lines; the iterator is shared with the inner
    # toc-collection loop so lines are consumed exactly once
    it = iter(rst)
    for line in it:
        if line[0:12] != '.. toc-start':
            # Ordinary line: copy through unchanged
            out += line
            continue
        # Collect (label, target) pairs until the end-of-toc marker
        toc = []
        for line in it:
            if line == '\n':  # Drop newline lines
                continue
            if line[0:10] == '.. toc-end':
                # Emit the collected entries as a toctree section
                out += '.. toctree::\n :maxdepth: 1\n\n'
                for c in toc:
                    out += ' %s <%s>\n' % c
                break
            # Extract link text and target url. NB: the '.' in the
            # optional extension group is now escaped; the original
            # '(?:.py)?' could strip e.g. 'ppy' from targets ending
            # in 'py'
            m = re.search(r'`(.*?)\s*<(.*?)(?:\.py)?>`', line)
            if m:
                target = m.group(2) if idx == '' else \
                    os.path.join(m.group(2), idx)
                toc.append((m.group(1), target))
    with open(outfile, 'w') as fout:
        fout.write(out)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_notebook_index(ntbkpth):
    """Parse the top-level notebook index file at `ntbkpth`.

    Returns a list of subdirectories in order of appearance in the
    index file, and a dict mapping subdirectory name to a description.
    """

    # Convert notebook to RST text in string
    rex = RSTExporter()
    rsttxt = rex.from_filename(ntbkpth)[0]
    # Clean up trailing whitespace. NB: the flags must be passed via
    # the 'flags' keyword; the original passed 're.M | re.S' as the
    # positional 'count' argument of re.sub, silently limiting the
    # number of substitutions
    rsttxt = re.sub(r'\n ', r'', rsttxt, flags=re.M | re.S)
    pthlst = []
    pthidx = {}
    for line in rsttxt.split('\n'):
        m = re.match(r'^-\s+`([^<]+)\s+<([^>]+).ipynb>`__', line)
        if m:
            # List of subdirectories in order of appearance in index.rst
            pthlst.append(m.group(2))
            # Dict mapping subdirectory name to description
            pthidx[m.group(2)] = m.group(1)
    return pthlst, pthidx
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def construct_notebook_index(title, pthlst, pthidx):
    """Construct a string containing a markdown format index for the
    list of paths in `pthlst`. The title for the index is in `title`,
    and `pthidx` is a dict giving label text for each path.
    """

    # Title cell followed by the start of the index cell
    txt = '"""\n## %s\n"""\n\n"""' % title
    for pth in pthlst:
        # A '.py' path refers to a script: link to its notebook
        # conversion. Otherwise assume a directory: link to its index
        # notebook
        if pth.endswith('.py'):
            link = os.path.splitext(pth)[0] + '.ipynb'
        else:
            link = os.path.join(pth, 'index.ipynb')
        txt += '- [%s](%s)\n' % (pthidx[pth], link)
    return txt + '"""'
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def notebook_executed(pth):
    """Determine whether the notebook at `pth` has been executed.

    Returns False if any code cell has no execution count, True
    otherwise.
    """

    nb = nbformat.read(pth, as_version=4)
    return all(cell.execution_count is not None
               for cell in nb['cells'] if cell.cell_type == 'code')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def same_notebook_code(nb1, nb2):
    """Return True if the code cells of notebook objects `nb1` and
    `nb2` are the same.
    """

    cells1 = nb1['cells']
    cells2 = nb2['cells']
    # Notebooks do not match if the numbers of cells differ
    if len(cells1) != len(cells2):
        return False
    for c1, c2 in zip(cells1, cells2):
        # Notebooks do not match if corresponding cells have different
        # types
        if c1['cell_type'] != c2['cell_type']:
            return False
        # Notebooks do not match if the sources of corresponding code
        # cells differ
        if c1['cell_type'] == 'code' and c1['source'] != c2['source']:
            return False
    return True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def execute_notebook(npth, dpth, timeout=1200, kernel='python3'):
    """Execute the notebook at `npth` using `dpth` as the execution
    directory. The execution timeout and kernel are `timeout` and
    `kernel` respectively. Returns the elapsed execution time in
    seconds.
    """

    ep = ExecutePreprocessor(timeout=timeout, kernel_name=kernel)
    nb = nbformat.read(npth, as_version=4)
    # Time the notebook execution
    t0 = timer()
    ep.preprocess(nb, {'metadata': {'path': dpth}})
    elapsed = timer() - t0
    # Write the executed notebook back to its original path
    with open(npth, 'wt') as f:
        nbformat.write(nb, f)
    return elapsed
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def replace_markdown_cells(src, dst):
    """Overwrite markdown cells in notebook object `dst` with the
    corresponding cells in notebook object `src`.

    Raises
    ------
    ValueError
      If the notebooks have different numbers of cells, or if any
      corresponding pair of cells have different types
    """

    # It is an error to attempt markdown replacement if src and dst
    # have different numbers of cells
    if len(src['cells']) != len(dst['cells']):
        raise ValueError('notebooks do not have the same number of cells')
    for n in range(len(src['cells'])):
        # It is an error to attempt markdown replacement if any
        # corresponding pair of cells have different type. NB: fixed
        # the original, which left the '%d' in the message unformatted
        # (the '% n' argument was missing)
        if src['cells'][n]['cell_type'] != dst['cells'][n]['cell_type']:
            raise ValueError(
                'cell number %d of different type in src and dst' % n)
        # If the current src cell is a markdown cell, copy it to the
        # corresponding dst cell
        if src['cells'][n]['cell_type'] == 'markdown':
            dst['cells'][n]['source'] = src['cells'][n]['source']
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def notebook_substitute_ref_with_url(ntbk, cr):
    """In the markdown cells of notebook object `ntbk`, replace sphinx
    cross-references with links to the online docs. Parameter `cr` is
    a CrossReferenceLookup object.
    """

    for cell in ntbk['cells']:
        # Only markdown cells are processed; code cells are untouched
        if cell['cell_type'] == 'markdown':
            cell['source'] = cr.substitute_ref_with_url(cell['source'])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def preprocess_notebook(ntbk, cr):
    """Process notebook object `ntbk` in preparation for conversion to
    an rst document, replacing links to the online docs with
    corresponding sphinx cross-references within the local docs.
    Parameter `cr` is a CrossReferenceLookup object.
    """

    for cell in ntbk['cells']:
        # Only markdown cells are processed; code cells are untouched
        if cell['cell_type'] == 'markdown':
            cell['source'] = cr.substitute_url_with_ref(cell['source'])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def write_notebook_rst(txt, res, fnm, pth):
    """Write the converted notebook text `txt` and resources `res` to
    filename `fnm` in directory `pth`.
    """

    # Extended filename used for output images
    extfnm = fnm + '_files'
    # Directory into which output images are written
    extpth = os.path.join(pth, extfnm)
    # Make output image directory if it doesn't exist
    mkdir(extpth)
    # Iterate over output images in resources dict
    for r in res['outputs'].keys():
        # New name for current output image
        rnew = re.sub('output', fnm, r)
        # Partial path for current output image
        rpth = os.path.join(extfnm, rnew)
        # In RST text, replace old output name with the new one. NB:
        # fixed two defects in the original call: re.M was passed as
        # the positional 'count' argument (limiting substitutions to
        # 8), and the image name was not regex-escaped
        txt = re.sub(r'\.\. image:: ' + re.escape(r),
                     '.. image:: ' + rpth, txt, flags=re.M)
        # Full path of the current output image
        fullrpth = os.path.join(pth, rpth)
        # Write the current output image to disk
        with open(fullrpth, 'wb') as fo:
            fo.write(res['outputs'][r])
    # Remove trailing whitespace in RST text
    txt = re.sub(r'[ \t]+$', '', txt, flags=re.M)
    # Write RST text to disk
    with open(os.path.join(pth, fnm + '.rst'), 'wt') as fo:
        fo.write(txt)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def notebook_to_rst(npth, rpth, rdir, cr=None):
    """Convert the notebook at `npth` to an rst document at `rpth`.
    Parameter `rdir` is retained for interface compatibility but is
    unused (the output directory is derived from `rpth`). Parameter
    `cr` is a CrossReferenceLookup object.
    """

    # Read the notebook file
    ntbk = nbformat.read(npth, nbformat.NO_CONVERT)
    # Convert notebook object to rst. NB: fixed the original call,
    # which passed four arguments (ntbk, rpth, rdir, cr) to
    # notebook_object_to_rst, whose signature is (ntbk, rpth, cr=None)
    # and which derives the output directory from rpth itself
    notebook_object_to_rst(ntbk, rpth, cr)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def notebook_object_to_rst(ntbk, rpth, cr=None):
    """Convert notebook object `ntbk` to an rst document at `rpth`;
    output images are written to a sibling directory derived from the
    file basename. Parameter `cr` is a CrossReferenceLookup object.
    """

    # Parent directory and extension-free basename of file rpth
    rdir = os.path.dirname(rpth)
    rb = os.path.splitext(os.path.basename(rpth))[0]
    # Pre-process notebook prior to conversion to rst
    if cr is not None:
        preprocess_notebook(ntbk, cr)
    # Convert notebook to rst
    rsttxt, rstres = RSTExporter().from_notebook_node(ntbk)
    # Replace `` with ` in sphinx cross-references
    rsttxt = re.sub(r':([^:]+):``(.*?)``', r':\1:`\2`', rsttxt)
    # Insert a cross-reference target at top of file
    reflbl = '.. _examples_' + os.path.basename(rdir) + '_' + \
        rb.replace('-', '_') + ':\n'
    rsttxt = reflbl + rsttxt
    # Write the converted rst to disk
    write_notebook_rst(rsttxt, rstres, rb, rdir)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def make_example_scripts_docs(spth, npth, rpth):
    """Generate rst docs from example scripts.

    Arguments `spth`, `npth`, and `rpth` are the top-level scripts
    directory, the top-level notebooks directory, and the top-level
    output directory within the docs respectively.
    """

    # Ensure that output directory exists
    mkdir(rpth)
    # Iterate over index files
    for fp in glob(os.path.join(spth, '*.rst')) + \
            glob(os.path.join(spth, '*', '*.rst')):
        # Index basename and dirname
        b = os.path.basename(fp)
        dn = os.path.dirname(fp)
        # Name of the examples subdirectory containing the current
        # index; '' when fp is the root directory index file
        if dn == spth:
            d = ''
        else:
            d = os.path.split(dn)[-1]
        # Path to corresponding subdirectory in docs directory
        fd = os.path.join(rpth, d)
        # Ensure docs subdirectory exists
        mkdir(fd)
        # Filename of index file to be constructed
        fn = os.path.join(fd, b)
        # Process current index file if corresponding docs index
        # doesn't exist, or is older than the index file
        if update_required(fp, fn):
            print('Converting %s            ' % os.path.join(d, b),
                  end='\r')
            # Convert script index to docs index
            rst_to_docs_rst(fp, fn)
    # Iterate over example scripts
    for fp in sorted(glob(os.path.join(spth, '*', '*.py'))):
        # Name of examples subdirectory containing the current script
        d = os.path.split(os.path.dirname(fp))[1]
        # Script basename
        b = os.path.splitext(os.path.basename(fp))[0]
        # Path to corresponding notebook
        fn = os.path.join(npth, d, b + '.ipynb')
        # Path to corresponding sphinx doc file
        fr = os.path.join(rpth, d, b + '.rst')
        # Only proceed if script and notebook exist
        if os.path.exists(fp) and os.path.exists(fn):
            # Convert notebook to rst if notebook is newer than rst
            # file or if rst file doesn't exist
            if update_required(fn, fr):
                fnb = os.path.join(d, b + '.ipynb')
                print('Processing %s            ' % fnb, end='\r')
                script_and_notebook_to_rst(fp, fn, fr)
        else:
            print('WARNING: script %s or notebook %s not found' %
                  (fp, fn))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_full_name(self, role, name):
    """Resolve `name` to a full object name for sphinx role `role`.

    If `name` is already a full name, return it unchanged (after
    checking that it is present in the inventory). If it is a partial
    name (indicated by an initial '.'), look up and return the unique
    full name matching it.

    Raises
    ------
    KeyError
      If the role is unknown, the name is not found, or a partial name
      matches more than one full name
    """

    if name[0] == '.':
        # Partial name: regex-search the concatenated full-name string
        # for this role
        matches = re.findall(r'(?<= )[^,]*' + name + r'(?=,)',
                             self.rolnam[role])
        # Exactly one match is required
        if len(matches) == 0:
            raise KeyError('name matching %s not found' % name,
                           'name', len(matches))
        if len(matches) > 1:
            raise KeyError('multiple names matching %s found' % name,
                           'name', len(matches))
        return matches[0]
    # Full name: check that it is present in the inventory for the
    # domain corresponding to this role
    try:
        dom = IntersphinxInventory.roledomain[role]
    except KeyError:
        raise KeyError('role %s not found' % role, 'role', 0)
    if name not in self.inv[dom]:
        raise KeyError('name %s not found' % name, 'name', 0)
    return name
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def matching_base_url(self, url):
    """Return True if the initial part of `url` matches the base url
    passed to the initialiser of this object, and False otherwise.
    """

    # Prefix comparison, equivalent to url[0:len(base)] == base
    return url.startswith(self.baseurl)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def inventory_maps(inv):
    """Construct dicts facilitating information lookup in an inventory
    dict.

    Returns a reversed dict mapping an intersphinx url postfix string
    to a (role, name) tuple, and a dict mapping each role to a single
    string of all names for that role, allowing regex searching for
    partial names.
    """

    revinv = {}
    rolnam = {}
    for dom in inv:
        # Since keys seem to be duplicated, ignore those not starting
        # with 'py:'
        if not (dom[0:3] == 'py:' and
                dom in IntersphinxInventory.domainrole):
            continue
        # Role corresponding to the current domain
        role = IntersphinxInventory.domainrole[dom]
        # Initialise the role-specific name lookup string
        rolnam[role] = ''
        for name in inv[dom]:
            # Url postfix string for the current domain and type name
            postfix = inv[dom][name][2]
            # Allow lookup of the (role, name) tuple from url postfix
            revinv[postfix] = (role, name)
            # Append name to the role string, comma-terminated so that
            # partial names can be regex-matched
            rolnam[role] += ' ' + name + ','
    return revinv, rolnam
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_docs_label(self, role, name):
    """Get an appropriate label to use in a link to the online docs."""
    if role == 'cite':
        # Link label is the citation label (number) used in the text,
        # enclosed in square brackets
        try:
            cstr = self.env.bibtex_cache.get_label_from_key(name)
        except Exception:
            raise KeyError('cite key %s not found' % name, 'cite', 0)
        return '[%s]' % cstr
    if role == 'ref':
        try:
            reftpl = self.env.domaindata['std']['labels'][name]
        except Exception:
            raise KeyError('ref label %s not found' % name, 'ref', 0)
        return reftpl[2]
    # Other roles: use the object name as label, omitting any initial '.'
    return name[1:] if name[0] == '.' else name
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def substitute_ref_with_url(self, txt):
    """In the string `txt`, replace sphinx cross-references with
    corresponding links to online docs.

    Parameters
    ----------
    txt : string
      Text in which to replace cross-references

    Returns
    -------
    string
      Text with cross-references replaced by markdown links
    """
    # Find sphinx cross-references of the form :role:`name`
    for mo in re.finditer(r':([^:]+):`([^`]+)`', txt):
        # Components of current match: full matching text, the role
        # label in the reference, and the name of the referenced type
        mtxt = mo.group(0)
        role = mo.group(1)
        name = mo.group(2)
        lbl = None
        # If role is 'ref', the name component is in the form
        # label <name>
        if role == 'ref':
            ma = re.match(r'\s*([^\s<]+)\s*<([^>]+)+>', name)
            if ma:
                name = ma.group(2)
                lbl = ma.group(1)
        # Try to look up the current cross-reference. Issue a warning
        # if the lookup fails, and do the substitution if it succeeds.
        try:
            url = self.get_docs_url(role, name)
            if role != 'ref':
                lbl = self.get_docs_label(role, name)
        except KeyError as ex:
            if len(ex.args) == 1 or ex.args[1] != 'role':
                print('Warning: %s' % ex.args[0])
        else:
            # Replace the cross-reference with a link to the online
            # docs. Use str.replace rather than re.sub (as the original
            # did) so that regex metacharacters in the matched text or
            # in the replacement (e.g. '.', '*', '(' in type names) are
            # treated literally; cf. the re.escape used in
            # substitute_url_with_ref.
            txt = txt.replace(mtxt, '[%s](%s)' % (lbl, url))
    return txt
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def substitute_url_with_ref(self, txt):
    """In the string `txt`, replace links to online docs with
    corresponding sphinx cross-references.
    """
    # Iterate over markdown-style links in the text; each match gives
    # the full matching text, the link label, and the postfix to the
    # base url in the link url
    for mo in re.finditer(r'\[([^\]]+|\[[^\]]+\])\]\(([^\)]+)\)', txt):
        mtxt = mo.group(0)
        lbl = mo.group(1)
        url = mo.group(2)
        # Try to look up the current link url. Issue a warning if the
        # lookup fails, and do the substitution if it succeeds.
        try:
            ref = self.get_sphinx_ref(url, lbl)
        except KeyError as ex:
            print('Warning: %s' % ex.args[0])
        else:
            txt = re.sub(re.escape(mtxt), ref, txt)
    return txt
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def obfn_fvarf(self):
    """Variable to be evaluated in computing data fidelity term,
    depending on 'fEvalX' option value.
    """
    if self.opt['fEvalX']:
        return self.Xf
    return sl.rfftn(self.Y, None, self.cri.axisN)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def rsdl(self):
    """Compute fixed point residual in Fourier domain."""
    # Norm of the difference between current and previous Fourier
    # coefficients, computed in the frequency domain
    return sl.rfl2norm2(self.Xf - self.Yfprv, self.X.shape,
                        axis=self.cri.axisN)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cbpdn_class_label_lookup(label):
    """Get a CBPDN class from a label string."""
    solvers = {'admm': admm_cbpdn.ConvBPDN,
               'fista': fista_cbpdn.ConvBPDN}
    if label not in solvers:
        raise ValueError('Unknown ConvBPDN solver method %s' % label)
    return solvers[label]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ConvBPDNOptionsDefaults(method='admm'):
    """Get defaults dict for the ConvBPDN class specified by the
    ``method`` parameter.
    """
    dflt = copy.deepcopy(cbpdn_class_label_lookup(method).Options.defaults)
    # Single main iteration per outer step; method-specific options
    # replace the corresponding defaults wholesale
    dflt['MaxMainIter'] = 1
    if method == 'admm':
        dflt['AutoRho'] = {'Period': 10, 'AutoScaling': False,
                           'RsdlRatio': 10.0, 'Scaling': 2.0,
                           'RsdlTarget': 1.0}
    else:
        dflt['BackTrack'] = {'gamma_u': 1.2, 'MaxIter': 50}
    return dflt
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ccmod_class_label_lookup(label):
    """Get a CCMOD class from a label string."""
    solvers = {'ism': admm_ccmod.ConvCnstrMOD_IterSM,
               'cg': admm_ccmod.ConvCnstrMOD_CG,
               'cns': admm_ccmod.ConvCnstrMOD_Consensus,
               'fista': fista_ccmod.ConvCnstrMOD}
    if label not in solvers:
        raise ValueError('Unknown ConvCnstrMOD solver method %s' % label)
    return solvers[label]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ConvCnstrMODOptionsDefaults(method='fista'):
    """Get defaults dict for the ConvCnstrMOD class specified by the
    ``method`` parameter.
    """
    dflt = copy.deepcopy(ccmod_class_label_lookup(method).Options.defaults)
    # Single main iteration per outer step; method-specific options
    # replace the corresponding defaults wholesale
    dflt['MaxMainIter'] = 1
    if method == 'fista':
        dflt['BackTrack'] = {'gamma_u': 1.2, 'MaxIter': 50}
    else:
        dflt['AutoRho'] = {'Period': 10, 'AutoScaling': False,
                           'RsdlRatio': 10.0, 'Scaling': 2.0,
                           'RsdlTarget': 1.0}
    return dflt
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def evaluate(self):
    """Evaluate functional value of previous iteration"""
    if not self.opt['AccurateDFid']:
        return None
    D = self.dstep.var_y()
    X = self.xstep.var_y()
    S = self.xstep.S
    # Data fidelity term 0.5 || D X - S ||_2^2 and l1 regularisation
    dfd = 0.5 * np.linalg.norm(D.dot(X) - S)**2
    rl1 = np.sum(np.abs(X))
    return dict(DFid=dfd, RegL1=rl1,
                ObjFun=dfd + self.xstep.lmbda * rl1)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def is_newer_than(pth1, pth2):
    """Return true if either file pth1 or file pth2 don't exist, or if
    pth1 has been modified more recently than pth2.
    """
    # Missing files count as "newer" so that dependent artifacts get
    # (re)built
    if not (os.path.exists(pth1) and os.path.exists(pth2)):
        return True
    return os.stat(pth1).st_mtime > os.stat(pth2).st_mtime
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def mpraw_as_np(shape, dtype):
    """Construct a numpy array of the specified shape and dtype for
    which the underlying storage is a multiprocessing RawArray in
    shared memory.

    Parameters
    ----------
    shape : tuple
      Shape of numpy array
    dtype : data-type
      Data type of array

    Returns
    -------
    arr : ndarray
      Numpy array
    """
    # Number of elements and corresponding size in bytes. Use np.prod
    # rather than np.product, which was deprecated and removed in
    # NumPy 2.0.
    sz = int(np.prod(shape))
    csz = sz * np.dtype(dtype).itemsize
    # Allocate raw shared memory and wrap it in an ndarray view
    raw = mp.RawArray('c', csz)
    return np.frombuffer(raw, dtype=dtype, count=sz).reshape(shape)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def init_mpraw(mpv, npv):
    """Set a global variable as a multiprocessing RawArray in shared
    memory with a numpy array wrapper and initialise its value.

    Parameters
    ----------
    mpv : string
      Name of global variable to set
    npv : ndarray
      Numpy array to use as initialiser for global variable value
    """
    # Allocate shared-memory array matching npv, copy its contents,
    # then publish it under the requested global name
    shared = mpraw_as_np(npv.shape, npv.dtype)
    shared[:] = npv
    globals()[mpv] = shared
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cbpdn_setdict():
    """Set the dictionary for the cbpdn stage. There are no parameters
    or return values because all inputs and outputs are from and to
    global variables.
    """
    global mp_DSf
    # Working dictionary for the cbpdn step: DFT of dictionary D
    mp_Df[:] = sl.rfftn(mp_D_Y, mp_cri.Nv, mp_cri.axisN)
    # DFT of D^T S
    Dfc = np.conj(mp_Df)
    if mp_cri.Cd == 1:
        mp_DSf[:] = Dfc * mp_Sf
    else:
        mp_DSf[:] = sl.inner(Dfc[np.newaxis, ...], mp_Sf,
                             axis=mp_cri.axisC + 1)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cbpdnmd_ustep(k):
    """Do the U step of the cbpdn stage. The only parameter is the
    slice index `k` and there are no return values; all inputs and
    outputs are from and to global variables.
    """
    # Dual variable updates for the two constraint blocks
    mp_Z_U0[k] = mp_Z_U0[k] + mp_DX[k] - mp_Z_Y0[k] - mp_S[k]
    mp_Z_U1[k] = mp_Z_U1[k] + mp_Z_X[k] - mp_Z_Y1[k]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ccmodmd_relax(k):
    """Do relaxation for the ccmod stage. The only parameter is the
    slice index `k` and there are no return values; all inputs and
    outputs are from and to global variables.
    """
    # Over-relaxation of the primal variable and of the product D X
    alpha = mp_drlx
    mp_D_X[k] = alpha * mp_D_X[k] + (1 - alpha) * mp_D_Y0
    mp_DX[k] = alpha * mp_DX[k] + (1 - alpha) * (mp_D_Y1[k] + mp_S[k])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def eval_grad(self):
    """Compute gradient in spatial domain for variable Y."""
    # Gradient of the data fidelity term: D^T (D Y - S)
    residual = self.D.dot(self.Y) - self.S
    return self.D.T.dot(residual)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def rsdl(self):
    """Compute fixed point residual."""
    # Euclidean norm of the flattened difference between current and
    # previous iterates
    return np.linalg.norm(np.ravel(self.X - self.Yprv))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def eval_Rf(self, Vf):
    """Evaluate smooth term in Vf."""
    # D Vf - S, computed in the DFT domain
    DVf = sl.inner(self.Df, Vf, axis=self.cri.axisM)
    return DVf - self.Sf
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def zpad(v, Nv):
    """Zero-pad initial axes of array to specified size. Padding is
    applied to the right, top, etc. of the array indices.

    Parameters
    ----------
    v : array_like
      Array to be padded
    Nv : tuple
      Sizes to which each of initial indices should be padded

    Returns
    -------
    vp : ndarray
      Padded array
    """
    # Allocate the padded array, keeping any trailing axes of v intact
    padded = np.zeros(Nv + v.shape[len(Nv):], dtype=v.dtype)
    # Copy v into the leading corner of the padded array
    padded[tuple(slice(0, n) for n in v.shape)] = v
    return padded
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def Pcn(x, dsz, Nv, dimN=2, dimC=1, crp=False, zm=False):
    """Constraint set projection for convolutional dictionary update
    problem.

    Parameters
    ----------
    x : array_like
      Input array
    dsz : tuple
      Filter support size(s), specified using the same format as the
      `dsz` parameter of :func:`bcrop`
    Nv : tuple
      Sizes of problem spatial indices
    dimN : int, optional (default 2)
      Number of problem spatial indices
    dimC : int, optional (default 1)
      Number of problem channel indices
    crp : bool, optional (default False)
      Flag indicating whether the result should be cropped to the
      support of the largest filter in the dictionary
    zm : bool, optional (default False)
      Flag indicating whether the projection function should include
      filter mean subtraction

    Returns
    -------
    y : ndarray
      Projection of input onto constraint set
    """
    # Crop to filter support, then optionally zero-pad back to the
    # full spatial support
    y = bcrop(x, dsz, dimN)
    if not crp:
        y = zpad(y, Nv)
    # Optionally subtract the mean from each filter
    if zm:
        y = zeromean(y, dsz, dimN)
    return normalise(y, dimN + dimC)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.