code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def api_get(self, action, data, headers=None):
"""
Perform an HTTP GET request, using the shared-secret auth hash.
@param action: API action call
@param data: dictionary values
"""
return self._api_request(action, data, 'GET', headers) | Perform an HTTP GET request, using the shared-secret auth hash.
@param action: API action call
@param data: dictionary values | Below is the instruction that describes the task:
### Input:
Perform an HTTP GET request, using the shared-secret auth hash.
@param action: API action call
@param data: dictionary values
### Response:
def api_get(self, action, data, headers=None):
"""
Perform an HTTP GET request, using the shared-secret auth hash.
@param action: API action call
@param data: dictionary values
"""
return self._api_request(action, data, 'GET', headers) |
def _update_repo(self, juicer_repo, pulp_repo, env, repo_diff, query='/repositories/'):
"""
`from_file` - JSON file of repo definitions
`noop` - Boolean, if true don't actually create/update repos, just show what would have happened
https://pulp-dev-guide.readthedocs.org/en/pulp-2.3/integration/rest-api/repo/cud.html#update-a-distributor-associated-with-a-repository
https://pulp-dev-guide.readthedocs.org/en/pulp-2.3/integration/rest-api/repo/cud.html#update-an-importer-associated-with-a-repository
Distributor update:
Method: PUT
Path: /pulp/api/v2/repositories/<repo_id>/distributors/<distributor_id>/
Importer update:
Method: PUT
Path: /pulp/api/v2/repositories/<repo_id>/importers/<importer_id>/
"""
repo_id = "%s-%s" % (juicer_repo['name'], env)
distributor_id = "yum_distributor"
importer_id = "yum_importer"
distributor_diff = repo_diff.diff()['distributor']
importer_diff = repo_diff.diff()['importer']
distributor_query = query + "%s/distributors/%s/" % (repo_id, distributor_id)
importer_query = query + "%s/importers/%s/" % (repo_id, importer_id)
##############################################################
# Importer update
_r = self.connectors[env].put(distributor_query, distributor_diff)
if _r.status_code == Constants.PULP_PUT_OK:
juicer.utils.Log.log_notice("Update request accepted for %s", repo_id)
elif _r.status_code == Constants.PULP_PUT_CONFLICT:
juicer.utils.Log.log_debug(str(_r.content))
elif _r.status_code == Constants.PULP_PUT_NOT_FOUND:
juicer.utils.Log.log_debug(str(_r.content))
else:
_r.raise_for_status()
##############################################################
# Distributor update
_r = self.connectors[env].put(importer_query, importer_diff)
if _r.status_code == Constants.PULP_PUT_OK:
juicer.utils.Log.log_notice("Update request accepted for %s", repo_id)
elif _r.status_code == Constants.PULP_PUT_CONFLICT:
juicer.utils.Log.log_debug(str(_r.content))
elif _r.status_code == Constants.PULP_PUT_NOT_FOUND:
juicer.utils.Log.log_debug(str(_r.content))
else:
_r.raise_for_status()
return True | `from_file` - JSON file of repo definitions
`noop` - Boolean, if true don't actually create/update repos, just show what would have happened
https://pulp-dev-guide.readthedocs.org/en/pulp-2.3/integration/rest-api/repo/cud.html#update-a-distributor-associated-with-a-repository
https://pulp-dev-guide.readthedocs.org/en/pulp-2.3/integration/rest-api/repo/cud.html#update-an-importer-associated-with-a-repository
Distributor update:
Method: PUT
Path: /pulp/api/v2/repositories/<repo_id>/distributors/<distributor_id>/
Importer update:
Method: PUT
Path: /pulp/api/v2/repositories/<repo_id>/importers/<importer_id>/ | Below is the instruction that describes the task:
### Input:
`from_file` - JSON file of repo definitions
`noop` - Boolean, if true don't actually create/update repos, just show what would have happened
https://pulp-dev-guide.readthedocs.org/en/pulp-2.3/integration/rest-api/repo/cud.html#update-a-distributor-associated-with-a-repository
https://pulp-dev-guide.readthedocs.org/en/pulp-2.3/integration/rest-api/repo/cud.html#update-an-importer-associated-with-a-repository
Distributor update:
Method: PUT
Path: /pulp/api/v2/repositories/<repo_id>/distributors/<distributor_id>/
Importer update:
Method: PUT
Path: /pulp/api/v2/repositories/<repo_id>/importers/<importer_id>/
### Response:
def _update_repo(self, juicer_repo, pulp_repo, env, repo_diff, query='/repositories/'):
"""
`from_file` - JSON file of repo definitions
`noop` - Boolean, if true don't actually create/update repos, just show what would have happened
https://pulp-dev-guide.readthedocs.org/en/pulp-2.3/integration/rest-api/repo/cud.html#update-a-distributor-associated-with-a-repository
https://pulp-dev-guide.readthedocs.org/en/pulp-2.3/integration/rest-api/repo/cud.html#update-an-importer-associated-with-a-repository
Distributor update:
Method: PUT
Path: /pulp/api/v2/repositories/<repo_id>/distributors/<distributor_id>/
Importer update:
Method: PUT
Path: /pulp/api/v2/repositories/<repo_id>/importers/<importer_id>/
"""
repo_id = "%s-%s" % (juicer_repo['name'], env)
distributor_id = "yum_distributor"
importer_id = "yum_importer"
distributor_diff = repo_diff.diff()['distributor']
importer_diff = repo_diff.diff()['importer']
distributor_query = query + "%s/distributors/%s/" % (repo_id, distributor_id)
importer_query = query + "%s/importers/%s/" % (repo_id, importer_id)
##############################################################
# Importer update
_r = self.connectors[env].put(distributor_query, distributor_diff)
if _r.status_code == Constants.PULP_PUT_OK:
juicer.utils.Log.log_notice("Update request accepted for %s", repo_id)
elif _r.status_code == Constants.PULP_PUT_CONFLICT:
juicer.utils.Log.log_debug(str(_r.content))
elif _r.status_code == Constants.PULP_PUT_NOT_FOUND:
juicer.utils.Log.log_debug(str(_r.content))
else:
_r.raise_for_status()
##############################################################
# Distributor update
_r = self.connectors[env].put(importer_query, importer_diff)
if _r.status_code == Constants.PULP_PUT_OK:
juicer.utils.Log.log_notice("Update request accepted for %s", repo_id)
elif _r.status_code == Constants.PULP_PUT_CONFLICT:
juicer.utils.Log.log_debug(str(_r.content))
elif _r.status_code == Constants.PULP_PUT_NOT_FOUND:
juicer.utils.Log.log_debug(str(_r.content))
else:
_r.raise_for_status()
return True |
def parent():
"""Determine subshell matching the currently running shell
The shell is determined by either a pre-defined BE_SHELL
environment variable, or, if none is found, via psutil
which looks at the parent process directly through
system-level calls.
For example, if `be` is run from cmd.exe, then the full
path to cmd.exe is returned, and the same goes for bash.exe
and bash (without suffix) for Unix environments.
The point is to return an appropriate subshell for the
running shell, as opposed to the currently running OS.
"""
if self._parent:
return self._parent
if "BE_SHELL" in os.environ:
self._parent = os.environ["BE_SHELL"]
else:
# If a shell is not provided, rely on `psutil`
# to look at the calling process name.
try:
import psutil
except ImportError:
raise ImportError(
"No shell provided, see documentation for "
"BE_SHELL for more information.\n"
"https://github.com/mottosso/be/wiki"
"/environment#read-environment-variables")
parent = psutil.Process(os.getpid()).parent()
# `pip install` creates an additional executable
# that tricks the above mechanism to think of it
# as the parent shell. See #34 for more.
if parent.name() in ("be", "be.exe"):
parent = parent.parent()
self._parent = str(parent.exe())
return self._parent | Determine subshell matching the currently running shell
The shell is determined by either a pre-defined BE_SHELL
environment variable, or, if none is found, via psutil
which looks at the parent process directly through
system-level calls.
For example, if `be` is run from cmd.exe, then the full
path to cmd.exe is returned, and the same goes for bash.exe
and bash (without suffix) for Unix environments.
The point is to return an appropriate subshell for the
running shell, as opposed to the currently running OS. | Below is the instruction that describes the task:
### Input:
Determine subshell matching the currently running shell
The shell is determined by either a pre-defined BE_SHELL
environment variable, or, if none is found, via psutil
which looks at the parent process directly through
system-level calls.
For example, if `be` is run from cmd.exe, then the full
path to cmd.exe is returned, and the same goes for bash.exe
and bash (without suffix) for Unix environments.
The point is to return an appropriate subshell for the
running shell, as opposed to the currently running OS.
### Response:
def parent():
"""Determine subshell matching the currently running shell
The shell is determined by either a pre-defined BE_SHELL
environment variable, or, if none is found, via psutil
which looks at the parent process directly through
system-level calls.
For example, if `be` is run from cmd.exe, then the full
path to cmd.exe is returned, and the same goes for bash.exe
and bash (without suffix) for Unix environments.
The point is to return an appropriate subshell for the
running shell, as opposed to the currently running OS.
"""
if self._parent:
return self._parent
if "BE_SHELL" in os.environ:
self._parent = os.environ["BE_SHELL"]
else:
# If a shell is not provided, rely on `psutil`
# to look at the calling process name.
try:
import psutil
except ImportError:
raise ImportError(
"No shell provided, see documentation for "
"BE_SHELL for more information.\n"
"https://github.com/mottosso/be/wiki"
"/environment#read-environment-variables")
parent = psutil.Process(os.getpid()).parent()
# `pip install` creates an additional executable
# that tricks the above mechanism to think of it
# as the parent shell. See #34 for more.
if parent.name() in ("be", "be.exe"):
parent = parent.parent()
self._parent = str(parent.exe())
return self._parent |
def install_jspackage(package_name, version, modulesdir):
"""Installs a JavaScript package downloaded from npmjs.org.
For example to install React::
install_jspackage('react', '0.14.8', './node_modules')
To install last version provide `None` as the version.
"""
if not version:
version = ''
requirements = _resolve_dependencies(package_name, version)
print('Packages going to be installed: {0}'.format(', '.join(
'{0}->{1}'.format(*i) for i in requirements
)))
downloads = {}
for dependency_name, _, version_info in requirements:
try:
downloads[dependency_name] = version_info['dist']['tarball']
except KeyError:
raise JSPackageInstallError('Unable to detect a supported download url for package',
error_code=3)
for dependency_name, download_url in downloads.items():
tarball = BytesIO()
print('Fetching {0}'.format(download_url), end='')
with closing(urlopen(download_url)) as data:
chunk = data.read(1024)
while chunk:
print('.', end='')
tarball.write(chunk)
chunk = data.read(1024)
print('')
tarball.seek(0)
with closing(tarfile.open(fileobj=tarball)) as tb:
dest = os.path.join(modulesdir, dependency_name)
tmpdir = tempfile.mkdtemp()
try:
tb.extractall(tmpdir)
shutil.rmtree(os.path.abspath(dest), ignore_errors=True)
shutil.move(os.path.join(tmpdir, 'package'),
os.path.abspath(dest))
finally:
shutil.rmtree(tmpdir)
print('Installing {0} in {1} Done!'.format(package_name, modulesdir)) | Installs a JavaScript package downloaded from npmjs.org.
For example to install React::
install_jspackage('react', '0.14.8', './node_modules')
To install last version provide `None` as the version. | Below is the instruction that describes the task:
### Input:
Installs a JavaScript package downloaded from npmjs.org.
For example to install React::
install_jspackage('react', '0.14.8', './node_modules')
To install last version provide `None` as the version.
### Response:
def install_jspackage(package_name, version, modulesdir):
"""Installs a JavaScript package downloaded from npmjs.org.
For example to install React::
install_jspackage('react', '0.14.8', './node_modules')
To install last version provide `None` as the version.
"""
if not version:
version = ''
requirements = _resolve_dependencies(package_name, version)
print('Packages going to be installed: {0}'.format(', '.join(
'{0}->{1}'.format(*i) for i in requirements
)))
downloads = {}
for dependency_name, _, version_info in requirements:
try:
downloads[dependency_name] = version_info['dist']['tarball']
except KeyError:
raise JSPackageInstallError('Unable to detect a supported download url for package',
error_code=3)
for dependency_name, download_url in downloads.items():
tarball = BytesIO()
print('Fetching {0}'.format(download_url), end='')
with closing(urlopen(download_url)) as data:
chunk = data.read(1024)
while chunk:
print('.', end='')
tarball.write(chunk)
chunk = data.read(1024)
print('')
tarball.seek(0)
with closing(tarfile.open(fileobj=tarball)) as tb:
dest = os.path.join(modulesdir, dependency_name)
tmpdir = tempfile.mkdtemp()
try:
tb.extractall(tmpdir)
shutil.rmtree(os.path.abspath(dest), ignore_errors=True)
shutil.move(os.path.join(tmpdir, 'package'),
os.path.abspath(dest))
finally:
shutil.rmtree(tmpdir)
print('Installing {0} in {1} Done!'.format(package_name, modulesdir)) |
def json(self, args=None):
"""Return a dictionary representation of the class.
Notes
-----
This is meant to be used by a third-party library wanting to wrap this class into another interface.
"""
names = ['identifier', 'abstract', 'keywords']
out = {key: getattr(self, key) for key in names}
out.update(self.cf_attrs)
out = self.format(out, args)
out['notes'] = self.notes
out['parameters'] = str({key: {'default': p.default if p.default != p.empty else None, 'desc': ''}
for (key, p) in self._sig.parameters.items()})
if six.PY2:
out = walk_map(out, lambda x: x.decode('utf8') if isinstance(x, six.string_types) else x)
return out | Return a dictionary representation of the class.
Notes
-----
This is meant to be used by a third-party library wanting to wrap this class into another interface. | Below is the instruction that describes the task:
### Input:
Return a dictionary representation of the class.
Notes
-----
This is meant to be used by a third-party library wanting to wrap this class into another interface.
### Response:
def json(self, args=None):
"""Return a dictionary representation of the class.
Notes
-----
This is meant to be used by a third-party library wanting to wrap this class into another interface.
"""
names = ['identifier', 'abstract', 'keywords']
out = {key: getattr(self, key) for key in names}
out.update(self.cf_attrs)
out = self.format(out, args)
out['notes'] = self.notes
out['parameters'] = str({key: {'default': p.default if p.default != p.empty else None, 'desc': ''}
for (key, p) in self._sig.parameters.items()})
if six.PY2:
out = walk_map(out, lambda x: x.decode('utf8') if isinstance(x, six.string_types) else x)
return out |
def apply_bios_properties_filter(settings, filter_to_be_applied):
"""Applies the filter to return the dict of filtered BIOS properties.
:param settings: dict of BIOS settings on which filter to be applied.
:param filter_to_be_applied: list of keys to be applied as filter.
:returns: A dictionary of filtered BIOS settings.
"""
if not settings or not filter_to_be_applied:
return settings
return {k: settings[k] for k in filter_to_be_applied if k in settings} | Applies the filter to return the dict of filtered BIOS properties.
:param settings: dict of BIOS settings on which filter to be applied.
:param filter_to_be_applied: list of keys to be applied as filter.
:returns: A dictionary of filtered BIOS settings. | Below is the instruction that describes the task:
### Input:
Applies the filter to return the dict of filtered BIOS properties.
:param settings: dict of BIOS settings on which filter to be applied.
:param filter_to_be_applied: list of keys to be applied as filter.
:returns: A dictionary of filtered BIOS settings.
### Response:
def apply_bios_properties_filter(settings, filter_to_be_applied):
"""Applies the filter to return the dict of filtered BIOS properties.
:param settings: dict of BIOS settings on which filter to be applied.
:param filter_to_be_applied: list of keys to be applied as filter.
:returns: A dictionary of filtered BIOS settings.
"""
if not settings or not filter_to_be_applied:
return settings
return {k: settings[k] for k in filter_to_be_applied if k in settings} |
def make_form_field(field, model=None, field_cls=None,
use_default_value=True,
builds_args_map=None):
"""
make form field according field value
:param field: such as: str, Form Field instance, dict
if field is str type, it'll fetch property from model or str is like 'model.name'
it'll fetch property `name` from `model`
:param model: if field is str type, it'll may use model value to fetch property
:param field_cls: if not applied, it'll fetch from property, field_cls should be Field class
:param builds_args_map:
:return: Field instance
"""
import uliweb.form as form
from uliweb.form.validators import TEST_MAXLENGTH
builds_args_map = builds_args_map or {}
if model:
model = get_model(model)
if isinstance(field, BaseField):
return field
if isinstance(field, dict) and 'field' in field and isinstance(field['field'], BaseField):
return field['field']
field_type = None
prop = None #model property if existed
#process prop
if isinstance(field, (str, unicode)):
field = {'name':field}
if isinstance(field, dict):
prop = field.get('prop')
if not prop and model:
#if there is field_name, then use it property class
field_name = field.get('field_name') or field['name']
prop = get_model_property(model, field_name)
#if existed field_cls, then it'll use it to create form field
if not prop and not field_cls:
raise UliwebError("Can't find property %s in Model(%r)" %
(field_name, model.__name__))
else:
raise UliwebError("Can't support this field %r type in make_form_field" % type(field))
default_kwargs = {}
if prop:
default_kwargs = {
'label':prop.label or prop.property_name,
'help_string':prop.hint,
'placeholder':prop.placeholder,
'html_attrs':prop.extra.get('html_attrs', {}),
'required':prop.required,
}
kwargs = {
'label':field.get('label') or field.get('verbose_name') or default_kwargs.get('label'),
'help_string':field.get('hint') or field.get('help_string') or default_kwargs.get('help_string'),
'placeholder':field.get('placeholder') or default_kwargs.get('placeholder'),
'html_attrs':field.get('extra', {}).get('html_attrs', {}) or default_kwargs.get('html_attrs'),
'required':field.get('required', False) if 'required' in field else default_kwargs.get('required', False)
}
#add data-url support
if 'data-url' in field:
kwargs['html_attrs']['data-url'] = field['data-url']
if use_default_value:
v = prop.default_value()
kwargs['default'] = v
if field.get('static'):
field_type = form.StringField
kwargs['required'] = False
kwargs['static'] = True
if prop.choices is not None:
kwargs['choices'] = prop.get_choices()
if field.get('hidden'):
field_type = form.HiddenField
if field_cls:
field_type = field_cls
elif not field_type:
cls = prop.__class__
type_name = prop.type_name
if type_name == 'BLOG':
pass
elif type_name in ('TEXT', 'JSON'):
field_type = form.TextField
elif type_name in ('CHAR', 'VARCHAR', 'UUID'):
if prop.choices is not None:
field_type = form.SelectField
kwargs['choices'] = prop.get_choices()
else:
if cls.__name__ == 'FileProperty':
field_type = form.FileField
kwargs['upload_to'] = prop.upload_to
kwargs['upload_to_sub'] = prop.upload_to_sub
else:
field_type = form.UnicodeField
elif type_name == 'BOOL':
field_type = form.BooleanField
elif type_name == 'DATE':
# if not prop.auto_now and not prop.auto_now_add:
field_type = form.DateField
elif type_name == 'TIME':
# if not prop.auto_now and not prop.auto_now_add:
field_type = form.TimeField
elif type_name == 'DATETIME':
# if not prop.auto_now and not prop.auto_now_add:
field_type = form.DateTimeField
elif type_name == 'DECIMAL':
field_type = form.StringField
if prop.choices is not None:
field_type = form.SelectField
kwargs['choices'] = prop.get_choices()
elif type_name == 'FLOAT':
field_type = form.FloatField
elif type_name == 'INTEGER':
if prop.choices is not None:
field_type = form.SelectField
kwargs['choices'] = prop.get_choices()
kwargs['datetype'] = int
else:
field_type = form.IntField
elif type_name == 'ManyToMany':
kwargs['model'] = prop.reference_class
field_type = ManyToManySelectField
elif type_name in ('Reference', 'OneToOne'):
#field_type = form.IntField
kwargs['model'] = prop.reference_class
kwargs['value_field'] = prop.reference_fieldname
field_type = ReferenceSelectField
else:
raise ValueError("Can't support the Property [%s=%s]" %
(field['name'], prop.__class__.__name__))
if field_type:
build_args = builds_args_map.get(field_type, {})
#add settings.ini configure support
#so you could add options in settings.ini like this
# [GENERIC_FIELDS_MAPPING]
# FormFieldClassName = {'build':'model.NewFormFieldTypeClassName', **other args}
#
# e.g.
# [GENERIC_FIELDS_MAPPING]
# DateField = {'build':'jquery.widgets.DatePicker'}
if not build_args:
build_args = get_fileds_builds().get(field_type, {})
kwargs.update(build_args)
#add max_length validator
if issubclass(prop.__class__, (orm.StringProperty, orm.CharProperty, orm.UnicodeProperty)):
v = kwargs.setdefault('validators', [])
if isinstance(prop.max_length, int):
v.append(TEST_MAXLENGTH(prop.max_length))
f = field_type(**kwargs)
return f | make form field according field value
:param field: such as: str, Form Field instance, dict
if field is str type, it'll fetch property from model or str is like 'model.name'
it'll fetch property `name` from `model`
:param model: if field is str type, it'll may use model value to fetch property
:param field_cls: if not applied, it'll fetch from property, field_cls should be Field class
:param builds_args_map:
:return: Field instance | Below is the instruction that describes the task:
### Input:
make form field according field value
:param field: such as: str, Form Field instance, dict
if field is str type, it'll fetch property from model or str is like 'model.name'
it'll fetch property `name` from `model`
:param model: if field is str type, it'll may use model value to fetch property
:param field_cls: if not applied, it'll fetch from property, field_cls should be Field class
:param builds_args_map:
:return: Field instance
### Response:
def make_form_field(field, model=None, field_cls=None,
use_default_value=True,
builds_args_map=None):
"""
make form field according field value
:param field: such as: str, Form Field instance, dict
if field is str type, it'll fetch property from model or str is like 'model.name'
it'll fetch property `name` from `model`
:param model: if field is str type, it'll may use model value to fetch property
:param field_cls: if not applied, it'll fetch from property, field_cls should be Field class
:param builds_args_map:
:return: Field instance
"""
import uliweb.form as form
from uliweb.form.validators import TEST_MAXLENGTH
builds_args_map = builds_args_map or {}
if model:
model = get_model(model)
if isinstance(field, BaseField):
return field
if isinstance(field, dict) and 'field' in field and isinstance(field['field'], BaseField):
return field['field']
field_type = None
prop = None #model property if existed
#process prop
if isinstance(field, (str, unicode)):
field = {'name':field}
if isinstance(field, dict):
prop = field.get('prop')
if not prop and model:
#if there is field_name, then use it property class
field_name = field.get('field_name') or field['name']
prop = get_model_property(model, field_name)
#if existed field_cls, then it'll use it to create form field
if not prop and not field_cls:
raise UliwebError("Can't find property %s in Model(%r)" %
(field_name, model.__name__))
else:
raise UliwebError("Can't support this field %r type in make_form_field" % type(field))
default_kwargs = {}
if prop:
default_kwargs = {
'label':prop.label or prop.property_name,
'help_string':prop.hint,
'placeholder':prop.placeholder,
'html_attrs':prop.extra.get('html_attrs', {}),
'required':prop.required,
}
kwargs = {
'label':field.get('label') or field.get('verbose_name') or default_kwargs.get('label'),
'help_string':field.get('hint') or field.get('help_string') or default_kwargs.get('help_string'),
'placeholder':field.get('placeholder') or default_kwargs.get('placeholder'),
'html_attrs':field.get('extra', {}).get('html_attrs', {}) or default_kwargs.get('html_attrs'),
'required':field.get('required', False) if 'required' in field else default_kwargs.get('required', False)
}
#add data-url support
if 'data-url' in field:
kwargs['html_attrs']['data-url'] = field['data-url']
if use_default_value:
v = prop.default_value()
kwargs['default'] = v
if field.get('static'):
field_type = form.StringField
kwargs['required'] = False
kwargs['static'] = True
if prop.choices is not None:
kwargs['choices'] = prop.get_choices()
if field.get('hidden'):
field_type = form.HiddenField
if field_cls:
field_type = field_cls
elif not field_type:
cls = prop.__class__
type_name = prop.type_name
if type_name == 'BLOG':
pass
elif type_name in ('TEXT', 'JSON'):
field_type = form.TextField
elif type_name in ('CHAR', 'VARCHAR', 'UUID'):
if prop.choices is not None:
field_type = form.SelectField
kwargs['choices'] = prop.get_choices()
else:
if cls.__name__ == 'FileProperty':
field_type = form.FileField
kwargs['upload_to'] = prop.upload_to
kwargs['upload_to_sub'] = prop.upload_to_sub
else:
field_type = form.UnicodeField
elif type_name == 'BOOL':
field_type = form.BooleanField
elif type_name == 'DATE':
# if not prop.auto_now and not prop.auto_now_add:
field_type = form.DateField
elif type_name == 'TIME':
# if not prop.auto_now and not prop.auto_now_add:
field_type = form.TimeField
elif type_name == 'DATETIME':
# if not prop.auto_now and not prop.auto_now_add:
field_type = form.DateTimeField
elif type_name == 'DECIMAL':
field_type = form.StringField
if prop.choices is not None:
field_type = form.SelectField
kwargs['choices'] = prop.get_choices()
elif type_name == 'FLOAT':
field_type = form.FloatField
elif type_name == 'INTEGER':
if prop.choices is not None:
field_type = form.SelectField
kwargs['choices'] = prop.get_choices()
kwargs['datetype'] = int
else:
field_type = form.IntField
elif type_name == 'ManyToMany':
kwargs['model'] = prop.reference_class
field_type = ManyToManySelectField
elif type_name in ('Reference', 'OneToOne'):
#field_type = form.IntField
kwargs['model'] = prop.reference_class
kwargs['value_field'] = prop.reference_fieldname
field_type = ReferenceSelectField
else:
raise ValueError("Can't support the Property [%s=%s]" %
(field['name'], prop.__class__.__name__))
if field_type:
build_args = builds_args_map.get(field_type, {})
#add settings.ini configure support
#so you could add options in settings.ini like this
# [GENERIC_FIELDS_MAPPING]
# FormFieldClassName = {'build':'model.NewFormFieldTypeClassName', **other args}
#
# e.g.
# [GENERIC_FIELDS_MAPPING]
# DateField = {'build':'jquery.widgets.DatePicker'}
if not build_args:
build_args = get_fileds_builds().get(field_type, {})
kwargs.update(build_args)
#add max_length validator
if issubclass(prop.__class__, (orm.StringProperty, orm.CharProperty, orm.UnicodeProperty)):
v = kwargs.setdefault('validators', [])
if isinstance(prop.max_length, int):
v.append(TEST_MAXLENGTH(prop.max_length))
f = field_type(**kwargs)
return f |
def write(self, outfile, encoding):
"""Method override to create self-closing elements.
https://docs.djangoproject.com/en/2.0/ref/utils/#django.utils.feedgenerator.SyndicationFeed.write
https://github.com/django/django/blob/2.0/django/utils/feedgenerator.py#L216
"""
try:
handler = EscapeFriendlyXMLGenerator(outfile, encoding, short_empty_elements=True)
except TypeError: # Python 2
handler = EscapeFriendlyXMLGenerator(outfile, encoding)
handler.startDocument()
handler.startElement('rss', self.rss_attributes())
handler.startElement('channel', self.root_attributes())
self.add_root_elements(handler)
self.write_items(handler)
self.endChannelElement(handler)
handler.endElement('rss') | Method override to create self-closing elements.
https://docs.djangoproject.com/en/2.0/ref/utils/#django.utils.feedgenerator.SyndicationFeed.write
https://github.com/django/django/blob/2.0/django/utils/feedgenerator.py#L216 | Below is the instruction that describes the task:
### Input:
Method override to create self-closing elements.
https://docs.djangoproject.com/en/2.0/ref/utils/#django.utils.feedgenerator.SyndicationFeed.write
https://github.com/django/django/blob/2.0/django/utils/feedgenerator.py#L216
### Response:
def write(self, outfile, encoding):
"""Method override to create self-closing elements.
https://docs.djangoproject.com/en/2.0/ref/utils/#django.utils.feedgenerator.SyndicationFeed.write
https://github.com/django/django/blob/2.0/django/utils/feedgenerator.py#L216
"""
try:
handler = EscapeFriendlyXMLGenerator(outfile, encoding, short_empty_elements=True)
except TypeError: # Python 2
handler = EscapeFriendlyXMLGenerator(outfile, encoding)
handler.startDocument()
handler.startElement('rss', self.rss_attributes())
handler.startElement('channel', self.root_attributes())
self.add_root_elements(handler)
self.write_items(handler)
self.endChannelElement(handler)
handler.endElement('rss') |
def connection_lost(self, exc):
'''Called by asyncio when the connection closes.
Tear down things done in connection_made.'''
# Work around uvloop bug; see https://github.com/MagicStack/uvloop/issues/246
if self.transport:
self.transport = None
self.closed_event.set()
# Release waiting tasks
self._can_send.set()
# Cancelling directly leads to self-cancellation problems for member
# functions await-ing self.close()
self.loop.call_soon(self._task.cancel) | Called by asyncio when the connection closes.
Tear down things done in connection_made. | Below is the instruction that describes the task:
### Input:
Called by asyncio when the connection closes.
Tear down things done in connection_made.
### Response:
def connection_lost(self, exc):
'''Called by asyncio when the connection closes.
Tear down things done in connection_made.'''
# Work around uvloop bug; see https://github.com/MagicStack/uvloop/issues/246
if self.transport:
self.transport = None
self.closed_event.set()
# Release waiting tasks
self._can_send.set()
# Cancelling directly leads to self-cancellation problems for member
# functions await-ing self.close()
self.loop.call_soon(self._task.cancel) |
def get_layer(pressure, *args, **kwargs):
r"""Return an atmospheric layer from upper air data with the requested bottom and depth.
This function will subset an upper air dataset to contain only the specified layer. The
bottom of the layer can be specified with a pressure or height above the surface
pressure. The bottom defaults to the surface pressure. The depth of the layer can be
specified in terms of pressure or height above the bottom of the layer. If the top and
bottom of the layer are not in the data, they are interpolated by default.
Parameters
----------
pressure : array-like
Atmospheric pressure profile
*args : array-like
Atmospheric variable(s) measured at the given pressures
heights: array-like, optional
Atmospheric heights corresponding to the given pressures. Defaults to using
heights calculated from ``p`` assuming a standard atmosphere.
bottom : `pint.Quantity`, optional
The bottom of the layer as a pressure or height above the surface pressure. Defaults
to the highest pressure or lowest height given.
depth : `pint.Quantity`, optional
The thickness of the layer as a pressure or height above the bottom of the layer.
Defaults to 100 hPa.
interpolate : bool, optional
Interpolate the top and bottom points if they are not in the given data. Defaults
to True.
Returns
-------
`pint.Quantity, pint.Quantity`
The pressure and data variables of the layer
"""
# Pop off keyword arguments
heights = kwargs.pop('heights', None)
bottom = kwargs.pop('bottom', None)
depth = kwargs.pop('depth', 100 * units.hPa)
interpolate = kwargs.pop('interpolate', True)
# If we get the depth kwarg, but it's None, set it to the default as well
if depth is None:
depth = 100 * units.hPa
# Make sure pressure and datavars are the same length
for datavar in args:
if len(pressure) != len(datavar):
raise ValueError('Pressure and data variables must have the same length.')
# If the bottom is not specified, make it the surface pressure
if bottom is None:
bottom = np.nanmax(pressure) * pressure.units
bottom_pressure, bottom_height = _get_bound_pressure_height(pressure, bottom,
heights=heights,
interpolate=interpolate)
# Calculate the top if whatever units depth is in
if depth.dimensionality == {'[length]': -1.0, '[mass]': 1.0, '[time]': -2.0}:
top = bottom_pressure - depth
elif depth.dimensionality == {'[length]': 1}:
top = bottom_height + depth
else:
raise ValueError('Depth must be specified in units of length or pressure')
top_pressure, _ = _get_bound_pressure_height(pressure, top, heights=heights,
interpolate=interpolate)
ret = [] # returned data variables in layer
# Ensure pressures are sorted in ascending order
sort_inds = np.argsort(pressure)
pressure = pressure[sort_inds]
# Mask based on top and bottom pressure
inds = (_less_or_close(pressure, bottom_pressure)
& _greater_or_close(pressure, top_pressure))
p_interp = pressure[inds]
# Interpolate pressures at bounds if necessary and sort
if interpolate:
# If we don't have the bottom or top requested, append them
if not np.any(np.isclose(top_pressure, p_interp)):
p_interp = np.sort(np.append(p_interp, top_pressure)) * pressure.units
if not np.any(np.isclose(bottom_pressure, p_interp)):
p_interp = np.sort(np.append(p_interp, bottom_pressure)) * pressure.units
ret.append(p_interp[::-1])
for datavar in args:
# Ensure that things are sorted in ascending order
datavar = datavar[sort_inds]
if interpolate:
# Interpolate for the possibly missing bottom/top values
datavar_interp = log_interpolate_1d(p_interp, pressure, datavar)
datavar = datavar_interp
else:
datavar = datavar[inds]
ret.append(datavar[::-1])
return ret | r"""Return an atmospheric layer from upper air data with the requested bottom and depth.
This function will subset an upper air dataset to contain only the specified layer. The
bottom of the layer can be specified with a pressure or height above the surface
pressure. The bottom defaults to the surface pressure. The depth of the layer can be
specified in terms of pressure or height above the bottom of the layer. If the top and
bottom of the layer are not in the data, they are interpolated by default.
Parameters
----------
pressure : array-like
Atmospheric pressure profile
*args : array-like
Atmospheric variable(s) measured at the given pressures
heights: array-like, optional
Atmospheric heights corresponding to the given pressures. Defaults to using
heights calculated from ``p`` assuming a standard atmosphere.
bottom : `pint.Quantity`, optional
The bottom of the layer as a pressure or height above the surface pressure. Defaults
to the highest pressure or lowest height given.
depth : `pint.Quantity`, optional
The thickness of the layer as a pressure or height above the bottom of the layer.
Defaults to 100 hPa.
interpolate : bool, optional
Interpolate the top and bottom points if they are not in the given data. Defaults
to True.
Returns
-------
`pint.Quantity, pint.Quantity`
The pressure and data variables of the layer | Below is the the instruction that describes the task:
### Input:
r"""Return an atmospheric layer from upper air data with the requested bottom and depth.
This function will subset an upper air dataset to contain only the specified layer. The
bottom of the layer can be specified with a pressure or height above the surface
pressure. The bottom defaults to the surface pressure. The depth of the layer can be
specified in terms of pressure or height above the bottom of the layer. If the top and
bottom of the layer are not in the data, they are interpolated by default.
Parameters
----------
pressure : array-like
Atmospheric pressure profile
*args : array-like
Atmospheric variable(s) measured at the given pressures
heights: array-like, optional
Atmospheric heights corresponding to the given pressures. Defaults to using
heights calculated from ``p`` assuming a standard atmosphere.
bottom : `pint.Quantity`, optional
The bottom of the layer as a pressure or height above the surface pressure. Defaults
to the highest pressure or lowest height given.
depth : `pint.Quantity`, optional
The thickness of the layer as a pressure or height above the bottom of the layer.
Defaults to 100 hPa.
interpolate : bool, optional
Interpolate the top and bottom points if they are not in the given data. Defaults
to True.
Returns
-------
`pint.Quantity, pint.Quantity`
The pressure and data variables of the layer
### Response:
def get_layer(pressure, *args, **kwargs):
r"""Return an atmospheric layer from upper air data with the requested bottom and depth.
This function will subset an upper air dataset to contain only the specified layer. The
bottom of the layer can be specified with a pressure or height above the surface
pressure. The bottom defaults to the surface pressure. The depth of the layer can be
specified in terms of pressure or height above the bottom of the layer. If the top and
bottom of the layer are not in the data, they are interpolated by default.
Parameters
----------
pressure : array-like
Atmospheric pressure profile
*args : array-like
Atmospheric variable(s) measured at the given pressures
heights: array-like, optional
Atmospheric heights corresponding to the given pressures. Defaults to using
heights calculated from ``p`` assuming a standard atmosphere.
bottom : `pint.Quantity`, optional
The bottom of the layer as a pressure or height above the surface pressure. Defaults
to the highest pressure or lowest height given.
depth : `pint.Quantity`, optional
The thickness of the layer as a pressure or height above the bottom of the layer.
Defaults to 100 hPa.
interpolate : bool, optional
Interpolate the top and bottom points if they are not in the given data. Defaults
to True.
Returns
-------
`pint.Quantity, pint.Quantity`
The pressure and data variables of the layer
"""
# Pop off keyword arguments
heights = kwargs.pop('heights', None)
bottom = kwargs.pop('bottom', None)
depth = kwargs.pop('depth', 100 * units.hPa)
interpolate = kwargs.pop('interpolate', True)
# If we get the depth kwarg, but it's None, set it to the default as well
if depth is None:
depth = 100 * units.hPa
# Make sure pressure and datavars are the same length
for datavar in args:
if len(pressure) != len(datavar):
raise ValueError('Pressure and data variables must have the same length.')
# If the bottom is not specified, make it the surface pressure
if bottom is None:
bottom = np.nanmax(pressure) * pressure.units
bottom_pressure, bottom_height = _get_bound_pressure_height(pressure, bottom,
heights=heights,
interpolate=interpolate)
# Calculate the top if whatever units depth is in
if depth.dimensionality == {'[length]': -1.0, '[mass]': 1.0, '[time]': -2.0}:
top = bottom_pressure - depth
elif depth.dimensionality == {'[length]': 1}:
top = bottom_height + depth
else:
raise ValueError('Depth must be specified in units of length or pressure')
top_pressure, _ = _get_bound_pressure_height(pressure, top, heights=heights,
interpolate=interpolate)
ret = [] # returned data variables in layer
# Ensure pressures are sorted in ascending order
sort_inds = np.argsort(pressure)
pressure = pressure[sort_inds]
# Mask based on top and bottom pressure
inds = (_less_or_close(pressure, bottom_pressure)
& _greater_or_close(pressure, top_pressure))
p_interp = pressure[inds]
# Interpolate pressures at bounds if necessary and sort
if interpolate:
# If we don't have the bottom or top requested, append them
if not np.any(np.isclose(top_pressure, p_interp)):
p_interp = np.sort(np.append(p_interp, top_pressure)) * pressure.units
if not np.any(np.isclose(bottom_pressure, p_interp)):
p_interp = np.sort(np.append(p_interp, bottom_pressure)) * pressure.units
ret.append(p_interp[::-1])
for datavar in args:
# Ensure that things are sorted in ascending order
datavar = datavar[sort_inds]
if interpolate:
# Interpolate for the possibly missing bottom/top values
datavar_interp = log_interpolate_1d(p_interp, pressure, datavar)
datavar = datavar_interp
else:
datavar = datavar[inds]
ret.append(datavar[::-1])
return ret |
def add_new_grid_headers(self, new_headers):
"""
Add in all user-added headers.
If those new headers depend on other headers,
add the other headers too.
"""
already_present = []
for name in new_headers:
if name:
if name not in self.grid.col_labels:
col_number = self.grid.add_col(name)
# add to appropriate headers list
# add drop down menus for user-added column
if name in self.contribution.vocab.vocabularies:
self.drop_down_menu.add_drop_down(col_number, name)
elif name in self.contribution.vocab.suggested:
self.drop_down_menu.add_drop_down(col_number, name)
elif name in ['specimen', 'sample', 'site',
'specimens', 'samples', 'sites']:
self.drop_down_menu.add_drop_down(col_number, name)
elif name == 'experiments':
self.drop_down_menu.add_drop_down(col_number, name)
if name == "method_codes":
self.drop_down_menu.add_method_drop_down(col_number, name)
else:
already_present.append(name)
#pw.simple_warning('You are already using column header: {}'.format(name))
return already_present | Add in all user-added headers.
If those new headers depend on other headers,
add the other headers too. | Below is the the instruction that describes the task:
### Input:
Add in all user-added headers.
If those new headers depend on other headers,
add the other headers too.
### Response:
def add_new_grid_headers(self, new_headers):
"""
Add in all user-added headers.
If those new headers depend on other headers,
add the other headers too.
"""
already_present = []
for name in new_headers:
if name:
if name not in self.grid.col_labels:
col_number = self.grid.add_col(name)
# add to appropriate headers list
# add drop down menus for user-added column
if name in self.contribution.vocab.vocabularies:
self.drop_down_menu.add_drop_down(col_number, name)
elif name in self.contribution.vocab.suggested:
self.drop_down_menu.add_drop_down(col_number, name)
elif name in ['specimen', 'sample', 'site',
'specimens', 'samples', 'sites']:
self.drop_down_menu.add_drop_down(col_number, name)
elif name == 'experiments':
self.drop_down_menu.add_drop_down(col_number, name)
if name == "method_codes":
self.drop_down_menu.add_method_drop_down(col_number, name)
else:
already_present.append(name)
#pw.simple_warning('You are already using column header: {}'.format(name))
return already_present |
def render_diagram(out_base):
"""Render a data model diagram
Included in the diagram are all classes from the model registry.
For your project, write a small script that imports all models that you would like to
have included and then calls this function.
.. note:: This function requires the 'dot' executable from the GraphViz package to be installed
and its location configured in your `project_config.py` variable :attr:`DOT_EXECUTABLE`.
Args:
out_base (str): output base path (file endings will be appended)
"""
import codecs
import subprocess
import sadisplay
# generate class descriptions
desc = sadisplay.describe(list(model_registry.values()),
show_methods=False,
show_properties=True,
show_indexes=True,
)
# write description in DOT format
with codecs.open(out_base + '.dot', 'w', encoding='utf-8') as f:
f.write(sadisplay.dot(desc))
# check existence of DOT_EXECUTABLE variable and file
if not hasattr(config, 'DOT_EXECUTABLE'):
raise RuntimeError("Please configure the 'DOT_EXECUTABLE' variable in your 'project_config.py'")
if not os.path.exists(config.DOT_EXECUTABLE):
raise IOError("Could not find file pointed to by 'DOT_EXECUTABLE': " + str(config.DOT_EXECUTABLE))
# render to image using DOT
# noinspection PyUnresolvedReferences
subprocess.check_call([
config.DOT_EXECUTABLE,
'-T', 'png',
'-o', out_base + '.png',
out_base + '.dot'
]) | Render a data model diagram
Included in the diagram are all classes from the model registry.
For your project, write a small script that imports all models that you would like to
have included and then calls this function.
.. note:: This function requires the 'dot' executable from the GraphViz package to be installed
and its location configured in your `project_config.py` variable :attr:`DOT_EXECUTABLE`.
Args:
out_base (str): output base path (file endings will be appended) | Below is the the instruction that describes the task:
### Input:
Render a data model diagram
Included in the diagram are all classes from the model registry.
For your project, write a small script that imports all models that you would like to
have included and then calls this function.
.. note:: This function requires the 'dot' executable from the GraphViz package to be installed
and its location configured in your `project_config.py` variable :attr:`DOT_EXECUTABLE`.
Args:
out_base (str): output base path (file endings will be appended)
### Response:
def render_diagram(out_base):
"""Render a data model diagram
Included in the diagram are all classes from the model registry.
For your project, write a small script that imports all models that you would like to
have included and then calls this function.
.. note:: This function requires the 'dot' executable from the GraphViz package to be installed
and its location configured in your `project_config.py` variable :attr:`DOT_EXECUTABLE`.
Args:
out_base (str): output base path (file endings will be appended)
"""
import codecs
import subprocess
import sadisplay
# generate class descriptions
desc = sadisplay.describe(list(model_registry.values()),
show_methods=False,
show_properties=True,
show_indexes=True,
)
# write description in DOT format
with codecs.open(out_base + '.dot', 'w', encoding='utf-8') as f:
f.write(sadisplay.dot(desc))
# check existence of DOT_EXECUTABLE variable and file
if not hasattr(config, 'DOT_EXECUTABLE'):
raise RuntimeError("Please configure the 'DOT_EXECUTABLE' variable in your 'project_config.py'")
if not os.path.exists(config.DOT_EXECUTABLE):
raise IOError("Could not find file pointed to by 'DOT_EXECUTABLE': " + str(config.DOT_EXECUTABLE))
# render to image using DOT
# noinspection PyUnresolvedReferences
subprocess.check_call([
config.DOT_EXECUTABLE,
'-T', 'png',
'-o', out_base + '.png',
out_base + '.dot'
]) |
def _iter_channels(framefile):
"""Yields the name and type of each channel in a GWF file TOC
**Requires:** |LDAStools.frameCPP|_
Parameters
----------
framefile : `str`, `LDAStools.frameCPP.IFrameFStream`
path of GWF file, or open file stream, to read
"""
from LDAStools import frameCPP
if not isinstance(framefile, frameCPP.IFrameFStream):
framefile = open_gwf(framefile, 'r')
toc = framefile.GetTOC()
for typename in ('Sim', 'Proc', 'ADC'):
typen = typename.lower()
for name in getattr(toc, 'Get{0}'.format(typename))():
yield name, typen | Yields the name and type of each channel in a GWF file TOC
**Requires:** |LDAStools.frameCPP|_
Parameters
----------
framefile : `str`, `LDAStools.frameCPP.IFrameFStream`
path of GWF file, or open file stream, to read | Below is the the instruction that describes the task:
### Input:
Yields the name and type of each channel in a GWF file TOC
**Requires:** |LDAStools.frameCPP|_
Parameters
----------
framefile : `str`, `LDAStools.frameCPP.IFrameFStream`
path of GWF file, or open file stream, to read
### Response:
def _iter_channels(framefile):
"""Yields the name and type of each channel in a GWF file TOC
**Requires:** |LDAStools.frameCPP|_
Parameters
----------
framefile : `str`, `LDAStools.frameCPP.IFrameFStream`
path of GWF file, or open file stream, to read
"""
from LDAStools import frameCPP
if not isinstance(framefile, frameCPP.IFrameFStream):
framefile = open_gwf(framefile, 'r')
toc = framefile.GetTOC()
for typename in ('Sim', 'Proc', 'ADC'):
typen = typename.lower()
for name in getattr(toc, 'Get{0}'.format(typename))():
yield name, typen |
def _nac(self, q_direction):
"""nac_term = (A1 (x) A2) / B * coef.
"""
num_atom = self._pcell.get_number_of_atoms()
nac_q = np.zeros((num_atom, num_atom, 3, 3), dtype='double')
if (np.abs(q_direction) < 1e-5).all():
return nac_q
rec_lat = np.linalg.inv(self._pcell.get_cell())
nac_factor = self._dynmat.get_nac_factor()
Z = self._dynmat.get_born_effective_charges()
e = self._dynmat.get_dielectric_constant()
q = np.dot(rec_lat, q_direction)
B = self._B(e, q)
for i in range(num_atom):
A_i = self._A(q, Z, i)
for j in range(num_atom):
A_j = self._A(q, Z, j)
nac_q[i, j] = np.outer(A_i, A_j) / B
num_satom = self._scell.get_number_of_atoms()
N = num_satom // num_atom
return nac_q * nac_factor / N | nac_term = (A1 (x) A2) / B * coef. | Below is the the instruction that describes the task:
### Input:
nac_term = (A1 (x) A2) / B * coef.
### Response:
def _nac(self, q_direction):
"""nac_term = (A1 (x) A2) / B * coef.
"""
num_atom = self._pcell.get_number_of_atoms()
nac_q = np.zeros((num_atom, num_atom, 3, 3), dtype='double')
if (np.abs(q_direction) < 1e-5).all():
return nac_q
rec_lat = np.linalg.inv(self._pcell.get_cell())
nac_factor = self._dynmat.get_nac_factor()
Z = self._dynmat.get_born_effective_charges()
e = self._dynmat.get_dielectric_constant()
q = np.dot(rec_lat, q_direction)
B = self._B(e, q)
for i in range(num_atom):
A_i = self._A(q, Z, i)
for j in range(num_atom):
A_j = self._A(q, Z, j)
nac_q[i, j] = np.outer(A_i, A_j) / B
num_satom = self._scell.get_number_of_atoms()
N = num_satom // num_atom
return nac_q * nac_factor / N |
def _finalise_result_(compound, value, mass):
"""
Convert the value to its final form by unit conversions and multiplying
by mass.
:param compound: Compound object.
:param value: [J/mol] Value to be finalised.
:param mass: [kg] Mass of compound.
:returns: [kWh] Finalised value.
"""
result = value / 3.6E6 # J/x -> kWh/x
result = result / compound.molar_mass # x/mol -> x/kg
result = result * mass # x/kg -> x
return result | Convert the value to its final form by unit conversions and multiplying
by mass.
:param compound: Compound object.
:param value: [J/mol] Value to be finalised.
:param mass: [kg] Mass of compound.
:returns: [kWh] Finalised value. | Below is the the instruction that describes the task:
### Input:
Convert the value to its final form by unit conversions and multiplying
by mass.
:param compound: Compound object.
:param value: [J/mol] Value to be finalised.
:param mass: [kg] Mass of compound.
:returns: [kWh] Finalised value.
### Response:
def _finalise_result_(compound, value, mass):
"""
Convert the value to its final form by unit conversions and multiplying
by mass.
:param compound: Compound object.
:param value: [J/mol] Value to be finalised.
:param mass: [kg] Mass of compound.
:returns: [kWh] Finalised value.
"""
result = value / 3.6E6 # J/x -> kWh/x
result = result / compound.molar_mass # x/mol -> x/kg
result = result * mass # x/kg -> x
return result |
def cross_list_section(self, id, new_course_id):
"""
Cross-list a Section.
Move the Section to another course. The new course may be in a different account (department),
but must belong to the same root account (institution).
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - id
"""ID"""
path["id"] = id
# REQUIRED - PATH - new_course_id
"""ID"""
path["new_course_id"] = new_course_id
self.logger.debug("POST /api/v1/sections/{id}/crosslist/{new_course_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("POST", "/api/v1/sections/{id}/crosslist/{new_course_id}".format(**path), data=data, params=params, single_item=True) | Cross-list a Section.
Move the Section to another course. The new course may be in a different account (department),
but must belong to the same root account (institution). | Below is the the instruction that describes the task:
### Input:
Cross-list a Section.
Move the Section to another course. The new course may be in a different account (department),
but must belong to the same root account (institution).
### Response:
def cross_list_section(self, id, new_course_id):
"""
Cross-list a Section.
Move the Section to another course. The new course may be in a different account (department),
but must belong to the same root account (institution).
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - id
"""ID"""
path["id"] = id
# REQUIRED - PATH - new_course_id
"""ID"""
path["new_course_id"] = new_course_id
self.logger.debug("POST /api/v1/sections/{id}/crosslist/{new_course_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("POST", "/api/v1/sections/{id}/crosslist/{new_course_id}".format(**path), data=data, params=params, single_item=True) |
def intern(self, text):
"""Interns the given Unicode sequence into the symbol table.
Note:
This operation is only valid on local symbol tables.
Args:
text (unicode): The target to intern.
Returns:
SymbolToken: The mapped symbol token which may already exist in the table.
"""
if self.table_type.is_shared:
raise TypeError('Cannot intern on shared symbol table')
if not isinstance(text, six.text_type):
raise TypeError('Cannot intern non-Unicode sequence into symbol table: %r' % text)
token = self.get(text)
if token is None:
token = self.__add_text(text)
return token | Interns the given Unicode sequence into the symbol table.
Note:
This operation is only valid on local symbol tables.
Args:
text (unicode): The target to intern.
Returns:
SymbolToken: The mapped symbol token which may already exist in the table. | Below is the the instruction that describes the task:
### Input:
Interns the given Unicode sequence into the symbol table.
Note:
This operation is only valid on local symbol tables.
Args:
text (unicode): The target to intern.
Returns:
SymbolToken: The mapped symbol token which may already exist in the table.
### Response:
def intern(self, text):
"""Interns the given Unicode sequence into the symbol table.
Note:
This operation is only valid on local symbol tables.
Args:
text (unicode): The target to intern.
Returns:
SymbolToken: The mapped symbol token which may already exist in the table.
"""
if self.table_type.is_shared:
raise TypeError('Cannot intern on shared symbol table')
if not isinstance(text, six.text_type):
raise TypeError('Cannot intern non-Unicode sequence into symbol table: %r' % text)
token = self.get(text)
if token is None:
token = self.__add_text(text)
return token |
def get_subclass_from_module(module, parent_class):
"""
Get a subclass of parent_class from the module at module
get_subclass_from_module performs reflection to find the first class that
extends the parent_class in the module path, and returns it.
"""
try:
r = __recursive_import(module)
member_dict = dict(inspect.getmembers(r))
sprinter_class = parent_class
for v in member_dict.values():
if inspect.isclass(v) and issubclass(v, parent_class) and v != parent_class:
if sprinter_class is parent_class:
sprinter_class = v
if sprinter_class is None:
raise SprinterException("No subclass %s that extends %s exists in classpath!" % (module, str(parent_class)))
return sprinter_class
except ImportError:
e = sys.exc_info()[1]
raise e | Get a subclass of parent_class from the module at module
get_subclass_from_module performs reflection to find the first class that
extends the parent_class in the module path, and returns it. | Below is the the instruction that describes the task:
### Input:
Get a subclass of parent_class from the module at module
get_subclass_from_module performs reflection to find the first class that
extends the parent_class in the module path, and returns it.
### Response:
def get_subclass_from_module(module, parent_class):
"""
Get a subclass of parent_class from the module at module
get_subclass_from_module performs reflection to find the first class that
extends the parent_class in the module path, and returns it.
"""
try:
r = __recursive_import(module)
member_dict = dict(inspect.getmembers(r))
sprinter_class = parent_class
for v in member_dict.values():
if inspect.isclass(v) and issubclass(v, parent_class) and v != parent_class:
if sprinter_class is parent_class:
sprinter_class = v
if sprinter_class is None:
raise SprinterException("No subclass %s that extends %s exists in classpath!" % (module, str(parent_class)))
return sprinter_class
except ImportError:
e = sys.exc_info()[1]
raise e |
def fold_joint_sfs(s, n1, n2):
"""Fold a joint site frequency spectrum.
Parameters
----------
s : array_like, int, shape (m_chromosomes, n_chromosomes)
Joint site frequency spectrum.
n1, n2 : int, optional
The total number of chromosomes called in each population.
Returns
-------
joint_sfs_folded : ndarray, int
Folded joint site frequency spectrum.
"""
# check inputs
s = asarray_ndim(s, 2)
assert s.shape[0] <= n1 + 1, 'invalid number of chromosomes'
assert s.shape[1] <= n2 + 1, 'invalid number of chromosomes'
# need to check s has all entries up to m
if s.shape[0] < n1 + 1:
sm = np.zeros((n1 + 1, s.shape[1]), dtype=s.dtype)
sm[:s.shape[0]] = s
s = sm
# need to check s has all entries up to n
if s.shape[1] < n2 + 1:
sn = np.zeros((s.shape[0], n2 + 1), dtype=s.dtype)
sn[:, :s.shape[1]] = s
s = sn
# fold
mf = (n1 + 1) // 2
nf = (n2 + 1) // 2
n1 = mf * 2
n2 = nf * 2
o = (s[:mf, :nf] + # top left
s[mf:n1, :nf][::-1] + # top right
s[:mf, nf:n2][:, ::-1] + # bottom left
s[mf:n1, nf:n2][::-1, ::-1]) # bottom right
return o | Fold a joint site frequency spectrum.
Parameters
----------
s : array_like, int, shape (m_chromosomes, n_chromosomes)
Joint site frequency spectrum.
n1, n2 : int, optional
The total number of chromosomes called in each population.
Returns
-------
joint_sfs_folded : ndarray, int
Folded joint site frequency spectrum. | Below is the the instruction that describes the task:
### Input:
Fold a joint site frequency spectrum.
Parameters
----------
s : array_like, int, shape (m_chromosomes, n_chromosomes)
Joint site frequency spectrum.
n1, n2 : int, optional
The total number of chromosomes called in each population.
Returns
-------
joint_sfs_folded : ndarray, int
Folded joint site frequency spectrum.
### Response:
def fold_joint_sfs(s, n1, n2):
"""Fold a joint site frequency spectrum.
Parameters
----------
s : array_like, int, shape (m_chromosomes, n_chromosomes)
Joint site frequency spectrum.
n1, n2 : int, optional
The total number of chromosomes called in each population.
Returns
-------
joint_sfs_folded : ndarray, int
Folded joint site frequency spectrum.
"""
# check inputs
s = asarray_ndim(s, 2)
assert s.shape[0] <= n1 + 1, 'invalid number of chromosomes'
assert s.shape[1] <= n2 + 1, 'invalid number of chromosomes'
# need to check s has all entries up to m
if s.shape[0] < n1 + 1:
sm = np.zeros((n1 + 1, s.shape[1]), dtype=s.dtype)
sm[:s.shape[0]] = s
s = sm
# need to check s has all entries up to n
if s.shape[1] < n2 + 1:
sn = np.zeros((s.shape[0], n2 + 1), dtype=s.dtype)
sn[:, :s.shape[1]] = s
s = sn
# fold
mf = (n1 + 1) // 2
nf = (n2 + 1) // 2
n1 = mf * 2
n2 = nf * 2
o = (s[:mf, :nf] + # top left
s[mf:n1, :nf][::-1] + # top right
s[:mf, nf:n2][:, ::-1] + # bottom left
s[mf:n1, nf:n2][::-1, ::-1]) # bottom right
return o |
def _get_sorted_relationships(self, goterm):
"""Traverse GO Terms above the current GO Term. Then add current GO Term to sorted."""
if goterm.id in self.goids_seen:
return
self.goids_seen.add(goterm.id)
for goterm_upper in goterm.get_goterms_upper():
self._get_sorted_relationships(goterm_upper)
self.goterms_sorted.append(goterm) | Traverse GO Terms above the current GO Term. Then add current GO Term to sorted. | Below is the the instruction that describes the task:
### Input:
Traverse GO Terms above the current GO Term. Then add current GO Term to sorted.
### Response:
def _get_sorted_relationships(self, goterm):
"""Traverse GO Terms above the current GO Term. Then add current GO Term to sorted."""
if goterm.id in self.goids_seen:
return
self.goids_seen.add(goterm.id)
for goterm_upper in goterm.get_goterms_upper():
self._get_sorted_relationships(goterm_upper)
self.goterms_sorted.append(goterm) |
def get_variables_substitution_dictionaries(self, lhs_graph, rhs_graph):
"""
Looks for sub-isomorphisms of rhs into lhs
:param lhs_graph: The graph to look sub-isomorphisms into (the bigger graph)
:param rhs_graph: The smaller graph
:return: The list of matching names
"""
if not rhs_graph:
return {}, {}, {}
self.matching_code_container.add_graph_to_namespace(lhs_graph)
self.matching_code_container.add_graph_to_namespace(rhs_graph)
return self.__collect_variables_that_match_graph(lhs_graph, rhs_graph) | Looks for sub-isomorphisms of rhs into lhs
:param lhs_graph: The graph to look sub-isomorphisms into (the bigger graph)
:param rhs_graph: The smaller graph
:return: The list of matching names | Below is the the instruction that describes the task:
### Input:
Looks for sub-isomorphisms of rhs into lhs
:param lhs_graph: The graph to look sub-isomorphisms into (the bigger graph)
:param rhs_graph: The smaller graph
:return: The list of matching names
### Response:
def get_variables_substitution_dictionaries(self, lhs_graph, rhs_graph):
"""
Looks for sub-isomorphisms of rhs into lhs
:param lhs_graph: The graph to look sub-isomorphisms into (the bigger graph)
:param rhs_graph: The smaller graph
:return: The list of matching names
"""
if not rhs_graph:
return {}, {}, {}
self.matching_code_container.add_graph_to_namespace(lhs_graph)
self.matching_code_container.add_graph_to_namespace(rhs_graph)
return self.__collect_variables_that_match_graph(lhs_graph, rhs_graph) |
def _sample_points(X, centers, oversampling_factor, random_state):
r"""
Sample points independently with probability
.. math::
p_x = \frac{\ell \cdot d^2(x, \mathcal{C})}{\phi_X(\mathcal{C})}
"""
# re-implement evaluate_cost here, to avoid redundant computation
distances = pairwise_distances(X, centers).min(1) ** 2
denom = distances.sum()
p = oversampling_factor * distances / denom
draws = random_state.uniform(size=len(p), chunks=p.chunks)
picked = p > draws
new_idxs, = da.where(picked)
return new_idxs | r"""
Sample points independently with probability
.. math::
p_x = \frac{\ell \cdot d^2(x, \mathcal{C})}{\phi_X(\mathcal{C})} | Below is the the instruction that describes the task:
### Input:
r"""
Sample points independently with probability
.. math::
p_x = \frac{\ell \cdot d^2(x, \mathcal{C})}{\phi_X(\mathcal{C})}
### Response:
def _sample_points(X, centers, oversampling_factor, random_state):
r"""
Sample points independently with probability
.. math::
p_x = \frac{\ell \cdot d^2(x, \mathcal{C})}{\phi_X(\mathcal{C})}
"""
# re-implement evaluate_cost here, to avoid redundant computation
distances = pairwise_distances(X, centers).min(1) ** 2
denom = distances.sum()
p = oversampling_factor * distances / denom
draws = random_state.uniform(size=len(p), chunks=p.chunks)
picked = p > draws
new_idxs, = da.where(picked)
return new_idxs |
def handle(self, *args, **options):
"""
Command execution.
"""
self.cursor = connection.cursor()
self.introspection = connection.introspection
self.interactive = options['interactive']
found_missing_fields = False
models = translator.get_registered_models(abstract=False)
for model in models:
db_table = model._meta.db_table
model_name = model._meta.model_name
model_full_name = '%s.%s' % (model._meta.app_label, model_name)
opts = translator.get_options_for_model(model)
for field_name, fields in opts.local_fields.items():
# Take `db_column` attribute into account
try:
field = list(fields)[0]
except IndexError:
# Ignore IndexError for ProxyModel
# maybe there is better way to handle this
continue
column_name = field.db_column if field.db_column else field_name
missing_langs = list(self.get_missing_languages(column_name, db_table))
if missing_langs:
found_missing_fields = True
print_missing_langs(missing_langs, field_name, model_full_name)
sql_sentences = self.get_sync_sql(field_name, missing_langs, model)
execute_sql = ask_for_confirmation(
sql_sentences, model_full_name, self.interactive)
if execute_sql:
print('Executing SQL...')
for sentence in sql_sentences:
self.cursor.execute(sentence)
print('Done')
else:
print('SQL not executed')
if not found_missing_fields:
print('No new translatable fields detected') | Command execution. | Below is the instruction that describes the task:
### Input:
Command execution.
### Response:
def handle(self, *args, **options):
"""
Command execution.
"""
self.cursor = connection.cursor()
self.introspection = connection.introspection
self.interactive = options['interactive']
found_missing_fields = False
models = translator.get_registered_models(abstract=False)
for model in models:
db_table = model._meta.db_table
model_name = model._meta.model_name
model_full_name = '%s.%s' % (model._meta.app_label, model_name)
opts = translator.get_options_for_model(model)
for field_name, fields in opts.local_fields.items():
# Take `db_column` attribute into account
try:
field = list(fields)[0]
except IndexError:
# Ignore IndexError for ProxyModel
# maybe there is better way to handle this
continue
column_name = field.db_column if field.db_column else field_name
missing_langs = list(self.get_missing_languages(column_name, db_table))
if missing_langs:
found_missing_fields = True
print_missing_langs(missing_langs, field_name, model_full_name)
sql_sentences = self.get_sync_sql(field_name, missing_langs, model)
execute_sql = ask_for_confirmation(
sql_sentences, model_full_name, self.interactive)
if execute_sql:
print('Executing SQL...')
for sentence in sql_sentences:
self.cursor.execute(sentence)
print('Done')
else:
print('SQL not executed')
if not found_missing_fields:
print('No new translatable fields detected') |
def _set_any(self, v, load=False):
"""
Setter method for any, mapped from YANG variable /overlay_class_map/cmap_seq/match/any (empty)
If this variable is read-only (config: false) in the
source YANG file, then _set_any is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_any() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="any", rest_name="any", parent=self, choice=(u'overlay-match-ip', u'case-overlay-ip-any'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Any IP Address'}}, namespace='urn:brocade.com:mgmt:brocade-overlay-policy', defining_module='brocade-overlay-policy', yang_type='empty', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """any must be of a type compatible with empty""",
'defined-type': "empty",
'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="any", rest_name="any", parent=self, choice=(u'overlay-match-ip', u'case-overlay-ip-any'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Any IP Address'}}, namespace='urn:brocade.com:mgmt:brocade-overlay-policy', defining_module='brocade-overlay-policy', yang_type='empty', is_config=True)""",
})
self.__any = t
if hasattr(self, '_set'):
self._set() | Setter method for any, mapped from YANG variable /overlay_class_map/cmap_seq/match/any (empty)
If this variable is read-only (config: false) in the
source YANG file, then _set_any is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_any() directly. | Below is the instruction that describes the task:
### Input:
Setter method for any, mapped from YANG variable /overlay_class_map/cmap_seq/match/any (empty)
If this variable is read-only (config: false) in the
source YANG file, then _set_any is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_any() directly.
### Response:
def _set_any(self, v, load=False):
"""
Setter method for any, mapped from YANG variable /overlay_class_map/cmap_seq/match/any (empty)
If this variable is read-only (config: false) in the
source YANG file, then _set_any is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_any() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="any", rest_name="any", parent=self, choice=(u'overlay-match-ip', u'case-overlay-ip-any'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Any IP Address'}}, namespace='urn:brocade.com:mgmt:brocade-overlay-policy', defining_module='brocade-overlay-policy', yang_type='empty', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """any must be of a type compatible with empty""",
'defined-type': "empty",
'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="any", rest_name="any", parent=self, choice=(u'overlay-match-ip', u'case-overlay-ip-any'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Any IP Address'}}, namespace='urn:brocade.com:mgmt:brocade-overlay-policy', defining_module='brocade-overlay-policy', yang_type='empty', is_config=True)""",
})
self.__any = t
if hasattr(self, '_set'):
self._set() |
def _email(name, *, allow_unverified=False):
"""
This decorator is used to turn an e function into an email sending function!
The name parameter is the name of the email we're going to be sending (used to
locate the templates on the file system).
The allow_unverified kwarg flags whether we will send this email to an unverified
email or not. We generally do not want to do this, but some emails are important
enough or have special requirements that require it.
Functions that are decorated by this need to accept two positional arguments, the
first argument is the Pyramid request object, and the second argument is either
a single User, or a list of Users. These users represent the recipients of this
email. Additional keyword arguments are supported, but are not otherwise restricted.
Functions decorated by this must return a mapping of context variables that will
ultimately be returned, but which will also be used to render the templates for
the emails.
Thus this function can decorate functions with a signature like so:
def foo(
request: Request, user_or_users: Union[User, List[User]]
) -> Mapping[str, Any]:
...
Finally, if the email needs to be sent to an address *other* than the user's primary
email address, instead of a User object, a tuple of (User, Email) objects may be
used in place of a User object.
"""
def inner(fn):
@functools.wraps(fn)
def wrapper(request, user_or_users, **kwargs):
if isinstance(user_or_users, (list, set)):
recipients = user_or_users
else:
recipients = [user_or_users]
context = fn(request, user_or_users, **kwargs)
msg = EmailMessage.from_template(name, context, request=request)
for recipient in recipients:
if isinstance(recipient, tuple):
user, email = recipient
else:
user, email = recipient, None
_send_email_to_user(
request, user, msg, email=email, allow_unverified=allow_unverified
)
return context
return wrapper
return inner | This decorator is used to turn an e function into an email sending function!
The name parameter is the name of the email we're going to be sending (used to
locate the templates on the file system).
The allow_unverified kwarg flags whether we will send this email to an unverified
email or not. We generally do not want to do this, but some emails are important
enough or have special requirements that require it.
Functions that are decorated by this need to accept two positional arguments, the
first argument is the Pyramid request object, and the second argument is either
a single User, or a list of Users. These users represent the recipients of this
email. Additional keyword arguments are supported, but are not otherwise restricted.
Functions decorated by this must return a mapping of context variables that will
ultimately be returned, but which will also be used to render the templates for
the emails.
Thus this function can decorate functions with a signature like so:
def foo(
request: Request, user_or_users: Union[User, List[User]]
) -> Mapping[str, Any]:
...
Finally, if the email needs to be sent to an address *other* than the user's primary
email address, instead of a User object, a tuple of (User, Email) objects may be
used in place of a User object. | Below is the instruction that describes the task:
### Input:
This decorator is used to turn an e function into an email sending function!
The name parameter is the name of the email we're going to be sending (used to
locate the templates on the file system).
The allow_unverified kwarg flags whether we will send this email to an unverified
email or not. We generally do not want to do this, but some emails are important
enough or have special requirements that require it.
Functions that are decorated by this need to accept two positional arguments, the
first argument is the Pyramid request object, and the second argument is either
a single User, or a list of Users. These users represent the recipients of this
email. Additional keyword arguments are supported, but are not otherwise restricted.
Functions decorated by this must return a mapping of context variables that will
ultimately be returned, but which will also be used to render the templates for
the emails.
Thus this function can decorate functions with a signature like so:
def foo(
request: Request, user_or_users: Union[User, List[User]]
) -> Mapping[str, Any]:
...
Finally, if the email needs to be sent to an address *other* than the user's primary
email address, instead of a User object, a tuple of (User, Email) objects may be
used in place of a User object.
### Response:
def _email(name, *, allow_unverified=False):
"""
This decorator is used to turn an e function into an email sending function!
The name parameter is the name of the email we're going to be sending (used to
locate the templates on the file system).
The allow_unverified kwarg flags whether we will send this email to an unverified
email or not. We generally do not want to do this, but some emails are important
enough or have special requirements that require it.
Functions that are decorated by this need to accept two positional arguments, the
first argument is the Pyramid request object, and the second argument is either
a single User, or a list of Users. These users represent the recipients of this
email. Additional keyword arguments are supported, but are not otherwise restricted.
Functions decorated by this must return a mapping of context variables that will
ultimately be returned, but which will also be used to render the templates for
the emails.
Thus this function can decorate functions with a signature like so:
def foo(
request: Request, user_or_users: Union[User, List[User]]
) -> Mapping[str, Any]:
...
Finally, if the email needs to be sent to an address *other* than the user's primary
email address, instead of a User object, a tuple of (User, Email) objects may be
used in place of a User object.
"""
def inner(fn):
@functools.wraps(fn)
def wrapper(request, user_or_users, **kwargs):
if isinstance(user_or_users, (list, set)):
recipients = user_or_users
else:
recipients = [user_or_users]
context = fn(request, user_or_users, **kwargs)
msg = EmailMessage.from_template(name, context, request=request)
for recipient in recipients:
if isinstance(recipient, tuple):
user, email = recipient
else:
user, email = recipient, None
_send_email_to_user(
request, user, msg, email=email, allow_unverified=allow_unverified
)
return context
return wrapper
return inner |
def switch_to_frame_with_id(self, frame):
"""Swap Selenium's context to the given frame or iframe."""
elem = world.browser.find_element_by_id(frame)
world.browser.switch_to.frame(elem) | Swap Selenium's context to the given frame or iframe. | Below is the instruction that describes the task:
### Input:
Swap Selenium's context to the given frame or iframe.
### Response:
def switch_to_frame_with_id(self, frame):
"""Swap Selenium's context to the given frame or iframe."""
elem = world.browser.find_element_by_id(frame)
world.browser.switch_to.frame(elem) |
def _get_config(self, section: str, key: str, fallback: str=object()) -> str:
"""
Gets a string config value
:param section: Section
:param key: Key
:param fallback: Optional fallback value
"""
return self._config.get(section, key, fallback=fallback) | Gets a string config value
:param section: Section
:param key: Key
:param fallback: Optional fallback value | Below is the instruction that describes the task:
### Input:
Gets a string config value
:param section: Section
:param key: Key
:param fallback: Optional fallback value
### Response:
def _get_config(self, section: str, key: str, fallback: str=object()) -> str:
"""
Gets a string config value
:param section: Section
:param key: Key
:param fallback: Optional fallback value
"""
return self._config.get(section, key, fallback=fallback) |
def remove_initial_spaces_and_mark_message_lines(lines):
"""
Removes the initial spaces in each line before marking message lines.
This ensures headers can be identified if they are indented with spaces.
"""
i = 0
while i < len(lines):
lines[i] = lines[i].lstrip(' ')
i += 1
return mark_message_lines(lines) | Removes the initial spaces in each line before marking message lines.
This ensures headers can be identified if they are indented with spaces. | Below is the instruction that describes the task:
### Input:
Removes the initial spaces in each line before marking message lines.
This ensures headers can be identified if they are indented with spaces.
### Response:
def remove_initial_spaces_and_mark_message_lines(lines):
"""
Removes the initial spaces in each line before marking message lines.
This ensures headers can be identified if they are indented with spaces.
"""
i = 0
while i < len(lines):
lines[i] = lines[i].lstrip(' ')
i += 1
return mark_message_lines(lines) |
def plot_filters(filters):
'''Create a plot of conv filters, visualized as pixel arrays.'''
imgs = filters.get_value()
N, channels, x, y = imgs.shape
n = int(np.sqrt(N))
assert n * n == N, 'filters must contain a square number of rows!'
assert channels == 1 or channels == 3, 'can only plot grayscale or rgb filters!'
img = np.zeros(((y+1) * n - 1, (x+1) * n - 1, channels), dtype=imgs[0].dtype)
for i, pix in enumerate(imgs):
r, c = divmod(i, n)
img[r * (y+1):(r+1) * (y+1) - 1,
c * (x+1):(c+1) * (x+1) - 1] = pix.transpose((1, 2, 0))
img -= img.min()
img /= img.max()
ax = plt.gcf().add_subplot(111)
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
ax.set_frame_on(False)
ax.imshow(img.squeeze(), cmap=plt.cm.gray) | Create a plot of conv filters, visualized as pixel arrays. | Below is the instruction that describes the task:
### Input:
Create a plot of conv filters, visualized as pixel arrays.
### Response:
def plot_filters(filters):
'''Create a plot of conv filters, visualized as pixel arrays.'''
imgs = filters.get_value()
N, channels, x, y = imgs.shape
n = int(np.sqrt(N))
assert n * n == N, 'filters must contain a square number of rows!'
assert channels == 1 or channels == 3, 'can only plot grayscale or rgb filters!'
img = np.zeros(((y+1) * n - 1, (x+1) * n - 1, channels), dtype=imgs[0].dtype)
for i, pix in enumerate(imgs):
r, c = divmod(i, n)
img[r * (y+1):(r+1) * (y+1) - 1,
c * (x+1):(c+1) * (x+1) - 1] = pix.transpose((1, 2, 0))
img -= img.min()
img /= img.max()
ax = plt.gcf().add_subplot(111)
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
ax.set_frame_on(False)
ax.imshow(img.squeeze(), cmap=plt.cm.gray) |
def write(self, writer=None, encoding='utf-8', indent=0, newline='',
omit_declaration=False, node_depth=0, quote_char='"'):
"""
Serialize this node and its descendants to text, writing
the output to a given *writer* or to stdout.
:param writer: an object such as a file or stream to which XML text
is sent. If *None* text is sent to :attr:`sys.stdout`.
:type writer: a file, stream, etc or None
:param string encoding: the character encoding for serialized text.
:param indent: indentation prefix to apply to descendent nodes for
pretty-printing. The value can take many forms:
- *int*: the number of spaces to indent. 0 means no indent.
- *string*: a literal prefix for indented nodes, such as ``\\t``.
- *bool*: no indent if *False*, four spaces indent if *True*.
- *None*: no indent
:type indent: string, int, bool, or None
:param newline: the string value used to separate lines of output.
The value can take a number of forms:
- *string*: the literal newline value, such as ``\\n`` or ``\\r``.
An empty string means no newline.
- *bool*: no newline if *False*, ``\\n`` newline if *True*.
- *None*: no newline.
:type newline: string, bool, or None
:param boolean omit_declaration: if *True* the XML declaration header
is omitted, otherwise it is included. Note that the declaration is
only output when serializing an :class:`xml4h.nodes.Document` node.
:param int node_depth: the indentation level to start at, such as 2 to
indent output as if the given *node* has two ancestors.
This parameter will only be useful if you need to output XML text
fragments that can be assembled into a document. This parameter
has no effect unless indentation is applied.
:param string quote_char: the character that delimits quoted content.
You should never need to mess with this.
Delegates to :func:`xml4h.writer.write_node` applied to this node.
"""
xml4h.write_node(self,
writer=writer, encoding=encoding, indent=indent,
newline=newline, omit_declaration=omit_declaration,
node_depth=node_depth, quote_char=quote_char) | Serialize this node and its descendants to text, writing
the output to a given *writer* or to stdout.
:param writer: an object such as a file or stream to which XML text
is sent. If *None* text is sent to :attr:`sys.stdout`.
:type writer: a file, stream, etc or None
:param string encoding: the character encoding for serialized text.
:param indent: indentation prefix to apply to descendent nodes for
pretty-printing. The value can take many forms:
- *int*: the number of spaces to indent. 0 means no indent.
- *string*: a literal prefix for indented nodes, such as ``\\t``.
- *bool*: no indent if *False*, four spaces indent if *True*.
- *None*: no indent
:type indent: string, int, bool, or None
:param newline: the string value used to separate lines of output.
The value can take a number of forms:
- *string*: the literal newline value, such as ``\\n`` or ``\\r``.
An empty string means no newline.
- *bool*: no newline if *False*, ``\\n`` newline if *True*.
- *None*: no newline.
:type newline: string, bool, or None
:param boolean omit_declaration: if *True* the XML declaration header
is omitted, otherwise it is included. Note that the declaration is
only output when serializing an :class:`xml4h.nodes.Document` node.
:param int node_depth: the indentation level to start at, such as 2 to
indent output as if the given *node* has two ancestors.
This parameter will only be useful if you need to output XML text
fragments that can be assembled into a document. This parameter
has no effect unless indentation is applied.
:param string quote_char: the character that delimits quoted content.
You should never need to mess with this.
Delegates to :func:`xml4h.writer.write_node` applied to this node. | Below is the instruction that describes the task:
### Input:
Serialize this node and its descendants to text, writing
the output to a given *writer* or to stdout.
:param writer: an object such as a file or stream to which XML text
is sent. If *None* text is sent to :attr:`sys.stdout`.
:type writer: a file, stream, etc or None
:param string encoding: the character encoding for serialized text.
:param indent: indentation prefix to apply to descendent nodes for
pretty-printing. The value can take many forms:
- *int*: the number of spaces to indent. 0 means no indent.
- *string*: a literal prefix for indented nodes, such as ``\\t``.
- *bool*: no indent if *False*, four spaces indent if *True*.
- *None*: no indent
:type indent: string, int, bool, or None
:param newline: the string value used to separate lines of output.
The value can take a number of forms:
- *string*: the literal newline value, such as ``\\n`` or ``\\r``.
An empty string means no newline.
- *bool*: no newline if *False*, ``\\n`` newline if *True*.
- *None*: no newline.
:type newline: string, bool, or None
:param boolean omit_declaration: if *True* the XML declaration header
is omitted, otherwise it is included. Note that the declaration is
only output when serializing an :class:`xml4h.nodes.Document` node.
:param int node_depth: the indentation level to start at, such as 2 to
indent output as if the given *node* has two ancestors.
This parameter will only be useful if you need to output XML text
fragments that can be assembled into a document. This parameter
has no effect unless indentation is applied.
:param string quote_char: the character that delimits quoted content.
You should never need to mess with this.
Delegates to :func:`xml4h.writer.write_node` applied to this node.
### Response:
def write(self, writer=None, encoding='utf-8', indent=0, newline='',
omit_declaration=False, node_depth=0, quote_char='"'):
"""
Serialize this node and its descendants to text, writing
the output to a given *writer* or to stdout.
:param writer: an object such as a file or stream to which XML text
is sent. If *None* text is sent to :attr:`sys.stdout`.
:type writer: a file, stream, etc or None
:param string encoding: the character encoding for serialized text.
:param indent: indentation prefix to apply to descendent nodes for
pretty-printing. The value can take many forms:
- *int*: the number of spaces to indent. 0 means no indent.
- *string*: a literal prefix for indented nodes, such as ``\\t``.
- *bool*: no indent if *False*, four spaces indent if *True*.
- *None*: no indent
:type indent: string, int, bool, or None
:param newline: the string value used to separate lines of output.
The value can take a number of forms:
- *string*: the literal newline value, such as ``\\n`` or ``\\r``.
An empty string means no newline.
- *bool*: no newline if *False*, ``\\n`` newline if *True*.
- *None*: no newline.
:type newline: string, bool, or None
:param boolean omit_declaration: if *True* the XML declaration header
is omitted, otherwise it is included. Note that the declaration is
only output when serializing an :class:`xml4h.nodes.Document` node.
:param int node_depth: the indentation level to start at, such as 2 to
indent output as if the given *node* has two ancestors.
This parameter will only be useful if you need to output XML text
fragments that can be assembled into a document. This parameter
has no effect unless indentation is applied.
:param string quote_char: the character that delimits quoted content.
You should never need to mess with this.
Delegates to :func:`xml4h.writer.write_node` applied to this node.
"""
xml4h.write_node(self,
writer=writer, encoding=encoding, indent=indent,
newline=newline, omit_declaration=omit_declaration,
node_depth=node_depth, quote_char=quote_char) |
def _tune(self, args):
"""
propose connection tuning parameters
This method proposes a set of connection configuration values
to the client. The client can accept and/or adjust these.
PARAMETERS:
channel_max: short
proposed maximum channels
The maximum total number of channels that the server
allows per connection. Zero means that the server does
not impose a fixed limit, but the number of allowed
channels may be limited by available server resources.
frame_max: long
proposed maximum frame size
The largest frame size that the server proposes for
the connection. The client can negotiate a lower
value. Zero means that the server does not impose any
specific limit but may reject very large frames if it
cannot allocate resources for them.
RULE:
Until the frame-max has been negotiated, both
peers MUST accept frames of up to 4096 octets
large. The minimum non-zero value for the frame-
max field is 4096.
heartbeat: short
desired heartbeat delay
The delay, in seconds, of the connection heartbeat
that the server wants. Zero means the server does not
want a heartbeat.
"""
self.channel_max = args.read_short() or self.channel_max
self.frame_max = args.read_long() or self.frame_max
self.method_writer.frame_max = self.frame_max
self.heartbeat = args.read_short()
self._x_tune_ok(self.channel_max, self.frame_max, 0) | propose connection tuning parameters
This method proposes a set of connection configuration values
to the client. The client can accept and/or adjust these.
PARAMETERS:
channel_max: short
proposed maximum channels
The maximum total number of channels that the server
allows per connection. Zero means that the server does
not impose a fixed limit, but the number of allowed
channels may be limited by available server resources.
frame_max: long
proposed maximum frame size
The largest frame size that the server proposes for
the connection. The client can negotiate a lower
value. Zero means that the server does not impose any
specific limit but may reject very large frames if it
cannot allocate resources for them.
RULE:
Until the frame-max has been negotiated, both
peers MUST accept frames of up to 4096 octets
large. The minimum non-zero value for the frame-
max field is 4096.
heartbeat: short
desired heartbeat delay
The delay, in seconds, of the connection heartbeat
that the server wants. Zero means the server does not
want a heartbeat. | Below is the instruction that describes the task:
### Input:
propose connection tuning parameters
This method proposes a set of connection configuration values
to the client. The client can accept and/or adjust these.
PARAMETERS:
channel_max: short
proposed maximum channels
The maximum total number of channels that the server
allows per connection. Zero means that the server does
not impose a fixed limit, but the number of allowed
channels may be limited by available server resources.
frame_max: long
proposed maximum frame size
The largest frame size that the server proposes for
the connection. The client can negotiate a lower
value. Zero means that the server does not impose any
specific limit but may reject very large frames if it
cannot allocate resources for them.
RULE:
Until the frame-max has been negotiated, both
peers MUST accept frames of up to 4096 octets
large. The minimum non-zero value for the frame-
max field is 4096.
heartbeat: short
desired heartbeat delay
The delay, in seconds, of the connection heartbeat
that the server wants. Zero means the server does not
want a heartbeat.
### Response:
def _tune(self, args):
"""
propose connection tuning parameters
This method proposes a set of connection configuration values
to the client. The client can accept and/or adjust these.
PARAMETERS:
channel_max: short
proposed maximum channels
The maximum total number of channels that the server
allows per connection. Zero means that the server does
not impose a fixed limit, but the number of allowed
channels may be limited by available server resources.
frame_max: long
proposed maximum frame size
The largest frame size that the server proposes for
the connection. The client can negotiate a lower
value. Zero means that the server does not impose any
specific limit but may reject very large frames if it
cannot allocate resources for them.
RULE:
Until the frame-max has been negotiated, both
peers MUST accept frames of up to 4096 octets
large. The minimum non-zero value for the frame-
max field is 4096.
heartbeat: short
desired heartbeat delay
The delay, in seconds, of the connection heartbeat
that the server wants. Zero means the server does not
want a heartbeat.
"""
self.channel_max = args.read_short() or self.channel_max
self.frame_max = args.read_long() or self.frame_max
self.method_writer.frame_max = self.frame_max
self.heartbeat = args.read_short()
self._x_tune_ok(self.channel_max, self.frame_max, 0) |
def setup_stanza_handlers(self, handler_objects, usage_restriction):
"""Install stanza handlers provided by `handler_objects`"""
# pylint: disable=W0212
iq_handlers = {"get": {}, "set": {}}
message_handlers = []
presence_handlers = []
for obj in handler_objects:
if not isinstance(obj, XMPPFeatureHandler):
continue
obj.stanza_processor = self
for dummy, handler in inspect.getmembers(obj, callable):
if not hasattr(handler, "_pyxmpp_stanza_handled"):
continue
element_name, stanza_type = handler._pyxmpp_stanza_handled
restr = handler._pyxmpp_usage_restriction
if restr and restr != usage_restriction:
continue
if element_name == "iq":
payload_class = handler._pyxmpp_payload_class_handled
payload_key = handler._pyxmpp_payload_key
if (payload_class, payload_key) in iq_handlers[stanza_type]:
continue
iq_handlers[stanza_type][(payload_class, payload_key)] = \
handler
continue
elif element_name == "message":
handler_list = message_handlers
elif element_name == "presence":
handler_list = presence_handlers
else:
raise ValueError, "Bad handler decoration"
handler_list.append(handler)
with self.lock:
self._iq_handlers = iq_handlers
self._presence_handlers = presence_handlers
self._message_handlers = message_handlers | Install stanza handlers provided by `handler_objects` | Below is the instruction that describes the task:
### Input:
Install stanza handlers provided by `handler_objects`
### Response:
def setup_stanza_handlers(self, handler_objects, usage_restriction):
"""Install stanza handlers provided by `handler_objects`"""
# pylint: disable=W0212
iq_handlers = {"get": {}, "set": {}}
message_handlers = []
presence_handlers = []
for obj in handler_objects:
if not isinstance(obj, XMPPFeatureHandler):
continue
obj.stanza_processor = self
for dummy, handler in inspect.getmembers(obj, callable):
if not hasattr(handler, "_pyxmpp_stanza_handled"):
continue
element_name, stanza_type = handler._pyxmpp_stanza_handled
restr = handler._pyxmpp_usage_restriction
if restr and restr != usage_restriction:
continue
if element_name == "iq":
payload_class = handler._pyxmpp_payload_class_handled
payload_key = handler._pyxmpp_payload_key
if (payload_class, payload_key) in iq_handlers[stanza_type]:
continue
iq_handlers[stanza_type][(payload_class, payload_key)] = \
handler
continue
elif element_name == "message":
handler_list = message_handlers
elif element_name == "presence":
handler_list = presence_handlers
else:
raise ValueError, "Bad handler decoration"
handler_list.append(handler)
with self.lock:
self._iq_handlers = iq_handlers
self._presence_handlers = presence_handlers
self._message_handlers = message_handlers |
def generate_pws_in_order(self, n, filter_func=None, N_max=1e6):
"""
Generates passwords in order between upto N_max
@N_max is the maximum size of the priority queue will be tolerated,
so if the size of the queue is bigger than 1.5 * N_max, it will shrink
the size to 0.75 * N_max
@n is the number of password to generate.
**This function is expensive, and shuold be called only if necessary.
Cache its call as much as possible**
# TODO: Need to recheck how to make sure this is working.
"""
# assert alpha < beta, 'alpha={} must be less than beta={}'.format(alpha, beta)
states = [(-1.0, helper.START)]
# get the topk first
p_min = 1e-9 / (n**2) # max 1 million entries in the heap
ret = []
done = set()
already_added_in_heap = set()
while len(ret) < n and len(states) > 0:
# while n > 0 and len(states) > 0:
p, s = heapq.heappop(states)
if p < 0:
p = -p
if s in done: continue
assert s[0] == helper.START, "Broken s: {!r}".format(s)
if s[-1] == helper.END:
done.add(s)
clean_s = s[1:-1]
if filter_func is None or filter_func(clean_s):
ret.append((clean_s, p))
# n -= 1
# yield (clean_s, p)
else:
for c, f in self._get_next(s).items():
if (f*p < p_min or (s+c) in done or
(s+c) in already_added_in_heap):
continue
already_added_in_heap.add(s+c)
heapq.heappush(states, (-f*p, s+c))
if len(states) > N_max * 3 / 2:
print("Heap size: {}. ret={}. (expected: {}) s={!r}"
.format(len(states), len(ret), n, s))
print("The size of states={}. Still need={} pws. Truncating"
.format(len(states), n - len(ret)))
states = heapq.nsmallest(int(N_max * 3/4), states)
print("Done")
return ret | Generates passwords in order between upto N_max
@N_max is the maximum size of the priority queue will be tolerated,
so if the size of the queue is bigger than 1.5 * N_max, it will shrink
the size to 0.75 * N_max
@n is the number of password to generate.
**This function is expensive, and should be called only if necessary.
Cache its call as much as possible**
# TODO: Need to recheck how to make sure this is working. | Below is the instruction that describes the task:
### Input:
Generates passwords in order between upto N_max
@N_max is the maximum size of the priority queue will be tolerated,
so if the size of the queue is bigger than 1.5 * N_max, it will shrink
the size to 0.75 * N_max
@n is the number of password to generate.
**This function is expensive, and should be called only if necessary.
Cache its call as much as possible**
# TODO: Need to recheck how to make sure this is working.
### Response:
def generate_pws_in_order(self, n, filter_func=None, N_max=1e6):
    """
    Generates the n most probable passwords, in decreasing probability
    order, via a best-first search over the model's states.
    @N_max is the maximum size of the priority queue that will be
    tolerated, so if the size of the queue is bigger than 1.5 * N_max,
    it will shrink the size to 0.75 * N_max
    @n is the number of passwords to generate.
    @filter_func optional predicate applied to each finished password;
    only passwords for which it returns a truthy value are kept.
    **This function is expensive, and should be called only if necessary.
    Cache its call as much as possible**
    # TODO: Need to recheck how to make sure this is working.
    """
    # assert alpha < beta, 'alpha={} must be less than beta={}'.format(alpha, beta)
    # Probabilities are stored negated so Python's min-heap pops the
    # most probable partial password first.
    states = [(-1.0, helper.START)]
    # get the topk first
    # Prune branches whose probability is far too small to matter.
    p_min = 1e-9 / (n**2) # max 1 million entries in the heap
    ret = []
    done = set()
    already_added_in_heap = set()
    while len(ret) < n and len(states) > 0:
        # while n > 0 and len(states) > 0:
        p, s = heapq.heappop(states)
        # Undo the negation applied when the state was pushed.
        if p < 0:
            p = -p
        if s in done: continue
        assert s[0] == helper.START, "Broken s: {!r}".format(s)
        if s[-1] == helper.END:
            # Complete password: strip the START/END markers and keep it
            # if it passes the optional filter.
            done.add(s)
            clean_s = s[1:-1]
            if filter_func is None or filter_func(clean_s):
                ret.append((clean_s, p))
            # n -= 1
            # yield (clean_s, p)
        else:
            # Expand the partial password by every possible next symbol.
            for c, f in self._get_next(s).items():
                if (f*p < p_min or (s+c) in done or
                        (s+c) in already_added_in_heap):
                    continue
                already_added_in_heap.add(s+c)
                heapq.heappush(states, (-f*p, s+c))
            # Keep the heap bounded: once it exceeds 1.5 * N_max, shrink
            # it to the 0.75 * N_max most promising entries.
            if len(states) > N_max * 3 / 2:
                print("Heap size: {}. ret={}. (expected: {}) s={!r}"
                      .format(len(states), len(ret), n, s))
                print("The size of states={}. Still need={} pws. Truncating"
                      .format(len(states), n - len(ret)))
                states = heapq.nsmallest(int(N_max * 3/4), states)
    print("Done")
    return ret
def pix2sky(self, pixel):
"""
Get the sky coordinates for a given image pixel.
Parameters
----------
pixel : (float, float)
Image coordinates.
Returns
-------
ra,dec : float
Sky coordinates (degrees)
"""
pixbox = numpy.array([pixel, pixel])
skybox = self.wcs.all_pix2world(pixbox, 1)
return [float(skybox[0][0]), float(skybox[0][1])] | Get the sky coordinates for a given image pixel.
Parameters
----------
pixel : (float, float)
Image coordinates.
Returns
-------
ra,dec : float
Sky coordinates (degrees) | Below is the instruction that describes the task:
### Input:
Get the sky coordinates for a given image pixel.
Parameters
----------
pixel : (float, float)
Image coordinates.
Returns
-------
ra,dec : float
Sky coordinates (degrees)
### Response:
def pix2sky(self, pixel):
    """Convert an image pixel position to sky coordinates.

    Parameters
    ----------
    pixel : (float, float)
        Image coordinates.

    Returns
    -------
    ra,dec : float
        Sky coordinates (degrees)
    """
    # all_pix2world expects an array of coordinate pairs, so duplicate
    # the single pixel into a two-row box and read back the first row.
    pixel_box = numpy.array([pixel, pixel])
    world_box = self.wcs.all_pix2world(pixel_box, 1)
    ra, dec = world_box[0][0], world_box[0][1]
    return [float(ra), float(dec)]
def synset(self, synset_repr):
'''
Looks up a synset in GermaNet using its string representation.
Arguments:
- `synset_repr`: a unicode string containing the lemma, part
of speech, and sense number of the first lemma of the synset
>>> gn.synset(u'funktionieren.v.2')
Synset(funktionieren.v.2)
'''
parts = synset_repr.split('.')
if len(parts) != 3:
return None
lemma, pos, sensenum = parts
if not sensenum.isdigit() or pos not in SHORT_POS_TO_LONG:
return None
sensenum = int(sensenum, 10)
pos = SHORT_POS_TO_LONG[pos]
lemma_dict = self._mongo_db.lexunits.find_one({'orthForm': lemma,
'category': pos,
'sense': sensenum})
if lemma_dict:
return Lemma(self, lemma_dict).synset | Looks up a synset in GermaNet using its string representation.
Arguments:
- `synset_repr`: a unicode string containing the lemma, part
of speech, and sense number of the first lemma of the synset
>>> gn.synset(u'funktionieren.v.2')
Synset(funktionieren.v.2) | Below is the the instruction that describes the task:
### Input:
Looks up a synset in GermaNet using its string representation.
Arguments:
- `synset_repr`: a unicode string containing the lemma, part
of speech, and sense number of the first lemma of the synset
>>> gn.synset(u'funktionieren.v.2')
Synset(funktionieren.v.2)
### Response:
def synset(self, synset_repr):
    '''
    Looks up a synset in GermaNet using its string representation.

    Returns None if `synset_repr` is malformed, the POS tag is unknown,
    or no matching lexical unit exists (the final `if` falls through
    without an explicit return).

    Arguments:
    - `synset_repr`: a unicode string containing the lemma, part
      of speech, and sense number of the first lemma of the synset

    >>> gn.synset(u'funktionieren.v.2')
    Synset(funktionieren.v.2)
    '''
    # Expect exactly "<lemma>.<pos>.<sense>".
    parts = synset_repr.split('.')
    if len(parts) != 3:
        return None
    lemma, pos, sensenum = parts
    # Sense must be numeric and the POS must be a known short code.
    if not sensenum.isdigit() or pos not in SHORT_POS_TO_LONG:
        return None
    sensenum = int(sensenum, 10)
    pos = SHORT_POS_TO_LONG[pos]
    # find_one returns None when no lexical unit matches.
    lemma_dict = self._mongo_db.lexunits.find_one({'orthForm': lemma,
                                                   'category': pos,
                                                   'sense': sensenum})
    if lemma_dict:
        return Lemma(self, lemma_dict).synset
def _read_msg(self):
"""read message from server"""
#
# NOTE:
# '_recv_socket(nbytes)' was implemented as
# 'socket.recv(nbytes, socket.MSG_WAITALL)'
# but socket.MSG_WAITALL proved not reliable
#
def _recv_socket(nbytes):
"""read nbytes bytes from self.socket"""
#
# code below is written under the assumption that
# 'nbytes' is smallish so that the 'while len(buf) < nbytes' loop
# is entered rarerly
#
try:
buf = self.socket.recv(nbytes)
except IOError as err:
raise ConnError(*err.args)
if not buf:
raise ShortRead(0, nbytes)
while len(buf) < nbytes:
try:
tmp = self.socket.recv(nbytes - len(buf))
except IOError as err:
raise ConnError(*err.args)
if not tmp:
if self.verbose:
print('ee', repr(buf))
raise ShortRead(len(buf), nbytes)
buf += tmp
assert len(buf) == nbytes, (buf, len(buf), nbytes)
return buf
data = _recv_socket(_FromServerHeader.header_size)
header = _FromServerHeader(data)
if self.verbose:
print('<-', repr(header))
# error conditions
if header.version != 0:
raise MalformedHeader('bad version', header)
if header.payload > MAX_PAYLOAD:
raise MalformedHeader('huge payload, unwilling to read', header)
if header.payload > 0:
payload = _recv_socket(header.payload)
if self.verbose:
print('..', repr(payload))
assert header.size <= header.payload
payload = payload[:header.size]
else:
payload = bytes()
return header, payload | read message from server | Below is the the instruction that describes the task:
### Input:
read message from server
### Response:
def _read_msg(self):
    """Read one framed message from the server.

    Returns a ``(header, payload)`` tuple, where ``header`` is a parsed
    ``_FromServerHeader`` and ``payload`` is truncated to
    ``header.size`` bytes.

    Raises ConnError on socket errors, ShortRead on premature EOF, and
    MalformedHeader on an unsupported version or oversized payload.
    """
    #
    # NOTE:
    # '_recv_socket(nbytes)' was implemented as
    # 'socket.recv(nbytes, socket.MSG_WAITALL)'
    # but socket.MSG_WAITALL proved not reliable
    #
    def _recv_socket(nbytes):
        """Read exactly nbytes bytes from self.socket."""
        #
        # code below is written under the assumption that
        # 'nbytes' is smallish so that the 'while len(buf) < nbytes' loop
        # is entered rarely
        #
        try:
            buf = self.socket.recv(nbytes)
        except IOError as err:
            raise ConnError(*err.args)
        if not buf:
            # Empty first read means the peer closed the connection.
            raise ShortRead(0, nbytes)
        while len(buf) < nbytes:
            try:
                tmp = self.socket.recv(nbytes - len(buf))
            except IOError as err:
                raise ConnError(*err.args)
            if not tmp:
                if self.verbose:
                    print('ee', repr(buf))
                raise ShortRead(len(buf), nbytes)
            buf += tmp
        assert len(buf) == nbytes, (buf, len(buf), nbytes)
        return buf
    data = _recv_socket(_FromServerHeader.header_size)
    header = _FromServerHeader(data)
    if self.verbose:
        print('<-', repr(header))
    # error conditions
    if header.version != 0:
        raise MalformedHeader('bad version', header)
    if header.payload > MAX_PAYLOAD:
        raise MalformedHeader('huge payload, unwilling to read', header)
    if header.payload > 0:
        payload = _recv_socket(header.payload)
        if self.verbose:
            print('..', repr(payload))
        # The wire payload may be padded; only the first `size` bytes
        # are meaningful.
        assert header.size <= header.payload
        payload = payload[:header.size]
    else:
        payload = bytes()
    return header, payload
def predict(self, a, b):
""" Compute the test statistic
Args:
a (array-like): Variable 1
b (array-like): Variable 2
Returns:
float: test statistic
"""
a = np.array(a).reshape((-1, 1))
b = np.array(b).reshape((-1, 1))
return (mutual_info_regression(a, b.reshape((-1,))) + mutual_info_regression(b, a.reshape((-1,))))/2 | Compute the test statistic
Args:
a (array-like): Variable 1
b (array-like): Variable 2
Returns:
float: test statistic | Below is the the instruction that describes the task:
### Input:
Compute the test statistic
Args:
a (array-like): Variable 1
b (array-like): Variable 2
Returns:
float: test statistic
### Response:
def predict(self, a, b):
    """Compute the test statistic.

    Args:
        a (array-like): Variable 1
        b (array-like): Variable 2

    Returns:
        float: test statistic
    """
    # Reshape both variables into single-feature column vectors.
    col_a = np.array(a).reshape((-1, 1))
    col_b = np.array(b).reshape((-1, 1))
    # Mutual information estimation is not symmetric in practice, so
    # average the estimate computed in both directions.
    forward = mutual_info_regression(col_a, col_b.reshape((-1,)))
    backward = mutual_info_regression(col_b, col_a.reshape((-1,)))
    return (forward + backward) / 2
def statistics(self, elapsed, result):
"""
Return output for the combined time and result summary statistics.
"""
return "\n".join((self.timing(elapsed), self.result_summary(result))) | Return output for the combined time and result summary statistics. | Below is the the instruction that describes the task:
### Input:
Return output for the combined time and result summary statistics.
### Response:
def statistics(self, elapsed, result):
    """
    Return output for the combined time and result summary statistics.
    """
    # One line for the timing report, one for the result summary.
    report_lines = (self.timing(elapsed), self.result_summary(result))
    return "\n".join(report_lines)
def unmanaged_cpcs(self):
"""
:class:`~zhmcclient.UnmanagedCpcManager`: Access to the unmanaged
:term:`CPCs <CPC>` in this Console.
"""
# We do here some lazy loading.
if not self._unmanaged_cpcs:
self._unmanaged_cpcs = UnmanagedCpcManager(self)
return self._unmanaged_cpcs | :class:`~zhmcclient.UnmanagedCpcManager`: Access to the unmanaged
:term:`CPCs <CPC>` in this Console. | Below is the the instruction that describes the task:
### Input:
:class:`~zhmcclient.UnmanagedCpcManager`: Access to the unmanaged
:term:`CPCs <CPC>` in this Console.
### Response:
def unmanaged_cpcs(self):
    """
    :class:`~zhmcclient.UnmanagedCpcManager`: Access to the unmanaged
    :term:`CPCs <CPC>` in this Console.
    """
    # We do here some lazy loading.
    # Compare against None explicitly: the previous truthiness check
    # would silently re-create the manager on every access if the cached
    # manager object ever evaluated as falsy.
    if self._unmanaged_cpcs is None:
        self._unmanaged_cpcs = UnmanagedCpcManager(self)
    return self._unmanaged_cpcs
def t_ccomment_close(self, t):
r'\*\/'
t.lexer.ccomment_level -= 1
if t.lexer.ccomment_level == 0:
t.value = t.lexer.lexdata[t.lexer.code_start:t.lexer.lexpos + 1 - 3]
t.type = "CCOMMENT"
t.lexer.lineno += t.value.count('\n')
t.lexer.begin('INITIAL')
return t | r'\*\/ | Below is the the instruction that describes the task:
### Input:
r'\*\/
### Response:
def t_ccomment_close(self, t):
    r'\*\/'
    # NOTE: the docstring above IS the PLY token regex for '*/' -- PLY
    # reads the pattern from there, so it must not be edited.
    # Track comment nesting: only the outermost '*/' ends the comment.
    t.lexer.ccomment_level -= 1
    if t.lexer.ccomment_level == 0:
        # Slice out the comment body accumulated since the opening '/*';
        # the '+ 1 - 3' arithmetic trims the trailing '*/' characters.
        t.value = t.lexer.lexdata[t.lexer.code_start:t.lexer.lexpos + 1 - 3]
        t.type = "CCOMMENT"
        # Keep line numbering in sync with newlines inside the comment.
        t.lexer.lineno += t.value.count('\n')
        t.lexer.begin('INITIAL')
        return t
def sample_out_dir(self):
"""Absolute path to permanent location in working directory
where EricScript output for the current sample will be stored.
(a subdirectory of `output_dir`)
"""
if self._sample_out_dir is None:
self._sample_out_dir = os.path.join(
self.output_dir, self._sample_name
)
return self._sample_out_dir | Absolute path to permanent location in working directory
where EricScript output for the current sample will be stored.
(a subdirectory of `output_dir`) | Below is the the instruction that describes the task:
### Input:
Absolute path to permanent location in working directory
where EricScript output for the current sample will be stored.
(a subdirectory of `output_dir`)
### Response:
def sample_out_dir(self):
    """Absolute path to permanent location in working directory
    where EricScript output for the current sample will be stored.
    (a subdirectory of `output_dir`)
    """
    # Lazily compute the per-sample directory once, then reuse it.
    if self._sample_out_dir is None:
        sample_dir = os.path.join(self.output_dir, self._sample_name)
        self._sample_out_dir = sample_dir
    return self._sample_out_dir
async def start(self):
"""Start process execution."""
# Workaround for pylint issue #1469
# (https://github.com/PyCQA/pylint/issues/1469).
self.proc = await subprocess.create_subprocess_exec( # pylint: disable=no-member
*shlex.split(self.command),
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT
)
self.stdout = self.proc.stdout
return self.proc.pid | Start process execution. | Below is the the instruction that describes the task:
### Input:
Start process execution.
### Response:
async def start(self):
    """Start process execution.

    Launches ``self.command`` as a subprocess with stdin/stdout piped
    and stderr merged into stdout, and exposes the output stream as
    ``self.stdout``.

    :return: the PID of the started process.
    """
    # Workaround for pylint issue #1469
    # (https://github.com/PyCQA/pylint/issues/1469).
    self.proc = await subprocess.create_subprocess_exec(  # pylint: disable=no-member
        *shlex.split(self.command),
        stdin=subprocess.PIPE, stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT
    )
    self.stdout = self.proc.stdout
    return self.proc.pid
def list(self, virtual_host='/', show_all=False):
"""List Queues.
:param str virtual_host: Virtual host name
:param bool show_all: List all Queues
:raises ApiError: Raises if the remote server encountered an error.
:raises ApiConnectionError: Raises if there was a connectivity issue.
:rtype: list
"""
if show_all:
return self.http_client.get(API_QUEUES)
virtual_host = quote(virtual_host, '')
return self.http_client.get(
API_QUEUES_VIRTUAL_HOST % virtual_host
) | List Queues.
:param str virtual_host: Virtual host name
:param bool show_all: List all Queues
:raises ApiError: Raises if the remote server encountered an error.
:raises ApiConnectionError: Raises if there was a connectivity issue.
:rtype: list | Below is the the instruction that describes the task:
### Input:
List Queues.
:param str virtual_host: Virtual host name
:param bool show_all: List all Queues
:raises ApiError: Raises if the remote server encountered an error.
:raises ApiConnectionError: Raises if there was a connectivity issue.
:rtype: list
### Response:
def list(self, virtual_host='/', show_all=False):
    """List Queues.

    :param str virtual_host: Virtual host name
    :param bool show_all: List all Queues

    :raises ApiError: Raises if the remote server encountered an error.
    :raises ApiConnectionError: Raises if there was a connectivity issue.

    :rtype: list
    """
    if show_all:
        # Ignore the virtual host and list queues across all of them.
        return self.http_client.get(API_QUEUES)
    # URL-encode the vhost (e.g. '/' becomes '%2F') before interpolation.
    encoded_vhost = quote(virtual_host, '')
    return self.http_client.get(API_QUEUES_VIRTUAL_HOST % encoded_vhost)
def _choose_what_to_display(self, force_refresh=False):
"""
Choose what combination to display on the bar.
By default we try to display the active layout on the first run, else
we display the last selected combination.
"""
for _ in range(len(self.available_combinations)):
if (
self.displayed is None
and self.available_combinations[0] == self.active_layout
):
self.displayed = self.available_combinations[0]
break
else:
if self.displayed == self.available_combinations[0]:
break
else:
self.available_combinations.rotate(1)
else:
if force_refresh:
self.displayed = self.available_combinations[0]
else:
self.py3.log('xrandr error="displayed combination is not available"') | Choose what combination to display on the bar.
By default we try to display the active layout on the first run, else
we display the last selected combination. | Below is the the instruction that describes the task:
### Input:
Choose what combination to display on the bar.
By default we try to display the active layout on the first run, else
we display the last selected combination.
### Response:
def _choose_what_to_display(self, force_refresh=False):
    """
    Choose what combination to display on the bar.

    By default we try to display the active layout on the first run, else
    we display the last selected combination.

    :param force_refresh: when True and the previously displayed
        combination is no longer available, fall back to the first
        available combination instead of logging an error.
    """
    # Rotate the deque of available combinations until the one we want
    # sits at index 0; the for/else fires only when no match was found.
    for _ in range(len(self.available_combinations)):
        if (
            self.displayed is None
            and self.available_combinations[0] == self.active_layout
        ):
            # First run: pin the currently active layout.
            self.displayed = self.available_combinations[0]
            break
        else:
            if self.displayed == self.available_combinations[0]:
                # Found the previously selected combination.
                break
            else:
                self.available_combinations.rotate(1)
    else:
        # Loop exhausted without a match: the displayed combination is
        # no longer available.
        if force_refresh:
            self.displayed = self.available_combinations[0]
        else:
            self.py3.log('xrandr error="displayed combination is not available"')
def _process_exception(e, body, tb):
"""
Process informations about exception and send them thru AMQP.
Args:
e (obj): Exception instance.
body (str): Text which will be sent over AMQP.
tb (obj): Traceback object with informations, which will be put to the
headers.
"""
# get informations about message
msg = e.message if hasattr(e, "message") else str(e)
exception_type = str(e.__class__)
exception_name = str(e.__class__.__name__)
properties = pika.BasicProperties(
content_type="application/text",
delivery_mode=2,
headers={
"exception": msg,
"exception_type": exception_type,
"exception_name": exception_name,
"traceback": tb,
"UUID": str(uuid.uuid4())
}
)
send_message("harvester", body, properties=properties) | Process informations about exception and send them thru AMQP.
Args:
e (obj): Exception instance.
body (str): Text which will be sent over AMQP.
tb (obj): Traceback object with informations, which will be put to the
headers. | Below is the the instruction that describes the task:
### Input:
Process informations about exception and send them thru AMQP.
Args:
e (obj): Exception instance.
body (str): Text which will be sent over AMQP.
tb (obj): Traceback object with informations, which will be put to the
headers.
### Response:
def _process_exception(e, body, tb):
    """
    Process information about an exception and send it through AMQP.

    Args:
        e (obj): Exception instance.
        body (str): Text which will be sent over AMQP.
        tb (obj): Traceback object with informations, which will be put to the
            headers.
    """
    # get informations about message
    # Python 2 exceptions carry `.message`; fall back to str(e) otherwise.
    msg = e.message if hasattr(e, "message") else str(e)
    exception_type = str(e.__class__)
    exception_name = str(e.__class__.__name__)
    # Persistent message (delivery_mode=2) carrying the exception details
    # in the headers, plus a fresh UUID for correlation.
    properties = pika.BasicProperties(
        content_type="application/text",
        delivery_mode=2,
        headers={
            "exception": msg,
            "exception_type": exception_type,
            "exception_name": exception_name,
            "traceback": tb,
            "UUID": str(uuid.uuid4())
        }
    )
    send_message("harvester", body, properties=properties)
def dispatch_commands(functions, *args, **kwargs):
"""
A wrapper for :func:`dispatch` that creates a parser, adds commands to
the parser and dispatches them.
Uses :attr:`PARSER_FORMATTER`.
This::
dispatch_commands([foo, bar])
...is a shortcut for::
parser = ArgumentParser()
add_commands(parser, [foo, bar])
dispatch(parser)
"""
parser = argparse.ArgumentParser(formatter_class=PARSER_FORMATTER)
add_commands(parser, functions)
dispatch(parser, *args, **kwargs) | A wrapper for :func:`dispatch` that creates a parser, adds commands to
the parser and dispatches them.
Uses :attr:`PARSER_FORMATTER`.
This::
dispatch_commands([foo, bar])
...is a shortcut for::
parser = ArgumentParser()
add_commands(parser, [foo, bar])
dispatch(parser) | Below is the the instruction that describes the task:
### Input:
A wrapper for :func:`dispatch` that creates a parser, adds commands to
the parser and dispatches them.
Uses :attr:`PARSER_FORMATTER`.
This::
dispatch_commands([foo, bar])
...is a shortcut for::
parser = ArgumentParser()
add_commands(parser, [foo, bar])
dispatch(parser)
### Response:
def dispatch_commands(functions, *args, **kwargs):
    """
    A wrapper for :func:`dispatch` that creates a parser, adds commands to
    the parser and dispatches them.

    Uses :attr:`PARSER_FORMATTER`.

    This::

        dispatch_commands([foo, bar])

    ...is a shortcut for::

        parser = ArgumentParser()
        add_commands(parser, [foo, bar])
        dispatch(parser)
    """
    # Build the parser, register every command, then hand control over
    # to dispatch() with any extra arguments forwarded verbatim.
    cmd_parser = argparse.ArgumentParser(formatter_class=PARSER_FORMATTER)
    add_commands(cmd_parser, functions)
    dispatch(cmd_parser, *args, **kwargs)
def _line_by_type(self, line, header, hgroups, htypes, out, want_type,
collapse_quals_fn = None):
"""Parse out key value pairs for line information based on a group of values.
"""
for index, htype in ((i, t) for i, t in enumerate(htypes) if t == want_type):
col = hgroups[index][0]
key = header[col]#self._clean_header(header[col])
if collapse_quals_fn:
val = collapse_quals_fn(line, header, hgroups[index])
else:
val = line[col]
out[key].add(val)
return out | Parse out key value pairs for line information based on a group of values. | Below is the the instruction that describes the task:
### Input:
Parse out key value pairs for line information based on a group of values.
### Response:
def _line_by_type(self, line, header, hgroups, htypes, out, want_type,
collapse_quals_fn = None):
"""Parse out key value pairs for line information based on a group of values.
"""
for index, htype in ((i, t) for i, t in enumerate(htypes) if t == want_type):
col = hgroups[index][0]
key = header[col]#self._clean_header(header[col])
if collapse_quals_fn:
val = collapse_quals_fn(line, header, hgroups[index])
else:
val = line[col]
out[key].add(val)
return out |
def write_double(self, number):
""" Writes a double to the underlying output file as a 8-byte value. """
buf = pack(self.byte_order + "d", number)
self.write(buf) | Writes a double to the underlying output file as a 8-byte value. | Below is the the instruction that describes the task:
### Input:
Writes a double to the underlying output file as a 8-byte value.
### Response:
def write_double(self, number):
    """ Writes a double to the underlying output file as a 8-byte value. """
    # IEEE-754 double, honoring the configured byte-order prefix.
    encoded = pack(self.byte_order + "d", number)
    self.write(encoded)
def check_valid_cpc_status(method, uri, cpc):
"""
Check that the CPC is in a valid status, as indicated by its 'status'
property.
If the Cpc object does not have a 'status' property set, this function does
nothing (in order to make the mock support easy to use).
Raises:
ConflictError with reason 1: The CPC itself has been targeted by the
operation.
ConflictError with reason 6: The CPC is hosting the resource targeted by
the operation.
"""
status = cpc.properties.get('status', None)
if status is None:
# Do nothing if no status is set on the faked CPC
return
valid_statuses = ['active', 'service-required', 'degraded', 'exceptions']
if status not in valid_statuses:
if uri.startswith(cpc.uri):
# The uri targets the CPC (either is the CPC uri or some
# multiplicity under the CPC uri)
raise ConflictError(method, uri, reason=1,
message="The operation cannot be performed "
"because the targeted CPC {} has a status "
"that is not valid for the operation: {}".
format(cpc.name, status))
else:
# The uri targets a resource hosted by the CPC
raise ConflictError(method, uri, reason=6,
message="The operation cannot be performed "
"because CPC {} hosting the targeted resource "
"has a status that is not valid for the "
"operation: {}".
format(cpc.name, status)) | Check that the CPC is in a valid status, as indicated by its 'status'
property.
If the Cpc object does not have a 'status' property set, this function does
nothing (in order to make the mock support easy to use).
Raises:
ConflictError with reason 1: The CPC itself has been targeted by the
operation.
ConflictError with reason 6: The CPC is hosting the resource targeted by
the operation. | Below is the the instruction that describes the task:
### Input:
Check that the CPC is in a valid status, as indicated by its 'status'
property.
If the Cpc object does not have a 'status' property set, this function does
nothing (in order to make the mock support easy to use).
Raises:
ConflictError with reason 1: The CPC itself has been targeted by the
operation.
ConflictError with reason 6: The CPC is hosting the resource targeted by
the operation.
### Response:
def check_valid_cpc_status(method, uri, cpc):
    """
    Check that the CPC is in a valid status, as indicated by its 'status'
    property.

    If the Cpc object does not have a 'status' property set, this function does
    nothing (in order to make the mock support easy to use).

    Parameters:
        method: HTTP method of the operation, used in error reporting.
        uri: URI targeted by the operation.
        cpc: Faked CPC whose status is validated.

    Raises:
      ConflictError with reason 1: The CPC itself has been targeted by the
        operation.
      ConflictError with reason 6: The CPC is hosting the resource targeted by
        the operation.
    """
    status = cpc.properties.get('status', None)
    if status is None:
        # Do nothing if no status is set on the faked CPC
        return
    valid_statuses = ['active', 'service-required', 'degraded', 'exceptions']
    if status not in valid_statuses:
        if uri.startswith(cpc.uri):
            # The uri targets the CPC (either is the CPC uri or some
            # multiplicity under the CPC uri)
            raise ConflictError(method, uri, reason=1,
                                message="The operation cannot be performed "
                                "because the targeted CPC {} has a status "
                                "that is not valid for the operation: {}".
                                format(cpc.name, status))
        else:
            # The uri targets a resource hosted by the CPC
            raise ConflictError(method, uri, reason=6,
                                message="The operation cannot be performed "
                                "because CPC {} hosting the targeted resource "
                                "has a status that is not valid for the "
                                "operation: {}".
                                format(cpc.name, status))
def time_elapsed(func):
"""
记录函数运行耗时的生成器
:param func:
:return:
"""
@wraps(func)
def wrapper(*args, **kwargs):
timestamp = time.time() * 1000
ret = func(*args, **kwargs)
now_ts = time.time() * 1000
elapsed = now_ts - timestamp
print('%s costs time: %.2fms' % (func.__name__, elapsed))
return ret
return wrapper | 记录函数运行耗时的生成器
:param func:
:return: | Below is the the instruction that describes the task:
### Input:
记录函数运行耗时的生成器
:param func:
:return:
### Response:
def time_elapsed(func):
    """Decorator that measures and prints a function's wall-clock run time.

    :param func: callable to wrap
    :return: wrapped callable that reports its elapsed time in milliseconds
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        started_ms = time.time() * 1000
        result = func(*args, **kwargs)
        finished_ms = time.time() * 1000
        print('%s costs time: %.2fms' % (func.__name__, finished_ms - started_ms))
        return result
    return wrapper
async def stop(self):
"""
Stop recording.
"""
if self.__container:
for track, context in self.__tracks.items():
if context.task is not None:
context.task.cancel()
context.task = None
for packet in context.stream.encode(None):
self.__container.mux(packet)
self.__tracks = {}
if self.__container:
self.__container.close()
self.__container = None | Stop recording. | Below is the the instruction that describes the task:
### Input:
Stop recording.
### Response:
async def stop(self):
    """
    Stop recording.

    Cancels any per-track writer tasks, flushes each encoder's buffered
    packets into the container, then closes the container.
    """
    if self.__container:
        for track, context in self.__tracks.items():
            # Stop the task feeding frames for this track.
            if context.task is not None:
                context.task.cancel()
                context.task = None
            # Encoding None drains the encoder's buffered packets.
            for packet in context.stream.encode(None):
                self.__container.mux(packet)
        self.__tracks = {}

        if self.__container:
            self.__container.close()
            self.__container = None
def store_records_for_package(self, entry_point, records):
"""
Store the records in a way that permit lookup by package
"""
# If provided records already exist in the module mapping list,
# it likely means that a package declared multiple keys for the
# same package namespace; while normally this does not happen,
# this default implementation make no assumptions as to whether
# or not this is permitted.
pkg_module_records = self._dist_to_package_module_map(entry_point)
pkg_module_records.extend(records) | Store the records in a way that permit lookup by package | Below is the the instruction that describes the task:
### Input:
Store the records in a way that permit lookup by package
### Response:
def store_records_for_package(self, entry_point, records):
    """
    Store the records in a way that permit lookup by package
    """
    # If provided records already exist in the module mapping list, it
    # likely means a package declared multiple keys for the same package
    # namespace; this default implementation makes no assumptions as to
    # whether that is permitted, so it simply appends everything.
    target_records = self._dist_to_package_module_map(entry_point)
    target_records.extend(records)
def check_sentence_spacing(text):
"""Use no more than two spaces after a period."""
err = "typography.symbols.sentence_spacing"
msg = u"More than two spaces after the period; use 1 or 2."
regex = "\. {3}"
return existence_check(
text, [regex], err, msg, max_errors=3, require_padding=False) | Use no more than two spaces after a period. | Below is the the instruction that describes the task:
### Input:
Use no more than two spaces after a period.
### Response:
def check_sentence_spacing(text):
    """Use no more than two spaces after a period.

    Flags any period followed by three (or more) spaces.

    Args:
        text (str): the text to check.

    Returns:
        The error list produced by ``existence_check``.
    """
    err = "typography.symbols.sentence_spacing"
    msg = u"More than two spaces after the period; use 1 or 2."
    # Raw string: '\.' in a plain literal is an invalid escape sequence
    # (a warning on modern Python); the matched pattern is unchanged.
    regex = r"\. {3}"
    return existence_check(
        text, [regex], err, msg, max_errors=3, require_padding=False)
def t_TOKEN(t):
'[a-zA-Z0-9]+'
#print t.value,t.lexer.lexdata[t.lexer.lexpos-len(t.value):],re_TYPE.match(t.lexer.lexdata,t.lexer.lexpos-len(t.value))
if re_TYPE.match(t.value):
t.type = 'TYPE'
elif re_PTR.match(t.value):
t.type = 'PTR'
elif re_NUMBER.match(t.value):
if t.value.startswith('0x'):
t.value = t.value[2:]
t.value = int(t.value, 16)
t.type = 'NUMBER'
elif re_REGISTER.match(t.value):
t.type = 'REGISTER'
elif re_SEGMENT.match(t.value):
t.type = 'SEGMENT'
else:
raise Exception(f"Unknown:<{t.value}>")
return t | [a-zA-Z0-9]+ | Below is the the instruction that describes the task:
### Input:
[a-zA-Z0-9]+
### Response:
def t_TOKEN(t):
    '[a-zA-Z0-9]+'
    # NOTE: the docstring above IS the PLY token regex -- PLY reads the
    # pattern from there, so it must not be edited.
    #print t.value,t.lexer.lexdata[t.lexer.lexpos-len(t.value):],re_TYPE.match(t.lexer.lexdata,t.lexer.lexpos-len(t.value))
    # Classify the alphanumeric token by matching against the known
    # patterns, in priority order: TYPE, PTR, NUMBER, REGISTER, SEGMENT.
    if re_TYPE.match(t.value):
        t.type = 'TYPE'
    elif re_PTR.match(t.value):
        t.type = 'PTR'
    elif re_NUMBER.match(t.value):
        if t.value.startswith('0x'):
            t.value = t.value[2:]
        # Parses the value as base 16 -- assumes re_NUMBER only matches
        # hexadecimal digit strings; TODO confirm.
        t.value = int(t.value, 16)
        t.type = 'NUMBER'
    elif re_REGISTER.match(t.value):
        t.type = 'REGISTER'
    elif re_SEGMENT.match(t.value):
        t.type = 'SEGMENT'
    else:
        raise Exception(f"Unknown:<{t.value}>")
    return t
def fetchmany(self, size = None):
"""
As in DBAPI2.0 (except the fact rows are not tuples but
lists so if you try to modify them, you will succeed instead of
the correct behavior that would be that an exception would have
been raised)
Additionally every row returned by this class is addressable
by column name besides the column position in the query.
"""
try:
if size is None:
manyrows = self.__dbapi2_cursor.fetchmany()
else:
manyrows = self.__dbapi2_cursor.fetchmany(size)
except Exception, e:
self.__connection.reconnect(None, self.__log_reconnect)
self.__dbapi2_cursor = self.__connection._get_raw_cursor()
if size is None:
manyrows = self.__dbapi2_cursor.fetchmany()
else:
manyrows = self.__dbapi2_cursor.fetchmany(size)
if not manyrows:
return manyrows
else:
return [self.row(self.__col2idx_map, dbapi2_row) for dbapi2_row in manyrows] | As in DBAPI2.0 (except the fact rows are not tuples but
lists so if you try to modify them, you will succeed instead of
the correct behavior that would be that an exception would have
been raised)
Additionally every row returned by this class is addressable
by column name besides the column position in the query. | Below is the the instruction that describes the task:
### Input:
As in DBAPI2.0 (except the fact rows are not tuples but
lists so if you try to modify them, you will succeed instead of
the correct behavior that would be that an exception would have
been raised)
Additionally every row returned by this class is addressable
by column name besides the column position in the query.
### Response:
def fetchmany(self, size = None):
"""
As in DBAPI2.0 (except the fact rows are not tuples but
lists so if you try to modify them, you will succeed instead of
the correct behavior that would be that an exception would have
been raised)
Additionally every row returned by this class is addressable
by column name besides the column position in the query.
"""
try:
if size is None:
manyrows = self.__dbapi2_cursor.fetchmany()
else:
manyrows = self.__dbapi2_cursor.fetchmany(size)
except Exception, e:
self.__connection.reconnect(None, self.__log_reconnect)
self.__dbapi2_cursor = self.__connection._get_raw_cursor()
if size is None:
manyrows = self.__dbapi2_cursor.fetchmany()
else:
manyrows = self.__dbapi2_cursor.fetchmany(size)
if not manyrows:
return manyrows
else:
return [self.row(self.__col2idx_map, dbapi2_row) for dbapi2_row in manyrows] |
def pix2ang(nside, ipix, nest=False, lonlat=False):
"""Drop-in replacement for healpy `~healpy.pixelfunc.pix2ang`."""
lon, lat = healpix_to_lonlat(ipix, nside, order='nested' if nest else 'ring')
return _lonlat_to_healpy(lon, lat, lonlat=lonlat) | Drop-in replacement for healpy `~healpy.pixelfunc.pix2ang`. | Below is the the instruction that describes the task:
### Input:
Drop-in replacement for healpy `~healpy.pixelfunc.pix2ang`.
### Response:
def pix2ang(nside, ipix, nest=False, lonlat=False):
"""Drop-in replacement for healpy `~healpy.pixelfunc.pix2ang`."""
lon, lat = healpix_to_lonlat(ipix, nside, order='nested' if nest else 'ring')
return _lonlat_to_healpy(lon, lat, lonlat=lonlat) |
def distance_to_tile(self, point, direction, length = 50):
"""
Find nearest wall on a given bearing.
Used for agent wall sensors.
"""
assert isinstance(point, eu.Vector2)
assert isinstance(direction, int) or isinstance(direction, float)
assert isinstance(length, int) or isinstance(length, float)
# Recursive dead-reckoning to next tile
# Given `point`, look for where intersects with next boundary (`y % 10`) in `direction`
def search_grid(search, rad, distance = 0, depth = 10):
assert isinstance(search, eu.Vector2)
assert isinstance(rad, float)
if depth == 0:
return distance
depth -= 1
# Exit if outside window.
if abs(search.x) > self.width or abs(search.y) > self.height:
return distance
m = math.tan(rad) # Slope
sin = math.sin(rad)
cos = math.cos(rad)
#print(sin, cos)
top = (cos > 0)
bottom = (cos < 0)
left = (sin < 0)
right = (sin > 0)
start = eu.Vector2(search.x, search.y)
ends = eu.Vector2()
# Helper function
# FIXME: Does MapLayer provide something better? Neighbours?
# Find next grid on given axis
def get_boundary(axis, increasing):
assert (isinstance(axis, str) or isinstance(axis, unicode)) and (axis == 'x' or axis == 'y')
if axis == 'x':
tile = self.map_layer.tw
position = search.x
elif axis == 'y':
tile = self.map_layer.th
position = search.y
# Set bound to next tile on axis
# Offset next search by one pixel into tile
bound = (position % tile)
if increasing:
bound = tile - bound
bound = position + bound
offset = 1
else:
bound = position - bound
offset = -1
# Find intersect
if axis == 'x':
intersect = ((bound - search.x) / m) + search.y
return eu.Vector2(bound+offset, intersect)
elif axis == 'y':
intersect = -m * (search.y - bound) + search.x
return eu.Vector2(intersect, bound+offset)
# End Helper
if top or bottom:
ends.y = get_boundary('y', top)
ends.y.y = min(ends.y.y, self.height)
if left or right:
ends.x = get_boundary('x', right)
ends.x.x = min(ends.x.x, self.width)
# Get shortest collision between axis
lengths = eu.Vector2(0, 0)
if type(ends.x) == eu.Vector2:
diff = start - ends.x
lengths.x = math.sqrt(diff.dot(diff))
if type(ends.y) == eu.Vector2:
diff = start - ends.y
lengths.y = math.sqrt(diff.dot(diff))
end = None
# Find shortest boundary intersect
index_min = min(xrange(len(lengths)), key=lengths.__getitem__)
if lengths[index_min] > 0:
distance += lengths[index_min]
end = ends[index_min]
if end:
cell = self.map_layer.get_at_pixel(end.x, end.y)
if not cell or not cell.tile or not cell.tile.id > 0:
# Recurse
return search_grid(end, rad, distance, depth)
return distance
# End Helper
# Start at `point`, check tile under each pixel
return search_grid(point, direction) | Find nearest wall on a given bearing.
Used for agent wall sensors. | Below is the the instruction that describes the task:
### Input:
Find nearest wall on a given bearing.
Used for agent wall sensors.
### Response:
def distance_to_tile(self, point, direction, length = 50):
"""
Find nearest wall on a given bearing.
Used for agent wall sensors.
"""
assert isinstance(point, eu.Vector2)
assert isinstance(direction, int) or isinstance(direction, float)
assert isinstance(length, int) or isinstance(length, float)
# Recursive dead-reckoning to next tile
# Given `point`, look for where intersects with next boundary (`y % 10`) in `direction`
def search_grid(search, rad, distance = 0, depth = 10):
assert isinstance(search, eu.Vector2)
assert isinstance(rad, float)
if depth == 0:
return distance
depth -= 1
# Exit if outside window.
if abs(search.x) > self.width or abs(search.y) > self.height:
return distance
m = math.tan(rad) # Slope
sin = math.sin(rad)
cos = math.cos(rad)
#print(sin, cos)
top = (cos > 0)
bottom = (cos < 0)
left = (sin < 0)
right = (sin > 0)
start = eu.Vector2(search.x, search.y)
ends = eu.Vector2()
# Helper function
# FIXME: Does MapLayer provide something better? Neighbours?
# Find next grid on given axis
def get_boundary(axis, increasing):
assert (isinstance(axis, str) or isinstance(axis, unicode)) and (axis == 'x' or axis == 'y')
if axis == 'x':
tile = self.map_layer.tw
position = search.x
elif axis == 'y':
tile = self.map_layer.th
position = search.y
# Set bound to next tile on axis
# Offset next search by one pixel into tile
bound = (position % tile)
if increasing:
bound = tile - bound
bound = position + bound
offset = 1
else:
bound = position - bound
offset = -1
# Find intersect
if axis == 'x':
intersect = ((bound - search.x) / m) + search.y
return eu.Vector2(bound+offset, intersect)
elif axis == 'y':
intersect = -m * (search.y - bound) + search.x
return eu.Vector2(intersect, bound+offset)
# End Helper
if top or bottom:
ends.y = get_boundary('y', top)
ends.y.y = min(ends.y.y, self.height)
if left or right:
ends.x = get_boundary('x', right)
ends.x.x = min(ends.x.x, self.width)
# Get shortest collision between axis
lengths = eu.Vector2(0, 0)
if type(ends.x) == eu.Vector2:
diff = start - ends.x
lengths.x = math.sqrt(diff.dot(diff))
if type(ends.y) == eu.Vector2:
diff = start - ends.y
lengths.y = math.sqrt(diff.dot(diff))
end = None
# Find shortest boundary intersect
index_min = min(xrange(len(lengths)), key=lengths.__getitem__)
if lengths[index_min] > 0:
distance += lengths[index_min]
end = ends[index_min]
if end:
cell = self.map_layer.get_at_pixel(end.x, end.y)
if not cell or not cell.tile or not cell.tile.id > 0:
# Recurse
return search_grid(end, rad, distance, depth)
return distance
# End Helper
# Start at `point`, check tile under each pixel
return search_grid(point, direction) |
def provider_factory(factory=_sentinel, scope=NoneScope):
'''
Decorator to create a provider using the given factory, and scope.
Can also be used in a non-decorator manner.
:param scope: Scope key, factory, or instance
:type scope: object or callable
:return: decorator
:rtype: decorator
'''
if factory is _sentinel:
return functools.partial(provider_factory, scope=scope)
provider = Provider(factory, scope)
return provider | Decorator to create a provider using the given factory, and scope.
Can also be used in a non-decorator manner.
:param scope: Scope key, factory, or instance
:type scope: object or callable
:return: decorator
:rtype: decorator | Below is the the instruction that describes the task:
### Input:
Decorator to create a provider using the given factory, and scope.
Can also be used in a non-decorator manner.
:param scope: Scope key, factory, or instance
:type scope: object or callable
:return: decorator
:rtype: decorator
### Response:
def provider_factory(factory=_sentinel, scope=NoneScope):
'''
Decorator to create a provider using the given factory, and scope.
Can also be used in a non-decorator manner.
:param scope: Scope key, factory, or instance
:type scope: object or callable
:return: decorator
:rtype: decorator
'''
if factory is _sentinel:
return functools.partial(provider_factory, scope=scope)
provider = Provider(factory, scope)
return provider |
def get_connection_state(self, connection: str) -> Dict[str, Any]:
"""
For an already established connection return its state.
"""
if connection not in self.connections:
raise ConnectionNotOpen(connection)
return self.connections[connection].state | For an already established connection return its state. | Below is the the instruction that describes the task:
### Input:
For an already established connection return its state.
### Response:
def get_connection_state(self, connection: str) -> Dict[str, Any]:
"""
For an already established connection return its state.
"""
if connection not in self.connections:
raise ConnectionNotOpen(connection)
return self.connections[connection].state |
def plot_sn_discovery_map(log,
snSurveyDiscoveryTimes,
peakAppMagList,
snCampaignLengthList,
redshifts,
extraSurveyConstraints,
pathToOutputPlotFolder):
"""
*Plot the SN discoveries in a polar plot as function of redshift & time*
**Key Arguments:**
- ``log`` -- logger
- ``snSurveyDiscoveryTimes`` --
- ``peakAppMagList`` --
- ``snCampaignLengthList`` -- a list of campaign lengths in each filter
- ``redshifts`` --
- ``extraSurveyConstraints`` --
- ``pathToOutputPlotDirectory`` -- path to add plots to
**Return:**
- None
"""
################ > IMPORTS ################
## STANDARD LIB ##
import sys
## THIRD PARTY ##
import matplotlib.pyplot as plt
import numpy as np
## LOCAL APPLICATION ##
filters = ['g', 'r', 'i', 'z']
lunarMonth = 29.3
surveyYear = 12. * lunarMonth
faintMagLimit = extraSurveyConstraints['Faint-Limit of Peak Magnitude']
################ >ACTION(S) ################
discovered = []
tooFaint = []
shortCampaign = []
discoveredRedshift = []
tooFaintRedshift = []
notDiscoveredRedshift = []
shortCampaignRedshift = []
#log.info('len(redshifts) %s' % (len(redshifts),))
for item in range(len(redshifts)):
if snSurveyDiscoveryTimes[item]['any'] is True:
discoveryDayList = []
faintDayList = []
shortCampaignDayList = []
for ffilter in filters:
if snSurveyDiscoveryTimes[item][ffilter]:
if peakAppMagList[item][ffilter] < faintMagLimit:
if snCampaignLengthList[item]['max'] < extraSurveyConstraints['Observable for at least ? number of days']:
shortCampaignDayList.append(
snSurveyDiscoveryTimes[item][ffilter])
else:
discoveryDayList.append(
snSurveyDiscoveryTimes[item][ffilter])
else:
faintDayList.append(
snSurveyDiscoveryTimes[item][ffilter])
if len(discoveryDayList) > 0:
discovered.append(min(discoveryDayList))
discoveredRedshift.append(redshifts[item])
elif len(shortCampaignDayList) > 0:
shortCampaign.append(min(shortCampaignDayList))
shortCampaignRedshift.append(redshifts[item])
else:
tooFaint.append(min(faintDayList))
tooFaintRedshift.append(redshifts[item])
else:
notDiscoveredRedshift.append(redshifts[item])
################ >ACTION(S) ################
colors = [
{'red': '#dc322f'},
{'blue': '#268bd2'},
{'green': '#859900'},
{'orange': '#cb4b16'},
{'gray': '#93a1a1'},
{'violet': '#6c71c4'},
{'cyan': '#2aa198'},
{'magenta': '#d33682'},
{'yellow': '#b58900'}
]
# FORCE SQUARE FIGURE AND SQUARE AXES LOOKS BETTER FOR POLAR
fig = plt.figure(
num=None,
figsize=(8, 8),
dpi=None,
facecolor=None,
edgecolor=None,
frameon=True)
ax = fig.add_axes(
[0.1, 0.1, 0.8, 0.8],
polar=True,
frameon=False)
maxInList = max(redshifts) * 1.1
ax.set_ylim(0, maxInList)
# ax.get_xaxis().set_visible(False)
circleTicks = np.arange(0, 350, 30)
tickLabels = []
for tick in circleTicks:
tickLabels.append("%s days" % (tick,))
plt.xticks(2 * np.pi * circleTicks / 360., tickLabels)
discovered = 2 * np.pi * np.array(discovered) / surveyYear
discoveredRedshift = np.array(discoveredRedshift)
tooFaint = 2 * np.pi * np.array(tooFaint) / surveyYear
tooFaintRedshift = np.array(tooFaintRedshift)
shortCampaign = 2 * np.pi * np.array(shortCampaign) / surveyYear
shortCampaignRedshift = np.array(shortCampaignRedshift)
plt.scatter(
tooFaint,
tooFaintRedshift,
label="""Detected - too faint to constrain as transient""",
s=50,
c='#dc322f',
marker='o',
cmap=None,
norm=None,
vmin=None,
vmax=None,
alpha=0.2,
linewidths=None,
edgecolor='#657b83',
verts=None,
hold=True)
plt.scatter(
discovered,
discoveredRedshift,
label='Discovered transient',
s=50,
c='#859900',
marker='o',
cmap=None,
norm=None,
vmin=None,
vmax=None,
alpha=0.2,
linewidths=None,
edgecolor='#657b83',
verts=None,
hold=True)
plt.scatter(
shortCampaign,
shortCampaignRedshift,
label="""Detected - campaign to short to constrain as transient""",
s=50,
c='#268bd2',
marker='o',
cmap=None,
norm=None,
vmin=None,
vmax=None,
alpha=0.2,
linewidths=None,
edgecolor='#657b83',
verts=None,
hold=True)
title = "transients Detected Within the Suvrey Year"
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
# Put a legend to the right of the current axis
ax.legend(loc='center left', bbox_to_anchor=(0.7, -0.1), prop={'size': 8})
plt.grid(True)
plt.title(title, fontsize='small',
verticalalignment='bottom', linespacing=0.2)
fileName = pathToOutputPlotFolder + title.replace(" ", "_") + ".png"
imageLink = """

""" % (title.replace(" ", "_"), fileName)
plt.savefig(fileName)
plt.clf() # clear figure
return imageLink | *Plot the SN discoveries in a polar plot as function of redshift & time*
**Key Arguments:**
- ``log`` -- logger
- ``snSurveyDiscoveryTimes`` --
- ``peakAppMagList`` --
- ``snCampaignLengthList`` -- a list of campaign lengths in each filter
- ``redshifts`` --
- ``extraSurveyConstraints`` --
- ``pathToOutputPlotDirectory`` -- path to add plots to
**Return:**
- None | Below is the the instruction that describes the task:
### Input:
*Plot the SN discoveries in a polar plot as function of redshift & time*
**Key Arguments:**
- ``log`` -- logger
- ``snSurveyDiscoveryTimes`` --
- ``peakAppMagList`` --
- ``snCampaignLengthList`` -- a list of campaign lengths in each filter
- ``redshifts`` --
- ``extraSurveyConstraints`` --
- ``pathToOutputPlotDirectory`` -- path to add plots to
**Return:**
- None
### Response:
def plot_sn_discovery_map(log,
snSurveyDiscoveryTimes,
peakAppMagList,
snCampaignLengthList,
redshifts,
extraSurveyConstraints,
pathToOutputPlotFolder):
"""
*Plot the SN discoveries in a polar plot as function of redshift & time*
**Key Arguments:**
- ``log`` -- logger
- ``snSurveyDiscoveryTimes`` --
- ``peakAppMagList`` --
- ``snCampaignLengthList`` -- a list of campaign lengths in each filter
- ``redshifts`` --
- ``extraSurveyConstraints`` --
- ``pathToOutputPlotDirectory`` -- path to add plots to
**Return:**
- None
"""
################ > IMPORTS ################
## STANDARD LIB ##
import sys
## THIRD PARTY ##
import matplotlib.pyplot as plt
import numpy as np
## LOCAL APPLICATION ##
filters = ['g', 'r', 'i', 'z']
lunarMonth = 29.3
surveyYear = 12. * lunarMonth
faintMagLimit = extraSurveyConstraints['Faint-Limit of Peak Magnitude']
################ >ACTION(S) ################
discovered = []
tooFaint = []
shortCampaign = []
discoveredRedshift = []
tooFaintRedshift = []
notDiscoveredRedshift = []
shortCampaignRedshift = []
#log.info('len(redshifts) %s' % (len(redshifts),))
for item in range(len(redshifts)):
if snSurveyDiscoveryTimes[item]['any'] is True:
discoveryDayList = []
faintDayList = []
shortCampaignDayList = []
for ffilter in filters:
if snSurveyDiscoveryTimes[item][ffilter]:
if peakAppMagList[item][ffilter] < faintMagLimit:
if snCampaignLengthList[item]['max'] < extraSurveyConstraints['Observable for at least ? number of days']:
shortCampaignDayList.append(
snSurveyDiscoveryTimes[item][ffilter])
else:
discoveryDayList.append(
snSurveyDiscoveryTimes[item][ffilter])
else:
faintDayList.append(
snSurveyDiscoveryTimes[item][ffilter])
if len(discoveryDayList) > 0:
discovered.append(min(discoveryDayList))
discoveredRedshift.append(redshifts[item])
elif len(shortCampaignDayList) > 0:
shortCampaign.append(min(shortCampaignDayList))
shortCampaignRedshift.append(redshifts[item])
else:
tooFaint.append(min(faintDayList))
tooFaintRedshift.append(redshifts[item])
else:
notDiscoveredRedshift.append(redshifts[item])
################ >ACTION(S) ################
colors = [
{'red': '#dc322f'},
{'blue': '#268bd2'},
{'green': '#859900'},
{'orange': '#cb4b16'},
{'gray': '#93a1a1'},
{'violet': '#6c71c4'},
{'cyan': '#2aa198'},
{'magenta': '#d33682'},
{'yellow': '#b58900'}
]
# FORCE SQUARE FIGURE AND SQUARE AXES LOOKS BETTER FOR POLAR
fig = plt.figure(
num=None,
figsize=(8, 8),
dpi=None,
facecolor=None,
edgecolor=None,
frameon=True)
ax = fig.add_axes(
[0.1, 0.1, 0.8, 0.8],
polar=True,
frameon=False)
maxInList = max(redshifts) * 1.1
ax.set_ylim(0, maxInList)
# ax.get_xaxis().set_visible(False)
circleTicks = np.arange(0, 350, 30)
tickLabels = []
for tick in circleTicks:
tickLabels.append("%s days" % (tick,))
plt.xticks(2 * np.pi * circleTicks / 360., tickLabels)
discovered = 2 * np.pi * np.array(discovered) / surveyYear
discoveredRedshift = np.array(discoveredRedshift)
tooFaint = 2 * np.pi * np.array(tooFaint) / surveyYear
tooFaintRedshift = np.array(tooFaintRedshift)
shortCampaign = 2 * np.pi * np.array(shortCampaign) / surveyYear
shortCampaignRedshift = np.array(shortCampaignRedshift)
plt.scatter(
tooFaint,
tooFaintRedshift,
label="""Detected - too faint to constrain as transient""",
s=50,
c='#dc322f',
marker='o',
cmap=None,
norm=None,
vmin=None,
vmax=None,
alpha=0.2,
linewidths=None,
edgecolor='#657b83',
verts=None,
hold=True)
plt.scatter(
discovered,
discoveredRedshift,
label='Discovered transient',
s=50,
c='#859900',
marker='o',
cmap=None,
norm=None,
vmin=None,
vmax=None,
alpha=0.2,
linewidths=None,
edgecolor='#657b83',
verts=None,
hold=True)
plt.scatter(
shortCampaign,
shortCampaignRedshift,
label="""Detected - campaign to short to constrain as transient""",
s=50,
c='#268bd2',
marker='o',
cmap=None,
norm=None,
vmin=None,
vmax=None,
alpha=0.2,
linewidths=None,
edgecolor='#657b83',
verts=None,
hold=True)
title = "transients Detected Within the Suvrey Year"
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
# Put a legend to the right of the current axis
ax.legend(loc='center left', bbox_to_anchor=(0.7, -0.1), prop={'size': 8})
plt.grid(True)
plt.title(title, fontsize='small',
verticalalignment='bottom', linespacing=0.2)
fileName = pathToOutputPlotFolder + title.replace(" ", "_") + ".png"
imageLink = """

""" % (title.replace(" ", "_"), fileName)
plt.savefig(fileName)
plt.clf() # clear figure
return imageLink |
def update(self, result, spec):
"""Replace elements with results of calling callables."""
if isinstance(spec, dict):
if spec:
spec_value = next(iter(spec.values()))
for key, value in result.items():
result[key] = self.update(value, spec_value)
if isinstance(spec, list):
if spec:
for i, value in enumerate(result):
result[i] = self.update(value, spec[0])
if isinstance(spec, tuple):
return tuple(self.update(value, s)
for s, value in zip(spec, result))
if callable(spec):
return spec(result)
return result | Replace elements with results of calling callables. | Below is the the instruction that describes the task:
### Input:
Replace elements with results of calling callables.
### Response:
def update(self, result, spec):
"""Replace elements with results of calling callables."""
if isinstance(spec, dict):
if spec:
spec_value = next(iter(spec.values()))
for key, value in result.items():
result[key] = self.update(value, spec_value)
if isinstance(spec, list):
if spec:
for i, value in enumerate(result):
result[i] = self.update(value, spec[0])
if isinstance(spec, tuple):
return tuple(self.update(value, s)
for s, value in zip(spec, result))
if callable(spec):
return spec(result)
return result |
def _handle_execute_reply(self, msg):
"""
Reimplemented to handle communications between Spyder
and the kernel
"""
msg_id = msg['parent_header']['msg_id']
info = self._request_info['execute'].get(msg_id)
# unset reading flag, because if execute finished, raw_input can't
# still be pending.
self._reading = False
# Refresh namespacebrowser after the kernel starts running
exec_count = msg['content'].get('execution_count', '')
if exec_count == 0 and self._kernel_is_starting:
if self.namespacebrowser is not None:
self.set_namespace_view_settings()
self.refresh_namespacebrowser()
self._kernel_is_starting = False
self.ipyclient.t0 = time.monotonic()
# Handle silent execution of kernel methods
if info and info.kind == 'silent_exec_method' and not self._hidden:
self.handle_exec_method(msg)
self._request_info['execute'].pop(msg_id)
else:
super(NamepaceBrowserWidget, self)._handle_execute_reply(msg) | Reimplemented to handle communications between Spyder
and the kernel | Below is the the instruction that describes the task:
### Input:
Reimplemented to handle communications between Spyder
and the kernel
### Response:
def _handle_execute_reply(self, msg):
"""
Reimplemented to handle communications between Spyder
and the kernel
"""
msg_id = msg['parent_header']['msg_id']
info = self._request_info['execute'].get(msg_id)
# unset reading flag, because if execute finished, raw_input can't
# still be pending.
self._reading = False
# Refresh namespacebrowser after the kernel starts running
exec_count = msg['content'].get('execution_count', '')
if exec_count == 0 and self._kernel_is_starting:
if self.namespacebrowser is not None:
self.set_namespace_view_settings()
self.refresh_namespacebrowser()
self._kernel_is_starting = False
self.ipyclient.t0 = time.monotonic()
# Handle silent execution of kernel methods
if info and info.kind == 'silent_exec_method' and not self._hidden:
self.handle_exec_method(msg)
self._request_info['execute'].pop(msg_id)
else:
super(NamepaceBrowserWidget, self)._handle_execute_reply(msg) |
def _dump_inline_table(section):
"""Preserve inline table in its compact syntax instead of expanding
into subsection.
https://github.com/toml-lang/toml#user-content-inline-table
"""
retval = ""
if isinstance(section, dict):
val_list = []
for k, v in section.items():
val = _dump_inline_table(v)
val_list.append(k + " = " + val)
retval += "{ " + ", ".join(val_list) + " }\n"
return retval
else:
return str(_dump_value(section)) | Preserve inline table in its compact syntax instead of expanding
into subsection.
https://github.com/toml-lang/toml#user-content-inline-table | Below is the the instruction that describes the task:
### Input:
Preserve inline table in its compact syntax instead of expanding
into subsection.
https://github.com/toml-lang/toml#user-content-inline-table
### Response:
def _dump_inline_table(section):
"""Preserve inline table in its compact syntax instead of expanding
into subsection.
https://github.com/toml-lang/toml#user-content-inline-table
"""
retval = ""
if isinstance(section, dict):
val_list = []
for k, v in section.items():
val = _dump_inline_table(v)
val_list.append(k + " = " + val)
retval += "{ " + ", ".join(val_list) + " }\n"
return retval
else:
return str(_dump_value(section)) |
def _get_internal_max_value(self):
"""
This is supposed to be only used by fitting engines to get the maximum value in internal representation.
It is supposed to be called only once before doing the minimization/sampling, to set the range of the parameter
:return: maximum value in internal representation (or None if there is no minimum)
"""
if self.max_value is None:
# No minimum set
return None
else:
# There is a minimum. If there is a transformation, use it, otherwise just return the minimum
if self._transformation is None:
return self._external_max_value
else:
return self._transformation.forward(self._external_max_value) | This is supposed to be only used by fitting engines to get the maximum value in internal representation.
It is supposed to be called only once before doing the minimization/sampling, to set the range of the parameter
:return: maximum value in internal representation (or None if there is no minimum) | Below is the the instruction that describes the task:
### Input:
This is supposed to be only used by fitting engines to get the maximum value in internal representation.
It is supposed to be called only once before doing the minimization/sampling, to set the range of the parameter
:return: maximum value in internal representation (or None if there is no minimum)
### Response:
def _get_internal_max_value(self):
"""
This is supposed to be only used by fitting engines to get the maximum value in internal representation.
It is supposed to be called only once before doing the minimization/sampling, to set the range of the parameter
:return: maximum value in internal representation (or None if there is no minimum)
"""
if self.max_value is None:
# No minimum set
return None
else:
# There is a minimum. If there is a transformation, use it, otherwise just return the minimum
if self._transformation is None:
return self._external_max_value
else:
return self._transformation.forward(self._external_max_value) |
def has_family_notes(family, data_dir=None):
'''Check if notes exist for a given family
Returns True if they exist, false otherwise
'''
file_path = _family_notes_path(family, data_dir)
return os.path.isfile(file_path) | Check if notes exist for a given family
Returns True if they exist, false otherwise | Below is the the instruction that describes the task:
### Input:
Check if notes exist for a given family
Returns True if they exist, false otherwise
### Response:
def has_family_notes(family, data_dir=None):
'''Check if notes exist for a given family
Returns True if they exist, false otherwise
'''
file_path = _family_notes_path(family, data_dir)
return os.path.isfile(file_path) |
def get_cached_commit_times(root_folder, parent_dir, sorted_relpaths):
"""
Get the cached commit times for the combination of this parent_dir and relpaths
Return the commit assigned to this combination and the actual times!
"""
result = get_all_cached_commit_times(root_folder)
for item in result:
if sorted(item.get("sorted_relpaths", [])) == sorted_relpaths and item.get("parent_dir") == parent_dir:
return item.get("commit"), item.get("commit_times")
return None, {} | Get the cached commit times for the combination of this parent_dir and relpaths
Return the commit assigned to this combination and the actual times! | Below is the the instruction that describes the task:
### Input:
Get the cached commit times for the combination of this parent_dir and relpaths
Return the commit assigned to this combination and the actual times!
### Response:
def get_cached_commit_times(root_folder, parent_dir, sorted_relpaths):
"""
Get the cached commit times for the combination of this parent_dir and relpaths
Return the commit assigned to this combination and the actual times!
"""
result = get_all_cached_commit_times(root_folder)
for item in result:
if sorted(item.get("sorted_relpaths", [])) == sorted_relpaths and item.get("parent_dir") == parent_dir:
return item.get("commit"), item.get("commit_times")
return None, {} |
def decrease_frequency(self, frequency=None):
"""
Decreases the frequency.
:param frequency: the frequency to decrease by, 1 if None
:type frequency: int
"""
if frequency is None:
javabridge.call(self.jobject, "decreaseFrequency", "()V")
else:
javabridge.call(self.jobject, "decreaseFrequency", "(I)V", frequency) | Decreases the frequency.
:param frequency: the frequency to decrease by, 1 if None
:type frequency: int | Below is the the instruction that describes the task:
### Input:
Decreases the frequency.
:param frequency: the frequency to decrease by, 1 if None
:type frequency: int
### Response:
def decrease_frequency(self, frequency=None):
"""
Decreases the frequency.
:param frequency: the frequency to decrease by, 1 if None
:type frequency: int
"""
if frequency is None:
javabridge.call(self.jobject, "decreaseFrequency", "()V")
else:
javabridge.call(self.jobject, "decreaseFrequency", "(I)V", frequency) |
def valid(a, b):
"""Check whether `a` and `b` are not inf or nan"""
return ~(np.isnan(a) | np.isinf(a) | np.isnan(b) | np.isinf(b)) | Check whether `a` and `b` are not inf or nan | Below is the the instruction that describes the task:
### Input:
Check whether `a` and `b` are not inf or nan
### Response:
def valid(a, b):
"""Check whether `a` and `b` are not inf or nan"""
return ~(np.isnan(a) | np.isinf(a) | np.isnan(b) | np.isinf(b)) |
def propagate(cls, date):
"""Compute the position of the sun at a given date
Args:
date (~beyond.utils.date.Date)
Return:
~beyond.orbits.orbit.Orbit: Position of the sun in MOD frame
Example:
.. code-block:: python
from beyond.utils.date import Date
SunPropagator.propagate(Date(2006, 4, 2))
# Orbit =
# date = 2006-04-02T00:00:00 UTC
# form = Cartesian
# frame = MOD
# propag = SunPropagator
# coord =
# x = 146186235644.0
# y = 28789144480.5
# z = 12481136552.3
# vx = 0.0
# vy = 0.0
# vz = 0.0
"""
date = date.change_scale('UT1')
t_ut1 = date.julian_century
lambda_M = 280.460 + 36000.771 * t_ut1
M = np.radians(357.5291092 + 35999.05034 * t_ut1)
lambda_el = np.radians(lambda_M + 1.914666471 * np.sin(M) + 0.019994643 * np.sin(2 * M))
r = 1.000140612 - 0.016708617 * np.cos(M) - 0.000139589 * np.cos(2 * M)
eps = np.radians(23.439291 - 0.0130042 * t_ut1)
pv = r * np.array([
np.cos(lambda_el),
np.cos(eps) * np.sin(lambda_el),
np.sin(eps) * np.sin(lambda_el),
0,
0,
0
]) * AU
return Orbit(date, pv, 'cartesian', 'MOD', cls()) | Compute the position of the sun at a given date
Args:
date (~beyond.utils.date.Date)
Return:
~beyond.orbits.orbit.Orbit: Position of the sun in MOD frame
Example:
.. code-block:: python
from beyond.utils.date import Date
SunPropagator.propagate(Date(2006, 4, 2))
# Orbit =
# date = 2006-04-02T00:00:00 UTC
# form = Cartesian
# frame = MOD
# propag = SunPropagator
# coord =
# x = 146186235644.0
# y = 28789144480.5
# z = 12481136552.3
# vx = 0.0
# vy = 0.0
# vz = 0.0 | Below is the the instruction that describes the task:
### Input:
Compute the position of the sun at a given date
Args:
date (~beyond.utils.date.Date)
Return:
~beyond.orbits.orbit.Orbit: Position of the sun in MOD frame
Example:
.. code-block:: python
from beyond.utils.date import Date
SunPropagator.propagate(Date(2006, 4, 2))
# Orbit =
# date = 2006-04-02T00:00:00 UTC
# form = Cartesian
# frame = MOD
# propag = SunPropagator
# coord =
# x = 146186235644.0
# y = 28789144480.5
# z = 12481136552.3
# vx = 0.0
# vy = 0.0
# vz = 0.0
### Response:
def propagate(cls, date):
"""Compute the position of the sun at a given date
Args:
date (~beyond.utils.date.Date)
Return:
~beyond.orbits.orbit.Orbit: Position of the sun in MOD frame
Example:
.. code-block:: python
from beyond.utils.date import Date
SunPropagator.propagate(Date(2006, 4, 2))
# Orbit =
# date = 2006-04-02T00:00:00 UTC
# form = Cartesian
# frame = MOD
# propag = SunPropagator
# coord =
# x = 146186235644.0
# y = 28789144480.5
# z = 12481136552.3
# vx = 0.0
# vy = 0.0
# vz = 0.0
"""
date = date.change_scale('UT1')
t_ut1 = date.julian_century
lambda_M = 280.460 + 36000.771 * t_ut1
M = np.radians(357.5291092 + 35999.05034 * t_ut1)
lambda_el = np.radians(lambda_M + 1.914666471 * np.sin(M) + 0.019994643 * np.sin(2 * M))
r = 1.000140612 - 0.016708617 * np.cos(M) - 0.000139589 * np.cos(2 * M)
eps = np.radians(23.439291 - 0.0130042 * t_ut1)
pv = r * np.array([
np.cos(lambda_el),
np.cos(eps) * np.sin(lambda_el),
np.sin(eps) * np.sin(lambda_el),
0,
0,
0
]) * AU
return Orbit(date, pv, 'cartesian', 'MOD', cls()) |
def plot(self):
"""
Visualize the state.
:return: The generated figure.
:rtype: matplotlib.Figure
"""
width = 10
# The pleasing golden ratio.
height = width / 1.618
f = plt.figure(figsize=(width, height))
ax = f.add_subplot(111, projection="3d")
self.plot_state_histogram(ax)
return f | Visualize the state.
:return: The generated figure.
:rtype: matplotlib.Figure | Below is the the instruction that describes the task:
### Input:
Visualize the state.
:return: The generated figure.
:rtype: matplotlib.Figure
### Response:
def plot(self):
"""
Visualize the state.
:return: The generated figure.
:rtype: matplotlib.Figure
"""
width = 10
# The pleasing golden ratio.
height = width / 1.618
f = plt.figure(figsize=(width, height))
ax = f.add_subplot(111, projection="3d")
self.plot_state_histogram(ax)
return f |
def write_output(self, data, args=None, filename=None, label=None):
"""Write log data to a log file"""
if args:
if not args.outlog:
return 0
if not filename: filename=args.outlog
lastpath = ''
with open(str(filename), 'w') as output_file:
for entry in data['entries']:
if args.label:
if entry['source_path'] == lastpath:
output_file.write(entry['raw_text'] + '\n')
elif args.label == 'fname':
output_file.write('======== ' + \
entry['source_path'].split('/')[-1] + \
' >>>>\n' + entry['raw_text'] + '\n')
elif args.label == 'fpath':
output_file.write('======== ' + \
entry['source_path'] + \
' >>>>\n' + entry['raw_text'] + '\n')
else: output_file.write(entry['raw_text'] + '\n')
lastpath = entry['source_path'] | Write log data to a log file | Below is the the instruction that describes the task:
### Input:
Write log data to a log file
### Response:
def write_output(self, data, args=None, filename=None, label=None):
"""Write log data to a log file"""
if args:
if not args.outlog:
return 0
if not filename: filename=args.outlog
lastpath = ''
with open(str(filename), 'w') as output_file:
for entry in data['entries']:
if args.label:
if entry['source_path'] == lastpath:
output_file.write(entry['raw_text'] + '\n')
elif args.label == 'fname':
output_file.write('======== ' + \
entry['source_path'].split('/')[-1] + \
' >>>>\n' + entry['raw_text'] + '\n')
elif args.label == 'fpath':
output_file.write('======== ' + \
entry['source_path'] + \
' >>>>\n' + entry['raw_text'] + '\n')
else: output_file.write(entry['raw_text'] + '\n')
lastpath = entry['source_path'] |
def update_configuration(app):
"""Update parameters which are dependent on information from the
project-specific conf.py (including its location on the filesystem)"""
config = app.config
project = config.project
config_dir = app.env.srcdir
sys.path.insert(0, os.path.join(config_dir, '..'))
config.html_theme_path.append(os.path.relpath(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'themes'), config_dir))
if not config.html_logo:
config.html_logo = os.path.relpath(os.path.join(STATIC_PATH, 'safari_logo.png'), config_dir)
if not config.html_favicon:
config.html_favicon = os.path.relpath(os.path.join(STATIC_PATH, 'favicon.ico'), config_dir)
config.html_static_path.append(os.path.relpath(STATIC_PATH, config_dir))
if not config.htmlhelp_basename:
config.htmlhelp_basename = '%sdoc' % project
if not config.latex_logo:
config.latex_logo = os.path.relpath(os.path.join(STATIC_PATH, 'safari_logo.png'), config_dir)
if not config.epub_title:
config.epub_title = u'%s Documentation' % project
if not config.epub_publisher:
config.epub_publisher = config.epub_author
if not config.epub_copyright:
config.epub_copyright = config.copyright
config.latex_documents.append(
(master_doc,
'%s.tex' % project,
u'%s Documentation' % project,
u'Safari',
'manual'))
config.man_pages.append(
(master_doc,
project,
u'%s Documentation' % project,
[u'Safari'],
1))
config.texinfo_documents.append(
(master_doc,
project,
u'%s Documentation' % project,
u'Safari',
project,
'One line description of project.',
'Miscellaneous'))
# Parse the version number from setup.py without actually running setup()
with open(os.path.join(config_dir, '..', 'setup.py'), 'r') as f:
content = f.read()
match = re.search(r"version\s*=\s*['\"]([\d\.]+)['\"]", content)
if match:
config.version = match.group(1)
config.release = config.version | Update parameters which are dependent on information from the
project-specific conf.py (including its location on the filesystem) | Below is the the instruction that describes the task:
### Input:
Update parameters which are dependent on information from the
project-specific conf.py (including its location on the filesystem)
### Response:
def update_configuration(app):
"""Update parameters which are dependent on information from the
project-specific conf.py (including its location on the filesystem)"""
config = app.config
project = config.project
config_dir = app.env.srcdir
sys.path.insert(0, os.path.join(config_dir, '..'))
config.html_theme_path.append(os.path.relpath(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'themes'), config_dir))
if not config.html_logo:
config.html_logo = os.path.relpath(os.path.join(STATIC_PATH, 'safari_logo.png'), config_dir)
if not config.html_favicon:
config.html_favicon = os.path.relpath(os.path.join(STATIC_PATH, 'favicon.ico'), config_dir)
config.html_static_path.append(os.path.relpath(STATIC_PATH, config_dir))
if not config.htmlhelp_basename:
config.htmlhelp_basename = '%sdoc' % project
if not config.latex_logo:
config.latex_logo = os.path.relpath(os.path.join(STATIC_PATH, 'safari_logo.png'), config_dir)
if not config.epub_title:
config.epub_title = u'%s Documentation' % project
if not config.epub_publisher:
config.epub_publisher = config.epub_author
if not config.epub_copyright:
config.epub_copyright = config.copyright
config.latex_documents.append(
(master_doc,
'%s.tex' % project,
u'%s Documentation' % project,
u'Safari',
'manual'))
config.man_pages.append(
(master_doc,
project,
u'%s Documentation' % project,
[u'Safari'],
1))
config.texinfo_documents.append(
(master_doc,
project,
u'%s Documentation' % project,
u'Safari',
project,
'One line description of project.',
'Miscellaneous'))
# Parse the version number from setup.py without actually running setup()
with open(os.path.join(config_dir, '..', 'setup.py'), 'r') as f:
content = f.read()
match = re.search(r"version\s*=\s*['\"]([\d\.]+)['\"]", content)
if match:
config.version = match.group(1)
config.release = config.version |
def main(_):
"""Load a trained algorithm and render videos."""
utility.set_up_logging()
if not FLAGS.logdir or not FLAGS.outdir:
raise KeyError('You must specify logging and outdirs directories.')
FLAGS.logdir = os.path.expanduser(FLAGS.logdir)
FLAGS.outdir = os.path.expanduser(FLAGS.outdir)
visualize(
FLAGS.logdir, FLAGS.outdir, FLAGS.num_agents, FLAGS.num_episodes,
FLAGS.checkpoint, FLAGS.env_processes) | Load a trained algorithm and render videos. | Below is the the instruction that describes the task:
### Input:
Load a trained algorithm and render videos.
### Response:
def main(_):
"""Load a trained algorithm and render videos."""
utility.set_up_logging()
if not FLAGS.logdir or not FLAGS.outdir:
raise KeyError('You must specify logging and outdirs directories.')
FLAGS.logdir = os.path.expanduser(FLAGS.logdir)
FLAGS.outdir = os.path.expanduser(FLAGS.outdir)
visualize(
FLAGS.logdir, FLAGS.outdir, FLAGS.num_agents, FLAGS.num_episodes,
FLAGS.checkpoint, FLAGS.env_processes) |
def _retry_on_connection_error(func: Callable) -> Callable:
"""Decorator to retry the function max_connection_attemps number of times.
Herewith-decorated functions need an ``_attempt`` keyword argument.
This is to decorate functions that do network requests that may fail. Note that
:meth:`.get_json`, :meth:`.get_iphone_json`, :meth:`.graphql_query` and :meth:`.graphql_node_list` already have
their own logic for retrying, hence functions that only use these for network access must not be decorated with this
decorator."""
@wraps(func)
def call(instaloader, *args, **kwargs):
try:
return func(instaloader, *args, **kwargs)
except (urllib3.exceptions.HTTPError, requests.exceptions.RequestException, ConnectionException) as err:
error_string = "{}({}): {}".format(func.__name__, ', '.join([repr(arg) for arg in args]), err)
if (kwargs.get('_attempt') or 1) == instaloader.context.max_connection_attempts:
raise ConnectionException(error_string) from None
instaloader.context.error(error_string + " [retrying; skip with ^C]", repeat_at_end=False)
try:
if kwargs.get('_attempt'):
kwargs['_attempt'] += 1
else:
kwargs['_attempt'] = 2
instaloader.context.do_sleep()
return call(instaloader, *args, **kwargs)
except KeyboardInterrupt:
instaloader.context.error("[skipped by user]", repeat_at_end=False)
raise ConnectionException(error_string) from None
return call | Decorator to retry the function max_connection_attemps number of times.
Herewith-decorated functions need an ``_attempt`` keyword argument.
This is to decorate functions that do network requests that may fail. Note that
:meth:`.get_json`, :meth:`.get_iphone_json`, :meth:`.graphql_query` and :meth:`.graphql_node_list` already have
their own logic for retrying, hence functions that only use these for network access must not be decorated with this
decorator. | Below is the the instruction that describes the task:
### Input:
Decorator to retry the function max_connection_attemps number of times.
Herewith-decorated functions need an ``_attempt`` keyword argument.
This is to decorate functions that do network requests that may fail. Note that
:meth:`.get_json`, :meth:`.get_iphone_json`, :meth:`.graphql_query` and :meth:`.graphql_node_list` already have
their own logic for retrying, hence functions that only use these for network access must not be decorated with this
decorator.
### Response:
def _retry_on_connection_error(func: Callable) -> Callable:
"""Decorator to retry the function max_connection_attemps number of times.
Herewith-decorated functions need an ``_attempt`` keyword argument.
This is to decorate functions that do network requests that may fail. Note that
:meth:`.get_json`, :meth:`.get_iphone_json`, :meth:`.graphql_query` and :meth:`.graphql_node_list` already have
their own logic for retrying, hence functions that only use these for network access must not be decorated with this
decorator."""
@wraps(func)
def call(instaloader, *args, **kwargs):
try:
return func(instaloader, *args, **kwargs)
except (urllib3.exceptions.HTTPError, requests.exceptions.RequestException, ConnectionException) as err:
error_string = "{}({}): {}".format(func.__name__, ', '.join([repr(arg) for arg in args]), err)
if (kwargs.get('_attempt') or 1) == instaloader.context.max_connection_attempts:
raise ConnectionException(error_string) from None
instaloader.context.error(error_string + " [retrying; skip with ^C]", repeat_at_end=False)
try:
if kwargs.get('_attempt'):
kwargs['_attempt'] += 1
else:
kwargs['_attempt'] = 2
instaloader.context.do_sleep()
return call(instaloader, *args, **kwargs)
except KeyboardInterrupt:
instaloader.context.error("[skipped by user]", repeat_at_end=False)
raise ConnectionException(error_string) from None
return call |
def calculate_angular_momentum(self):
"""
Returns a list of the three (x,y,z) components of the total angular momentum of all particles in the simulation.
"""
clibrebound.reb_tools_angular_momentum.restype = reb_vec3d
L = clibrebound.reb_tools_angular_momentum(byref(self))
return [L.x, L.y, L.z] | Returns a list of the three (x,y,z) components of the total angular momentum of all particles in the simulation. | Below is the the instruction that describes the task:
### Input:
Returns a list of the three (x,y,z) components of the total angular momentum of all particles in the simulation.
### Response:
def calculate_angular_momentum(self):
"""
Returns a list of the three (x,y,z) components of the total angular momentum of all particles in the simulation.
"""
clibrebound.reb_tools_angular_momentum.restype = reb_vec3d
L = clibrebound.reb_tools_angular_momentum(byref(self))
return [L.x, L.y, L.z] |
def get_reservation_resources(session, reservation_id, *models):
""" Get all resources of given models in reservation.
:param session: CloudShell session
:type session: cloudshell.api.cloudshell_api.CloudShellAPISession
:param reservation_id: active reservation ID
:param models: list of requested models
:return: list of all resources of models in reservation
"""
models_resources = []
reservation = session.GetReservationDetails(reservation_id).ReservationDescription
for resource in reservation.Resources:
if resource.ResourceModelName in models:
models_resources.append(resource)
return models_resources | Get all resources of given models in reservation.
:param session: CloudShell session
:type session: cloudshell.api.cloudshell_api.CloudShellAPISession
:param reservation_id: active reservation ID
:param models: list of requested models
:return: list of all resources of models in reservation | Below is the the instruction that describes the task:
### Input:
Get all resources of given models in reservation.
:param session: CloudShell session
:type session: cloudshell.api.cloudshell_api.CloudShellAPISession
:param reservation_id: active reservation ID
:param models: list of requested models
:return: list of all resources of models in reservation
### Response:
def get_reservation_resources(session, reservation_id, *models):
""" Get all resources of given models in reservation.
:param session: CloudShell session
:type session: cloudshell.api.cloudshell_api.CloudShellAPISession
:param reservation_id: active reservation ID
:param models: list of requested models
:return: list of all resources of models in reservation
"""
models_resources = []
reservation = session.GetReservationDetails(reservation_id).ReservationDescription
for resource in reservation.Resources:
if resource.ResourceModelName in models:
models_resources.append(resource)
return models_resources |
def _check_instrument(self):
"""Check and try fix instrument name if needed"""
instr = INSTRUMENTS.get(self.platform_name, self.instrument.lower())
if instr != self.instrument.lower():
self.instrument = instr
LOG.warning("Inconsistent instrument/satellite input - " +
"instrument set to %s", self.instrument)
self.instrument = self.instrument.lower().replace('/', '') | Check and try fix instrument name if needed | Below is the the instruction that describes the task:
### Input:
Check and try fix instrument name if needed
### Response:
def _check_instrument(self):
"""Check and try fix instrument name if needed"""
instr = INSTRUMENTS.get(self.platform_name, self.instrument.lower())
if instr != self.instrument.lower():
self.instrument = instr
LOG.warning("Inconsistent instrument/satellite input - " +
"instrument set to %s", self.instrument)
self.instrument = self.instrument.lower().replace('/', '') |
def _context_build(self, pending=False):
"""
Create a context dict from standard task configuration.
The context is constructed in a standard way and is passed to str.format() on configuration.
The context consists of the entire os.environ, the config 'defines', and a set
of pre-defined values which have a common prefix from 'context_prefix'.
"""
log = self._params.get('log', self._discard)
log.debug("called with pending=%s", pending)
if pending:
conf = self._config_pending
else:
conf = self._config_running
if not conf:
log.warning("No config available")
conf = {}
# Initially create the context as a copy of the environment.
#
context = os.environ.copy()
# Merge in the built-in items. It is important that these
# will override any values from the environment as they will
# have come from a parent instance of "taskforce".
#
context.update(
{
context_prefix+'instance': None,
context_prefix+'pid': None,
context_prefix+'name': self._name,
context_prefix+'ppid': os.getpid(),
context_prefix+'host': self._legion.host,
context_prefix+'fqdn': self._legion.fqdn
}
)
# Add certain config values to the context
for tag in ['user', 'group', 'pidfile', 'cwd']:
if tag in conf:
context[context_prefix+tag] = self._get(conf[tag], context=context)
if self._legion._config_running:
self._context_defines(context, self._legion._config_running)
else:
log.warning("No legion config available for defines")
self._context_defines(context, conf)
self._context_defaults(context, conf)
if self._legion._config_running:
self._context_defaults(context, self._legion._config_running)
else:
log.warning("No legion config available for defaults")
return context | Create a context dict from standard task configuration.
The context is constructed in a standard way and is passed to str.format() on configuration.
The context consists of the entire os.environ, the config 'defines', and a set
of pre-defined values which have a common prefix from 'context_prefix'. | Below is the the instruction that describes the task:
### Input:
Create a context dict from standard task configuration.
The context is constructed in a standard way and is passed to str.format() on configuration.
The context consists of the entire os.environ, the config 'defines', and a set
of pre-defined values which have a common prefix from 'context_prefix'.
### Response:
def _context_build(self, pending=False):
"""
Create a context dict from standard task configuration.
The context is constructed in a standard way and is passed to str.format() on configuration.
The context consists of the entire os.environ, the config 'defines', and a set
of pre-defined values which have a common prefix from 'context_prefix'.
"""
log = self._params.get('log', self._discard)
log.debug("called with pending=%s", pending)
if pending:
conf = self._config_pending
else:
conf = self._config_running
if not conf:
log.warning("No config available")
conf = {}
# Initially create the context as a copy of the environment.
#
context = os.environ.copy()
# Merge in the built-in items. It is important that these
# will override any values from the environment as they will
# have come from a parent instance of "taskforce".
#
context.update(
{
context_prefix+'instance': None,
context_prefix+'pid': None,
context_prefix+'name': self._name,
context_prefix+'ppid': os.getpid(),
context_prefix+'host': self._legion.host,
context_prefix+'fqdn': self._legion.fqdn
}
)
# Add certain config values to the context
for tag in ['user', 'group', 'pidfile', 'cwd']:
if tag in conf:
context[context_prefix+tag] = self._get(conf[tag], context=context)
if self._legion._config_running:
self._context_defines(context, self._legion._config_running)
else:
log.warning("No legion config available for defines")
self._context_defines(context, conf)
self._context_defaults(context, conf)
if self._legion._config_running:
self._context_defaults(context, self._legion._config_running)
else:
log.warning("No legion config available for defaults")
return context |
def get_client_info(self):
"""
A query is sent to the server to obtain the client's data stored at the
server.
:return: :class:`~aioxmpp.ibr.Query`
"""
iq = aioxmpp.IQ(
to=self.client.local_jid.bare().replace(localpart=None),
type_=aioxmpp.IQType.GET,
payload=xso.Query()
)
reply = (yield from self.client.send(iq))
return reply | A query is sent to the server to obtain the client's data stored at the
server.
:return: :class:`~aioxmpp.ibr.Query` | Below is the the instruction that describes the task:
### Input:
A query is sent to the server to obtain the client's data stored at the
server.
:return: :class:`~aioxmpp.ibr.Query`
### Response:
def get_client_info(self):
"""
A query is sent to the server to obtain the client's data stored at the
server.
:return: :class:`~aioxmpp.ibr.Query`
"""
iq = aioxmpp.IQ(
to=self.client.local_jid.bare().replace(localpart=None),
type_=aioxmpp.IQType.GET,
payload=xso.Query()
)
reply = (yield from self.client.send(iq))
return reply |
def get_unpacked_response_body(self, requestId, mimetype="application/unknown"):
'''
Return a unpacked, decoded resposne body from Network_getResponseBody()
'''
content = self.Network_getResponseBody(requestId)
assert 'result' in content
result = content['result']
assert 'base64Encoded' in result
assert 'body' in result
if result['base64Encoded']:
content = base64.b64decode(result['body'])
else:
content = result['body']
self.log.info("Navigate complete. Received %s byte response with type %s.", len(content), mimetype)
return {'binary' : result['base64Encoded'], 'mimetype' : mimetype, 'content' : content} | Return a unpacked, decoded resposne body from Network_getResponseBody() | Below is the the instruction that describes the task:
### Input:
Return a unpacked, decoded resposne body from Network_getResponseBody()
### Response:
def get_unpacked_response_body(self, requestId, mimetype="application/unknown"):
'''
Return a unpacked, decoded resposne body from Network_getResponseBody()
'''
content = self.Network_getResponseBody(requestId)
assert 'result' in content
result = content['result']
assert 'base64Encoded' in result
assert 'body' in result
if result['base64Encoded']:
content = base64.b64decode(result['body'])
else:
content = result['body']
self.log.info("Navigate complete. Received %s byte response with type %s.", len(content), mimetype)
return {'binary' : result['base64Encoded'], 'mimetype' : mimetype, 'content' : content} |
def register():
"""View function which handles a registration request."""
if _security.confirmable or request.is_json:
form_class = _security.confirm_register_form
else:
form_class = _security.register_form
if request.is_json:
form_data = MultiDict(request.get_json())
else:
form_data = request.form
form = form_class(form_data)
if form.validate_on_submit():
user = register_user(**form.to_dict())
form.user = user
if not _security.confirmable or _security.login_without_confirmation:
after_this_request(_commit)
login_user(user)
if not request.is_json:
if 'next' in form:
redirect_url = get_post_register_redirect(form.next.data)
else:
redirect_url = get_post_register_redirect()
return redirect(redirect_url)
return _render_json(form, include_auth_token=True)
if request.is_json:
return _render_json(form)
return _security.render_template(config_value('REGISTER_USER_TEMPLATE'),
register_user_form=form,
**_ctx('register')) | View function which handles a registration request. | Below is the the instruction that describes the task:
### Input:
View function which handles a registration request.
### Response:
def register():
"""View function which handles a registration request."""
if _security.confirmable or request.is_json:
form_class = _security.confirm_register_form
else:
form_class = _security.register_form
if request.is_json:
form_data = MultiDict(request.get_json())
else:
form_data = request.form
form = form_class(form_data)
if form.validate_on_submit():
user = register_user(**form.to_dict())
form.user = user
if not _security.confirmable or _security.login_without_confirmation:
after_this_request(_commit)
login_user(user)
if not request.is_json:
if 'next' in form:
redirect_url = get_post_register_redirect(form.next.data)
else:
redirect_url = get_post_register_redirect()
return redirect(redirect_url)
return _render_json(form, include_auth_token=True)
if request.is_json:
return _render_json(form)
return _security.render_template(config_value('REGISTER_USER_TEMPLATE'),
register_user_form=form,
**_ctx('register')) |
def is_secret_known(
end_state: NettingChannelEndState,
secrethash: SecretHash,
) -> bool:
"""True if the `secrethash` is for a lock with a known secret."""
return (
secrethash in end_state.secrethashes_to_unlockedlocks or
secrethash in end_state.secrethashes_to_onchain_unlockedlocks
) | True if the `secrethash` is for a lock with a known secret. | Below is the the instruction that describes the task:
### Input:
True if the `secrethash` is for a lock with a known secret.
### Response:
def is_secret_known(
end_state: NettingChannelEndState,
secrethash: SecretHash,
) -> bool:
"""True if the `secrethash` is for a lock with a known secret."""
return (
secrethash in end_state.secrethashes_to_unlockedlocks or
secrethash in end_state.secrethashes_to_onchain_unlockedlocks
) |
def get_positions(self):
"""
Returns a list of positions.
http://dev.wheniwork.com/#listing-positions
"""
url = "/2/positions"
data = self._get_resource(url)
positions = []
for entry in data['positions']:
positions.append(self.position_from_json(entry))
return positions | Returns a list of positions.
http://dev.wheniwork.com/#listing-positions | Below is the the instruction that describes the task:
### Input:
Returns a list of positions.
http://dev.wheniwork.com/#listing-positions
### Response:
def get_positions(self):
"""
Returns a list of positions.
http://dev.wheniwork.com/#listing-positions
"""
url = "/2/positions"
data = self._get_resource(url)
positions = []
for entry in data['positions']:
positions.append(self.position_from_json(entry))
return positions |
def to_dict_list_generic_type(df, int_col=None, binary_col=None):
"""Transform each row to dict, and put them into a list. And automatically
convert ``np.int64`` to ``int``, ``pandas.tslib.Timestamp`` to
``datetime.datetime``, ``np.nan`` to ``None``.
:param df: ``pandas.DataFrame`` instance.
:param int_col: integer type columns.
:param binary_col: binary type type columns.
**中文文档**
由于 ``pandas.Series`` 中的值的整数数据类型是 ``numpy.int64``,
时间数据类型是 ``pandas.tslib.Timestamp``, None的数据类型是 ``np.nan``。
虽然从访问和计算的角度来说没有什么问题, 但会和很多数据库的操作不兼容。
此函数能将 ``pandas.DataFrame`` 转化成字典的列表。数据类型能正确的获得int,
bytes和datetime.datetime。
"""
# Pre-process int_col, binary_col and datetime_col
if (int_col is not None) and (not isinstance(int_col, (list, tuple))):
int_col = [int_col, ]
if (binary_col is not None) and (not isinstance(binary_col, (list, tuple))):
binary_col = [binary_col, ]
datetime_col = list()
for col, dtype in dict(df.dtypes).items():
if "datetime64" in str(dtype):
datetime_col.append(col)
if len(datetime_col) == 0:
datetime_col = None
# Pre-process binary column dataframe
def b64_encode(b):
try:
return base64.b64encode(b)
except:
return b
if binary_col is not None:
for col in binary_col:
df[col] = df[col].apply(b64_encode)
data = json.loads(df.to_json(orient="records", date_format="iso"))
if int_col is not None:
for row in data:
for col in int_col:
try:
row[col] = int(row[col])
except:
pass
if binary_col is not None:
for row in data:
for col in binary_col:
try:
row[col] = base64.b64decode(row[col].encode("ascii"))
except:
pass
if datetime_col is not None:
for row in data:
for col in datetime_col:
try:
row[col] = rolex.str2datetime(row[col])
except:
pass
return data | Transform each row to dict, and put them into a list. And automatically
convert ``np.int64`` to ``int``, ``pandas.tslib.Timestamp`` to
``datetime.datetime``, ``np.nan`` to ``None``.
:param df: ``pandas.DataFrame`` instance.
:param int_col: integer type columns.
:param binary_col: binary type type columns.
**中文文档**
由于 ``pandas.Series`` 中的值的整数数据类型是 ``numpy.int64``,
时间数据类型是 ``pandas.tslib.Timestamp``, None的数据类型是 ``np.nan``。
虽然从访问和计算的角度来说没有什么问题, 但会和很多数据库的操作不兼容。
此函数能将 ``pandas.DataFrame`` 转化成字典的列表。数据类型能正确的获得int,
bytes和datetime.datetime。 | Below is the the instruction that describes the task:
### Input:
Transform each row to dict, and put them into a list. And automatically
convert ``np.int64`` to ``int``, ``pandas.tslib.Timestamp`` to
``datetime.datetime``, ``np.nan`` to ``None``.
:param df: ``pandas.DataFrame`` instance.
:param int_col: integer type columns.
:param binary_col: binary type type columns.
**中文文档**
由于 ``pandas.Series`` 中的值的整数数据类型是 ``numpy.int64``,
时间数据类型是 ``pandas.tslib.Timestamp``, None的数据类型是 ``np.nan``。
虽然从访问和计算的角度来说没有什么问题, 但会和很多数据库的操作不兼容。
此函数能将 ``pandas.DataFrame`` 转化成字典的列表。数据类型能正确的获得int,
bytes和datetime.datetime。
### Response:
def to_dict_list_generic_type(df, int_col=None, binary_col=None):
"""Transform each row to dict, and put them into a list. And automatically
convert ``np.int64`` to ``int``, ``pandas.tslib.Timestamp`` to
``datetime.datetime``, ``np.nan`` to ``None``.
:param df: ``pandas.DataFrame`` instance.
:param int_col: integer type columns.
:param binary_col: binary type type columns.
**中文文档**
由于 ``pandas.Series`` 中的值的整数数据类型是 ``numpy.int64``,
时间数据类型是 ``pandas.tslib.Timestamp``, None的数据类型是 ``np.nan``。
虽然从访问和计算的角度来说没有什么问题, 但会和很多数据库的操作不兼容。
此函数能将 ``pandas.DataFrame`` 转化成字典的列表。数据类型能正确的获得int,
bytes和datetime.datetime。
"""
# Pre-process int_col, binary_col and datetime_col
if (int_col is not None) and (not isinstance(int_col, (list, tuple))):
int_col = [int_col, ]
if (binary_col is not None) and (not isinstance(binary_col, (list, tuple))):
binary_col = [binary_col, ]
datetime_col = list()
for col, dtype in dict(df.dtypes).items():
if "datetime64" in str(dtype):
datetime_col.append(col)
if len(datetime_col) == 0:
datetime_col = None
# Pre-process binary column dataframe
def b64_encode(b):
try:
return base64.b64encode(b)
except:
return b
if binary_col is not None:
for col in binary_col:
df[col] = df[col].apply(b64_encode)
data = json.loads(df.to_json(orient="records", date_format="iso"))
if int_col is not None:
for row in data:
for col in int_col:
try:
row[col] = int(row[col])
except:
pass
if binary_col is not None:
for row in data:
for col in binary_col:
try:
row[col] = base64.b64decode(row[col].encode("ascii"))
except:
pass
if datetime_col is not None:
for row in data:
for col in datetime_col:
try:
row[col] = rolex.str2datetime(row[col])
except:
pass
return data |
def _build_saveframe(self, lexer):
"""Build NMR-STAR file saveframe.
:param lexer: instance of the lexical analyzer.
:type lexer: :func:`~nmrstarlib.bmrblex.bmrblex`
:return: Saveframe dictionary.
:rtype: :py:class:`collections.OrderedDict`
"""
odict = OrderedDict()
loop_count = 0
token = next(lexer)
while token != u"save_":
try:
if token[0] == u"_":
# This strips off the leading underscore of tagnames for readability
odict[token[1:]] = next(lexer)
# Skip the saveframe if it's not in the list of wanted categories
if self._frame_categories:
if token == "_Saveframe_category" and odict[token[1:]] not in self._frame_categories:
raise SkipSaveFrame()
elif token == u"loop_":
odict[u"loop_{}".format(loop_count)] = self._build_loop(lexer)
loop_count += 1
elif token.lstrip().startswith(u"#"):
continue
else:
print("Error: Invalid token {}".format(token), file=sys.stderr)
print("In _build_saveframe try block", file=sys.stderr)
raise InvalidToken("{}".format(token))
except IndexError:
print("Error: Invalid token {}".format(token), file=sys.stderr)
print("In _build_saveframe except block", file=sys.stderr)
raise
except SkipSaveFrame:
self._skip_saveframe(lexer)
odict = None
finally:
if odict is None:
token = u"save_"
else:
token = next(lexer)
return odict | Build NMR-STAR file saveframe.
:param lexer: instance of the lexical analyzer.
:type lexer: :func:`~nmrstarlib.bmrblex.bmrblex`
:return: Saveframe dictionary.
:rtype: :py:class:`collections.OrderedDict` | Below is the the instruction that describes the task:
### Input:
Build NMR-STAR file saveframe.
:param lexer: instance of the lexical analyzer.
:type lexer: :func:`~nmrstarlib.bmrblex.bmrblex`
:return: Saveframe dictionary.
:rtype: :py:class:`collections.OrderedDict`
### Response:
def _build_saveframe(self, lexer):
"""Build NMR-STAR file saveframe.
:param lexer: instance of the lexical analyzer.
:type lexer: :func:`~nmrstarlib.bmrblex.bmrblex`
:return: Saveframe dictionary.
:rtype: :py:class:`collections.OrderedDict`
"""
odict = OrderedDict()
loop_count = 0
token = next(lexer)
while token != u"save_":
try:
if token[0] == u"_":
# This strips off the leading underscore of tagnames for readability
odict[token[1:]] = next(lexer)
# Skip the saveframe if it's not in the list of wanted categories
if self._frame_categories:
if token == "_Saveframe_category" and odict[token[1:]] not in self._frame_categories:
raise SkipSaveFrame()
elif token == u"loop_":
odict[u"loop_{}".format(loop_count)] = self._build_loop(lexer)
loop_count += 1
elif token.lstrip().startswith(u"#"):
continue
else:
print("Error: Invalid token {}".format(token), file=sys.stderr)
print("In _build_saveframe try block", file=sys.stderr)
raise InvalidToken("{}".format(token))
except IndexError:
print("Error: Invalid token {}".format(token), file=sys.stderr)
print("In _build_saveframe except block", file=sys.stderr)
raise
except SkipSaveFrame:
self._skip_saveframe(lexer)
odict = None
finally:
if odict is None:
token = u"save_"
else:
token = next(lexer)
return odict |
def update_attribute_group(attributegroup, **kwargs):
"""
Add a new attribute group.
An attribute group is a container for attributes which need to be grouped
in some logical way. For example, if the 'attr_is_var' flag isn't expressive
enough to delineate different groupings.
an attribute group looks like:
{
'project_id' : XXX,
'name' : 'my group name'
'description : 'my group description' (optional)
'layout' : 'my group layout' (optional)
'exclusive' : 'N' (or 'Y' ) (optional, default to 'N')
}
"""
user_id=kwargs.get('user_id')
if attributegroup.id is None:
raise HydraError("cannot update attribute group. no ID specified")
try:
group_i = db.DBSession.query(AttrGroup).filter(AttrGroup.id==attributegroup.id).one()
group_i.project.check_write_permission(user_id)
group_i.name = attributegroup.name
group_i.description = attributegroup.description
group_i.layout = attributegroup.layout
group_i.exclusive = attributegroup.exclusive
db.DBSession.flush()
log.info("Group %s in project %s updated", attributegroup.id, attributegroup.project_id)
except NoResultFound:
raise HydraError('No Attribute Group %s was found in project %s', attributegroup.id, attributegroup.project_id)
return group_i | Add a new attribute group.
An attribute group is a container for attributes which need to be grouped
in some logical way. For example, if the 'attr_is_var' flag isn't expressive
enough to delineate different groupings.
an attribute group looks like:
{
'project_id' : XXX,
'name' : 'my group name'
'description : 'my group description' (optional)
'layout' : 'my group layout' (optional)
'exclusive' : 'N' (or 'Y' ) (optional, default to 'N')
} | Below is the instruction that describes the task:
### Input:
Add a new attribute group.
An attribute group is a container for attributes which need to be grouped
in some logical way. For example, if the 'attr_is_var' flag isn't expressive
enough to delineate different groupings.
an attribute group looks like:
{
'project_id' : XXX,
'name' : 'my group name'
'description : 'my group description' (optional)
'layout' : 'my group layout' (optional)
'exclusive' : 'N' (or 'Y' ) (optional, default to 'N')
}
### Response:
def update_attribute_group(attributegroup, **kwargs):
"""
Add a new attribute group.
An attribute group is a container for attributes which need to be grouped
in some logical way. For example, if the 'attr_is_var' flag isn't expressive
enough to delineate different groupings.
an attribute group looks like:
{
'project_id' : XXX,
'name' : 'my group name'
'description : 'my group description' (optional)
'layout' : 'my group layout' (optional)
'exclusive' : 'N' (or 'Y' ) (optional, default to 'N')
}
"""
user_id=kwargs.get('user_id')
if attributegroup.id is None:
raise HydraError("cannot update attribute group. no ID specified")
try:
group_i = db.DBSession.query(AttrGroup).filter(AttrGroup.id==attributegroup.id).one()
group_i.project.check_write_permission(user_id)
group_i.name = attributegroup.name
group_i.description = attributegroup.description
group_i.layout = attributegroup.layout
group_i.exclusive = attributegroup.exclusive
db.DBSession.flush()
log.info("Group %s in project %s updated", attributegroup.id, attributegroup.project_id)
except NoResultFound:
raise HydraError('No Attribute Group %s was found in project %s', attributegroup.id, attributegroup.project_id)
return group_i |
def _process_gradient_args(f, kwargs):
"""Handle common processing of arguments for gradient and gradient-like functions."""
axes = kwargs.get('axes', range(f.ndim))
def _check_length(positions):
if 'axes' in kwargs and len(positions) < len(axes):
raise ValueError('Length of "coordinates" or "deltas" cannot be less than that '
'of "axes".')
elif 'axes' not in kwargs and len(positions) != len(axes):
raise ValueError('Length of "coordinates" or "deltas" must match the number of '
'dimensions of "f" when "axes" is not given.')
if 'deltas' in kwargs:
if 'coordinates' in kwargs or 'x' in kwargs:
raise ValueError('Cannot specify both "coordinates" and "deltas".')
_check_length(kwargs['deltas'])
return 'delta', kwargs['deltas'], axes
elif 'coordinates' in kwargs:
_check_length(kwargs['coordinates'])
return 'x', kwargs['coordinates'], axes
elif 'x' in kwargs:
warnings.warn('The use of "x" as a parameter for coordinate values has been '
'deprecated. Use "coordinates" instead.', metpyDeprecation)
_check_length(kwargs['x'])
return 'x', kwargs['x'], axes
elif isinstance(f, xr.DataArray):
return 'pass', axes, axes # only the axis argument matters
else:
raise ValueError('Must specify either "coordinates" or "deltas" for value positions '
                         'when "f" is not a DataArray.') | Handle common processing of arguments for gradient and gradient-like functions. | Below is the instruction that describes the task:
### Input:
Handle common processing of arguments for gradient and gradient-like functions.
### Response:
def _process_gradient_args(f, kwargs):
"""Handle common processing of arguments for gradient and gradient-like functions."""
axes = kwargs.get('axes', range(f.ndim))
def _check_length(positions):
if 'axes' in kwargs and len(positions) < len(axes):
raise ValueError('Length of "coordinates" or "deltas" cannot be less than that '
'of "axes".')
elif 'axes' not in kwargs and len(positions) != len(axes):
raise ValueError('Length of "coordinates" or "deltas" must match the number of '
'dimensions of "f" when "axes" is not given.')
if 'deltas' in kwargs:
if 'coordinates' in kwargs or 'x' in kwargs:
raise ValueError('Cannot specify both "coordinates" and "deltas".')
_check_length(kwargs['deltas'])
return 'delta', kwargs['deltas'], axes
elif 'coordinates' in kwargs:
_check_length(kwargs['coordinates'])
return 'x', kwargs['coordinates'], axes
elif 'x' in kwargs:
warnings.warn('The use of "x" as a parameter for coordinate values has been '
'deprecated. Use "coordinates" instead.', metpyDeprecation)
_check_length(kwargs['x'])
return 'x', kwargs['x'], axes
elif isinstance(f, xr.DataArray):
return 'pass', axes, axes # only the axis argument matters
else:
raise ValueError('Must specify either "coordinates" or "deltas" for value positions '
'when "f" is not a DataArray.') |
def cat(self, paths, check_crc=False):
''' Fetch all files that match the source file pattern
and display their content on stdout.
:param paths: Paths to display
:type paths: list of strings
:param check_crc: Check for checksum errors
:type check_crc: boolean
:returns: a generator that yields strings
'''
if not isinstance(paths, list):
raise InvalidInputException("Paths should be a list")
if not paths:
raise InvalidInputException("cat: no path given")
processor = lambda path, node, check_crc=check_crc: self._handle_cat(path, node, check_crc)
for item in self._find_items(paths, processor, include_toplevel=True,
include_children=False, recurse=False):
if item:
yield item | Fetch all files that match the source file pattern
and display their content on stdout.
:param paths: Paths to display
:type paths: list of strings
:param check_crc: Check for checksum errors
:type check_crc: boolean
    :returns: a generator that yields strings | Below is the instruction that describes the task:
### Input:
Fetch all files that match the source file pattern
and display their content on stdout.
:param paths: Paths to display
:type paths: list of strings
:param check_crc: Check for checksum errors
:type check_crc: boolean
:returns: a generator that yields strings
### Response:
def cat(self, paths, check_crc=False):
''' Fetch all files that match the source file pattern
and display their content on stdout.
:param paths: Paths to display
:type paths: list of strings
:param check_crc: Check for checksum errors
:type check_crc: boolean
:returns: a generator that yields strings
'''
if not isinstance(paths, list):
raise InvalidInputException("Paths should be a list")
if not paths:
raise InvalidInputException("cat: no path given")
processor = lambda path, node, check_crc=check_crc: self._handle_cat(path, node, check_crc)
for item in self._find_items(paths, processor, include_toplevel=True,
include_children=False, recurse=False):
if item:
yield item |
def CreateSms(self, MessageType, *TargetNumbers):
"""Creates an SMS message.
:Parameters:
MessageType : `enums`.smsMessageType*
Message type.
TargetNumbers : str
One or more target SMS numbers.
:return: An sms message object.
:rtype: `SmsMessage`
"""
return SmsMessage(self, chop(self._DoCommand('CREATE SMS %s %s' % (MessageType, ', '.join(TargetNumbers))), 2)[1]) | Creates an SMS message.
:Parameters:
MessageType : `enums`.smsMessageType*
Message type.
TargetNumbers : str
One or more target SMS numbers.
:return: An sms message object.
    :rtype: `SmsMessage` | Below is the instruction that describes the task:
### Input:
Creates an SMS message.
:Parameters:
MessageType : `enums`.smsMessageType*
Message type.
TargetNumbers : str
One or more target SMS numbers.
:return: An sms message object.
:rtype: `SmsMessage`
### Response:
def CreateSms(self, MessageType, *TargetNumbers):
"""Creates an SMS message.
:Parameters:
MessageType : `enums`.smsMessageType*
Message type.
TargetNumbers : str
One or more target SMS numbers.
:return: An sms message object.
:rtype: `SmsMessage`
"""
return SmsMessage(self, chop(self._DoCommand('CREATE SMS %s %s' % (MessageType, ', '.join(TargetNumbers))), 2)[1]) |
def sbo_version(self, repo, find):
"""
Add version to SBo packages
"""
ver = ""
if repo == "sbo":
ver = "-" + SBoGrep(find).version()
    return ver | Add version to SBo packages | Below is the instruction that describes the task:
### Input:
Add version to SBo packages
### Response:
def sbo_version(self, repo, find):
"""
Add version to SBo packages
"""
ver = ""
if repo == "sbo":
ver = "-" + SBoGrep(find).version()
return ver |
def send_command(self, obj, command, *arguments):
""" Send command and do not parse output (except for communication errors).
:param obj: requested object.
:param command: command to send.
:param arguments: list of command arguments.
"""
index_command = obj._build_index_command(command, *arguments)
self.chassis_list[obj.chassis].sendQueryVerify(index_command) | Send command and do not parse output (except for communication errors).
:param obj: requested object.
:param command: command to send.
    :param arguments: list of command arguments. | Below is the instruction that describes the task:
### Input:
Send command and do not parse output (except for communication errors).
:param obj: requested object.
:param command: command to send.
:param arguments: list of command arguments.
### Response:
def send_command(self, obj, command, *arguments):
""" Send command and do not parse output (except for communication errors).
:param obj: requested object.
:param command: command to send.
:param arguments: list of command arguments.
"""
index_command = obj._build_index_command(command, *arguments)
self.chassis_list[obj.chassis].sendQueryVerify(index_command) |
def integrate_days(self, days=1.0, verbose=True):
"""Integrates the model forward for a specified number of days.
It convertes the given number of days into years and calls
:func:`integrate_years`.
:param float days: integration time for the model in days
[default: 1.0]
:param bool verbose: information whether model time details
should be printed [default: True]
:Example:
::
>>> import climlab
>>> model = climlab.EBM()
>>> model.global_mean_temperature()
Field(11.997968598413685)
>>> model.integrate_days(80.)
Integrating for 19 steps, 80.0 days, or 0.219032740466 years.
Total elapsed time is 0.211111111111 years.
>>> model.global_mean_temperature()
Field(11.873680783355553)
"""
years = days / const.days_per_year
self.integrate_years(years=years, verbose=verbose) | Integrates the model forward for a specified number of days.
It convertes the given number of days into years and calls
:func:`integrate_years`.
:param float days: integration time for the model in days
[default: 1.0]
:param bool verbose: information whether model time details
should be printed [default: True]
:Example:
::
>>> import climlab
>>> model = climlab.EBM()
>>> model.global_mean_temperature()
Field(11.997968598413685)
>>> model.integrate_days(80.)
Integrating for 19 steps, 80.0 days, or 0.219032740466 years.
Total elapsed time is 0.211111111111 years.
>>> model.global_mean_temperature()
    Field(11.873680783355553) | Below is the instruction that describes the task:
### Input:
Integrates the model forward for a specified number of days.
It convertes the given number of days into years and calls
:func:`integrate_years`.
:param float days: integration time for the model in days
[default: 1.0]
:param bool verbose: information whether model time details
should be printed [default: True]
:Example:
::
>>> import climlab
>>> model = climlab.EBM()
>>> model.global_mean_temperature()
Field(11.997968598413685)
>>> model.integrate_days(80.)
Integrating for 19 steps, 80.0 days, or 0.219032740466 years.
Total elapsed time is 0.211111111111 years.
>>> model.global_mean_temperature()
Field(11.873680783355553)
### Response:
def integrate_days(self, days=1.0, verbose=True):
"""Integrates the model forward for a specified number of days.
It convertes the given number of days into years and calls
:func:`integrate_years`.
:param float days: integration time for the model in days
[default: 1.0]
:param bool verbose: information whether model time details
should be printed [default: True]
:Example:
::
>>> import climlab
>>> model = climlab.EBM()
>>> model.global_mean_temperature()
Field(11.997968598413685)
>>> model.integrate_days(80.)
Integrating for 19 steps, 80.0 days, or 0.219032740466 years.
Total elapsed time is 0.211111111111 years.
>>> model.global_mean_temperature()
Field(11.873680783355553)
"""
years = days / const.days_per_year
self.integrate_years(years=years, verbose=verbose) |
def generate_plaintext_random(plain_vocab, distribution, train_samples,
length):
"""Generates samples of text from the provided vocabulary.
Args:
plain_vocab: vocabulary.
distribution: distribution.
train_samples: samples for training.
length: length.
Returns:
train_indices (np.array of Integers): random integers for training.
shape = [num_samples, length]
test_indices (np.array of Integers): random integers for testing.
shape = [num_samples, length]
plain_vocab (list of Integers): unique vocabularies.
"""
if distribution is not None:
assert len(distribution) == len(plain_vocab)
train_indices = np.random.choice(
range(len(plain_vocab)), (train_samples, length), p=distribution)
return train_indices | Generates samples of text from the provided vocabulary.
Args:
plain_vocab: vocabulary.
distribution: distribution.
train_samples: samples for training.
length: length.
Returns:
train_indices (np.array of Integers): random integers for training.
shape = [num_samples, length]
test_indices (np.array of Integers): random integers for testing.
shape = [num_samples, length]
    plain_vocab (list of Integers): unique vocabularies. | Below is the instruction that describes the task:
### Input:
Generates samples of text from the provided vocabulary.
Args:
plain_vocab: vocabulary.
distribution: distribution.
train_samples: samples for training.
length: length.
Returns:
train_indices (np.array of Integers): random integers for training.
shape = [num_samples, length]
test_indices (np.array of Integers): random integers for testing.
shape = [num_samples, length]
plain_vocab (list of Integers): unique vocabularies.
### Response:
def generate_plaintext_random(plain_vocab, distribution, train_samples,
length):
"""Generates samples of text from the provided vocabulary.
Args:
plain_vocab: vocabulary.
distribution: distribution.
train_samples: samples for training.
length: length.
Returns:
train_indices (np.array of Integers): random integers for training.
shape = [num_samples, length]
test_indices (np.array of Integers): random integers for testing.
shape = [num_samples, length]
plain_vocab (list of Integers): unique vocabularies.
"""
if distribution is not None:
assert len(distribution) == len(plain_vocab)
train_indices = np.random.choice(
range(len(plain_vocab)), (train_samples, length), p=distribution)
return train_indices |
def reload(self):
"""Reload the metadata for this cluster.
For example:
.. literalinclude:: snippets.py
:start-after: [START bigtable_reload_cluster]
:end-before: [END bigtable_reload_cluster]
"""
cluster_pb = self._instance._client.instance_admin_client.get_cluster(self.name)
# NOTE: _update_from_pb does not check that the project and
# cluster ID on the response match the request.
self._update_from_pb(cluster_pb) | Reload the metadata for this cluster.
For example:
.. literalinclude:: snippets.py
:start-after: [START bigtable_reload_cluster]
    :end-before: [END bigtable_reload_cluster] | Below is the instruction that describes the task:
### Input:
Reload the metadata for this cluster.
For example:
.. literalinclude:: snippets.py
:start-after: [START bigtable_reload_cluster]
:end-before: [END bigtable_reload_cluster]
### Response:
def reload(self):
"""Reload the metadata for this cluster.
For example:
.. literalinclude:: snippets.py
:start-after: [START bigtable_reload_cluster]
:end-before: [END bigtable_reload_cluster]
"""
cluster_pb = self._instance._client.instance_admin_client.get_cluster(self.name)
# NOTE: _update_from_pb does not check that the project and
# cluster ID on the response match the request.
self._update_from_pb(cluster_pb) |
def sequence(values):
"""
Wrap a list of Python values as an Ibis sequence type
Parameters
----------
values : list
Should all be None or the same type
Returns
-------
seq : Sequence
"""
import ibis.expr.operations as ops
return ops.ValueList(values).to_expr() | Wrap a list of Python values as an Ibis sequence type
Parameters
----------
values : list
Should all be None or the same type
Returns
-------
seq : Sequence | Below is the instruction that describes the task:
### Input:
Wrap a list of Python values as an Ibis sequence type
Parameters
----------
values : list
Should all be None or the same type
Returns
-------
seq : Sequence
### Response:
def sequence(values):
"""
Wrap a list of Python values as an Ibis sequence type
Parameters
----------
values : list
Should all be None or the same type
Returns
-------
seq : Sequence
"""
import ibis.expr.operations as ops
return ops.ValueList(values).to_expr() |
def subjects_download(self, subject_id):
"""Get data file for subject with given identifier.
Parameters
----------
subject_id : string
Unique subject identifier
Returns
-------
FileInfo
Information about subject's data file on disk or None if identifier
is unknown
"""
# Retrieve subject to ensure that it exist
subject = self.subjects_get(subject_id)
if subject is None:
# Return None if subject is unknown
return None
else:
# Reference and information for original uploaded file
return FileInfo(
subject.data_file,
subject.properties[datastore.PROPERTY_MIMETYPE],
subject.properties[datastore.PROPERTY_FILENAME]
) | Get data file for subject with given identifier.
Parameters
----------
subject_id : string
Unique subject identifier
Returns
-------
FileInfo
Information about subject's data file on disk or None if identifier
    is unknown | Below is the instruction that describes the task:
### Input:
Get data file for subject with given identifier.
Parameters
----------
subject_id : string
Unique subject identifier
Returns
-------
FileInfo
Information about subject's data file on disk or None if identifier
is unknown
### Response:
def subjects_download(self, subject_id):
"""Get data file for subject with given identifier.
Parameters
----------
subject_id : string
Unique subject identifier
Returns
-------
FileInfo
Information about subject's data file on disk or None if identifier
is unknown
"""
# Retrieve subject to ensure that it exist
subject = self.subjects_get(subject_id)
if subject is None:
# Return None if subject is unknown
return None
else:
# Reference and information for original uploaded file
return FileInfo(
subject.data_file,
subject.properties[datastore.PROPERTY_MIMETYPE],
subject.properties[datastore.PROPERTY_FILENAME]
) |
def _set_advertisement_interval(self, v, load=False):
"""
Setter method for advertisement_interval, mapped from YANG variable /routing_system/router/router_bgp/address_family/ipv4/ipv4_unicast/af_vrf/neighbor/af_ipv4_vrf_neighbor_address_holder/af_ipv4_neighbor_addr/advertisement_interval (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_advertisement_interval is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_advertisement_interval() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=advertisement_interval.advertisement_interval, is_container='container', presence=False, yang_name="advertisement-interval", rest_name="advertisement-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Minimum interval between sending BGP routing updates', u'cli-compact-syntax': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """advertisement_interval must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=advertisement_interval.advertisement_interval, is_container='container', presence=False, yang_name="advertisement-interval", rest_name="advertisement-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Minimum interval between sending BGP routing updates', u'cli-compact-syntax': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)""",
})
self.__advertisement_interval = t
if hasattr(self, '_set'):
self._set() | Setter method for advertisement_interval, mapped from YANG variable /routing_system/router/router_bgp/address_family/ipv4/ipv4_unicast/af_vrf/neighbor/af_ipv4_vrf_neighbor_address_holder/af_ipv4_neighbor_addr/advertisement_interval (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_advertisement_interval is considered as a private
method. Backends looking to populate this variable should
    do so via calling thisObj._set_advertisement_interval() directly. | Below is the instruction that describes the task:
### Input:
Setter method for advertisement_interval, mapped from YANG variable /routing_system/router/router_bgp/address_family/ipv4/ipv4_unicast/af_vrf/neighbor/af_ipv4_vrf_neighbor_address_holder/af_ipv4_neighbor_addr/advertisement_interval (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_advertisement_interval is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_advertisement_interval() directly.
### Response:
def _set_advertisement_interval(self, v, load=False):
"""
Setter method for advertisement_interval, mapped from YANG variable /routing_system/router/router_bgp/address_family/ipv4/ipv4_unicast/af_vrf/neighbor/af_ipv4_vrf_neighbor_address_holder/af_ipv4_neighbor_addr/advertisement_interval (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_advertisement_interval is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_advertisement_interval() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=advertisement_interval.advertisement_interval, is_container='container', presence=False, yang_name="advertisement-interval", rest_name="advertisement-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Minimum interval between sending BGP routing updates', u'cli-compact-syntax': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """advertisement_interval must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=advertisement_interval.advertisement_interval, is_container='container', presence=False, yang_name="advertisement-interval", rest_name="advertisement-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Minimum interval between sending BGP routing updates', u'cli-compact-syntax': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)""",
})
self.__advertisement_interval = t
if hasattr(self, '_set'):
self._set() |
def datetime_at_loc(self, loc):
"""Returns the timestamp at the given integer location as a Pandas Timestamp."""
        return pd.Timestamp(self._zdt_to_nanos(self._jdt_index.dateTimeAtLoc(loc))) | Returns the timestamp at the given integer location as a Pandas Timestamp. | Below is the instruction that describes the task:
### Input:
Returns the timestamp at the given integer location as a Pandas Timestamp.
### Response:
def datetime_at_loc(self, loc):
"""Returns the timestamp at the given integer location as a Pandas Timestamp."""
return pd.Timestamp(self._zdt_to_nanos(self._jdt_index.dateTimeAtLoc(loc))) |
def variable(
self,
name=None,
function=None,
decl_type=None,
header_dir=None,
header_file=None,
recursive=None):
"""returns reference to variable declaration, that is matched defined
criteria"""
return (
self._find_single(
self._impl_matchers[
scopedef_t.variable],
name=name,
function=function,
decl_type=decl_type,
header_dir=header_dir,
header_file=header_file,
recursive=recursive)
) | returns reference to variable declaration, that is matched defined
criteria | Below is the instruction that describes the task:
### Input:
returns reference to variable declaration, that is matched defined
criteria
### Response:
def variable(
self,
name=None,
function=None,
decl_type=None,
header_dir=None,
header_file=None,
recursive=None):
"""returns reference to variable declaration, that is matched defined
criteria"""
return (
self._find_single(
self._impl_matchers[
scopedef_t.variable],
name=name,
function=function,
decl_type=decl_type,
header_dir=header_dir,
header_file=header_file,
recursive=recursive)
) |
def save(self, commit=True):
"""Save and send"""
contact = super(ContactFormBase, self).save()
context = {'contact': contact}
context.update(get_site_metas())
subject = ''.join(render_to_string(self.mail_subject_template, context).splitlines())
content = render_to_string(self.mail_content_template, context)
send_mail(subject, content,
settings.DEFAULT_FROM_EMAIL,
settings.CONTACT_FORM_TO,
fail_silently=not settings.DEBUG)
        return contact | Save and send | Below is the instruction that describes the task:
### Input:
Save and send
### Response:
def save(self, commit=True):
"""Save and send"""
contact = super(ContactFormBase, self).save()
context = {'contact': contact}
context.update(get_site_metas())
subject = ''.join(render_to_string(self.mail_subject_template, context).splitlines())
content = render_to_string(self.mail_content_template, context)
send_mail(subject, content,
settings.DEFAULT_FROM_EMAIL,
settings.CONTACT_FORM_TO,
fail_silently=not settings.DEBUG)
return contact |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.