code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def switch_mode(self, new_mode):
""" Explicitly switch the Bus Pirate mode
:param new_mode: The mode to switch to. Use the buspirate.MODE_* constants
"""
packet = bytearray()
packet.append(new_mode)
self.device.write(packet)
possible_responses = {
self.MODE_I2C: b'I2C1',
self.MODE_RAW: b'BBIO1',
self.MODE_SPI: b'API1',
self.MODE_UART: b'ART1',
self.MODE_ONEWIRE: b'1W01'
}
expected = possible_responses[new_mode]
response = self.device.read(4)
if response != expected:
raise Exception('Could not switch mode')
self.mode = new_mode
self.set_peripheral()
if self.i2c_speed:
self._set_i2c_speed(self.i2c_speed) | Explicitly switch the Bus Pirate mode
        :param new_mode: The mode to switch to. Use the buspirate.MODE_* constants | Below is the instruction that describes the task:
### Input:
Explicitly switch the Bus Pirate mode
:param new_mode: The mode to switch to. Use the buspirate.MODE_* constants
### Response:
def switch_mode(self, new_mode):
""" Explicitly switch the Bus Pirate mode
:param new_mode: The mode to switch to. Use the buspirate.MODE_* constants
"""
packet = bytearray()
packet.append(new_mode)
self.device.write(packet)
possible_responses = {
self.MODE_I2C: b'I2C1',
self.MODE_RAW: b'BBIO1',
self.MODE_SPI: b'API1',
self.MODE_UART: b'ART1',
self.MODE_ONEWIRE: b'1W01'
}
expected = possible_responses[new_mode]
response = self.device.read(4)
if response != expected:
raise Exception('Could not switch mode')
self.mode = new_mode
self.set_peripheral()
if self.i2c_speed:
self._set_i2c_speed(self.i2c_speed) |
def _tasks_to_reinsert(tasks, transactional):
"""Return a list containing the tasks that should be reinserted based on the
was_enqueued property and whether the insert is transactional or not.
"""
if transactional:
return tasks
return [task for task in tasks if not task.was_enqueued] | Return a list containing the tasks that should be reinserted based on the
    was_enqueued property and whether the insert is transactional or not. | Below is the instruction that describes the task:
### Input:
Return a list containing the tasks that should be reinserted based on the
was_enqueued property and whether the insert is transactional or not.
### Response:
def _tasks_to_reinsert(tasks, transactional):
"""Return a list containing the tasks that should be reinserted based on the
was_enqueued property and whether the insert is transactional or not.
"""
if transactional:
return tasks
return [task for task in tasks if not task.was_enqueued] |
def tty(tty_reload):
"""Load colors in tty."""
tty_script = os.path.join(CACHE_DIR, "colors-tty.sh")
term = os.environ.get("TERM")
if tty_reload and term == "linux":
        subprocess.Popen(["sh", tty_script]) | Load colors in tty. | Below is the instruction that describes the task:
### Input:
Load colors in tty.
### Response:
def tty(tty_reload):
"""Load colors in tty."""
tty_script = os.path.join(CACHE_DIR, "colors-tty.sh")
term = os.environ.get("TERM")
if tty_reload and term == "linux":
subprocess.Popen(["sh", tty_script]) |
def _Open(self, path_spec=None, mode='rb'):
"""Opens the file-like object defined by path specification.
Args:
path_spec (Optional[PathSpec]): path specification.
mode (Optional[str]): file access mode.
Raises:
AccessError: if the access to open the file was denied.
IOError: if the file-like object could not be opened.
OSError: if the file-like object could not be opened.
PathSpecError: if the path specification is incorrect.
ValueError: if the path specification is invalid.
"""
if not self._file_object_set_in_init and not path_spec:
raise ValueError('Missing path specification.')
if self._file_object_set_in_init:
return
self._file_object = self._OpenFileObject(path_spec)
if not self._file_object:
raise IOError('Unable to open missing file-like object.') | Opens the file-like object defined by path specification.
Args:
path_spec (Optional[PathSpec]): path specification.
mode (Optional[str]): file access mode.
Raises:
AccessError: if the access to open the file was denied.
IOError: if the file-like object could not be opened.
OSError: if the file-like object could not be opened.
PathSpecError: if the path specification is incorrect.
      ValueError: if the path specification is invalid. | Below is the instruction that describes the task:
### Input:
Opens the file-like object defined by path specification.
Args:
path_spec (Optional[PathSpec]): path specification.
mode (Optional[str]): file access mode.
Raises:
AccessError: if the access to open the file was denied.
IOError: if the file-like object could not be opened.
OSError: if the file-like object could not be opened.
PathSpecError: if the path specification is incorrect.
ValueError: if the path specification is invalid.
### Response:
def _Open(self, path_spec=None, mode='rb'):
"""Opens the file-like object defined by path specification.
Args:
path_spec (Optional[PathSpec]): path specification.
mode (Optional[str]): file access mode.
Raises:
AccessError: if the access to open the file was denied.
IOError: if the file-like object could not be opened.
OSError: if the file-like object could not be opened.
PathSpecError: if the path specification is incorrect.
ValueError: if the path specification is invalid.
"""
if not self._file_object_set_in_init and not path_spec:
raise ValueError('Missing path specification.')
if self._file_object_set_in_init:
return
self._file_object = self._OpenFileObject(path_spec)
if not self._file_object:
raise IOError('Unable to open missing file-like object.') |
def _create_token_set(self):
"""Creates a token set of all tokens in the index using `lunr.TokenSet`
"""
        self.token_set = TokenSet.from_list(sorted(list(self.inverted_index.keys()))) | Creates a token set of all tokens in the index using `lunr.TokenSet` | Below is the instruction that describes the task:
### Input:
Creates a token set of all tokens in the index using `lunr.TokenSet`
### Response:
def _create_token_set(self):
"""Creates a token set of all tokens in the index using `lunr.TokenSet`
"""
self.token_set = TokenSet.from_list(sorted(list(self.inverted_index.keys()))) |
def run(self):
"""Run checker."""
def split(module):
"""Split module into submodules."""
return tuple(module.split("."))
def modcmp(lib=(), test=()):
"""Compare import modules."""
if len(lib) > len(test):
return False
return all(a == b for a, b in zip(lib, test))
mods_1st_party = set()
mods_3rd_party = set()
# Get 1st party modules (used for absolute imports).
modules = [project2module(self.setup.keywords.get('name', ""))]
if modules[0] in self.known_modules:
modules = self.known_modules[modules[0]]
mods_1st_party.update(split(x) for x in modules)
requirements = self.requirements
if self.setup.redirected:
# Use requirements from setup if available.
requirements = self.setup.get_requirements(
setup=self.processing_setup_py,
tests=True,
)
# Get 3rd party module names based on requirements.
for requirement in requirements:
modules = [project2module(requirement.project_name)]
if modules[0] in KNOWN_3RD_PARTIES:
modules = KNOWN_3RD_PARTIES[modules[0]]
if modules[0] in self.known_modules:
modules = self.known_modules[modules[0]]
mods_3rd_party.update(split(x) for x in modules)
# When processing setup.py file, forcefully add setuptools to the
# project requirements. Setuptools might be required to build the
# project, even though it is not listed as a requirement - this
# package is required to run setup.py, so listing it as a setup
# requirement would be pointless.
if self.processing_setup_py:
mods_3rd_party.add(split("setuptools"))
for node in ImportVisitor(self.tree).imports:
_mod = split(node.mod)
_alt = split(node.alt)
if any([_mod[0] == x for x in STDLIB]):
continue
if any([modcmp(x, _mod) or modcmp(x, _alt)
for x in mods_1st_party]):
continue
if any([modcmp(x, _mod) or modcmp(x, _alt)
for x in mods_3rd_party]):
continue
yield (
node.line,
node.offset,
ERRORS['I900'].format(pkg=node.mod),
Flake8Checker,
            ) | Run checker. | Below is the instruction that describes the task:
### Input:
Run checker.
### Response:
def run(self):
"""Run checker."""
def split(module):
"""Split module into submodules."""
return tuple(module.split("."))
def modcmp(lib=(), test=()):
"""Compare import modules."""
if len(lib) > len(test):
return False
return all(a == b for a, b in zip(lib, test))
mods_1st_party = set()
mods_3rd_party = set()
# Get 1st party modules (used for absolute imports).
modules = [project2module(self.setup.keywords.get('name', ""))]
if modules[0] in self.known_modules:
modules = self.known_modules[modules[0]]
mods_1st_party.update(split(x) for x in modules)
requirements = self.requirements
if self.setup.redirected:
# Use requirements from setup if available.
requirements = self.setup.get_requirements(
setup=self.processing_setup_py,
tests=True,
)
# Get 3rd party module names based on requirements.
for requirement in requirements:
modules = [project2module(requirement.project_name)]
if modules[0] in KNOWN_3RD_PARTIES:
modules = KNOWN_3RD_PARTIES[modules[0]]
if modules[0] in self.known_modules:
modules = self.known_modules[modules[0]]
mods_3rd_party.update(split(x) for x in modules)
# When processing setup.py file, forcefully add setuptools to the
# project requirements. Setuptools might be required to build the
# project, even though it is not listed as a requirement - this
# package is required to run setup.py, so listing it as a setup
# requirement would be pointless.
if self.processing_setup_py:
mods_3rd_party.add(split("setuptools"))
for node in ImportVisitor(self.tree).imports:
_mod = split(node.mod)
_alt = split(node.alt)
if any([_mod[0] == x for x in STDLIB]):
continue
if any([modcmp(x, _mod) or modcmp(x, _alt)
for x in mods_1st_party]):
continue
if any([modcmp(x, _mod) or modcmp(x, _alt)
for x in mods_3rd_party]):
continue
yield (
node.line,
node.offset,
ERRORS['I900'].format(pkg=node.mod),
Flake8Checker,
) |
def rebase(self, qemu_img, base_image):
"""
Rebase a linked clone in order to use the correct disk
:param qemu_img: Path to the qemu-img binary
:param base_image: Path to the base image
"""
if not os.path.exists(base_image):
raise FileNotFoundError(base_image)
command = [qemu_img, "rebase", "-u", "-b", base_image, self._path]
process = yield from asyncio.create_subprocess_exec(*command)
retcode = yield from process.wait()
if retcode != 0:
raise Qcow2Error("Could not rebase the image")
self._reload() | Rebase a linked clone in order to use the correct disk
:param qemu_img: Path to the qemu-img binary
        :param base_image: Path to the base image | Below is the instruction that describes the task:
### Input:
Rebase a linked clone in order to use the correct disk
:param qemu_img: Path to the qemu-img binary
:param base_image: Path to the base image
### Response:
def rebase(self, qemu_img, base_image):
"""
Rebase a linked clone in order to use the correct disk
:param qemu_img: Path to the qemu-img binary
:param base_image: Path to the base image
"""
if not os.path.exists(base_image):
raise FileNotFoundError(base_image)
command = [qemu_img, "rebase", "-u", "-b", base_image, self._path]
process = yield from asyncio.create_subprocess_exec(*command)
retcode = yield from process.wait()
if retcode != 0:
raise Qcow2Error("Could not rebase the image")
self._reload() |
def cleanup(self):
"""Remove any created temp paths"""
for path in self.paths:
if isinstance(path, tuple):
os.close(path[0])
os.unlink(path[1])
else:
shutil.rmtree(path)
        self.paths = [] | Remove any created temp paths | Below is the instruction that describes the task:
### Input:
Remove any created temp paths
### Response:
def cleanup(self):
"""Remove any created temp paths"""
for path in self.paths:
if isinstance(path, tuple):
os.close(path[0])
os.unlink(path[1])
else:
shutil.rmtree(path)
self.paths = [] |
def load_cookies(self):
"""
Load any stored cookies for the plugin that have not expired.
:return: list of the restored cookie names
"""
if not self.session or not self.cache:
raise RuntimeError("Cannot loaded cached cookies in unbound plugin")
restored = []
for key, value in self.cache.get_all().items():
if key.startswith("__cookie"):
cookie = requests.cookies.create_cookie(**value)
self.session.http.cookies.set_cookie(cookie)
restored.append(cookie.name)
if restored:
self.logger.debug("Restored cookies: {0}".format(", ".join(restored)))
return restored | Load any stored cookies for the plugin that have not expired.
        :return: list of the restored cookie names | Below is the instruction that describes the task:
### Input:
Load any stored cookies for the plugin that have not expired.
:return: list of the restored cookie names
### Response:
def load_cookies(self):
"""
Load any stored cookies for the plugin that have not expired.
:return: list of the restored cookie names
"""
if not self.session or not self.cache:
raise RuntimeError("Cannot loaded cached cookies in unbound plugin")
restored = []
for key, value in self.cache.get_all().items():
if key.startswith("__cookie"):
cookie = requests.cookies.create_cookie(**value)
self.session.http.cookies.set_cookie(cookie)
restored.append(cookie.name)
if restored:
self.logger.debug("Restored cookies: {0}".format(", ".join(restored)))
return restored |
def analyze(self, scratch, **kwargs):
"""Run and return the results from the Animation plugin."""
results = Counter()
for script in self.iter_scripts(scratch):
gen = self.iter_blocks(script.blocks)
name = 'start'
level = None
while name != '':
if name in self.ANIMATION:
gen, count = self._check_animation(name, level, gen)
results.update(count)
name, level, _ = next(gen, ('', 0, ''))
        return {'animation': results} | Run and return the results from the Animation plugin. | Below is the instruction that describes the task:
### Input:
Run and return the results from the Animation plugin.
### Response:
def analyze(self, scratch, **kwargs):
"""Run and return the results from the Animation plugin."""
results = Counter()
for script in self.iter_scripts(scratch):
gen = self.iter_blocks(script.blocks)
name = 'start'
level = None
while name != '':
if name in self.ANIMATION:
gen, count = self._check_animation(name, level, gen)
results.update(count)
name, level, _ = next(gen, ('', 0, ''))
return {'animation': results} |
def modflow_sfr_gag_to_instruction_file(gage_output_file, ins_file=None, parse_filename=False):
"""writes an instruction file for an SFR gage output file to read Flow only at all times
Parameters
----------
gage_output_file : str
the gage output filename (ASCII).
ins_file : str
the name of the instruction file to create. If None, the name
is <gage_output_file>.ins. Default is None
parse_filename : bool
if True, get the gage_num parameter by parsing the gage output file filename
if False, get the gage number from the file itself
Returns
-------
df : pandas.DataFrame
a dataframe with obsnme and obsval for the sfr simulated flows.
If inschek was not successfully run, then returns None
ins_file : str
file name of instructions file relating to gage output.
obs_file : str
file name of processed gage output for all times
Note
----
sets up observations for gage outputs only for the Flow column.
if parse_namefile is true, only text up to first '.' is used as the gage_num
TODO : allow other observation types and align explicitly with times - now returns all values
"""
if ins_file is None:
ins_file = gage_output_file + '.ins'
# navigate the file to be sure the header makes sense
indat = [line.strip() for line in open(gage_output_file, 'r').readlines()]
header = [i for i in indat if i.startswith('"')]
# yank out the gage number to identify the observation names
if parse_filename:
gage_num = os.path.basename(gage_output_file).split('.')[0]
else:
gage_num = re.sub("[^0-9]", "", indat[0].lower().split("gage no.")[-1].strip().split()[0])
# get the column names
cols = [i.lower() for i in header if 'data' in i.lower()][0].lower().replace('"', '').replace('data:', '').split()
# make sure "Flow" is included in the columns
if 'flow' not in cols:
raise Exception('Requested field "Flow" not in gage output columns')
# find which column is for "Flow"
flowidx = np.where(np.array(cols) == 'flow')[0][0]
# write out the instruction file lines
inslines = ['l1 ' + (flowidx + 1) * 'w ' + '!g{0}_{1:d}!'.format(gage_num, j)
for j in range(len(indat) - len(header))]
inslines[0] = inslines[0].replace('l1', 'l{0:d}'.format(len(header) + 1))
# write the instruction file
with open(ins_file, 'w') as ofp:
ofp.write('pif ~\n')
[ofp.write('{0}\n'.format(line)) for line in inslines]
df = _try_run_inschek(ins_file, gage_output_file)
if df is not None:
return df, ins_file, gage_output_file
else:
print("Inschek didn't run so nothing returned")
return None | writes an instruction file for an SFR gage output file to read Flow only at all times
Parameters
----------
gage_output_file : str
the gage output filename (ASCII).
ins_file : str
the name of the instruction file to create. If None, the name
is <gage_output_file>.ins. Default is None
parse_filename : bool
if True, get the gage_num parameter by parsing the gage output file filename
if False, get the gage number from the file itself
Returns
-------
df : pandas.DataFrame
a dataframe with obsnme and obsval for the sfr simulated flows.
If inschek was not successfully run, then returns None
ins_file : str
file name of instructions file relating to gage output.
obs_file : str
file name of processed gage output for all times
Note
----
sets up observations for gage outputs only for the Flow column.
if parse_namefile is true, only text up to first '.' is used as the gage_num
    TODO : allow other observation types and align explicitly with times - now returns all values | Below is the instruction that describes the task:
### Input:
writes an instruction file for an SFR gage output file to read Flow only at all times
Parameters
----------
gage_output_file : str
the gage output filename (ASCII).
ins_file : str
the name of the instruction file to create. If None, the name
is <gage_output_file>.ins. Default is None
parse_filename : bool
if True, get the gage_num parameter by parsing the gage output file filename
if False, get the gage number from the file itself
Returns
-------
df : pandas.DataFrame
a dataframe with obsnme and obsval for the sfr simulated flows.
If inschek was not successfully run, then returns None
ins_file : str
file name of instructions file relating to gage output.
obs_file : str
file name of processed gage output for all times
Note
----
sets up observations for gage outputs only for the Flow column.
if parse_namefile is true, only text up to first '.' is used as the gage_num
TODO : allow other observation types and align explicitly with times - now returns all values
### Response:
def modflow_sfr_gag_to_instruction_file(gage_output_file, ins_file=None, parse_filename=False):
"""writes an instruction file for an SFR gage output file to read Flow only at all times
Parameters
----------
gage_output_file : str
the gage output filename (ASCII).
ins_file : str
the name of the instruction file to create. If None, the name
is <gage_output_file>.ins. Default is None
parse_filename : bool
if True, get the gage_num parameter by parsing the gage output file filename
if False, get the gage number from the file itself
Returns
-------
df : pandas.DataFrame
a dataframe with obsnme and obsval for the sfr simulated flows.
If inschek was not successfully run, then returns None
ins_file : str
file name of instructions file relating to gage output.
obs_file : str
file name of processed gage output for all times
Note
----
sets up observations for gage outputs only for the Flow column.
if parse_namefile is true, only text up to first '.' is used as the gage_num
TODO : allow other observation types and align explicitly with times - now returns all values
"""
if ins_file is None:
ins_file = gage_output_file + '.ins'
# navigate the file to be sure the header makes sense
indat = [line.strip() for line in open(gage_output_file, 'r').readlines()]
header = [i for i in indat if i.startswith('"')]
# yank out the gage number to identify the observation names
if parse_filename:
gage_num = os.path.basename(gage_output_file).split('.')[0]
else:
gage_num = re.sub("[^0-9]", "", indat[0].lower().split("gage no.")[-1].strip().split()[0])
# get the column names
cols = [i.lower() for i in header if 'data' in i.lower()][0].lower().replace('"', '').replace('data:', '').split()
# make sure "Flow" is included in the columns
if 'flow' not in cols:
raise Exception('Requested field "Flow" not in gage output columns')
# find which column is for "Flow"
flowidx = np.where(np.array(cols) == 'flow')[0][0]
# write out the instruction file lines
inslines = ['l1 ' + (flowidx + 1) * 'w ' + '!g{0}_{1:d}!'.format(gage_num, j)
for j in range(len(indat) - len(header))]
inslines[0] = inslines[0].replace('l1', 'l{0:d}'.format(len(header) + 1))
# write the instruction file
with open(ins_file, 'w') as ofp:
ofp.write('pif ~\n')
[ofp.write('{0}\n'.format(line)) for line in inslines]
df = _try_run_inschek(ins_file, gage_output_file)
if df is not None:
return df, ins_file, gage_output_file
else:
print("Inschek didn't run so nothing returned")
return None |
def action_range_type(self) -> Sequence[str]:
'''The range type of each action fluent in canonical order.
Returns:
Sequence[str]: A tuple of range types representing
the range of each fluent.
'''
fluents = self.domain.action_fluents
ordering = self.domain.action_fluent_ordering
return self._fluent_range_type(fluents, ordering) | The range type of each action fluent in canonical order.
Returns:
Sequence[str]: A tuple of range types representing
            the range of each fluent. | Below is the instruction that describes the task:
### Input:
The range type of each action fluent in canonical order.
Returns:
Sequence[str]: A tuple of range types representing
the range of each fluent.
### Response:
def action_range_type(self) -> Sequence[str]:
'''The range type of each action fluent in canonical order.
Returns:
Sequence[str]: A tuple of range types representing
the range of each fluent.
'''
fluents = self.domain.action_fluents
ordering = self.domain.action_fluent_ordering
return self._fluent_range_type(fluents, ordering) |
def _check_default(value, parameter, default_chars):
'''Returns the default if the value is "empty"'''
# not using a set here because it fails when value is unhashable
if value in default_chars:
if parameter.default is inspect.Parameter.empty:
raise ValueError('Value was empty, but no default value is given in view function for parameter: {} ({})'.format(parameter.position, parameter.name))
return parameter.default
    return value | Returns the default if the value is "empty" | Below is the instruction that describes the task:
### Input:
Returns the default if the value is "empty"
### Response:
def _check_default(value, parameter, default_chars):
'''Returns the default if the value is "empty"'''
# not using a set here because it fails when value is unhashable
if value in default_chars:
if parameter.default is inspect.Parameter.empty:
raise ValueError('Value was empty, but no default value is given in view function for parameter: {} ({})'.format(parameter.position, parameter.name))
return parameter.default
return value |
def _parse_uri(uri):
"""Parse and validate MediaFire URI."""
tokens = urlparse(uri)
if tokens.netloc != '':
logger.error("Invalid URI: %s", uri)
raise ValueError("MediaFire URI format error: "
"host should be empty - mf:///path")
if tokens.scheme != '' and tokens.scheme != URI_SCHEME:
raise ValueError("MediaFire URI format error: "
"must start with 'mf:' or '/'")
    return posixpath.normpath(tokens.path) | Parse and validate MediaFire URI. | Below is the instruction that describes the task:
### Input:
Parse and validate MediaFire URI.
### Response:
def _parse_uri(uri):
"""Parse and validate MediaFire URI."""
tokens = urlparse(uri)
if tokens.netloc != '':
logger.error("Invalid URI: %s", uri)
raise ValueError("MediaFire URI format error: "
"host should be empty - mf:///path")
if tokens.scheme != '' and tokens.scheme != URI_SCHEME:
raise ValueError("MediaFire URI format error: "
"must start with 'mf:' or '/'")
return posixpath.normpath(tokens.path) |
def decorator(func):
"""A function timer decorator."""
def function_timer(*args, **kwargs):
"""A nested function for timing other functions."""
# Capture start time
start = time.time()
# Execute function with arguments
value = func(*args, **kwargs)
# Capture end time
end = time.time()
# Calculate run time
runtime = end - start
if runtime < 60:
runtime = str('sec: ' + str('{:f}'.format(runtime)))
else:
runtime = str('min: ' + str('{:f}'.format(runtime / 60)))
print('{func:50} --> {time}'.format(func=func.__qualname__, time=runtime))
return value
    return function_timer | A function timer decorator. | Below is the instruction that describes the task:
### Input:
A function timer decorator.
### Response:
def decorator(func):
"""A function timer decorator."""
def function_timer(*args, **kwargs):
"""A nested function for timing other functions."""
# Capture start time
start = time.time()
# Execute function with arguments
value = func(*args, **kwargs)
# Capture end time
end = time.time()
# Calculate run time
runtime = end - start
if runtime < 60:
runtime = str('sec: ' + str('{:f}'.format(runtime)))
else:
runtime = str('min: ' + str('{:f}'.format(runtime / 60)))
print('{func:50} --> {time}'.format(func=func.__qualname__, time=runtime))
return value
return function_timer |
def _jobStoreClasses(self):
"""
A list of concrete AbstractJobStore implementations whose dependencies are installed.
:rtype: list[AbstractJobStore]
"""
jobStoreClassNames = (
"toil.jobStores.azureJobStore.AzureJobStore",
"toil.jobStores.fileJobStore.FileJobStore",
"toil.jobStores.googleJobStore.GoogleJobStore",
"toil.jobStores.aws.jobStore.AWSJobStore",
"toil.jobStores.abstractJobStore.JobStoreSupport")
jobStoreClasses = []
for className in jobStoreClassNames:
moduleName, className = className.rsplit('.', 1)
from importlib import import_module
try:
module = import_module(moduleName)
except ImportError:
logger.debug("Unable to import '%s' as is expected if the corresponding extra was "
"omitted at installation time.", moduleName)
else:
jobStoreClass = getattr(module, className)
jobStoreClasses.append(jobStoreClass)
return jobStoreClasses | A list of concrete AbstractJobStore implementations whose dependencies are installed.
        :rtype: list[AbstractJobStore] | Below is the instruction that describes the task:
### Input:
A list of concrete AbstractJobStore implementations whose dependencies are installed.
:rtype: list[AbstractJobStore]
### Response:
def _jobStoreClasses(self):
"""
A list of concrete AbstractJobStore implementations whose dependencies are installed.
:rtype: list[AbstractJobStore]
"""
jobStoreClassNames = (
"toil.jobStores.azureJobStore.AzureJobStore",
"toil.jobStores.fileJobStore.FileJobStore",
"toil.jobStores.googleJobStore.GoogleJobStore",
"toil.jobStores.aws.jobStore.AWSJobStore",
"toil.jobStores.abstractJobStore.JobStoreSupport")
jobStoreClasses = []
for className in jobStoreClassNames:
moduleName, className = className.rsplit('.', 1)
from importlib import import_module
try:
module = import_module(moduleName)
except ImportError:
logger.debug("Unable to import '%s' as is expected if the corresponding extra was "
"omitted at installation time.", moduleName)
else:
jobStoreClass = getattr(module, className)
jobStoreClasses.append(jobStoreClass)
return jobStoreClasses |
def insert(self, table, kwargs, execute=True):
""".. :py:method::
Usage::
>>> insert('hospital', {'id': '12de3wrv', 'province': 'shanghai'})
insert into hospital (id, province) values ('12de3wrv', 'shanghai');
:param string table: table name
:param dict kwargs: name and value
:param bool execute: if not execute, return sql and variables
:rtype: tuple
"""
sql = "insert into " + table + " ({}) values ({});"
keys, values = [], []
[ (keys.append(k), values.append(v)) for k, v in kwargs.iteritems() ]
sql = sql.format(', '.join(keys), ', '.join(['%s']*len(values)))
if execute:
super(PGWrapper, self).execute(sql, values, result=False)
else:
return sql, values | .. :py:method::
Usage::
>>> insert('hospital', {'id': '12de3wrv', 'province': 'shanghai'})
insert into hospital (id, province) values ('12de3wrv', 'shanghai');
:param string table: table name
:param dict kwargs: name and value
:param bool execute: if not execute, return sql and variables
        :rtype: tuple | Below is the instruction that describes the task:
### Input:
.. :py:method::
Usage::
>>> insert('hospital', {'id': '12de3wrv', 'province': 'shanghai'})
insert into hospital (id, province) values ('12de3wrv', 'shanghai');
:param string table: table name
:param dict kwargs: name and value
:param bool execute: if not execute, return sql and variables
:rtype: tuple
### Response:
def insert(self, table, kwargs, execute=True):
""".. :py:method::
Usage::
>>> insert('hospital', {'id': '12de3wrv', 'province': 'shanghai'})
insert into hospital (id, province) values ('12de3wrv', 'shanghai');
:param string table: table name
:param dict kwargs: name and value
:param bool execute: if not execute, return sql and variables
:rtype: tuple
"""
sql = "insert into " + table + " ({}) values ({});"
keys, values = [], []
[ (keys.append(k), values.append(v)) for k, v in kwargs.iteritems() ]
sql = sql.format(', '.join(keys), ', '.join(['%s']*len(values)))
if execute:
super(PGWrapper, self).execute(sql, values, result=False)
else:
return sql, values |
def restart(self, restart_only_stale_services=None,
redeploy_client_configuration=None,
restart_service_names=None):
"""
Restart all services in the cluster.
Services are restarted in the appropriate order given their dependencies.
@param restart_only_stale_services: Only restart services that have stale
configuration and their dependent
services. Default is False.
@param redeploy_client_configuration: Re-deploy client configuration for
all services in the cluster. Default
is False.
@param restart_service_names: Only restart services that are specified and their dependent services.
Available since API v11.
@since API v6
@return: Reference to the submitted command.
"""
if self._get_resource_root().version < 6:
return self._cmd('restart')
else:
args = dict()
args['restartOnlyStaleServices'] = restart_only_stale_services
args['redeployClientConfiguration'] = redeploy_client_configuration
if self._get_resource_root().version >= 11:
args['restartServiceNames'] = restart_service_names
return self._cmd('restart', data=args, api_version=6) | Restart all services in the cluster.
Services are restarted in the appropriate order given their dependencies.
@param restart_only_stale_services: Only restart services that have stale
configuration and their dependent
services. Default is False.
@param redeploy_client_configuration: Re-deploy client configuration for
all services in the cluster. Default
is False.
@param restart_service_names: Only restart services that are specified and their dependent services.
Available since API v11.
@since API v6
    @return: Reference to the submitted command. | Below is the instruction that describes the task:
### Input:
Restart all services in the cluster.
Services are restarted in the appropriate order given their dependencies.
@param restart_only_stale_services: Only restart services that have stale
configuration and their dependent
services. Default is False.
@param redeploy_client_configuration: Re-deploy client configuration for
all services in the cluster. Default
is False.
@param restart_service_names: Only restart services that are specified and their dependent services.
Available since API v11.
@since API v6
@return: Reference to the submitted command.
### Response:
def restart(self, restart_only_stale_services=None,
redeploy_client_configuration=None,
restart_service_names=None):
"""
Restart all services in the cluster.
Services are restarted in the appropriate order given their dependencies.
@param restart_only_stale_services: Only restart services that have stale
configuration and their dependent
services. Default is False.
@param redeploy_client_configuration: Re-deploy client configuration for
all services in the cluster. Default
is False.
@param restart_service_names: Only restart services that are specified and their dependent services.
Available since API v11.
@since API v6
@return: Reference to the submitted command.
"""
if self._get_resource_root().version < 6:
return self._cmd('restart')
else:
args = dict()
args['restartOnlyStaleServices'] = restart_only_stale_services
args['redeployClientConfiguration'] = redeploy_client_configuration
if self._get_resource_root().version >= 11:
args['restartServiceNames'] = restart_service_names
return self._cmd('restart', data=args, api_version=6) |
def get_annotations(self):
'''
This method gets the annotations for the queryset. Unlike get_ordering() below, it
passes the actual Case() and F() objects that will be evaluated with the queryset, returned
in a dictionary that is compatible with get_ordering().
'''
rule = getConstant('registration__orgRule')
# Initialize with null values that get filled in based on the logic below.
annotations = {
'nullParam': Case(default_value=None,output_field=IntegerField()),
'paramOne': Case(default_value=None,output_field=IntegerField()),
'paramTwo': Case(default_value=None,output_field=IntegerField()),
}
if rule == 'SessionFirst':
annotations.update({
'nullParam': Case(
When(session__startTime__isnull=False, then=0),
When(month__isnull=False, then=1),
default_value=2,
output_field=IntegerField()
),
'paramOne': F('session__startTime'),
'paramTwo': ExpressionWrapper(12 * F('year') + F('month'), output_field=IntegerField()),
})
elif rule == 'SessionAlphaFirst':
annotations.update({
'nullParam': Case(
When(session__name__isnull=False, then=0),
When(month__isnull=False, then=1),
default_value=2,
output_field=IntegerField()
),
'paramOne': F('session__name'),
'paramTwo': ExpressionWrapper(12 * F('year') + F('month'), output_field=IntegerField()),
})
elif rule == 'Month':
annotations.update({
'nullParam': Case(
When(month__isnull=False, then=0),
default_value=1,
output_field=IntegerField()
),
'paramOne': ExpressionWrapper(12*F('year') + F('month'), output_field=IntegerField()),
})
elif rule == 'Session':
annotations.update({
'nullParam': Case(
When(session__startTime__isnull=False, then=0),
default_value=1,
output_field=IntegerField()
),
'paramOne': F('session__startTime'),
})
elif rule == 'SessionAlpha':
annotations.update({
'nullParam': Case(
When(session__name__isnull=False, then=0),
default_value=1,
output_field=IntegerField()
),
'paramOne': F('session__name'),
})
elif rule == 'SessionMonth':
annotations.update({
'nullParam': Case(
When(Q(session__startTime__isnull=False) & Q(month__isnull=False), then=0),
When(Q(session__startTime__isnull=True) & Q(month__isnull=False), then=1),
When(Q(session__startTime__isnull=False) & Q(month__isnull=True), then=2),
default_value=3,
output_field=IntegerField()
),
'paramOne': ExpressionWrapper(12 * F('year') + F('month'), output_field=IntegerField()),
'paramTwo': F('session__startTime'),
})
elif rule == 'SessionAlphaMonth':
annotations.update({
'nullParam': Case(
When(Q(session__name__isnull=False) & Q(month__isnull=False), then=0),
When(Q(session__name__isnull=True) & Q(month__isnull=False), then=1),
When(Q(session__name__isnull=False) & Q(month__isnull=True), then=2),
default_value=3,
output_field=IntegerField()
),
'paramOne': ExpressionWrapper(12*F('year') + F('month'), output_field=IntegerField()),
'paramTwo': F('session__name'),
})
elif rule == 'Weekday':
annotations.update({
'nullParam': Case(
When(startTime__week_day__isnull=False, then=0),
default_value=1,
output_field=IntegerField()
),
'paramOne': ExtractWeekDay('startTime'),
})
elif rule == 'MonthWeekday':
annotations.update({
'nullParam': Case(
When(Q(month__isnull=False) & Q(startTime__week_day__isnull=False), then=0),
default_value=1,
output_field=IntegerField()
),
'paramOne': ExpressionWrapper(12*F('year') + F('month'), output_field=IntegerField()),
'paramTwo': ExtractWeekDay('startTime'),
})
return annotations | This method gets the annotations for the queryset. Unlike get_ordering() below, it
passes the actual Case() and F() objects that will be evaluated with the queryset, returned
in a dictionary that is compatible with get_ordering(). | Below is the the instruction that describes the task:
### Input:
This method gets the annotations for the queryset. Unlike get_ordering() below, it
passes the actual Case() and F() objects that will be evaluated with the queryset, returned
in a dictionary that is compatible with get_ordering().
### Response:
def get_annotations(self):
'''
This method gets the annotations for the queryset. Unlike get_ordering() below, it
passes the actual Case() and F() objects that will be evaluated with the queryset, returned
in a dictionary that is compatible with get_ordering().
'''
rule = getConstant('registration__orgRule')
# Initialize with null values that get filled in based on the logic below.
annotations = {
'nullParam': Case(default_value=None,output_field=IntegerField()),
'paramOne': Case(default_value=None,output_field=IntegerField()),
'paramTwo': Case(default_value=None,output_field=IntegerField()),
}
if rule == 'SessionFirst':
annotations.update({
'nullParam': Case(
When(session__startTime__isnull=False, then=0),
When(month__isnull=False, then=1),
default_value=2,
output_field=IntegerField()
),
'paramOne': F('session__startTime'),
'paramTwo': ExpressionWrapper(12 * F('year') + F('month'), output_field=IntegerField()),
})
elif rule == 'SessionAlphaFirst':
annotations.update({
'nullParam': Case(
When(session__name__isnull=False, then=0),
When(month__isnull=False, then=1),
default_value=2,
output_field=IntegerField()
),
'paramOne': F('session__name'),
'paramTwo': ExpressionWrapper(12 * F('year') + F('month'), output_field=IntegerField()),
})
elif rule == 'Month':
annotations.update({
'nullParam': Case(
When(month__isnull=False, then=0),
default_value=1,
output_field=IntegerField()
),
'paramOne': ExpressionWrapper(12*F('year') + F('month'), output_field=IntegerField()),
})
elif rule == 'Session':
annotations.update({
'nullParam': Case(
When(session__startTime__isnull=False, then=0),
default_value=1,
output_field=IntegerField()
),
'paramOne': F('session__startTime'),
})
elif rule == 'SessionAlpha':
annotations.update({
'nullParam': Case(
When(session__name__isnull=False, then=0),
default_value=1,
output_field=IntegerField()
),
'paramOne': F('session__name'),
})
elif rule == 'SessionMonth':
annotations.update({
'nullParam': Case(
When(Q(session__startTime__isnull=False) & Q(month__isnull=False), then=0),
When(Q(session__startTime__isnull=True) & Q(month__isnull=False), then=1),
When(Q(session__startTime__isnull=False) & Q(month__isnull=True), then=2),
default_value=3,
output_field=IntegerField()
),
'paramOne': ExpressionWrapper(12 * F('year') + F('month'), output_field=IntegerField()),
'paramTwo': F('session__startTime'),
})
elif rule == 'SessionAlphaMonth':
annotations.update({
'nullParam': Case(
When(Q(session__name__isnull=False) & Q(month__isnull=False), then=0),
When(Q(session__name__isnull=True) & Q(month__isnull=False), then=1),
When(Q(session__name__isnull=False) & Q(month__isnull=True), then=2),
default_value=3,
output_field=IntegerField()
),
'paramOne': ExpressionWrapper(12*F('year') + F('month'), output_field=IntegerField()),
'paramTwo': F('session__name'),
})
elif rule == 'Weekday':
annotations.update({
'nullParam': Case(
When(startTime__week_day__isnull=False, then=0),
default_value=1,
output_field=IntegerField()
),
'paramOne': ExtractWeekDay('startTime'),
})
elif rule == 'MonthWeekday':
annotations.update({
'nullParam': Case(
When(Q(month__isnull=False) & Q(startTime__week_day__isnull=False), then=0),
default_value=1,
output_field=IntegerField()
),
'paramOne': ExpressionWrapper(12*F('year') + F('month'), output_field=IntegerField()),
'paramTwo': ExtractWeekDay('startTime'),
})
return annotations |
def get_en2fr(url='http://www.manythings.org/anki/fra-eng.zip'):
""" Download and parse English->French translation dataset used in Keras seq2seq example """
download_unzip(url)
return pd.read_table(url, compression='zip', header=None, skip_blank_lines=True, sep='\t', skiprows=0, names='en fr'.split()) | Download and parse English->French translation dataset used in Keras seq2seq example | Below is the the instruction that describes the task:
### Input:
Download and parse English->French translation dataset used in Keras seq2seq example
### Response:
def get_en2fr(url='http://www.manythings.org/anki/fra-eng.zip'):
""" Download and parse English->French translation dataset used in Keras seq2seq example """
download_unzip(url)
return pd.read_table(url, compression='zip', header=None, skip_blank_lines=True, sep='\t', skiprows=0, names='en fr'.split()) |
def publish(self, topic, data, defer=None):
"""Publish a message to the given topic over http.
:param topic: the topic to publish to
:param data: bytestring data to publish
:param defer: duration in millisconds to defer before publishing
(requires nsq 0.3.6)
"""
nsq.assert_valid_topic_name(topic)
fields = {'topic': topic}
if defer is not None:
fields['defer'] = '{}'.format(defer)
return self._request('POST', '/pub', fields=fields, body=data) | Publish a message to the given topic over http.
:param topic: the topic to publish to
:param data: bytestring data to publish
:param defer: duration in millisconds to defer before publishing
(requires nsq 0.3.6) | Below is the the instruction that describes the task:
### Input:
Publish a message to the given topic over http.
:param topic: the topic to publish to
:param data: bytestring data to publish
:param defer: duration in millisconds to defer before publishing
(requires nsq 0.3.6)
### Response:
def publish(self, topic, data, defer=None):
"""Publish a message to the given topic over http.
:param topic: the topic to publish to
:param data: bytestring data to publish
:param defer: duration in millisconds to defer before publishing
(requires nsq 0.3.6)
"""
nsq.assert_valid_topic_name(topic)
fields = {'topic': topic}
if defer is not None:
fields['defer'] = '{}'.format(defer)
return self._request('POST', '/pub', fields=fields, body=data) |
def _ordereddict2dict(input_ordered_dict):
'''
Convert ordered dictionary to a dictionary
'''
return salt.utils.json.loads(salt.utils.json.dumps(input_ordered_dict)) | Convert ordered dictionary to a dictionary | Below is the the instruction that describes the task:
### Input:
Convert ordered dictionary to a dictionary
### Response:
def _ordereddict2dict(input_ordered_dict):
'''
Convert ordered dictionary to a dictionary
'''
return salt.utils.json.loads(salt.utils.json.dumps(input_ordered_dict)) |
def global_injector_decorator(inject_globals):
'''
Decorator used by the LazyLoader to inject globals into a function at
execute time.
globals
Dictionary with global variables to inject
'''
def inner_decorator(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
with salt.utils.context.func_globals_inject(f, **inject_globals):
return f(*args, **kwargs)
return wrapper
return inner_decorator | Decorator used by the LazyLoader to inject globals into a function at
execute time.
globals
Dictionary with global variables to inject | Below is the the instruction that describes the task:
### Input:
Decorator used by the LazyLoader to inject globals into a function at
execute time.
globals
Dictionary with global variables to inject
### Response:
def global_injector_decorator(inject_globals):
'''
Decorator used by the LazyLoader to inject globals into a function at
execute time.
globals
Dictionary with global variables to inject
'''
def inner_decorator(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
with salt.utils.context.func_globals_inject(f, **inject_globals):
return f(*args, **kwargs)
return wrapper
return inner_decorator |
def _read_interleaved_numpy(self, f, data_objects):
"""Read interleaved data where all channels have a numpy type"""
log.debug("Reading interleaved data all at once")
# Read all data into 1 byte unsigned ints first
all_channel_bytes = data_objects[0].raw_data_width
if all_channel_bytes == 0:
all_channel_bytes = sum((o.data_type.size for o in data_objects))
log.debug("all_channel_bytes: %d", all_channel_bytes)
number_bytes = int(all_channel_bytes * data_objects[0].number_values)
combined_data = fromfile(f, dtype=np.uint8, count=number_bytes)
# Reshape, so that one row is all bytes for all objects
combined_data = combined_data.reshape(-1, all_channel_bytes)
# Now set arrays for each channel
data_pos = 0
for (i, obj) in enumerate(data_objects):
byte_columns = tuple(
range(data_pos, obj.data_type.size + data_pos))
log.debug("Byte columns for channel %d: %s", i, byte_columns)
# Select columns for this channel, so that number of values will
# be number of bytes per point * number of data points.
# Then use ravel to flatten the results into a vector.
object_data = combined_data[:, byte_columns].ravel()
# Now set correct data type, so that the array length should
# be correct
object_data.dtype = (
np.dtype(obj.data_type.nptype).newbyteorder(self.endianness))
obj.tdms_object._update_data(object_data)
data_pos += obj.data_type.size | Read interleaved data where all channels have a numpy type | Below is the the instruction that describes the task:
### Input:
Read interleaved data where all channels have a numpy type
### Response:
def _read_interleaved_numpy(self, f, data_objects):
"""Read interleaved data where all channels have a numpy type"""
log.debug("Reading interleaved data all at once")
# Read all data into 1 byte unsigned ints first
all_channel_bytes = data_objects[0].raw_data_width
if all_channel_bytes == 0:
all_channel_bytes = sum((o.data_type.size for o in data_objects))
log.debug("all_channel_bytes: %d", all_channel_bytes)
number_bytes = int(all_channel_bytes * data_objects[0].number_values)
combined_data = fromfile(f, dtype=np.uint8, count=number_bytes)
# Reshape, so that one row is all bytes for all objects
combined_data = combined_data.reshape(-1, all_channel_bytes)
# Now set arrays for each channel
data_pos = 0
for (i, obj) in enumerate(data_objects):
byte_columns = tuple(
range(data_pos, obj.data_type.size + data_pos))
log.debug("Byte columns for channel %d: %s", i, byte_columns)
# Select columns for this channel, so that number of values will
# be number of bytes per point * number of data points.
# Then use ravel to flatten the results into a vector.
object_data = combined_data[:, byte_columns].ravel()
# Now set correct data type, so that the array length should
# be correct
object_data.dtype = (
np.dtype(obj.data_type.nptype).newbyteorder(self.endianness))
obj.tdms_object._update_data(object_data)
data_pos += obj.data_type.size |
def objectify(self, load, node, depth=2, path=None):
""" Given a node ID, return an object the information available about
this node. This accepts a loader function as it's first argument, which
is expected to return all tuples of (predicate, object, source) for
the given subject. """
if path is None:
path = set()
if self.is_object:
if depth < 1:
return
return self._objectify_object(load, node, depth, path)
elif self.is_array:
if depth < 1:
return
return [self.items.objectify(load, node, depth, path)]
else:
return node | Given a node ID, return an object the information available about
this node. This accepts a loader function as it's first argument, which
is expected to return all tuples of (predicate, object, source) for
the given subject. | Below is the the instruction that describes the task:
### Input:
Given a node ID, return an object the information available about
this node. This accepts a loader function as it's first argument, which
is expected to return all tuples of (predicate, object, source) for
the given subject.
### Response:
def objectify(self, load, node, depth=2, path=None):
""" Given a node ID, return an object the information available about
this node. This accepts a loader function as it's first argument, which
is expected to return all tuples of (predicate, object, source) for
the given subject. """
if path is None:
path = set()
if self.is_object:
if depth < 1:
return
return self._objectify_object(load, node, depth, path)
elif self.is_array:
if depth < 1:
return
return [self.items.objectify(load, node, depth, path)]
else:
return node |
def add_cluster_admin(self, new_username, new_password):
"""Add cluster admin."""
data = {
'name': new_username,
'password': new_password
}
self.request(
url="cluster_admins",
method='POST',
data=data,
expected_response_code=200
)
return True | Add cluster admin. | Below is the the instruction that describes the task:
### Input:
Add cluster admin.
### Response:
def add_cluster_admin(self, new_username, new_password):
"""Add cluster admin."""
data = {
'name': new_username,
'password': new_password
}
self.request(
url="cluster_admins",
method='POST',
data=data,
expected_response_code=200
)
return True |
def difference(self, another_moc, *args):
"""
Difference between the MOC instance and other MOCs.
Parameters
----------
another_moc : `~mocpy.moc.MOC`
The MOC used that will be substracted to self.
args : `~mocpy.moc.MOC`
Other additional MOCs to perform the difference with.
Returns
-------
result : `~mocpy.moc.MOC` or `~mocpy.tmoc.TimeMOC`
The resulting MOC.
"""
interval_set = self._interval_set.difference(another_moc._interval_set)
for moc in args:
interval_set = interval_set.difference(moc._interval_set)
return self.__class__(interval_set) | Difference between the MOC instance and other MOCs.
Parameters
----------
another_moc : `~mocpy.moc.MOC`
The MOC used that will be substracted to self.
args : `~mocpy.moc.MOC`
Other additional MOCs to perform the difference with.
Returns
-------
result : `~mocpy.moc.MOC` or `~mocpy.tmoc.TimeMOC`
The resulting MOC. | Below is the the instruction that describes the task:
### Input:
Difference between the MOC instance and other MOCs.
Parameters
----------
another_moc : `~mocpy.moc.MOC`
The MOC used that will be substracted to self.
args : `~mocpy.moc.MOC`
Other additional MOCs to perform the difference with.
Returns
-------
result : `~mocpy.moc.MOC` or `~mocpy.tmoc.TimeMOC`
The resulting MOC.
### Response:
def difference(self, another_moc, *args):
"""
Difference between the MOC instance and other MOCs.
Parameters
----------
another_moc : `~mocpy.moc.MOC`
The MOC used that will be substracted to self.
args : `~mocpy.moc.MOC`
Other additional MOCs to perform the difference with.
Returns
-------
result : `~mocpy.moc.MOC` or `~mocpy.tmoc.TimeMOC`
The resulting MOC.
"""
interval_set = self._interval_set.difference(another_moc._interval_set)
for moc in args:
interval_set = interval_set.difference(moc._interval_set)
return self.__class__(interval_set) |
def _to_representation(self, instance):
"""Uncached `to_representation`."""
if self.enable_optimization:
representation = self._faster_to_representation(instance)
else:
representation = super(
WithDynamicSerializerMixin,
self
).to_representation(instance)
if settings.ENABLE_LINKS:
# TODO: Make this function configurable to support other
# formats like JSON API link objects.
representation = merge_link_object(
self, representation, instance
)
if self.debug:
representation['_meta'] = {
'id': instance.pk,
'type': self.get_plural_name()
}
# tag the representation with the serializer and instance
return tag_dict(
representation,
serializer=self,
instance=instance,
embed=self.embed
) | Uncached `to_representation`. | Below is the the instruction that describes the task:
### Input:
Uncached `to_representation`.
### Response:
def _to_representation(self, instance):
"""Uncached `to_representation`."""
if self.enable_optimization:
representation = self._faster_to_representation(instance)
else:
representation = super(
WithDynamicSerializerMixin,
self
).to_representation(instance)
if settings.ENABLE_LINKS:
# TODO: Make this function configurable to support other
# formats like JSON API link objects.
representation = merge_link_object(
self, representation, instance
)
if self.debug:
representation['_meta'] = {
'id': instance.pk,
'type': self.get_plural_name()
}
# tag the representation with the serializer and instance
return tag_dict(
representation,
serializer=self,
instance=instance,
embed=self.embed
) |
def move_group(self, group = None, parent = None):
"""Append group to a new parent.
group and parent must be v1Group-instances.
"""
if group is None or type(group) is not v1Group:
raise KPError("A valid group must be given.")
elif parent is not None and type(parent) is not v1Group:
raise KPError("parent must be a v1Group.")
elif group is parent:
raise KPError("group and parent must not be the same group")
if parent is None:
parent = self.root_group
if group in self.groups:
self.groups.remove(group)
group.parent.children.remove(group)
group.parent = parent
if parent.children:
if parent.children[-1] is self.groups[-1]:
self.groups.append(group)
else:
new_index = self.groups.index(parent.children[-1]) + 1
self.groups.insert(new_index, group)
else:
new_index = self.groups.index(parent) + 1
self.groups.insert(new_index, group)
parent.children.append(group)
if parent is self.root_group:
group.level = 0
else:
group.level = parent.level + 1
if group.children:
self._move_group_helper(group)
group.last_mod = datetime.now().replace(microsecond=0)
return True
else:
raise KPError("Didn't find given group.") | Append group to a new parent.
group and parent must be v1Group-instances. | Below is the the instruction that describes the task:
### Input:
Append group to a new parent.
group and parent must be v1Group-instances.
### Response:
def move_group(self, group = None, parent = None):
"""Append group to a new parent.
group and parent must be v1Group-instances.
"""
if group is None or type(group) is not v1Group:
raise KPError("A valid group must be given.")
elif parent is not None and type(parent) is not v1Group:
raise KPError("parent must be a v1Group.")
elif group is parent:
raise KPError("group and parent must not be the same group")
if parent is None:
parent = self.root_group
if group in self.groups:
self.groups.remove(group)
group.parent.children.remove(group)
group.parent = parent
if parent.children:
if parent.children[-1] is self.groups[-1]:
self.groups.append(group)
else:
new_index = self.groups.index(parent.children[-1]) + 1
self.groups.insert(new_index, group)
else:
new_index = self.groups.index(parent) + 1
self.groups.insert(new_index, group)
parent.children.append(group)
if parent is self.root_group:
group.level = 0
else:
group.level = parent.level + 1
if group.children:
self._move_group_helper(group)
group.last_mod = datetime.now().replace(microsecond=0)
return True
else:
raise KPError("Didn't find given group.") |
def filebrowser_remove_file(request, item_id, file_type):
""" Remove file """
fobj = get_object_or_404(FileBrowserFile, file_type=file_type, id=item_id)
fobj.delete()
if file_type == 'doc':
return HttpResponseRedirect(reverse('mce-filebrowser-documents'))
return HttpResponseRedirect(reverse('mce-filebrowser-images')) | Remove file | Below is the the instruction that describes the task:
### Input:
Remove file
### Response:
def filebrowser_remove_file(request, item_id, file_type):
""" Remove file """
fobj = get_object_or_404(FileBrowserFile, file_type=file_type, id=item_id)
fobj.delete()
if file_type == 'doc':
return HttpResponseRedirect(reverse('mce-filebrowser-documents'))
return HttpResponseRedirect(reverse('mce-filebrowser-images')) |
def _ParseDateTimeValue(self, parser_mediator, date_time_value):
"""Parses a date time value.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
date_time_value (str): date time value
(CSSM_DB_ATTRIBUTE_FORMAT_TIME_DATE) in the format: "YYYYMMDDhhmmssZ".
Returns:
dfdatetime.TimeElements: date and time extracted from the value or None
if the value does not represent a valid string.
"""
if date_time_value[14] != 'Z':
parser_mediator.ProduceExtractionWarning(
'invalid date and time value: {0!s}'.format(date_time_value))
return None
try:
year = int(date_time_value[0:4], 10)
month = int(date_time_value[4:6], 10)
day_of_month = int(date_time_value[6:8], 10)
hours = int(date_time_value[8:10], 10)
minutes = int(date_time_value[10:12], 10)
seconds = int(date_time_value[12:14], 10)
except (TypeError, ValueError):
parser_mediator.ProduceExtractionWarning(
'invalid date and time value: {0!s}'.format(date_time_value))
return None
time_elements_tuple = (year, month, day_of_month, hours, minutes, seconds)
try:
return dfdatetime_time_elements.TimeElements(
time_elements_tuple=time_elements_tuple)
except ValueError:
parser_mediator.ProduceExtractionWarning(
'invalid date and time value: {0!s}'.format(date_time_value))
return None | Parses a date time value.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
date_time_value (str): date time value
(CSSM_DB_ATTRIBUTE_FORMAT_TIME_DATE) in the format: "YYYYMMDDhhmmssZ".
Returns:
dfdatetime.TimeElements: date and time extracted from the value or None
if the value does not represent a valid string. | Below is the the instruction that describes the task:
### Input:
Parses a date time value.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
date_time_value (str): date time value
(CSSM_DB_ATTRIBUTE_FORMAT_TIME_DATE) in the format: "YYYYMMDDhhmmssZ".
Returns:
dfdatetime.TimeElements: date and time extracted from the value or None
if the value does not represent a valid string.
### Response:
def _ParseDateTimeValue(self, parser_mediator, date_time_value):
"""Parses a date time value.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
date_time_value (str): date time value
(CSSM_DB_ATTRIBUTE_FORMAT_TIME_DATE) in the format: "YYYYMMDDhhmmssZ".
Returns:
dfdatetime.TimeElements: date and time extracted from the value or None
if the value does not represent a valid string.
"""
if date_time_value[14] != 'Z':
parser_mediator.ProduceExtractionWarning(
'invalid date and time value: {0!s}'.format(date_time_value))
return None
try:
year = int(date_time_value[0:4], 10)
month = int(date_time_value[4:6], 10)
day_of_month = int(date_time_value[6:8], 10)
hours = int(date_time_value[8:10], 10)
minutes = int(date_time_value[10:12], 10)
seconds = int(date_time_value[12:14], 10)
except (TypeError, ValueError):
parser_mediator.ProduceExtractionWarning(
'invalid date and time value: {0!s}'.format(date_time_value))
return None
time_elements_tuple = (year, month, day_of_month, hours, minutes, seconds)
try:
return dfdatetime_time_elements.TimeElements(
time_elements_tuple=time_elements_tuple)
except ValueError:
parser_mediator.ProduceExtractionWarning(
'invalid date and time value: {0!s}'.format(date_time_value))
return None |
def adjustMask(self):
"""
Updates the alpha mask for this popup widget.
"""
if self.currentMode() == XPopupWidget.Mode.Dialog:
self.clearMask()
return
path = self.borderPath()
bitmap = QBitmap(self.width(), self.height())
bitmap.fill(QColor('white'))
with XPainter(bitmap) as painter:
painter.setRenderHint(XPainter.Antialiasing)
pen = QPen(QColor('black'))
pen.setWidthF(0.75)
painter.setPen(pen)
painter.setBrush(QColor('black'))
painter.drawPath(path)
self.setMask(bitmap) | Updates the alpha mask for this popup widget. | Below is the the instruction that describes the task:
### Input:
Updates the alpha mask for this popup widget.
### Response:
def adjustMask(self):
"""
Updates the alpha mask for this popup widget.
"""
if self.currentMode() == XPopupWidget.Mode.Dialog:
self.clearMask()
return
path = self.borderPath()
bitmap = QBitmap(self.width(), self.height())
bitmap.fill(QColor('white'))
with XPainter(bitmap) as painter:
painter.setRenderHint(XPainter.Antialiasing)
pen = QPen(QColor('black'))
pen.setWidthF(0.75)
painter.setPen(pen)
painter.setBrush(QColor('black'))
painter.drawPath(path)
self.setMask(bitmap) |
def get_country(self):
"""
:return: Country or None
"""
response = self._session.fetch("users.get", user_ids=self.id, fields="country")[0]
if response.get('country'):
return Country.from_json(self._session, response.get('country')) | :return: Country or None | Below is the the instruction that describes the task:
### Input:
:return: Country or None
### Response:
def get_country(self):
"""
:return: Country or None
"""
response = self._session.fetch("users.get", user_ids=self.id, fields="country")[0]
if response.get('country'):
return Country.from_json(self._session, response.get('country')) |
async def remove(self) -> None:
"""
Remove serialized wallet if it exists.
"""
LOGGER.debug('Wallet.remove >>>')
try:
LOGGER.info('Removing wallet: %s', self.name)
await wallet.delete_wallet(json.dumps(self.cfg), json.dumps(self.access_creds))
except IndyError as x_indy:
LOGGER.info('Abstaining from wallet removal; indy-sdk error code %s', x_indy.error_code)
LOGGER.debug('Wallet.remove <<<') | Remove serialized wallet if it exists. | Below is the the instruction that describes the task:
### Input:
Remove serialized wallet if it exists.
### Response:
async def remove(self) -> None:
"""
Remove serialized wallet if it exists.
"""
LOGGER.debug('Wallet.remove >>>')
try:
LOGGER.info('Removing wallet: %s', self.name)
await wallet.delete_wallet(json.dumps(self.cfg), json.dumps(self.access_creds))
except IndyError as x_indy:
LOGGER.info('Abstaining from wallet removal; indy-sdk error code %s', x_indy.error_code)
LOGGER.debug('Wallet.remove <<<') |
def retrieve_public_key(user_repo):
"""Retrieve the public key from the Travis API.
The Travis API response is accessed as JSON so that Travis-Encrypt
can easily find the public key that is to be passed to cryptography's
load_pem_public_key function. Due to issues with some public keys being
returned from the Travis API as PKCS8 encoded, the key is returned with
RSA removed from the header and footer.
Parameters
----------
user_repo: str
the repository in the format of 'username/repository'
Returns
-------
response: str
the public RSA key of the username's repository
Raises
------
InvalidCredentialsError
raised when an invalid 'username/repository' is given
"""
url = 'https://api.travis-ci.org/repos/{}/key' .format(user_repo)
response = requests.get(url)
try:
return response.json()['key'].replace(' RSA ', ' ')
except KeyError:
username, repository = user_repo.split('/')
raise InvalidCredentialsError("Either the username: '{}' or the repository: '{}' does not exist. Please enter a valid username or repository name. The username and repository name are both case sensitive." .format(username, repository)) | Retrieve the public key from the Travis API.
The Travis API response is accessed as JSON so that Travis-Encrypt
can easily find the public key that is to be passed to cryptography's
load_pem_public_key function. Due to issues with some public keys being
returned from the Travis API as PKCS8 encoded, the key is returned with
RSA removed from the header and footer.
Parameters
----------
user_repo: str
the repository in the format of 'username/repository'
Returns
-------
response: str
the public RSA key of the username's repository
Raises
------
InvalidCredentialsError
raised when an invalid 'username/repository' is given | Below is the the instruction that describes the task:
### Input:
Retrieve the public key from the Travis API.
The Travis API response is accessed as JSON so that Travis-Encrypt
can easily find the public key that is to be passed to cryptography's
load_pem_public_key function. Due to issues with some public keys being
returned from the Travis API as PKCS8 encoded, the key is returned with
RSA removed from the header and footer.
Parameters
----------
user_repo: str
the repository in the format of 'username/repository'
Returns
-------
response: str
the public RSA key of the username's repository
Raises
------
InvalidCredentialsError
raised when an invalid 'username/repository' is given
### Response:
def retrieve_public_key(user_repo):
"""Retrieve the public key from the Travis API.
The Travis API response is accessed as JSON so that Travis-Encrypt
can easily find the public key that is to be passed to cryptography's
load_pem_public_key function. Due to issues with some public keys being
returned from the Travis API as PKCS8 encoded, the key is returned with
RSA removed from the header and footer.
Parameters
----------
user_repo: str
the repository in the format of 'username/repository'
Returns
-------
response: str
the public RSA key of the username's repository
Raises
------
InvalidCredentialsError
raised when an invalid 'username/repository' is given
"""
url = 'https://api.travis-ci.org/repos/{}/key' .format(user_repo)
response = requests.get(url)
try:
return response.json()['key'].replace(' RSA ', ' ')
except KeyError:
username, repository = user_repo.split('/')
raise InvalidCredentialsError("Either the username: '{}' or the repository: '{}' does not exist. Please enter a valid username or repository name. The username and repository name are both case sensitive." .format(username, repository)) |
def DragDrop(x1: int, y1: int, x2: int, y2: int, moveSpeed: float = 1, waitTime: float = OPERATION_WAIT_TIME) -> None:
"""
Simulate mouse left button drag from point x1, y1 drop to point x2, y2.
x1: int.
y1: int.
x2: int.
y2: int.
moveSpeed: float, 1 normal speed, < 1 move slower, > 1 move faster.
waitTime: float.
"""
PressMouse(x1, y1, 0.05)
MoveTo(x2, y2, moveSpeed, 0.05)
ReleaseMouse(waitTime) | Simulate mouse left button drag from point x1, y1 drop to point x2, y2.
x1: int.
y1: int.
x2: int.
y2: int.
moveSpeed: float, 1 normal speed, < 1 move slower, > 1 move faster.
waitTime: float. | Below is the the instruction that describes the task:
### Input:
Simulate mouse left button drag from point x1, y1 drop to point x2, y2.
x1: int.
y1: int.
x2: int.
y2: int.
moveSpeed: float, 1 normal speed, < 1 move slower, > 1 move faster.
waitTime: float.
### Response:
def DragDrop(x1: int, y1: int, x2: int, y2: int, moveSpeed: float = 1, waitTime: float = OPERATION_WAIT_TIME) -> None:
"""
Simulate mouse left button drag from point x1, y1 drop to point x2, y2.
x1: int.
y1: int.
x2: int.
y2: int.
moveSpeed: float, 1 normal speed, < 1 move slower, > 1 move faster.
waitTime: float.
"""
PressMouse(x1, y1, 0.05)
MoveTo(x2, y2, moveSpeed, 0.05)
ReleaseMouse(waitTime) |
def generate_validator_constructor(ns, data_type):
"""
Given a Stone data type, returns a string that can be used to construct
the appropriate validation object in Python.
"""
dt, nullable_dt = unwrap_nullable(data_type)
if is_list_type(dt):
v = generate_func_call(
'bv.List',
args=[
generate_validator_constructor(ns, dt.data_type)],
kwargs=[
('min_items', dt.min_items),
('max_items', dt.max_items)],
)
elif is_map_type(dt):
v = generate_func_call(
'bv.Map',
args=[
generate_validator_constructor(ns, dt.key_data_type),
generate_validator_constructor(ns, dt.value_data_type),
]
)
elif is_numeric_type(dt):
v = generate_func_call(
'bv.{}'.format(dt.name),
kwargs=[
('min_value', dt.min_value),
('max_value', dt.max_value)],
)
elif is_string_type(dt):
pattern = None
if dt.pattern is not None:
pattern = repr(dt.pattern)
v = generate_func_call(
'bv.String',
kwargs=[
('min_length', dt.min_length),
('max_length', dt.max_length),
('pattern', pattern)],
)
elif is_timestamp_type(dt):
v = generate_func_call(
'bv.Timestamp',
args=[repr(dt.format)],
)
elif is_user_defined_type(dt):
v = fmt_class(dt.name) + '_validator'
if ns.name != dt.namespace.name:
v = '{}.{}'.format(fmt_namespace(dt.namespace.name), v)
elif is_alias(dt):
# Assume that the alias has already been declared elsewhere.
name = fmt_class(dt.name) + '_validator'
if ns.name != dt.namespace.name:
name = '{}.{}'.format(fmt_namespace(dt.namespace.name), name)
v = name
elif is_boolean_type(dt) or is_bytes_type(dt) or is_void_type(dt):
v = generate_func_call('bv.{}'.format(dt.name))
else:
raise AssertionError('Unsupported data type: %r' % dt)
if nullable_dt:
return generate_func_call('bv.Nullable', args=[v])
else:
return v | Given a Stone data type, returns a string that can be used to construct
the appropriate validation object in Python. | Below is the the instruction that describes the task:
### Input:
Given a Stone data type, returns a string that can be used to construct
the appropriate validation object in Python.
### Response:
def generate_validator_constructor(ns, data_type):
"""
Given a Stone data type, returns a string that can be used to construct
the appropriate validation object in Python.
"""
dt, nullable_dt = unwrap_nullable(data_type)
if is_list_type(dt):
v = generate_func_call(
'bv.List',
args=[
generate_validator_constructor(ns, dt.data_type)],
kwargs=[
('min_items', dt.min_items),
('max_items', dt.max_items)],
)
elif is_map_type(dt):
v = generate_func_call(
'bv.Map',
args=[
generate_validator_constructor(ns, dt.key_data_type),
generate_validator_constructor(ns, dt.value_data_type),
]
)
elif is_numeric_type(dt):
v = generate_func_call(
'bv.{}'.format(dt.name),
kwargs=[
('min_value', dt.min_value),
('max_value', dt.max_value)],
)
elif is_string_type(dt):
pattern = None
if dt.pattern is not None:
pattern = repr(dt.pattern)
v = generate_func_call(
'bv.String',
kwargs=[
('min_length', dt.min_length),
('max_length', dt.max_length),
('pattern', pattern)],
)
elif is_timestamp_type(dt):
v = generate_func_call(
'bv.Timestamp',
args=[repr(dt.format)],
)
elif is_user_defined_type(dt):
v = fmt_class(dt.name) + '_validator'
if ns.name != dt.namespace.name:
v = '{}.{}'.format(fmt_namespace(dt.namespace.name), v)
elif is_alias(dt):
# Assume that the alias has already been declared elsewhere.
name = fmt_class(dt.name) + '_validator'
if ns.name != dt.namespace.name:
name = '{}.{}'.format(fmt_namespace(dt.namespace.name), name)
v = name
elif is_boolean_type(dt) or is_bytes_type(dt) or is_void_type(dt):
v = generate_func_call('bv.{}'.format(dt.name))
else:
raise AssertionError('Unsupported data type: %r' % dt)
if nullable_dt:
return generate_func_call('bv.Nullable', args=[v])
else:
return v |
def color_split_position(self):
"""The SVG x position where the color split should occur."""
return self.get_text_width(' ') + self.label_width + \
int(float(self.font_width) * float(self.num_padding_chars)) | The SVG x position where the color split should occur. | Below is the the instruction that describes the task:
### Input:
The SVG x position where the color split should occur.
### Response:
def color_split_position(self):
"""The SVG x position where the color split should occur."""
return self.get_text_width(' ') + self.label_width + \
int(float(self.font_width) * float(self.num_padding_chars)) |
def piece_file(input_f, chunk_size):
"""
Provides a streaming interface to file data in chunks of even size, which
avoids memoryerrors from loading whole files into RAM to pass to `pieces`.
"""
chunk = input_f.read(chunk_size)
total_bytes = 0
while chunk:
yield chunk
chunk = input_f.read(chunk_size)
total_bytes += len(chunk) | Provides a streaming interface to file data in chunks of even size, which
avoids memoryerrors from loading whole files into RAM to pass to `pieces`. | Below is the the instruction that describes the task:
### Input:
Provides a streaming interface to file data in chunks of even size, which
avoids memoryerrors from loading whole files into RAM to pass to `pieces`.
### Response:
def piece_file(input_f, chunk_size):
"""
Provides a streaming interface to file data in chunks of even size, which
avoids memoryerrors from loading whole files into RAM to pass to `pieces`.
"""
chunk = input_f.read(chunk_size)
total_bytes = 0
while chunk:
yield chunk
chunk = input_f.read(chunk_size)
total_bytes += len(chunk) |
def _realpath(fs, path, seen=pset()):
"""
.. warning::
The ``os.path`` module's realpath does not error or warn about
loops, but we do, following the behavior of GNU ``realpath(1)``!
"""
real = Path.root()
for segment in path.segments:
current = real / segment
seen = seen.add(current)
while True:
try:
current = fs.readlink(current)
except (exceptions.FileNotFound, exceptions.NotASymlink):
break
else:
current = current.relative_to(real)
if current in seen:
raise exceptions.SymbolicLoop(path)
current = fs.realpath(current, seen=seen)
real = current
return real | .. warning::
The ``os.path`` module's realpath does not error or warn about
loops, but we do, following the behavior of GNU ``realpath(1)``! | Below is the the instruction that describes the task:
### Input:
.. warning::
The ``os.path`` module's realpath does not error or warn about
loops, but we do, following the behavior of GNU ``realpath(1)``!
### Response:
def _realpath(fs, path, seen=pset()):
"""
.. warning::
The ``os.path`` module's realpath does not error or warn about
loops, but we do, following the behavior of GNU ``realpath(1)``!
"""
real = Path.root()
for segment in path.segments:
current = real / segment
seen = seen.add(current)
while True:
try:
current = fs.readlink(current)
except (exceptions.FileNotFound, exceptions.NotASymlink):
break
else:
current = current.relative_to(real)
if current in seen:
raise exceptions.SymbolicLoop(path)
current = fs.realpath(current, seen=seen)
real = current
return real |
def get_markdown_files(self, dir_):
"""
Get all the markdown files in a folder, recursively
Args:
dir_: str, a toplevel folder to walk.
"""
md_files = OrderedSet()
for root, _, files in os.walk(dir_):
for name in files:
split = os.path.splitext(name)
if len(split) == 1:
continue
if split[1] in ('.markdown', '.md', '.yaml'):
md_files.add(os.path.join(root, name))
return md_files | Get all the markdown files in a folder, recursively
Args:
dir_: str, a toplevel folder to walk. | Below is the the instruction that describes the task:
### Input:
Get all the markdown files in a folder, recursively
Args:
dir_: str, a toplevel folder to walk.
### Response:
def get_markdown_files(self, dir_):
"""
Get all the markdown files in a folder, recursively
Args:
dir_: str, a toplevel folder to walk.
"""
md_files = OrderedSet()
for root, _, files in os.walk(dir_):
for name in files:
split = os.path.splitext(name)
if len(split) == 1:
continue
if split[1] in ('.markdown', '.md', '.yaml'):
md_files.add(os.path.join(root, name))
return md_files |
def get_attributes(self, section, strip_namespaces=False):
"""
Returns given section attributes.
Usage::
>>> content = ["[Section A]\\n", "; Comment.\\n", "Attribute 1 = \\"Value A\\"\\n", "\\n", \
"[Section B]\\n", "Attribute 2 = \\"Value B\\"\\n"]
>>> sections_file_parser = SectionsFileParser()
>>> sections_file_parser.content = content
>>> sections_file_parser.parse()
<foundations.parsers.SectionsFileParser object at 0x125698322>
>>> sections_file_parser.get_attributes("Section A")
OrderedDict([(u'Section A|Attribute 1', u'Value A')])
>>> sections_file_parser.preserve_order=False
>>> sections_file_parser.get_attributes("Section A")
{u'Section A|Attribute 1': u'Value A'}
>>> sections_file_parser.preserve_order=True
>>> sections_file_parser.get_attributes("Section A", strip_namespaces=True)
OrderedDict([(u'Attribute 1', u'Value A')])
:param section: Section containing the requested attributes.
:type section: unicode
:param strip_namespaces: Strip namespaces while retrieving attributes.
:type strip_namespaces: bool
:return: Attributes.
:rtype: OrderedDict or dict
"""
LOGGER.debug("> Getting section '{0}' attributes.".format(section))
attributes = OrderedDict() if self.__preserve_order else dict()
if not self.section_exists(section):
return attributes
if strip_namespaces:
for attribute, value in self.__sections[section].iteritems():
attributes[foundations.namespace.remove_namespace(attribute, root_only=True)] = value
else:
attributes.update(self.__sections[section])
LOGGER.debug("> Attributes: '{0}'.".format(attributes))
return attributes | Returns given section attributes.
Usage::
>>> content = ["[Section A]\\n", "; Comment.\\n", "Attribute 1 = \\"Value A\\"\\n", "\\n", \
"[Section B]\\n", "Attribute 2 = \\"Value B\\"\\n"]
>>> sections_file_parser = SectionsFileParser()
>>> sections_file_parser.content = content
>>> sections_file_parser.parse()
<foundations.parsers.SectionsFileParser object at 0x125698322>
>>> sections_file_parser.get_attributes("Section A")
OrderedDict([(u'Section A|Attribute 1', u'Value A')])
>>> sections_file_parser.preserve_order=False
>>> sections_file_parser.get_attributes("Section A")
{u'Section A|Attribute 1': u'Value A'}
>>> sections_file_parser.preserve_order=True
>>> sections_file_parser.get_attributes("Section A", strip_namespaces=True)
OrderedDict([(u'Attribute 1', u'Value A')])
:param section: Section containing the requested attributes.
:type section: unicode
:param strip_namespaces: Strip namespaces while retrieving attributes.
:type strip_namespaces: bool
:return: Attributes.
:rtype: OrderedDict or dict | Below is the the instruction that describes the task:
### Input:
Returns given section attributes.
Usage::
>>> content = ["[Section A]\\n", "; Comment.\\n", "Attribute 1 = \\"Value A\\"\\n", "\\n", \
"[Section B]\\n", "Attribute 2 = \\"Value B\\"\\n"]
>>> sections_file_parser = SectionsFileParser()
>>> sections_file_parser.content = content
>>> sections_file_parser.parse()
<foundations.parsers.SectionsFileParser object at 0x125698322>
>>> sections_file_parser.get_attributes("Section A")
OrderedDict([(u'Section A|Attribute 1', u'Value A')])
>>> sections_file_parser.preserve_order=False
>>> sections_file_parser.get_attributes("Section A")
{u'Section A|Attribute 1': u'Value A'}
>>> sections_file_parser.preserve_order=True
>>> sections_file_parser.get_attributes("Section A", strip_namespaces=True)
OrderedDict([(u'Attribute 1', u'Value A')])
:param section: Section containing the requested attributes.
:type section: unicode
:param strip_namespaces: Strip namespaces while retrieving attributes.
:type strip_namespaces: bool
:return: Attributes.
:rtype: OrderedDict or dict
### Response:
def get_attributes(self, section, strip_namespaces=False):
"""
Returns given section attributes.
Usage::
>>> content = ["[Section A]\\n", "; Comment.\\n", "Attribute 1 = \\"Value A\\"\\n", "\\n", \
"[Section B]\\n", "Attribute 2 = \\"Value B\\"\\n"]
>>> sections_file_parser = SectionsFileParser()
>>> sections_file_parser.content = content
>>> sections_file_parser.parse()
<foundations.parsers.SectionsFileParser object at 0x125698322>
>>> sections_file_parser.get_attributes("Section A")
OrderedDict([(u'Section A|Attribute 1', u'Value A')])
>>> sections_file_parser.preserve_order=False
>>> sections_file_parser.get_attributes("Section A")
{u'Section A|Attribute 1': u'Value A'}
>>> sections_file_parser.preserve_order=True
>>> sections_file_parser.get_attributes("Section A", strip_namespaces=True)
OrderedDict([(u'Attribute 1', u'Value A')])
:param section: Section containing the requested attributes.
:type section: unicode
:param strip_namespaces: Strip namespaces while retrieving attributes.
:type strip_namespaces: bool
:return: Attributes.
:rtype: OrderedDict or dict
"""
LOGGER.debug("> Getting section '{0}' attributes.".format(section))
attributes = OrderedDict() if self.__preserve_order else dict()
if not self.section_exists(section):
return attributes
if strip_namespaces:
for attribute, value in self.__sections[section].iteritems():
attributes[foundations.namespace.remove_namespace(attribute, root_only=True)] = value
else:
attributes.update(self.__sections[section])
LOGGER.debug("> Attributes: '{0}'.".format(attributes))
return attributes |
def merge(cls, *others):
"""
Merge the `others` schema into this instance.
The values will all be read from the provider of the original object.
"""
for other in others:
for k, v in other:
setattr(cls, k, BoundValue(cls, k, v.value)) | Merge the `others` schema into this instance.
The values will all be read from the provider of the original object. | Below is the the instruction that describes the task:
### Input:
Merge the `others` schema into this instance.
The values will all be read from the provider of the original object.
### Response:
def merge(cls, *others):
"""
Merge the `others` schema into this instance.
The values will all be read from the provider of the original object.
"""
for other in others:
for k, v in other:
setattr(cls, k, BoundValue(cls, k, v.value)) |
def fingerprint_target(self, target):
"""Consumers of subclass instances call this to get a fingerprint labeled with the name"""
fingerprint = self.compute_fingerprint(target)
if fingerprint:
return '{fingerprint}-{name}'.format(fingerprint=fingerprint, name=type(self).__name__)
else:
return None | Consumers of subclass instances call this to get a fingerprint labeled with the name | Below is the the instruction that describes the task:
### Input:
Consumers of subclass instances call this to get a fingerprint labeled with the name
### Response:
def fingerprint_target(self, target):
"""Consumers of subclass instances call this to get a fingerprint labeled with the name"""
fingerprint = self.compute_fingerprint(target)
if fingerprint:
return '{fingerprint}-{name}'.format(fingerprint=fingerprint, name=type(self).__name__)
else:
return None |
def execute_cast_timestamp_to_timestamp(op, data, type, **kwargs):
"""Cast timestamps to other timestamps including timezone if necessary"""
input_timezone = data.tz
target_timezone = type.timezone
if input_timezone == target_timezone:
return data
if input_timezone is None or target_timezone is None:
return data.tz_localize(target_timezone)
return data.tz_convert(target_timezone) | Cast timestamps to other timestamps including timezone if necessary | Below is the the instruction that describes the task:
### Input:
Cast timestamps to other timestamps including timezone if necessary
### Response:
def execute_cast_timestamp_to_timestamp(op, data, type, **kwargs):
"""Cast timestamps to other timestamps including timezone if necessary"""
input_timezone = data.tz
target_timezone = type.timezone
if input_timezone == target_timezone:
return data
if input_timezone is None or target_timezone is None:
return data.tz_localize(target_timezone)
return data.tz_convert(target_timezone) |
def regenerate_good_tokens(string):
"""
Given an input string, part of speech tags the string, then generates a list of
ngrams that appear in the string.
Used to define grammatically correct part of speech tag sequences.
Returns a list of part of speech tag sequences.
"""
toks = nltk.word_tokenize(string)
pos_string = nltk.pos_tag(toks)
pos_seq = [tag[1] for tag in pos_string]
pos_ngrams = ngrams(pos_seq, 2, 4)
sel_pos_ngrams = f7(pos_ngrams)
return sel_pos_ngrams | Given an input string, part of speech tags the string, then generates a list of
ngrams that appear in the string.
Used to define grammatically correct part of speech tag sequences.
Returns a list of part of speech tag sequences. | Below is the the instruction that describes the task:
### Input:
Given an input string, part of speech tags the string, then generates a list of
ngrams that appear in the string.
Used to define grammatically correct part of speech tag sequences.
Returns a list of part of speech tag sequences.
### Response:
def regenerate_good_tokens(string):
"""
Given an input string, part of speech tags the string, then generates a list of
ngrams that appear in the string.
Used to define grammatically correct part of speech tag sequences.
Returns a list of part of speech tag sequences.
"""
toks = nltk.word_tokenize(string)
pos_string = nltk.pos_tag(toks)
pos_seq = [tag[1] for tag in pos_string]
pos_ngrams = ngrams(pos_seq, 2, 4)
sel_pos_ngrams = f7(pos_ngrams)
return sel_pos_ngrams |
def parse_authn_query_response(self, response, binding=BINDING_SOAP):
""" Verify that the response is OK
"""
kwargs = {"entity_id": self.config.entityid,
"attribute_converters": self.config.attribute_converters}
return self._parse_response(response, AuthnQueryResponse, "", binding,
**kwargs) | Verify that the response is OK | Below is the the instruction that describes the task:
### Input:
Verify that the response is OK
### Response:
def parse_authn_query_response(self, response, binding=BINDING_SOAP):
""" Verify that the response is OK
"""
kwargs = {"entity_id": self.config.entityid,
"attribute_converters": self.config.attribute_converters}
return self._parse_response(response, AuthnQueryResponse, "", binding,
**kwargs) |
def _format_output(content, typ):
"""Tabularize the content according to its type.
Args:
content (str): The content of a metric.
typ (str): The type of metric -- (raw|json|tsv|htsv|csv|hcsv).
Returns:
str: Content in a raw or tabular format.
"""
if "csv" in str(typ):
return _format_csv(content, delimiter=",")
if "tsv" in str(typ):
return _format_csv(content, delimiter="\t")
return content | Tabularize the content according to its type.
Args:
content (str): The content of a metric.
typ (str): The type of metric -- (raw|json|tsv|htsv|csv|hcsv).
Returns:
str: Content in a raw or tabular format. | Below is the the instruction that describes the task:
### Input:
Tabularize the content according to its type.
Args:
content (str): The content of a metric.
typ (str): The type of metric -- (raw|json|tsv|htsv|csv|hcsv).
Returns:
str: Content in a raw or tabular format.
### Response:
def _format_output(content, typ):
"""Tabularize the content according to its type.
Args:
content (str): The content of a metric.
typ (str): The type of metric -- (raw|json|tsv|htsv|csv|hcsv).
Returns:
str: Content in a raw or tabular format.
"""
if "csv" in str(typ):
return _format_csv(content, delimiter=",")
if "tsv" in str(typ):
return _format_csv(content, delimiter="\t")
return content |
def getNamespaces(self):
"""
Get the I{unique} set of namespaces referenced in the branch.
@return: A set of namespaces.
@rtype: set
"""
s = set()
for n in self.branch + self.node.ancestors():
if self.permit(n.expns):
s.add(n.expns)
s = s.union(self.pset(n))
return s | Get the I{unique} set of namespaces referenced in the branch.
@return: A set of namespaces.
@rtype: set | Below is the the instruction that describes the task:
### Input:
Get the I{unique} set of namespaces referenced in the branch.
@return: A set of namespaces.
@rtype: set
### Response:
def getNamespaces(self):
"""
Get the I{unique} set of namespaces referenced in the branch.
@return: A set of namespaces.
@rtype: set
"""
s = set()
for n in self.branch + self.node.ancestors():
if self.permit(n.expns):
s.add(n.expns)
s = s.union(self.pset(n))
return s |
def match_to_dict(match):
"""Convert a match object into a dict.
Values are:
indent: amount of indentation of this [sub]account
parent: the parent dict (None)
account_fragment: account name fragment
balance: decimal.Decimal balance
children: sub-accounts ([])
"""
balance, indent, account_fragment = match.group(1, 2, 3)
return {
'balance': decimal.Decimal(balance),
'indent': len(indent),
'account_fragment': account_fragment,
'parent': None,
'children': [],
} | Convert a match object into a dict.
Values are:
indent: amount of indentation of this [sub]account
parent: the parent dict (None)
account_fragment: account name fragment
balance: decimal.Decimal balance
children: sub-accounts ([]) | Below is the the instruction that describes the task:
### Input:
Convert a match object into a dict.
Values are:
indent: amount of indentation of this [sub]account
parent: the parent dict (None)
account_fragment: account name fragment
balance: decimal.Decimal balance
children: sub-accounts ([])
### Response:
def match_to_dict(match):
"""Convert a match object into a dict.
Values are:
indent: amount of indentation of this [sub]account
parent: the parent dict (None)
account_fragment: account name fragment
balance: decimal.Decimal balance
children: sub-accounts ([])
"""
balance, indent, account_fragment = match.group(1, 2, 3)
return {
'balance': decimal.Decimal(balance),
'indent': len(indent),
'account_fragment': account_fragment,
'parent': None,
'children': [],
} |
def get_albums(self, path):
"""Return the list of all sub-directories of path."""
for name in self.albums[path].subdirs:
subdir = os.path.normpath(join(path, name))
yield subdir, self.albums[subdir]
for subname, album in self.get_albums(subdir):
yield subname, self.albums[subdir] | Return the list of all sub-directories of path. | Below is the the instruction that describes the task:
### Input:
Return the list of all sub-directories of path.
### Response:
def get_albums(self, path):
"""Return the list of all sub-directories of path."""
for name in self.albums[path].subdirs:
subdir = os.path.normpath(join(path, name))
yield subdir, self.albums[subdir]
for subname, album in self.get_albums(subdir):
yield subname, self.albums[subdir] |
async def _sync_revoc_for_proof(self, rr_id: str) -> None:
"""
Pick up tails file reader handle for input revocation registry identifier. If no symbolic
link is present, get the revocation registry definition to retrieve its tails file hash,
then find the tails file and link it.
Raise AbsentTails for missing corresponding tails file.
:param rr_id: revocation registry identifier
"""
LOGGER.debug('HolderProver._sync_revoc_for_proof >>> rr_id: %s', rr_id)
if not ok_rev_reg_id(rr_id):
LOGGER.debug('HolderProver._sync_revoc_for_proof <!< Bad rev reg id %s', rr_id)
raise BadIdentifier('Bad rev reg id {}'.format(rr_id))
(cd_id, tag) = rev_reg_id2cred_def_id_tag(rr_id)
try:
json.loads(await self.get_cred_def(cd_id))
except AbsentCredDef:
LOGGER.debug(
'HolderProver._sync_revoc_for_proof <!< corrupt tails tree %s may be for another ledger',
self._dir_tails)
raise AbsentCredDef('Corrupt tails tree {} may be for another ledger'.format(self._dir_tails))
except ClosedPool:
pass # carry on, may be OK from cache only
with REVO_CACHE.lock:
revo_cache_entry = REVO_CACHE.get(rr_id, None)
tails = revo_cache_entry.tails if revo_cache_entry else None
if tails is None: # it's not yet set in cache
try:
tails = await Tails(self._dir_tails, cd_id, tag).open()
except AbsentTails: # get hash from ledger and check for tails file
rr_def = json.loads(await self.get_rev_reg_def(rr_id))
tails_hash = rr_def['value']['tailsHash']
path_tails = join(Tails.dir(self._dir_tails, rr_id), tails_hash)
if not isfile(path_tails):
LOGGER.debug('HolderProver._sync_revoc_for_proof <!< No tails file present at %s', path_tails)
raise AbsentTails('No tails file present at {}'.format(path_tails))
Tails.associate(self._dir_tails, rr_id, tails_hash)
tails = await Tails(self._dir_tails, cd_id, tag).open() # OK now since tails file present
if revo_cache_entry is None:
REVO_CACHE[rr_id] = RevoCacheEntry(None, tails)
else:
REVO_CACHE[rr_id].tails = tails
LOGGER.debug('HolderProver._sync_revoc_for_proof <<<') | Pick up tails file reader handle for input revocation registry identifier. If no symbolic
link is present, get the revocation registry definition to retrieve its tails file hash,
then find the tails file and link it.
Raise AbsentTails for missing corresponding tails file.
:param rr_id: revocation registry identifier | Below is the the instruction that describes the task:
### Input:
Pick up tails file reader handle for input revocation registry identifier. If no symbolic
link is present, get the revocation registry definition to retrieve its tails file hash,
then find the tails file and link it.
Raise AbsentTails for missing corresponding tails file.
:param rr_id: revocation registry identifier
### Response:
async def _sync_revoc_for_proof(self, rr_id: str) -> None:
"""
Pick up tails file reader handle for input revocation registry identifier. If no symbolic
link is present, get the revocation registry definition to retrieve its tails file hash,
then find the tails file and link it.
Raise AbsentTails for missing corresponding tails file.
:param rr_id: revocation registry identifier
"""
LOGGER.debug('HolderProver._sync_revoc_for_proof >>> rr_id: %s', rr_id)
if not ok_rev_reg_id(rr_id):
LOGGER.debug('HolderProver._sync_revoc_for_proof <!< Bad rev reg id %s', rr_id)
raise BadIdentifier('Bad rev reg id {}'.format(rr_id))
(cd_id, tag) = rev_reg_id2cred_def_id_tag(rr_id)
try:
json.loads(await self.get_cred_def(cd_id))
except AbsentCredDef:
LOGGER.debug(
'HolderProver._sync_revoc_for_proof <!< corrupt tails tree %s may be for another ledger',
self._dir_tails)
raise AbsentCredDef('Corrupt tails tree {} may be for another ledger'.format(self._dir_tails))
except ClosedPool:
pass # carry on, may be OK from cache only
with REVO_CACHE.lock:
revo_cache_entry = REVO_CACHE.get(rr_id, None)
tails = revo_cache_entry.tails if revo_cache_entry else None
if tails is None: # it's not yet set in cache
try:
tails = await Tails(self._dir_tails, cd_id, tag).open()
except AbsentTails: # get hash from ledger and check for tails file
rr_def = json.loads(await self.get_rev_reg_def(rr_id))
tails_hash = rr_def['value']['tailsHash']
path_tails = join(Tails.dir(self._dir_tails, rr_id), tails_hash)
if not isfile(path_tails):
LOGGER.debug('HolderProver._sync_revoc_for_proof <!< No tails file present at %s', path_tails)
raise AbsentTails('No tails file present at {}'.format(path_tails))
Tails.associate(self._dir_tails, rr_id, tails_hash)
tails = await Tails(self._dir_tails, cd_id, tag).open() # OK now since tails file present
if revo_cache_entry is None:
REVO_CACHE[rr_id] = RevoCacheEntry(None, tails)
else:
REVO_CACHE[rr_id].tails = tails
LOGGER.debug('HolderProver._sync_revoc_for_proof <<<') |
def _cmd_run(cmd):
'''
Run the aptly command.
:return: The string output of the command.
:rtype: str
'''
cmd.insert(0, 'aptly')
cmd_ret = __salt__['cmd.run_all'](cmd, ignore_retcode=True)
if cmd_ret['retcode'] != 0:
log.debug('Unable to execute command: %s\nError: %s', cmd,
cmd_ret['stderr'])
return cmd_ret['stdout'] | Run the aptly command.
:return: The string output of the command.
:rtype: str | Below is the the instruction that describes the task:
### Input:
Run the aptly command.
:return: The string output of the command.
:rtype: str
### Response:
def _cmd_run(cmd):
'''
Run the aptly command.
:return: The string output of the command.
:rtype: str
'''
cmd.insert(0, 'aptly')
cmd_ret = __salt__['cmd.run_all'](cmd, ignore_retcode=True)
if cmd_ret['retcode'] != 0:
log.debug('Unable to execute command: %s\nError: %s', cmd,
cmd_ret['stderr'])
return cmd_ret['stdout'] |
def load_models(self, model_path):
"""
Load models from pickle files.
"""
condition_model_files = sorted(glob(model_path + "*_condition.pkl"))
if len(condition_model_files) > 0:
for condition_model_file in condition_model_files:
model_comps = condition_model_file.split("/")[-1][:-4].split("_")
if model_comps[0] not in self.condition_models.keys():
self.condition_models[model_comps[0]] = {}
model_name = model_comps[1].replace("-", " ")
with open(condition_model_file, "rb") as cmf:
if "condition_threshold" in condition_model_file:
self.condition_models[model_comps[0]][model_name + "_condition_threshold"] = pickle.load(cmf)
else:
self.condition_models[model_comps[0]][model_name] = pickle.load(cmf)
size_model_files = sorted(glob(model_path + "*_size.pkl"))
if len(size_model_files) > 0:
for size_model_file in size_model_files:
model_comps = size_model_file.split("/")[-1][:-4].split("_")
if model_comps[0] not in self.size_models.keys():
self.size_models[model_comps[0]] = {}
model_name = model_comps[1].replace("-", " ")
with open(size_model_file, "rb") as smf:
self.size_models[model_comps[0]][model_name] = pickle.load(smf)
size_dist_model_files = sorted(glob(model_path + "*_sizedist.pkl"))
if len(size_dist_model_files) > 0:
for dist_model_file in size_dist_model_files:
model_comps = dist_model_file.split("/")[-1][:-4].split("_")
if model_comps[0] not in self.size_distribution_models.keys():
self.size_distribution_models[model_comps[0]] = {}
if "_".join(model_comps[2:-1]) not in self.size_distribution_models[model_comps[0]].keys():
self.size_distribution_models[model_comps[0]]["_".join(model_comps[2:-1])] = {}
model_name = model_comps[1].replace("-", " ")
with open(dist_model_file, "rb") as dmf:
self.size_distribution_models[model_comps[0]]["_".join(model_comps[2:-1])][
model_name] = pickle.load(dmf)
track_model_files = sorted(glob(model_path + "*_track.pkl"))
if len(track_model_files) > 0:
for track_model_file in track_model_files:
model_comps = track_model_file.split("/")[-1][:-4].split("_")
group = model_comps[0]
model_name = model_comps[1].replace("-", " ")
model_type = model_comps[2]
if model_type not in self.track_models.keys():
self.track_models[model_type] = {}
if group not in self.track_models[model_type].keys():
self.track_models[model_type][group] = {}
with open(track_model_file, "rb") as tmf:
self.track_models[model_type][group][model_name] = pickle.load(tmf) | Load models from pickle files. | Below is the the instruction that describes the task:
### Input:
Load models from pickle files.
### Response:
def load_models(self, model_path):
    """
    Load models from pickle files.

    Scans ``model_path`` for pickled model files and populates the instance
    model registries. Four file-name patterns are recognised:

    * ``*_condition.pkl``  -> ``self.condition_models``
    * ``*_size.pkl``       -> ``self.size_models``
    * ``*_sizedist.pkl``   -> ``self.size_distribution_models``
    * ``*_track.pkl``      -> ``self.track_models``

    File names are split on underscores as ``<group>_<model-name>_<suffix>``;
    hyphens in the model-name component are converted back to spaces.

    :param model_path: path prefix (directory, including any trailing
        separator) that is globbed for pickle files.

    .. note:: files are deserialised with ``pickle.load`` — only point this
       at trusted model directories, as unpickling can execute code.
    """
    # --- condition models: self.condition_models[group][model_name] ---
    condition_model_files = sorted(glob(model_path + "*_condition.pkl"))
    if len(condition_model_files) > 0:
        for condition_model_file in condition_model_files:
            # "<group>_<model-name>_condition" (".pkl" stripped via [:-4])
            model_comps = condition_model_file.split("/")[-1][:-4].split("_")
            if model_comps[0] not in self.condition_models.keys():
                self.condition_models[model_comps[0]] = {}
            model_name = model_comps[1].replace("-", " ")
            with open(condition_model_file, "rb") as cmf:
                if "condition_threshold" in condition_model_file:
                    # threshold objects are stored under a suffixed key
                    self.condition_models[model_comps[0]][model_name + "_condition_threshold"] = pickle.load(cmf)
                else:
                    self.condition_models[model_comps[0]][model_name] = pickle.load(cmf)
    # --- size models: self.size_models[group][model_name] ---
    size_model_files = sorted(glob(model_path + "*_size.pkl"))
    if len(size_model_files) > 0:
        for size_model_file in size_model_files:
            model_comps = size_model_file.split("/")[-1][:-4].split("_")
            if model_comps[0] not in self.size_models.keys():
                self.size_models[model_comps[0]] = {}
            model_name = model_comps[1].replace("-", " ")
            with open(size_model_file, "rb") as smf:
                self.size_models[model_comps[0]][model_name] = pickle.load(smf)
    # --- size distribution models, keyed additionally by the middle
    #     filename components joined with "_" ---
    size_dist_model_files = sorted(glob(model_path + "*_sizedist.pkl"))
    if len(size_dist_model_files) > 0:
        for dist_model_file in size_dist_model_files:
            model_comps = dist_model_file.split("/")[-1][:-4].split("_")
            if model_comps[0] not in self.size_distribution_models.keys():
                self.size_distribution_models[model_comps[0]] = {}
            if "_".join(model_comps[2:-1]) not in self.size_distribution_models[model_comps[0]].keys():
                self.size_distribution_models[model_comps[0]]["_".join(model_comps[2:-1])] = {}
            model_name = model_comps[1].replace("-", " ")
            with open(dist_model_file, "rb") as dmf:
                self.size_distribution_models[model_comps[0]]["_".join(model_comps[2:-1])][
                    model_name] = pickle.load(dmf)
    # --- track models: self.track_models[model_type][group][model_name]
    #     (note the extra model_type level taken from the 3rd component) ---
    track_model_files = sorted(glob(model_path + "*_track.pkl"))
    if len(track_model_files) > 0:
        for track_model_file in track_model_files:
            model_comps = track_model_file.split("/")[-1][:-4].split("_")
            group = model_comps[0]
            model_name = model_comps[1].replace("-", " ")
            model_type = model_comps[2]
            if model_type not in self.track_models.keys():
                self.track_models[model_type] = {}
            if group not in self.track_models[model_type].keys():
                self.track_models[model_type][group] = {}
            with open(track_model_file, "rb") as tmf:
                self.track_models[model_type][group][model_name] = pickle.load(tmf)
def get_token(self, appname, username, password):
"""
get the security token by connecting to TouchWorks API
"""
ext_exception = TouchWorksException(
TouchWorksErrorMessages.GET_TOKEN_FAILED_ERROR)
data = {'Username': username,
'Password': password}
resp = self._http_request(TouchWorksEndPoints.GET_TOKEN, data)
try:
logger.debug('token : %s' % resp)
if not resp.text:
raise ext_exception
try:
uuid.UUID(resp.text, version=4)
return SecurityToken(resp.text)
except ValueError:
logger.error('response was not valid uuid string. %s' % resp.text)
raise ext_exception
except Exception as ex:
logger.exception(ex)
raise ext_exception | get the security token by connecting to TouchWorks API | Below is the the instruction that describes the task:
### Input:
get the security token by connecting to TouchWorks API
### Response:
def get_token(self, appname, username, password):
    """
    get the security token by connecting to TouchWorks API

    :param appname: application name (not referenced by this method —
        TODO confirm whether the endpoint requires it)
    :param username: TouchWorks account user name
    :param password: TouchWorks account password
    :return: a ``SecurityToken`` wrapping the UUID string returned by the API
    :raises TouchWorksException: if the request fails, the response body is
        empty, or the body is not a valid version-4 UUID
    """
    ext_exception = TouchWorksException(
        TouchWorksErrorMessages.GET_TOKEN_FAILED_ERROR)
    data = {'Username': username,
            'Password': password}
    resp = self._http_request(TouchWorksEndPoints.GET_TOKEN, data)
    try:
        logger.debug('token : %s' % resp)
        if not resp.text:
            # empty body -> caught below, logged, and re-raised
            raise ext_exception
        try:
            # the token must parse as a v4 UUID; anything else is an error
            uuid.UUID(resp.text, version=4)
            return SecurityToken(resp.text)
        except ValueError:
            logger.error('response was not valid uuid string. %s' % resp.text)
            raise ext_exception
    except Exception as ex:
        # broad catch so every failure path is logged before the single
        # domain-specific exception is raised to the caller
        logger.exception(ex)
        raise ext_exception
def values(self):
"""
Returns an `np.array` of type `dtype` containing
some values from the domain.
For domains where `is_finite` is ``True``, all elements
of the domain will be yielded exactly once.
:rtype: `np.ndarray`
"""
separate_values = [domain.values for domain in self._domains]
return np.concatenate([
join_struct_arrays(list(map(np.array, value)))
for value in product(*separate_values)
]) | Returns an `np.array` of type `dtype` containing
some values from the domain.
For domains where `is_finite` is ``True``, all elements
of the domain will be yielded exactly once.
:rtype: `np.ndarray` | Below is the instruction that describes the task:
### Input:
Returns an `np.array` of type `dtype` containing
some values from the domain.
For domains where `is_finite` is ``True``, all elements
of the domain will be yielded exactly once.
:rtype: `np.ndarray`
### Response:
def values(self):
    """
    Returns an `np.array` of type `dtype` containing
    some values from the domain.

    For domains where `is_finite` is ``True``, all elements of the domain
    will be yielded exactly once (the Cartesian product of the values of
    every sub-domain, joined into a single structured array).

    :rtype: `np.ndarray`
    """
    per_domain = [domain.values for domain in self._domains]
    joined_rows = []
    for combo in product(*per_domain):
        # join one value from each sub-domain into a single struct row
        arrays = [np.array(part) for part in combo]
        joined_rows.append(join_struct_arrays(arrays))
    return np.concatenate(joined_rows)
def aggregate_series(self, key, aggregate, start=None, end=None,
interval=None, namespace=None, cache=None,
percentile=None):
"""Get a time series of gauge aggregates"""
return self.make_context(key=key, aggregate=aggregate, start=start,
end=end, interval=interval,
namespace=namespace, cache=cache,
percentile=percentile).aggregate_series() | Get a time series of gauge aggregates | Below is the the instruction that describes the task:
### Input:
Get a time series of gauge aggregates
### Response:
def aggregate_series(self, key, aggregate, start=None, end=None,
                     interval=None, namespace=None, cache=None,
                     percentile=None):
    """Get a time series of gauge aggregates.

    Builds a query context from the given parameters and delegates to
    its ``aggregate_series`` method.
    """
    context = self.make_context(key=key, aggregate=aggregate, start=start,
                                end=end, interval=interval,
                                namespace=namespace, cache=cache,
                                percentile=percentile)
    return context.aggregate_series()
def toggle_shells(command, enable):
"""Enable or disable the specified shells. If the command would have
no effect, it changes all other shells to the inverse enable value."""
selection = list(selected_shells(command))
if command and command != '*' and selection:
for i in selection:
if i.state != remote_dispatcher.STATE_DEAD and i.enabled != enable:
break
else:
toggle_shells('*', not enable)
for i in selection:
if i.state != remote_dispatcher.STATE_DEAD:
i.set_enabled(enable) | Enable or disable the specified shells. If the command would have
no effect, it changes all other shells to the inverse enable value. | Below is the instruction that describes the task:
### Input:
Enable or disable the specified shells. If the command would have
no effect, it changes all other shells to the inverse enable value.
### Response:
def toggle_shells(command, enable):
    """Enable or disable the specified shells. If the command would have
    no effect, it changes all other shells to the inverse enable value."""
    selection = list(selected_shells(command))
    if command and command != '*' and selection:
        # for/else: the else runs only if NO live shell in the selection
        # would actually change state — i.e. the command is a no-op.  In
        # that case flip every shell to the inverse first, so applying
        # `enable` to the selection below leaves ONLY the selection in the
        # requested state.
        for i in selection:
            if i.state != remote_dispatcher.STATE_DEAD and i.enabled != enable:
                break
        else:
            toggle_shells('*', not enable)
    # apply the requested state to every live shell in the selection
    for i in selection:
        if i.state != remote_dispatcher.STATE_DEAD:
            i.set_enabled(enable)
def get_readable_tasks(self, course):
""" Returns the list of all available tasks in a course """
course_fs = self._filesystem.from_subfolder(course.get_id())
tasks = [
task[0:len(task)-1] # remove trailing /
for task in course_fs.list(folders=True, files=False, recursive=False)
if self._task_file_exists(course_fs.from_subfolder(task))]
return tasks | Returns the list of all available tasks in a course | Below is the the instruction that describes the task:
### Input:
Returns the list of all available tasks in a course
### Response:
def get_readable_tasks(self, course):
    """ Returns the list of all available tasks in a course """
    course_fs = self._filesystem.from_subfolder(course.get_id())
    readable = []
    # list() yields folder names with a trailing "/"; keep only folders
    # that actually contain a task file, and strip that trailing slash.
    for entry in course_fs.list(folders=True, files=False, recursive=False):
        if self._task_file_exists(course_fs.from_subfolder(entry)):
            readable.append(entry[:-1])
    return readable
def _gen_4spec(op, path, value,
create_path=False, xattr=False, _expand_macros=False):
"""
Like `_gen_3spec`, but also accepts a mandatory value as its third argument
:param bool _expand_macros: Whether macros in the value should be expanded.
The macros themselves are defined at the server side
"""
flags = 0
if create_path:
flags |= _P.SDSPEC_F_MKDIR_P
if xattr:
flags |= _P.SDSPEC_F_XATTR
if _expand_macros:
flags |= _P.SDSPEC_F_EXPANDMACROS
return Spec(op, path, flags, value) | Like `_gen_3spec`, but also accepts a mandatory value as its third argument
:param bool _expand_macros: Whether macros in the value should be expanded.
The macros themselves are defined at the server side | Below is the the instruction that describes the task:
### Input:
Like `_gen_3spec`, but also accepts a mandatory value as its third argument
:param bool _expand_macros: Whether macros in the value should be expanded.
The macros themselves are defined at the server side
### Response:
def _gen_4spec(op, path, value,
               create_path=False, xattr=False, _expand_macros=False):
    """
    Like `_gen_3spec`, but also accepts a mandatory value as its third argument

    :param bool _expand_macros: Whether macros in the value should be expanded.
        The macros themselves are defined at the server side
    """
    # Assemble the flag word in one expression: each option contributes
    # its bit only when the corresponding keyword argument is truthy.
    flags = (
        (_P.SDSPEC_F_MKDIR_P if create_path else 0)
        | (_P.SDSPEC_F_XATTR if xattr else 0)
        | (_P.SDSPEC_F_EXPANDMACROS if _expand_macros else 0)
    )
    return Spec(op, path, flags, value)
def reverseComplement(self, isRNA=None):
"""
Reverse complement this sequence in-place.
:param isRNA: if True, treat this sequence as RNA. If False, treat it as
DNA. If None (default), inspect the sequence and make a
guess as to whether it is RNA or DNA.
"""
isRNA_l = self.isRNA() if isRNA is None else isRNA
tmp = ""
for n in self.sequenceData:
if isRNA_l:
tmp += RNA_COMPLEMENTS[n]
else:
tmp += DNA_COMPLEMENTS[n]
self.sequenceData = tmp[::-1] | Reverse complement this sequence in-place.
:param isRNA: if True, treat this sequence as RNA. If False, treat it as
DNA. If None (default), inspect the sequence and make a
guess as to whether it is RNA or DNA. | Below is the the instruction that describes the task:
### Input:
Reverse complement this sequence in-place.
:param isRNA: if True, treat this sequence as RNA. If False, treat it as
DNA. If None (default), inspect the sequence and make a
guess as to whether it is RNA or DNA.
### Response:
def reverseComplement(self, isRNA=None):
    """
    Reverse complement this sequence in-place.

    :param isRNA: if True, treat this sequence as RNA. If False, treat it as
                  DNA. If None (default), inspect the sequence and make a
                  guess as to whether it is RNA or DNA.
    """
    isRNA_l = self.isRNA() if isRNA is None else isRNA
    # Pick the complement lookup table once, then build the complemented
    # string with a single join (linear time) rather than the quadratic
    # repeated `+=` string concatenation, and reverse at the end.
    table = RNA_COMPLEMENTS if isRNA_l else DNA_COMPLEMENTS
    complemented = "".join(table[n] for n in self.sequenceData)
    self.sequenceData = complemented[::-1]
def _get_showcase_dataset_dict(self, dataset):
# type: (Union[hdx.data.dataset.Dataset,Dict,str]) -> Dict
"""Get showcase dataset dict
Args:
showcase (Union[Showcase,Dict,str]): Either a showcase id or Showcase metadata from a Showcase object or dictionary
Returns:
Dict: showcase dataset dict
"""
if isinstance(dataset, hdx.data.dataset.Dataset) or isinstance(dataset, dict):
if 'id' not in dataset:
dataset = hdx.data.dataset.Dataset.read_from_hdx(dataset['name'])
dataset = dataset['id']
elif not isinstance(dataset, str):
raise hdx.data.hdxobject.HDXError('Type %s cannot be added as a dataset!' % type(dataset).__name__)
if is_valid_uuid(dataset) is False:
raise hdx.data.hdxobject.HDXError('%s is not a valid dataset id!' % dataset)
return {'showcase_id': self.data['id'], 'package_id': dataset} | Get showcase dataset dict
Args:
showcase (Union[Showcase,Dict,str]): Either a showcase id or Showcase metadata from a Showcase object or dictionary
Returns:
Dict: showcase dataset dict | Below is the the instruction that describes the task:
### Input:
Get showcase dataset dict
Args:
showcase (Union[Showcase,Dict,str]): Either a showcase id or Showcase metadata from a Showcase object or dictionary
Returns:
Dict: showcase dataset dict
### Response:
def _get_showcase_dataset_dict(self, dataset):
    # type: (Union[hdx.data.dataset.Dataset,Dict,str]) -> Dict
    """Get showcase dataset dict

    Args:
        dataset (Union[Dataset,Dict,str]): Either a dataset id or dataset
            metadata from a Dataset object or dictionary

    Returns:
        Dict: showcase dataset dict linking this showcase's id to the
        dataset's id

    Raises:
        HDXError: if *dataset* is an unsupported type or not a valid
        dataset id (UUID)
    """
    if isinstance(dataset, hdx.data.dataset.Dataset) or isinstance(dataset, dict):
        if 'id' not in dataset:
            # metadata without an id: re-read the dataset by name to get one
            dataset = hdx.data.dataset.Dataset.read_from_hdx(dataset['name'])
        dataset = dataset['id']
    elif not isinstance(dataset, str):
        raise hdx.data.hdxobject.HDXError('Type %s cannot be added as a dataset!' % type(dataset).__name__)
    # at this point `dataset` is a string and must be a UUID dataset id
    if is_valid_uuid(dataset) is False:
        raise hdx.data.hdxobject.HDXError('%s is not a valid dataset id!' % dataset)
    return {'showcase_id': self.data['id'], 'package_id': dataset}
def _remove_last(votes, fpl, cl, ranking):
"""Remove last candidate in IRV voting.
"""
for v in votes:
for r in v:
if r == fpl[-1]:
v.remove(r)
for c in cl:
if c == fpl[-1]:
if c not in ranking:
ranking.append((c, len(ranking) + 1)) | Remove last candidate in IRV voting. | Below is the the instruction that describes the task:
### Input:
Remove last candidate in IRV voting.
### Response:
def _remove_last(votes, fpl, cl, ranking):
"""Remove last candidate in IRV voting.
"""
for v in votes:
for r in v:
if r == fpl[-1]:
v.remove(r)
for c in cl:
if c == fpl[-1]:
if c not in ranking:
ranking.append((c, len(ranking) + 1)) |
def append_pipeline(self, name, *args, **kwargs) -> Pipeline:
"""Add an entry to the pipeline. Defers to :meth:`pybel_tools.pipeline.Pipeline.append`.
:param name: The name of the function
:type name: str or types.FunctionType
:return: This pipeline for fluid query building
"""
return self.pipeline.append(name, *args, **kwargs) | Add an entry to the pipeline. Defers to :meth:`pybel_tools.pipeline.Pipeline.append`.
:param name: The name of the function
:type name: str or types.FunctionType
:return: This pipeline for fluid query building | Below is the the instruction that describes the task:
### Input:
Add an entry to the pipeline. Defers to :meth:`pybel_tools.pipeline.Pipeline.append`.
:param name: The name of the function
:type name: str or types.FunctionType
:return: This pipeline for fluid query building
### Response:
def append_pipeline(self, name, *args, **kwargs) -> Pipeline:
    """Add an entry to the pipeline. Defers to :meth:`pybel_tools.pipeline.Pipeline.append`.

    :param name: The name of the function
    :type name: str or types.FunctionType
    :return: This pipeline for fluid query building
    """
    pipeline = self.pipeline
    return pipeline.append(name, *args, **kwargs)
def format_json(config):
"""format config for lambda exec
"""
with open(config) as fh:
print(json.dumps(yaml.safe_load(fh.read()), indent=2)) | format config for lambda exec | Below is the the instruction that describes the task:
### Input:
format config for lambda exec
### Response:
def format_json(config):
    """format config for lambda exec
    """
    # Parse the YAML config, then re-emit it as pretty-printed JSON.
    with open(config) as handle:
        parsed = yaml.safe_load(handle.read())
    print(json.dumps(parsed, indent=2))
def deleteFile(self, CorpNum, ItemCode, MgtKey, FileID, UserID=None):
""" 첨부파일 삭제
args
CorpNum : 팝빌회원 사업자번호
ItemCode : 명세서 종류 코드
[121 - 거래명세서], [122 - 청구서], [123 - 견적서],
[124 - 발주서], [125 - 입금표], [126 - 영수증]
MgtKey : 파트너 문서관리번호
FileID : 파일아이디, 첨부파일 목록확인(getFiles) API 응답전문의 AttachedFile 변수값
UserID : 팝빌회원 아이디
return
첨부파일 정보 목록 as List
raise
PopbillException
"""
if MgtKey == None or MgtKey == "":
raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.")
if ItemCode == None or ItemCode == "":
raise PopbillException(-99999999, "명세서 종류 코드가 입력되지 않았습니다.")
if FileID == None or FileID == "":
raise PopbillException(-99999999, "파일아이디가 입력되지 않았습니다.")
postData = ''
return self._httppost('/Statement/' + str(ItemCode) + '/' + MgtKey + '/Files/' + FileID, postData, CorpNum,
UserID, 'DELETE') | 첨부파일 삭제
args
CorpNum : 팝빌회원 사업자번호
ItemCode : 명세서 종류 코드
[121 - 거래명세서], [122 - 청구서], [123 - 견적서],
[124 - 발주서], [125 - 입금표], [126 - 영수증]
MgtKey : 파트너 문서관리번호
FileID : 파일아이디, 첨부파일 목록확인(getFiles) API 응답전문의 AttachedFile 변수값
UserID : 팝빌회원 아이디
return
첨부파일 정보 목록 as List
raise
PopbillException | Below is the instruction that describes the task:
### Input:
첨부파일 삭제
args
CorpNum : 팝빌회원 사업자번호
ItemCode : 명세서 종류 코드
[121 - 거래명세서], [122 - 청구서], [123 - 견적서],
[124 - 발주서], [125 - 입금표], [126 - 영수증]
MgtKey : 파트너 문서관리번호
FileID : 파일아이디, 첨부파일 목록확인(getFiles) API 응답전문의 AttachedFile 변수값
UserID : 팝빌회원 아이디
return
첨부파일 정보 목록 as List
raise
PopbillException
### Response:
def deleteFile(self, CorpNum, ItemCode, MgtKey, FileID, UserID=None):
    """ Delete an attached file.

    args
        CorpNum : Popbill member business registration number
        ItemCode : statement type code
            [121 - transaction statement], [122 - bill], [123 - estimate],
            [124 - purchase order], [125 - deposit slip], [126 - receipt]
        MgtKey : partner document management key
        FileID : file id; the AttachedFile value from the attached-file
            list (getFiles) API response
        UserID : Popbill member user id
    return
        attached file information list as List
    raise
        PopbillException
    """
    # Validate required arguments before issuing the HTTP request.
    # (identity comparison with None instead of `== None`; the Korean
    # error messages are part of the API contract and kept as-is)
    if MgtKey is None or MgtKey == "":
        raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.")
    if ItemCode is None or ItemCode == "":
        raise PopbillException(-99999999, "명세서 종류 코드가 입력되지 않았습니다.")
    if FileID is None or FileID == "":
        raise PopbillException(-99999999, "파일아이디가 입력되지 않았습니다.")
    postData = ''
    return self._httppost('/Statement/' + str(ItemCode) + '/' + MgtKey + '/Files/' + FileID, postData, CorpNum,
                          UserID, 'DELETE')
def ReadTrigger(self, trigger_link, options=None):
"""Reads a trigger.
:param str trigger_link:
The link to the trigger.
:param dict options:
The request options for the request.
:return:
The read Trigger.
:rtype:
dict
"""
if options is None:
options = {}
path = base.GetPathFromLink(trigger_link)
trigger_id = base.GetResourceIdOrFullNameFromLink(trigger_link)
return self.Read(path, 'triggers', trigger_id, None, options) | Reads a trigger.
:param str trigger_link:
The link to the trigger.
:param dict options:
The request options for the request.
:return:
The read Trigger.
:rtype:
dict | Below is the the instruction that describes the task:
### Input:
Reads a trigger.
:param str trigger_link:
The link to the trigger.
:param dict options:
The request options for the request.
:return:
The read Trigger.
:rtype:
dict
### Response:
def ReadTrigger(self, trigger_link, options=None):
    """Reads a trigger.

    :param str trigger_link:
        The link to the trigger.
    :param dict options:
        The request options for the request.
    :return:
        The read Trigger.
    :rtype:
        dict
    """
    request_options = {} if options is None else options
    path = base.GetPathFromLink(trigger_link)
    trigger_id = base.GetResourceIdOrFullNameFromLink(trigger_link)
    return self.Read(path, 'triggers', trigger_id, None, request_options)
def mapToPixel(mX, mY, geoTransform):
"""Convert map coordinates to pixel coordinates based on geotransform
Accepts float or NumPy arrays
GDAL model used here - upper left corner of upper left pixel for mX, mY (and in GeoTransform)
"""
mX = np.asarray(mX)
mY = np.asarray(mY)
if geoTransform[2] + geoTransform[4] == 0:
pX = ((mX - geoTransform[0]) / geoTransform[1]) - 0.5
pY = ((mY - geoTransform[3]) / geoTransform[5]) - 0.5
else:
pX, pY = applyGeoTransform(mX, mY, invertGeoTransform(geoTransform))
#return int(pX), int(pY)
return pX, pY | Convert map coordinates to pixel coordinates based on geotransform
Accepts float or NumPy arrays
GDAL model used here - upper left corner of upper left pixel for mX, mY (and in GeoTransform) | Below is the the instruction that describes the task:
### Input:
Convert map coordinates to pixel coordinates based on geotransform
Accepts float or NumPy arrays
GDAL model used here - upper left corner of upper left pixel for mX, mY (and in GeoTransform)
### Response:
def mapToPixel(mX, mY, geoTransform):
    """Convert map coordinates to pixel coordinates based on geotransform.

    Accepts float or NumPy arrays.

    GDAL model used here - upper left corner of upper left pixel for mX, mY
    (and in GeoTransform). The returned coordinates are floats referenced
    to pixel centers (hence the -0.5 shift); callers needing integer pixel
    indices must round/truncate themselves.

    :param mX: map x coordinate(s)
    :param mY: map y coordinate(s)
    :param geoTransform: 6-element GDAL geotransform
    :return: (pX, pY) pixel coordinates
    """
    mX = np.asarray(mX)
    mY = np.asarray(mY)
    if geoTransform[2] + geoTransform[4] == 0:
        # Axis-aligned geotransform (no rotation/shear): invert directly.
        pX = ((mX - geoTransform[0]) / geoTransform[1]) - 0.5
        pY = ((mY - geoTransform[3]) / geoTransform[5]) - 0.5
    else:
        # Rotated/sheared geotransform: apply the full inverse transform.
        pX, pY = applyGeoTransform(mX, mY, invertGeoTransform(geoTransform))
    return pX, pY
def simulate_paths(is_returns, num_days,
starting_value=1, num_samples=1000, random_seed=None):
"""
Gnerate alternate paths using available values from in-sample returns.
Parameters
----------
is_returns : pandas.core.frame.DataFrame
Non-cumulative in-sample returns.
num_days : int
Number of days to project the probability cone forward.
starting_value : int or float
Starting value of the out of sample period.
num_samples : int
Number of samples to draw from the in-sample daily returns.
Each sample will be an array with length num_days.
A higher number of samples will generate a more accurate
bootstrap cone.
random_seed : int
Seed for the pseudorandom number generator used by the pandas
sample method.
Returns
-------
samples : numpy.ndarray
"""
samples = np.empty((num_samples, num_days))
seed = np.random.RandomState(seed=random_seed)
for i in range(num_samples):
samples[i, :] = is_returns.sample(num_days, replace=True,
random_state=seed)
return samples | Gnerate alternate paths using available values from in-sample returns.
Parameters
----------
is_returns : pandas.core.frame.DataFrame
Non-cumulative in-sample returns.
num_days : int
Number of days to project the probability cone forward.
starting_value : int or float
Starting value of the out of sample period.
num_samples : int
Number of samples to draw from the in-sample daily returns.
Each sample will be an array with length num_days.
A higher number of samples will generate a more accurate
bootstrap cone.
random_seed : int
Seed for the pseudorandom number generator used by the pandas
sample method.
Returns
-------
samples : numpy.ndarray | Below is the the instruction that describes the task:
### Input:
Generate alternate paths using available values from in-sample returns.
Parameters
----------
is_returns : pandas.core.frame.DataFrame
Non-cumulative in-sample returns.
num_days : int
Number of days to project the probability cone forward.
starting_value : int or float
Starting value of the out of sample period.
num_samples : int
Number of samples to draw from the in-sample daily returns.
Each sample will be an array with length num_days.
A higher number of samples will generate a more accurate
bootstrap cone.
random_seed : int
Seed for the pseudorandom number generator used by the pandas
sample method.
Returns
-------
samples : numpy.ndarray
### Response:
def simulate_paths(is_returns, num_days,
                   starting_value=1, num_samples=1000, random_seed=None):
    """
    Generate alternate paths using available values from in-sample returns.

    Parameters
    ----------
    is_returns : pandas.core.frame.DataFrame
        Non-cumulative in-sample returns.
    num_days : int
        Number of days to project the probability cone forward.
    starting_value : int or float
        Starting value of the out of sample period.
        (Currently unused by this function; retained for interface
        compatibility — TODO confirm whether callers rely on it.)
    num_samples : int
        Number of samples to draw from the in-sample daily returns.
        Each sample will be an array with length num_days.
        A higher number of samples will generate a more accurate
        bootstrap cone.
    random_seed : int
        Seed for the pseudorandom number generator used by the pandas
        sample method.

    Returns
    -------
    samples : numpy.ndarray
        Array of shape (num_samples, num_days) of bootstrapped returns.
    """
    samples = np.empty((num_samples, num_days))
    rng = np.random.RandomState(seed=random_seed)
    for i in range(num_samples):
        # Bootstrap one path: num_days daily returns drawn with replacement.
        samples[i, :] = is_returns.sample(num_days, replace=True,
                                          random_state=rng)
    return samples
def write_and_return(
command, ack, serial_connection, timeout=DEFAULT_WRITE_TIMEOUT):
'''Write a command and return the response'''
clear_buffer(serial_connection)
with serial_with_temp_timeout(
serial_connection, timeout) as device_connection:
response = _write_to_device_and_return(command, ack, device_connection)
return response | Write a command and return the response | Below is the the instruction that describes the task:
### Input:
Write a command and return the response
### Response:
def write_and_return(
        command, ack, serial_connection, timeout=DEFAULT_WRITE_TIMEOUT):
    '''Send a command over the serial connection and return the reply.

    The input buffer is cleared first, and the write happens under a
    temporary timeout that is restored when the context exits.
    '''
    clear_buffer(serial_connection)
    with serial_with_temp_timeout(
            serial_connection, timeout) as device_connection:
        return _write_to_device_and_return(command, ack, device_connection)
def fluence(
power_mW,
color,
beam_radius,
reprate_Hz,
pulse_width,
color_units="wn",
beam_radius_units="mm",
pulse_width_units="fs_t",
area_type="even",
) -> tuple:
"""Calculate the fluence of a beam.
Parameters
----------
power_mW : number
Time integrated power of beam.
color : number
Color of beam in units.
beam_radius : number
Radius of beam in units.
reprate_Hz : number
Laser repetition rate in inverse seconds (Hz).
pulse_width : number
Pulsewidth of laser in units
color_units : string (optional)
Valid wt.units color unit identifier. Default is wn.
beam_radius_units : string (optional)
Valid wt.units distance unit identifier. Default is mm.
pulse_width_units : number
Valid wt.units time unit identifier. Default is fs.
area_type : string (optional)
Type of calculation to accomplish for Gaussian area.
even specfies a flat-top calculation
average specifies a Gaussian average within the FWHM
Default is even.
Returns
-------
tuple
Fluence in uj/cm2, photons/cm2, and peak intensity in GW/cm2
"""
# calculate beam area
if area_type == "even":
radius_cm = wt_units.converter(beam_radius, beam_radius_units, "cm")
area_cm2 = np.pi * radius_cm ** 2 # cm^2
elif area_type == "average":
radius_cm = wt_units.converter(beam_radius, beam_radius_units, "cm")
area_cm2 = np.pi * radius_cm ** 2 # cm^2
area_cm2 /= 0.7213 # weight by average intensity felt by oscillator inside of FWHM
else:
raise NotImplementedError
# calculate fluence in uj/cm^2
ujcm2 = power_mW / reprate_Hz # mJ
ujcm2 *= 1e3 # uJ
ujcm2 /= area_cm2 # uJ/cm^2
# calculate fluence in photons/cm^2
energy = wt_units.converter(color, color_units, "eV") # eV
photonscm2 = ujcm2 * 1e-6 # J/cm2
photonscm2 /= 1.60218e-19 # eV/cm2
photonscm2 /= energy # photons/cm2
# calculate peak intensity in GW/cm^2
pulse_width_s = wt_units.converter(pulse_width, pulse_width_units, "s_t") # seconds
GWcm2 = ujcm2 / 1e6 # J/cm2
GWcm2 /= pulse_width_s # W/cm2
GWcm2 /= 1e9
# finish
return ujcm2, photonscm2, GWcm2 | Calculate the fluence of a beam.
Parameters
----------
power_mW : number
Time integrated power of beam.
color : number
Color of beam in units.
beam_radius : number
Radius of beam in units.
reprate_Hz : number
Laser repetition rate in inverse seconds (Hz).
pulse_width : number
Pulsewidth of laser in units
color_units : string (optional)
Valid wt.units color unit identifier. Default is wn.
beam_radius_units : string (optional)
Valid wt.units distance unit identifier. Default is mm.
pulse_width_units : number
Valid wt.units time unit identifier. Default is fs.
area_type : string (optional)
Type of calculation to accomplish for Gaussian area.
even specfies a flat-top calculation
average specifies a Gaussian average within the FWHM
Default is even.
Returns
-------
tuple
Fluence in uj/cm2, photons/cm2, and peak intensity in GW/cm2 | Below is the the instruction that describes the task:
### Input:
Calculate the fluence of a beam.
Parameters
----------
power_mW : number
Time integrated power of beam.
color : number
Color of beam in units.
beam_radius : number
Radius of beam in units.
reprate_Hz : number
Laser repetition rate in inverse seconds (Hz).
pulse_width : number
Pulsewidth of laser in units
color_units : string (optional)
Valid wt.units color unit identifier. Default is wn.
beam_radius_units : string (optional)
Valid wt.units distance unit identifier. Default is mm.
pulse_width_units : number
Valid wt.units time unit identifier. Default is fs.
area_type : string (optional)
Type of calculation to accomplish for Gaussian area.
even specfies a flat-top calculation
average specifies a Gaussian average within the FWHM
Default is even.
Returns
-------
tuple
Fluence in uj/cm2, photons/cm2, and peak intensity in GW/cm2
### Response:
def fluence(
    power_mW,
    color,
    beam_radius,
    reprate_Hz,
    pulse_width,
    color_units="wn",
    beam_radius_units="mm",
    pulse_width_units="fs_t",
    area_type="even",
) -> tuple:
    """Calculate the fluence of a beam.
    Parameters
    ----------
    power_mW : number
        Time integrated power of beam.
    color : number
        Color of beam in units.
    beam_radius : number
        Radius of beam in units.
    reprate_Hz : number
        Laser repetition rate in inverse seconds (Hz).
    pulse_width : number
        Pulsewidth of laser in units
    color_units : string (optional)
        Valid wt.units color unit identifier. Default is wn.
    beam_radius_units : string (optional)
        Valid wt.units distance unit identifier. Default is mm.
    pulse_width_units : number
        Valid wt.units time unit identifier. Default is fs.
    area_type : string (optional)
        Type of calculation to accomplish for Gaussian area.
        even specfies a flat-top calculation
        average specifies a Gaussian average within the FWHM
        Default is even.
    Returns
    -------
    tuple
        Fluence in uj/cm2, photons/cm2, and peak intensity in GW/cm2
    Raises
    ------
    NotImplementedError
        If ``area_type`` is neither "even" nor "average".
    """
    # calculate beam area
    if area_type == "even":
        radius_cm = wt_units.converter(beam_radius, beam_radius_units, "cm")
        area_cm2 = np.pi * radius_cm ** 2  # cm^2
    elif area_type == "average":
        radius_cm = wt_units.converter(beam_radius, beam_radius_units, "cm")
        area_cm2 = np.pi * radius_cm ** 2  # cm^2
        # 0.7213 presumably the Gaussian average/peak intensity ratio
        # within the FWHM — TODO confirm derivation
        area_cm2 /= 0.7213  # weight by average intensity felt by oscillator inside of FWHM
    else:
        raise NotImplementedError
    # calculate fluence in uj/cm^2
    ujcm2 = power_mW / reprate_Hz  # mJ
    ujcm2 *= 1e3  # uJ
    ujcm2 /= area_cm2  # uJ/cm^2
    # calculate fluence in photons/cm^2
    energy = wt_units.converter(color, color_units, "eV")  # eV
    photonscm2 = ujcm2 * 1e-6  # J/cm2
    photonscm2 /= 1.60218e-19  # eV/cm2 (divide by electron charge in J/eV)
    photonscm2 /= energy  # photons/cm2
    # calculate peak intensity in GW/cm^2
    pulse_width_s = wt_units.converter(pulse_width, pulse_width_units, "s_t")  # seconds
    GWcm2 = ujcm2 / 1e6  # J/cm2
    GWcm2 /= pulse_width_s  # W/cm2
    GWcm2 /= 1e9  # GW/cm2
    # finish
    return ujcm2, photonscm2, GWcm2
def find_point_in_section_list(point, section_list):
"""Returns the start of the section the given point belongs to.
The given list is assumed to contain start points of consecutive
sections, except for the final point, assumed to be the end point of the
last section. For example, the list [5, 8, 30, 31] is interpreted as the
following list of sections: [5-8), [8-30), [30-31], so the points -32, 4.5,
32 and 100 all match no section, while 5 and 7.5 match [5-8) and so for
them the function returns 5, and 30, 30.7 and 31 all match [30-31].
Parameters
---------
point : float
The point for which to match a section.
section_list : sortedcontainers.SortedList
A list of start points of consecutive sections.
Returns
-------
float
The start of the section the given point belongs to. None if no match
was found.
Example
-------
>>> from sortedcontainers import SortedList
>>> seclist = SortedList([5, 8, 30, 31])
>>> find_point_in_section_list(4, seclist)
>>> find_point_in_section_list(5, seclist)
5
>>> find_point_in_section_list(27, seclist)
8
>>> find_point_in_section_list(31, seclist)
30
"""
if point < section_list[0] or point > section_list[-1]:
return None
if point in section_list:
if point == section_list[-1]:
return section_list[-2]
ind = section_list.bisect(point)-1
if ind == 0:
return section_list[0]
return section_list[ind]
try:
ind = section_list.bisect(point)
return section_list[ind-1]
except IndexError:
return None | Returns the start of the section the given point belongs to.
The given list is assumed to contain start points of consecutive
sections, except for the final point, assumed to be the end point of the
last section. For example, the list [5, 8, 30, 31] is interpreted as the
following list of sections: [5-8), [8-30), [30-31], so the points -32, 4.5,
32 and 100 all match no section, while 5 and 7.5 match [5-8) and so for
them the function returns 5, and 30, 30.7 and 31 all match [30-31].
Parameters
---------
point : float
The point for which to match a section.
section_list : sortedcontainers.SortedList
A list of start points of consecutive sections.
Returns
-------
float
The start of the section the given point belongs to. None if no match
was found.
Example
-------
>>> from sortedcontainers import SortedList
>>> seclist = SortedList([5, 8, 30, 31])
>>> find_point_in_section_list(4, seclist)
>>> find_point_in_section_list(5, seclist)
5
>>> find_point_in_section_list(27, seclist)
8
>>> find_point_in_section_list(31, seclist)
30 | Below is the the instruction that describes the task:
### Input:
Returns the start of the section the given point belongs to.
The given list is assumed to contain start points of consecutive
sections, except for the final point, assumed to be the end point of the
last section. For example, the list [5, 8, 30, 31] is interpreted as the
following list of sections: [5-8), [8-30), [30-31], so the points -32, 4.5,
32 and 100 all match no section, while 5 and 7.5 match [5-8) and so for
them the function returns 5, and 30, 30.7 and 31 all match [30-31].
Parameters
---------
point : float
The point for which to match a section.
section_list : sortedcontainers.SortedList
A list of start points of consecutive sections.
Returns
-------
float
The start of the section the given point belongs to. None if no match
was found.
Example
-------
>>> from sortedcontainers import SortedList
>>> seclist = SortedList([5, 8, 30, 31])
>>> find_point_in_section_list(4, seclist)
>>> find_point_in_section_list(5, seclist)
5
>>> find_point_in_section_list(27, seclist)
8
>>> find_point_in_section_list(31, seclist)
30
### Response:
def find_point_in_section_list(point, section_list):
"""Returns the start of the section the given point belongs to.
The given list is assumed to contain start points of consecutive
sections, except for the final point, assumed to be the end point of the
last section. For example, the list [5, 8, 30, 31] is interpreted as the
following list of sections: [5-8), [8-30), [30-31], so the points -32, 4.5,
32 and 100 all match no section, while 5 and 7.5 match [5-8) and so for
them the function returns 5, and 30, 30.7 and 31 all match [30-31].
Parameters
---------
point : float
The point for which to match a section.
section_list : sortedcontainers.SortedList
A list of start points of consecutive sections.
Returns
-------
float
The start of the section the given point belongs to. None if no match
was found.
Example
-------
>>> from sortedcontainers import SortedList
>>> seclist = SortedList([5, 8, 30, 31])
>>> find_point_in_section_list(4, seclist)
>>> find_point_in_section_list(5, seclist)
5
>>> find_point_in_section_list(27, seclist)
8
>>> find_point_in_section_list(31, seclist)
30
"""
if point < section_list[0] or point > section_list[-1]:
return None
if point in section_list:
if point == section_list[-1]:
return section_list[-2]
ind = section_list.bisect(point)-1
if ind == 0:
return section_list[0]
return section_list[ind]
try:
ind = section_list.bisect(point)
return section_list[ind-1]
except IndexError:
return None |
def CreateString(self, s, encoding='utf-8', errors='strict'):
    """CreateString writes a null-terminated byte string as a vector.

    `s` may be text (encoded using `encoding`/`errors`) or raw bytes; the
    encoded payload is written followed by a single NUL byte.
    """
    self.assertNotNested()
    ## @cond FLATBUFFERS_INTERNAL
    self.nested = True
    ## @endcond
    if isinstance(s, compat.string_types):
        x = s.encode(encoding, errors)
    elif isinstance(s, compat.binary_types):
        x = s
    else:
        raise TypeError("non-string passed to CreateString")
    # Reserve room for the encoded payload plus its NUL terminator.
    self.Prep(N.UOffsetTFlags.bytewidth, (len(x)+1)*N.Uint8Flags.bytewidth)
    self.Place(0, N.Uint8Flags)
    # Bug fix: measure the *encoded* byte length, not len(s). For non-ASCII
    # text the character count differs from the UTF-8 byte count, so the
    # slice assignment below would be fed a payload of the wrong size.
    l = UOffsetTFlags.py_type(len(x))
    ## @cond FLATBUFFERS_INTERNAL
    self.head = UOffsetTFlags.py_type(self.Head() - l)
    ## @endcond
    self.Bytes[self.Head():self.Head()+l] = x
return self.EndVector(len(x)) | CreateString writes a null-terminated byte string as a vector. | Below is the the instruction that describes the task:
### Input:
CreateString writes a null-terminated byte string as a vector.
### Response:
def CreateString(self, s, encoding='utf-8', errors='strict'):
"""CreateString writes a null-terminated byte string as a vector."""
self.assertNotNested()
## @cond FLATBUFFERS_INTERNAL
self.nested = True
## @endcond
if isinstance(s, compat.string_types):
x = s.encode(encoding, errors)
elif isinstance(s, compat.binary_types):
x = s
else:
raise TypeError("non-string passed to CreateString")
self.Prep(N.UOffsetTFlags.bytewidth, (len(x)+1)*N.Uint8Flags.bytewidth)
self.Place(0, N.Uint8Flags)
l = UOffsetTFlags.py_type(len(s))
## @cond FLATBUFFERS_INTERNAL
self.head = UOffsetTFlags.py_type(self.Head() - l)
## @endcond
self.Bytes[self.Head():self.Head()+l] = x
return self.EndVector(len(x)) |
def draw_scatter_plot(world, size, target):
    """ This function can be used on a generic canvas (either an image to save
    on disk or a canvas part of a GUI)

    Draws a temperature/humidity scatter chart for the given world: grey
    boxes over the "impossible" regions, black grid lines at the temperature
    thresholds and humidity quantiles, the world's gamma curve in red, and
    one point per land cell coloured by its temperature class (red channel)
    and humidity class (blue channel).

    world: object exposing the layer/classification API used below
        (``layers``, ``thresholds``/``quantiles``, ``is_land``,
        ``is_temperature_*``, ``is_humidity_*``).
    size: width and height, in pixels, of the (square) chart.
    target: canvas-like object exposing ``set_pixel(x, y, rgba)``.
    """
    #Find min and max values of humidity and temperature on land so we can
    #normalize temperature and humidity to the chart
    # Ocean cells are masked out so the extrema come from land only.
    humid = numpy.ma.masked_array(world.layers['humidity'].data, mask=world.layers['ocean'].data)
    temp = numpy.ma.masked_array(world.layers['temperature'].data, mask=world.layers['ocean'].data)
    min_humidity = humid.min()
    max_humidity = humid.max()
    min_temperature = temp.min()
    max_temperature = temp.max()
    temperature_delta = max_temperature - min_temperature
    humidity_delta = max_humidity - min_humidity
    #set all pixels white
    for y in range(0, size):
        for x in range(0, size):
            target.set_pixel(x, y, (255, 255, 255, 255))
    #fill in 'bad' boxes with grey
    # Each grey box spans one humidity quantile band (rows) up to the
    # matching temperature threshold (columns).
    h_values = ['62', '50', '37', '25', '12']
    t_values = [ 0, 1, 2, 3, 5 ]
    for loop in range(0, 5):
        h_min = (size - 1) * ((world.layers['humidity'].quantiles[h_values[loop]] - min_humidity) / humidity_delta)
        if loop != 4:
            h_max = (size - 1) * ((world.layers['humidity'].quantiles[h_values[loop + 1]] - min_humidity) / humidity_delta)
        else:
            h_max = size
        v_max = (size - 1) * ((world.layers['temperature'].thresholds[t_values[loop]][1] - min_temperature) / temperature_delta)
        # Clamp the box to the chart bounds before painting.
        if h_min < 0:
            h_min = 0
        if h_max > size:
            h_max = size
        if v_max < 0:
            v_max = 0
        if v_max > (size - 1):
            v_max = size - 1
        if h_max > 0 and h_min < size and v_max > 0:
            for y in range(int(h_min), int(h_max)):
                for x in range(0, int(v_max)):
                    target.set_pixel(x, (size - 1) - y, (128, 128, 128, 255))
    #draw lines based on thresholds
    # Vertical lines at temperature thresholds, horizontal lines at humidity
    # quantiles; the y axis is flipped so larger values are drawn higher up.
    for t in range(0, 6):
        v = (size - 1) * ((world.layers['temperature'].thresholds[t][1] - min_temperature) / temperature_delta)
        if 0 < v < size:
            for y in range(0, size):
                target.set_pixel(int(v), (size - 1) - y, (0, 0, 0, 255))
    ranges = ['87', '75', '62', '50', '37', '25', '12']
    for p in ranges:
        h = (size - 1) * ((world.layers['humidity'].quantiles[p] - min_humidity) / humidity_delta)
        if 0 < h < size:
            for x in range(0, size):
                target.set_pixel(x, (size - 1) - int(h), (0, 0, 0, 255))
    #draw gamma curve
    curve_gamma = world.gamma_curve
    curve_bonus = world.curve_offset
    for x in range(0, size):
        y = (size - 1) * ((numpy.power((float(x) / (size - 1)), curve_gamma) * (1 - curve_bonus)) + curve_bonus)
        target.set_pixel(x, (size - 1) - int(y), (255, 0, 0, 255))
    #examine all cells in the map and if it is land get the temperature and
    #humidity for the cell.
    for y in range(world.height):
        for x in range(world.width):
            if world.is_land((x, y)):
                t = world.temperature_at((x, y))
                p = world.humidity_at((x, y))
                #get red and blue values depending on temperature and humidity
                # NOTE(review): r and b are only assigned when one of the
                # branches below matches; if the classification is not
                # exhaustive, the first land cell would raise NameError and
                # later cells would silently reuse the previous cell's
                # colour -- confirm the classes cover every land cell.
                if world.is_temperature_polar((x, y)):
                    r = 0
                elif world.is_temperature_alpine((x, y)):
                    r = 42
                elif world.is_temperature_boreal((x, y)):
                    r = 85
                elif world.is_temperature_cool((x, y)):
                    r = 128
                elif world.is_temperature_warm((x, y)):
                    r = 170
                elif world.is_temperature_subtropical((x, y)):
                    r = 213
                elif world.is_temperature_tropical((x, y)):
                    r = 255
                if world.is_humidity_superarid((x, y)):
                    b = 32
                elif world.is_humidity_perarid((x, y)):
                    b = 64
                elif world.is_humidity_arid((x, y)):
                    b = 96
                elif world.is_humidity_semiarid((x, y)):
                    b = 128
                elif world.is_humidity_subhumid((x, y)):
                    b = 160
                elif world.is_humidity_humid((x, y)):
                    b = 192
                elif world.is_humidity_perhumid((x, y)):
                    b = 224
                elif world.is_humidity_superhumid((x, y)):
                    b = 255
                #calculate x and y position based on normalized temperature and humidity
                nx = (size - 1) * ((t - min_temperature) / temperature_delta)
                ny = (size - 1) * ((p - min_humidity) / humidity_delta)
                target.set_pixel(int(nx), (size - 1) - int(ny), (r, 128, b, 255)) | This function can be used on a generic canvas (either an image to save
on disk or a canvas part of a GUI) | Below is the the instruction that describes the task:
### Input:
This function can be used on a generic canvas (either an image to save
on disk or a canvas part of a GUI)
### Response:
def draw_scatter_plot(world, size, target):
""" This function can be used on a generic canvas (either an image to save
on disk or a canvas part of a GUI)
"""
#Find min and max values of humidity and temperature on land so we can
#normalize temperature and humidity to the chart
humid = numpy.ma.masked_array(world.layers['humidity'].data, mask=world.layers['ocean'].data)
temp = numpy.ma.masked_array(world.layers['temperature'].data, mask=world.layers['ocean'].data)
min_humidity = humid.min()
max_humidity = humid.max()
min_temperature = temp.min()
max_temperature = temp.max()
temperature_delta = max_temperature - min_temperature
humidity_delta = max_humidity - min_humidity
#set all pixels white
for y in range(0, size):
for x in range(0, size):
target.set_pixel(x, y, (255, 255, 255, 255))
#fill in 'bad' boxes with grey
h_values = ['62', '50', '37', '25', '12']
t_values = [ 0, 1, 2, 3, 5 ]
for loop in range(0, 5):
h_min = (size - 1) * ((world.layers['humidity'].quantiles[h_values[loop]] - min_humidity) / humidity_delta)
if loop != 4:
h_max = (size - 1) * ((world.layers['humidity'].quantiles[h_values[loop + 1]] - min_humidity) / humidity_delta)
else:
h_max = size
v_max = (size - 1) * ((world.layers['temperature'].thresholds[t_values[loop]][1] - min_temperature) / temperature_delta)
if h_min < 0:
h_min = 0
if h_max > size:
h_max = size
if v_max < 0:
v_max = 0
if v_max > (size - 1):
v_max = size - 1
if h_max > 0 and h_min < size and v_max > 0:
for y in range(int(h_min), int(h_max)):
for x in range(0, int(v_max)):
target.set_pixel(x, (size - 1) - y, (128, 128, 128, 255))
#draw lines based on thresholds
for t in range(0, 6):
v = (size - 1) * ((world.layers['temperature'].thresholds[t][1] - min_temperature) / temperature_delta)
if 0 < v < size:
for y in range(0, size):
target.set_pixel(int(v), (size - 1) - y, (0, 0, 0, 255))
ranges = ['87', '75', '62', '50', '37', '25', '12']
for p in ranges:
h = (size - 1) * ((world.layers['humidity'].quantiles[p] - min_humidity) / humidity_delta)
if 0 < h < size:
for x in range(0, size):
target.set_pixel(x, (size - 1) - int(h), (0, 0, 0, 255))
#draw gamma curve
curve_gamma = world.gamma_curve
curve_bonus = world.curve_offset
for x in range(0, size):
y = (size - 1) * ((numpy.power((float(x) / (size - 1)), curve_gamma) * (1 - curve_bonus)) + curve_bonus)
target.set_pixel(x, (size - 1) - int(y), (255, 0, 0, 255))
#examine all cells in the map and if it is land get the temperature and
#humidity for the cell.
for y in range(world.height):
for x in range(world.width):
if world.is_land((x, y)):
t = world.temperature_at((x, y))
p = world.humidity_at((x, y))
#get red and blue values depending on temperature and humidity
if world.is_temperature_polar((x, y)):
r = 0
elif world.is_temperature_alpine((x, y)):
r = 42
elif world.is_temperature_boreal((x, y)):
r = 85
elif world.is_temperature_cool((x, y)):
r = 128
elif world.is_temperature_warm((x, y)):
r = 170
elif world.is_temperature_subtropical((x, y)):
r = 213
elif world.is_temperature_tropical((x, y)):
r = 255
if world.is_humidity_superarid((x, y)):
b = 32
elif world.is_humidity_perarid((x, y)):
b = 64
elif world.is_humidity_arid((x, y)):
b = 96
elif world.is_humidity_semiarid((x, y)):
b = 128
elif world.is_humidity_subhumid((x, y)):
b = 160
elif world.is_humidity_humid((x, y)):
b = 192
elif world.is_humidity_perhumid((x, y)):
b = 224
elif world.is_humidity_superhumid((x, y)):
b = 255
#calculate x and y position based on normalized temperature and humidity
nx = (size - 1) * ((t - min_temperature) / temperature_delta)
ny = (size - 1) * ((p - min_humidity) / humidity_delta)
target.set_pixel(int(nx), (size - 1) - int(ny), (r, 128, b, 255)) |
def search_task_views(self, user, search_string):
        """
        invokes TouchWorksMagicConstants.ACTION_SEARCH_TASK_VIEWS action
        (the action actually passed below -- not
        ACTION_GET_ENCOUNTER_LIST_FOR_PATIENT)
        :param user: sent as parameter1 of the magic call -- presumably the
            user whose task views are searched; confirm against the
            TouchWorks API docs
        :param search_string: sent as parameter2; the search filter text
        :return: JSON response
        """
        magic = self._magic_json(
            action=TouchWorksMagicConstants.ACTION_SEARCH_TASK_VIEWS,
            parameter1=user,
            parameter2=search_string)
        response = self._http_request(TouchWorksEndPoints.MAGIC_JSON, data=magic)
        # Raises if the magic response is invalid; otherwise extracts the
        # RESULT_SEARCH_TASK_VIEWS payload.
        result = self._get_results_or_raise_if_magic_invalid(
            magic,
            response,
            TouchWorksMagicConstants.RESULT_SEARCH_TASK_VIEWS)
        return result | invokes TouchWorksMagicConstants.ACTION_GET_ENCOUNTER_LIST_FOR_PATIENT action
:return: JSON response | Below is the instruction that describes the task:
### Input:
invokes TouchWorksMagicConstants.ACTION_GET_ENCOUNTER_LIST_FOR_PATIENT action
:return: JSON response
### Response:
def search_task_views(self, user, search_string):
"""
invokes TouchWorksMagicConstants.ACTION_GET_ENCOUNTER_LIST_FOR_PATIENT action
:return: JSON response
"""
magic = self._magic_json(
action=TouchWorksMagicConstants.ACTION_SEARCH_TASK_VIEWS,
parameter1=user,
parameter2=search_string)
response = self._http_request(TouchWorksEndPoints.MAGIC_JSON, data=magic)
result = self._get_results_or_raise_if_magic_invalid(
magic,
response,
TouchWorksMagicConstants.RESULT_SEARCH_TASK_VIEWS)
return result |
def run(self, message_id, **kwargs):
        """
        Load and construct message and send them off

        Looks up the Outbound message by ``message_id``, resolves the
        destination address/identity pair, sends the content through the
        message's channel (voice, image or plain text depending on
        ``metadata``) and fires metrics. Transport failures are retried with
        a delay, with ``error_retry_count`` carried in ``kwargs`` to drive
        backoff and a hard cap.

        :param message_id: primary key of the Outbound message to send
        """
        log = self.get_logger(**kwargs)
        # Hard cap on transport-error retries (tracked separately from
        # message.attempts, which counts actual send attempts).
        error_retry_count = kwargs.get("error_retry_count", 0)
        if error_retry_count >= self.max_error_retries:
            raise MaxRetriesExceededError(
                "Can't retry {0}[{1}] args:{2} kwargs:{3}".format(
                    self.name, self.request.id, self.request.args, kwargs
                )
            )
        log.info("Loading Outbound Message <%s>" % message_id)
        try:
            message = Outbound.objects.select_related("channel").get(id=message_id)
        except ObjectDoesNotExist:
            # NOTE(review): uses the module-level ``logger`` here rather than
            # the task logger ``log`` obtained above -- confirm intentional.
            logger.error("Missing Outbound message", exc_info=True)
            return
        # Only (re)send while the per-message attempt budget lasts.
        if message.attempts < settings.MESSAGE_SENDER_MAX_RETRIES:
            if error_retry_count > 0:
                retry_delay = calculate_retry_delay(error_retry_count)
            else:
                retry_delay = self.default_retry_delay
            log.info("Attempts: %s" % message.attempts)
            # send or resend
            try:
                if not message.channel:
                    channel = Channel.objects.get(default=True)
                else:
                    channel = message.channel
                sender = self.get_client(channel)
                ConcurrencyLimiter.manage_limit(self, channel)
                # Make sure we have both an address and an identity for the
                # recipient, looking either one up from the other.
                if not message.to_addr and message.to_identity:
                    message.to_addr = get_identity_address(
                        message.to_identity, use_communicate_through=True
                    )
                    if not message.to_addr:
                        self.fire_failed_msisdn_lookup(message.to_identity)
                        return
                if message.to_addr and not message.to_identity:
                    result = get_identity_by_address(message.to_addr)
                    if result:
                        message.to_identity = result[0]["id"]
                    else:
                        identity = {
                            "details": {
                                "default_addr_type": "msisdn",
                                "addresses": {
                                    "msisdn": {message.to_addr: {"default": True}}
                                },
                            }
                        }
                        identity = create_identity(identity)
                        message.to_identity = identity["id"]
                # Dispatch on payload type: voice note, image, or plain text.
                if "voice_speech_url" in message.metadata:
                    # OBD number of tries metric
                    fire_metric.apply_async(
                        kwargs={
                            "metric_name": "vumimessage.obd.tries.sum",
                            "metric_value": 1.0,
                        }
                    )
                    # Voice message
                    speech_url = message.metadata["voice_speech_url"]
                    vumiresponse = sender.send_voice(
                        voice_to_addr_formatter(message.to_addr),
                        message.content,
                        speech_url=speech_url,
                        session_event="new",
                    )
                    log.info("Sent voice message to <%s>" % message.to_addr)
                elif "image_url" in message.metadata:
                    # Image message
                    image_url = message.metadata["image_url"]
                    vumiresponse = sender.send_image(
                        text_to_addr_formatter(message.to_addr),
                        message.content,
                        image_url=image_url,
                    )
                    log.info("Sent image message to <%s>" % (message.to_addr,))
                else:
                    # Plain content
                    vumiresponse = sender.send_text(
                        text_to_addr_formatter(message.to_addr),
                        message.content,
                        metadata=message.metadata,
                        session_event="new",
                    )
                    log.info("Sent text message to <%s>" % (message.to_addr,))
                message.last_sent_time = timezone.now()
                message.attempts += 1
                message.vumi_message_id = vumiresponse["message_id"]
                message.save()
                fire_metric.apply_async(
                    kwargs={"metric_name": "vumimessage.tries.sum", "metric_value": 1.0}
                )
            # Transport-level failures: count them and reschedule with the
            # computed delay (self.retry raises, aborting this invocation).
            except requests_exceptions.ConnectionError as exc:
                log.info("Connection Error sending message")
                fire_metric.delay("sender.send_message.connection_error.sum", 1)
                kwargs["error_retry_count"] = error_retry_count + 1
                self.retry(
                    exc=exc, countdown=retry_delay, args=(message_id,), kwargs=kwargs
                )
            except requests_exceptions.Timeout as exc:
                log.info("Sending message failed due to timeout")
                fire_metric.delay("sender.send_message.timeout.sum", 1)
                kwargs["error_retry_count"] = error_retry_count + 1
                self.retry(
                    exc=exc, countdown=retry_delay, args=(message_id,), kwargs=kwargs
                )
            except (requests_exceptions.HTTPError, HTTPServiceError) as exc:
                # retry message sending if in 500 range (3 default
                # retries)
                log.info(
                    "Sending message failed due to status: %s"
                    % exc.response.status_code
                )
                metric_name = (
                    "sender.send_message.http_error.%s.sum" % exc.response.status_code
                )
                fire_metric.delay(metric_name, 1)
                kwargs["error_retry_count"] = error_retry_count + 1
                self.retry(
                    exc=exc, countdown=retry_delay, args=(message_id,), kwargs=kwargs
                )
            # If we've gotten this far the message send was successful.
            fire_metric.apply_async(
                kwargs={"metric_name": "message.sent.sum", "metric_value": 1.0}
            )
            return vumiresponse
        else:
            # This is for retries based on async nacks from the transport.
            log.info("Message <%s> at max retries." % str(message_id))
            message.to_addr = ""
            message.save(update_fields=["to_addr"])
            fire_metric.apply_async(
                kwargs={
                    "metric_name": "vumimessage.maxretries.sum",
                    "metric_value": 1.0,
                }
            )
            # Count failures on exhausted tries.
            fire_metric.apply_async(
                kwargs={"metric_name": "message.failures.sum", "metric_value": 1.0}
            ) | Load and contruct message and send them off | Below is the the instruction that describes the task:
### Input:
Load and construct message and send them off
### Response:
def run(self, message_id, **kwargs):
"""
Load and contruct message and send them off
"""
log = self.get_logger(**kwargs)
error_retry_count = kwargs.get("error_retry_count", 0)
if error_retry_count >= self.max_error_retries:
raise MaxRetriesExceededError(
"Can't retry {0}[{1}] args:{2} kwargs:{3}".format(
self.name, self.request.id, self.request.args, kwargs
)
)
log.info("Loading Outbound Message <%s>" % message_id)
try:
message = Outbound.objects.select_related("channel").get(id=message_id)
except ObjectDoesNotExist:
logger.error("Missing Outbound message", exc_info=True)
return
if message.attempts < settings.MESSAGE_SENDER_MAX_RETRIES:
if error_retry_count > 0:
retry_delay = calculate_retry_delay(error_retry_count)
else:
retry_delay = self.default_retry_delay
log.info("Attempts: %s" % message.attempts)
# send or resend
try:
if not message.channel:
channel = Channel.objects.get(default=True)
else:
channel = message.channel
sender = self.get_client(channel)
ConcurrencyLimiter.manage_limit(self, channel)
if not message.to_addr and message.to_identity:
message.to_addr = get_identity_address(
message.to_identity, use_communicate_through=True
)
if not message.to_addr:
self.fire_failed_msisdn_lookup(message.to_identity)
return
if message.to_addr and not message.to_identity:
result = get_identity_by_address(message.to_addr)
if result:
message.to_identity = result[0]["id"]
else:
identity = {
"details": {
"default_addr_type": "msisdn",
"addresses": {
"msisdn": {message.to_addr: {"default": True}}
},
}
}
identity = create_identity(identity)
message.to_identity = identity["id"]
if "voice_speech_url" in message.metadata:
# OBD number of tries metric
fire_metric.apply_async(
kwargs={
"metric_name": "vumimessage.obd.tries.sum",
"metric_value": 1.0,
}
)
# Voice message
speech_url = message.metadata["voice_speech_url"]
vumiresponse = sender.send_voice(
voice_to_addr_formatter(message.to_addr),
message.content,
speech_url=speech_url,
session_event="new",
)
log.info("Sent voice message to <%s>" % message.to_addr)
elif "image_url" in message.metadata:
# Image message
image_url = message.metadata["image_url"]
vumiresponse = sender.send_image(
text_to_addr_formatter(message.to_addr),
message.content,
image_url=image_url,
)
log.info("Sent image message to <%s>" % (message.to_addr,))
else:
# Plain content
vumiresponse = sender.send_text(
text_to_addr_formatter(message.to_addr),
message.content,
metadata=message.metadata,
session_event="new",
)
log.info("Sent text message to <%s>" % (message.to_addr,))
message.last_sent_time = timezone.now()
message.attempts += 1
message.vumi_message_id = vumiresponse["message_id"]
message.save()
fire_metric.apply_async(
kwargs={"metric_name": "vumimessage.tries.sum", "metric_value": 1.0}
)
except requests_exceptions.ConnectionError as exc:
log.info("Connection Error sending message")
fire_metric.delay("sender.send_message.connection_error.sum", 1)
kwargs["error_retry_count"] = error_retry_count + 1
self.retry(
exc=exc, countdown=retry_delay, args=(message_id,), kwargs=kwargs
)
except requests_exceptions.Timeout as exc:
log.info("Sending message failed due to timeout")
fire_metric.delay("sender.send_message.timeout.sum", 1)
kwargs["error_retry_count"] = error_retry_count + 1
self.retry(
exc=exc, countdown=retry_delay, args=(message_id,), kwargs=kwargs
)
except (requests_exceptions.HTTPError, HTTPServiceError) as exc:
# retry message sending if in 500 range (3 default
# retries)
log.info(
"Sending message failed due to status: %s"
% exc.response.status_code
)
metric_name = (
"sender.send_message.http_error.%s.sum" % exc.response.status_code
)
fire_metric.delay(metric_name, 1)
kwargs["error_retry_count"] = error_retry_count + 1
self.retry(
exc=exc, countdown=retry_delay, args=(message_id,), kwargs=kwargs
)
# If we've gotten this far the message send was successful.
fire_metric.apply_async(
kwargs={"metric_name": "message.sent.sum", "metric_value": 1.0}
)
return vumiresponse
else:
# This is for retries based on async nacks from the transport.
log.info("Message <%s> at max retries." % str(message_id))
message.to_addr = ""
message.save(update_fields=["to_addr"])
fire_metric.apply_async(
kwargs={
"metric_name": "vumimessage.maxretries.sum",
"metric_value": 1.0,
}
)
# Count failures on exhausted tries.
fire_metric.apply_async(
kwargs={"metric_name": "message.failures.sum", "metric_value": 1.0}
) |
def server_setting(name, settings=None, server=_DEFAULT_SERVER):
    '''
    Ensure the value is set for the specified setting.
    .. note::
        The setting names are case-sensitive.
    :param str name: The name of the state; informational only.
    :param str settings: A dictionary of the setting names and their values.
    :param str server: The SMTP server name.
    Example of usage:
    .. code-block:: yaml
        smtp-settings:
          win_smtp_server.server_setting:
            - settings:
                LogType: 1
                LogFilePeriod: 1
                MaxMessageSize: 16777216
                MaxRecipients: 10000
                MaxSessionSize: 16777216
    '''
    ret = {'name': name,
           'changes': {},
           'comment': six.text_type(),
           'result': None}
    if not settings:
        ret['comment'] = 'No settings to change provided.'
        ret['result'] = True
        return ret
    ret_settings = dict()
    ret_settings['changes'] = {}
    ret_settings['failures'] = {}
    current_settings = __salt__['win_smtp_server.get_server_setting'](settings=settings.keys(),
                                                                      server=server)
    # Diff desired vs current; comparison is done on stringified values.
    for key in settings:
        # Some fields are formatted like '{data}'. Salt/Python converts these to dicts
        # automatically on input, so convert them back to the proper format.
        # NOTE(review): this normalization runs once per key and rebinds the
        # dict being iterated; it relies on _normalize_server_settings
        # preserving the key set -- consider hoisting it above the loop.
        settings = _normalize_server_settings(**settings)
        if six.text_type(settings[key]) != six.text_type(current_settings[key]):
            ret_settings['changes'][key] = {'old': current_settings[key],
                                            'new': settings[key]}
    if not ret_settings['changes']:
        ret['comment'] = 'Settings already contain the provided values.'
        ret['result'] = True
        return ret
    elif __opts__['test']:
        ret['comment'] = 'Settings will be changed.'
        ret['changes'] = ret_settings
        return ret
    __salt__['win_smtp_server.set_server_setting'](settings=settings, server=server)
    new_settings = __salt__['win_smtp_server.get_server_setting'](settings=settings.keys(),
                                                                  server=server)
    # Verify the writes took effect; anything that did not stick is a failure.
    for key in settings:
        if six.text_type(new_settings[key]) != six.text_type(settings[key]):
            ret_settings['failures'][key] = {'old': current_settings[key],
                                             'new': new_settings[key]}
            ret_settings['changes'].pop(key, None)
    if ret_settings['failures']:
        ret['comment'] = 'Some settings failed to change.'
        ret['changes'] = ret_settings
        ret['result'] = False
    else:
        ret['comment'] = 'Set settings to contain the provided values.'
        ret['changes'] = ret_settings['changes']
        ret['result'] = True
    return ret | Ensure the value is set for the specified setting.
.. note::
The setting names are case-sensitive.
:param str settings: A dictionary of the setting names and their values.
:param str server: The SMTP server name.
Example of usage:
.. code-block:: yaml
smtp-settings:
win_smtp_server.server_setting:
- settings:
LogType: 1
LogFilePeriod: 1
MaxMessageSize: 16777216
MaxRecipients: 10000
MaxSessionSize: 16777216 | Below is the the instruction that describes the task:
### Input:
Ensure the value is set for the specified setting.
.. note::
The setting names are case-sensitive.
:param str settings: A dictionary of the setting names and their values.
:param str server: The SMTP server name.
Example of usage:
.. code-block:: yaml
smtp-settings:
win_smtp_server.server_setting:
- settings:
LogType: 1
LogFilePeriod: 1
MaxMessageSize: 16777216
MaxRecipients: 10000
MaxSessionSize: 16777216
### Response:
def server_setting(name, settings=None, server=_DEFAULT_SERVER):
'''
Ensure the value is set for the specified setting.
.. note::
The setting names are case-sensitive.
:param str settings: A dictionary of the setting names and their values.
:param str server: The SMTP server name.
Example of usage:
.. code-block:: yaml
smtp-settings:
win_smtp_server.server_setting:
- settings:
LogType: 1
LogFilePeriod: 1
MaxMessageSize: 16777216
MaxRecipients: 10000
MaxSessionSize: 16777216
'''
ret = {'name': name,
'changes': {},
'comment': six.text_type(),
'result': None}
if not settings:
ret['comment'] = 'No settings to change provided.'
ret['result'] = True
return ret
ret_settings = dict()
ret_settings['changes'] = {}
ret_settings['failures'] = {}
current_settings = __salt__['win_smtp_server.get_server_setting'](settings=settings.keys(),
server=server)
for key in settings:
# Some fields are formatted like '{data}'. Salt/Python converts these to dicts
# automatically on input, so convert them back to the proper format.
settings = _normalize_server_settings(**settings)
if six.text_type(settings[key]) != six.text_type(current_settings[key]):
ret_settings['changes'][key] = {'old': current_settings[key],
'new': settings[key]}
if not ret_settings['changes']:
ret['comment'] = 'Settings already contain the provided values.'
ret['result'] = True
return ret
elif __opts__['test']:
ret['comment'] = 'Settings will be changed.'
ret['changes'] = ret_settings
return ret
__salt__['win_smtp_server.set_server_setting'](settings=settings, server=server)
new_settings = __salt__['win_smtp_server.get_server_setting'](settings=settings.keys(),
server=server)
for key in settings:
if six.text_type(new_settings[key]) != six.text_type(settings[key]):
ret_settings['failures'][key] = {'old': current_settings[key],
'new': new_settings[key]}
ret_settings['changes'].pop(key, None)
if ret_settings['failures']:
ret['comment'] = 'Some settings failed to change.'
ret['changes'] = ret_settings
ret['result'] = False
else:
ret['comment'] = 'Set settings to contain the provided values.'
ret['changes'] = ret_settings['changes']
ret['result'] = True
return ret |
def measure(note1, note2):
    """Return an integer in the range of 0-11, determining the half note steps
    between note1 and note2.
    Examples:
    >>> measure('C', 'D')
    2
    >>> measure('D', 'C')
    10
    """
    # Difference of pitch classes; assumes notes.note_to_int returns a value
    # in 0-11, so res is in -11..11 -- confirm against the notes module.
    res = notes.note_to_int(note2) - notes.note_to_int(note1)
    if res < 0:
        # Wrap negative intervals upward (equivalent to res % 12).
        return 12 - res * -1
    else:
        return res | Return an integer in the range of 0-11, determining the half note steps
between note1 and note2.
Examples:
>>> measure('C', 'D')
2
>>> measure('D', 'C')
10 | Below is the the instruction that describes the task:
### Input:
Return an integer in the range of 0-11, determining the half note steps
between note1 and note2.
Examples:
>>> measure('C', 'D')
2
>>> measure('D', 'C')
10
### Response:
def measure(note1, note2):
"""Return an integer in the range of 0-11, determining the half note steps
between note1 and note2.
Examples:
>>> measure('C', 'D')
2
>>> measure('D', 'C')
10
"""
res = notes.note_to_int(note2) - notes.note_to_int(note1)
if res < 0:
return 12 - res * -1
else:
return res |
def create(self):
        """
        Create a new ConfigurationInstance
        :returns: Newly created ConfigurationInstance
        :rtype: twilio.rest.flex_api.v1.configuration.ConfigurationInstance
        """
        # The endpoint takes no creation parameters; POST an empty body.
        data = values.of({})
        payload = self._version.create(
            'POST',
            self._uri,
            data=data,
        )
        return ConfigurationInstance(self._version, payload, ) | Create a new ConfigurationInstance
:returns: Newly created ConfigurationInstance
:rtype: twilio.rest.flex_api.v1.configuration.ConfigurationInstance | Below is the the instruction that describes the task:
### Input:
Create a new ConfigurationInstance
:returns: Newly created ConfigurationInstance
:rtype: twilio.rest.flex_api.v1.configuration.ConfigurationInstance
### Response:
def create(self):
"""
Create a new ConfigurationInstance
:returns: Newly created ConfigurationInstance
:rtype: twilio.rest.flex_api.v1.configuration.ConfigurationInstance
"""
data = values.of({})
payload = self._version.create(
'POST',
self._uri,
data=data,
)
return ConfigurationInstance(self._version, payload, ) |
def create_vlan(self):
        """Get an instance of vlan services facade.

        The facade is constructed with this client's networkapi URL and
        credentials, so calls made through it are authenticated the same way.
        """
        return Vlan(
            self.networkapi_url,
            self.user,
            self.password,
            self.user_ldap) | Get an instance of vlan services facade. | Below is the the instruction that describes the task:
### Input:
Get an instance of vlan services facade.
### Response:
def create_vlan(self):
"""Get an instance of vlan services facade."""
return Vlan(
self.networkapi_url,
self.user,
self.password,
self.user_ldap) |
def setup_interpreter(distributions, interpreter=None):
"""Return an interpreter configured with vendored distributions as extras.
Any distributions that are present in the vendored set will be added to the interpreter as extras.
:param distributions: The names of distributions to setup the interpreter with.
:type distributions: list of str
:param interpreter: An optional interpreter to configure. If ``None``, the current interpreter is
used.
:type interpreter: :class:`pex.interpreter.PythonInterpreter`
:return: An bare interpreter configured with vendored extras.
:rtype: :class:`pex.interpreter.PythonInterpreter`
"""
from pex.interpreter import PythonInterpreter
interpreter = interpreter or PythonInterpreter.get()
for dist in _vendored_dists(OrderedSet(distributions)):
interpreter = interpreter.with_extra(dist.key, dist.version, dist.location)
return interpreter | Return an interpreter configured with vendored distributions as extras.
Any distributions that are present in the vendored set will be added to the interpreter as extras.
:param distributions: The names of distributions to setup the interpreter with.
:type distributions: list of str
:param interpreter: An optional interpreter to configure. If ``None``, the current interpreter is
used.
:type interpreter: :class:`pex.interpreter.PythonInterpreter`
:return: An bare interpreter configured with vendored extras.
:rtype: :class:`pex.interpreter.PythonInterpreter` | Below is the the instruction that describes the task:
### Input:
Return an interpreter configured with vendored distributions as extras.
Any distributions that are present in the vendored set will be added to the interpreter as extras.
:param distributions: The names of distributions to setup the interpreter with.
:type distributions: list of str
:param interpreter: An optional interpreter to configure. If ``None``, the current interpreter is
used.
:type interpreter: :class:`pex.interpreter.PythonInterpreter`
:return: An bare interpreter configured with vendored extras.
:rtype: :class:`pex.interpreter.PythonInterpreter`
### Response:
def setup_interpreter(distributions, interpreter=None):
"""Return an interpreter configured with vendored distributions as extras.
Any distributions that are present in the vendored set will be added to the interpreter as extras.
:param distributions: The names of distributions to setup the interpreter with.
:type distributions: list of str
:param interpreter: An optional interpreter to configure. If ``None``, the current interpreter is
used.
:type interpreter: :class:`pex.interpreter.PythonInterpreter`
:return: An bare interpreter configured with vendored extras.
:rtype: :class:`pex.interpreter.PythonInterpreter`
"""
from pex.interpreter import PythonInterpreter
interpreter = interpreter or PythonInterpreter.get()
for dist in _vendored_dists(OrderedSet(distributions)):
interpreter = interpreter.with_extra(dist.key, dist.version, dist.location)
return interpreter |
def parse_year_days(year_info):
"""Parse year days from a year info.
"""
leap_month, leap_days = _parse_leap(year_info)
res = leap_days
for month in range(1, 13):
res += (year_info >> (16 - month)) % 2 + 29
return res | Parse year days from a year info. | Below is the the instruction that describes the task:
### Input:
Parse year days from a year info.
### Response:
def parse_year_days(year_info):
"""Parse year days from a year info.
"""
leap_month, leap_days = _parse_leap(year_info)
res = leap_days
for month in range(1, 13):
res += (year_info >> (16 - month)) % 2 + 29
return res |
def removeSessionWithKey(self, key):
"""
Remove a persistent session, if it exists.
@type key: L{bytes}
@param key: The persistent session identifier.
"""
self.store.query(
PersistentSession,
PersistentSession.sessionKey == key).deleteFromStore() | Remove a persistent session, if it exists.
@type key: L{bytes}
@param key: The persistent session identifier. | Below is the the instruction that describes the task:
### Input:
Remove a persistent session, if it exists.
@type key: L{bytes}
@param key: The persistent session identifier.
### Response:
def removeSessionWithKey(self, key):
"""
Remove a persistent session, if it exists.
@type key: L{bytes}
@param key: The persistent session identifier.
"""
self.store.query(
PersistentSession,
PersistentSession.sessionKey == key).deleteFromStore() |
def _check_special_kwargs(self, name):
''' check special functions for kwargs
Checks the content of the special functions (%methodname) for
any keyword arguments referenced within
Parameters:
name (str):
A path key name
Returns:
A list of keyword arguments found in any special functions
'''
keys = []
# find any %method names in the template string
functions = re.findall(r"\%\w+", self.templates[name])
if not functions:
return keys
# loop over special method names and extract keywords
for function in functions:
method = getattr(self, function[1:])
# get source code of special method
source = self._find_source(method)
fkeys = re.findall(r'kwargs\[(.*?)\]', source)
if fkeys:
# evaluate to proper string
fkeys = [ast.literal_eval(k) for k in fkeys]
keys.extend(fkeys)
return keys | check special functions for kwargs
Checks the content of the special functions (%methodname) for
any keyword arguments referenced within
Parameters:
name (str):
A path key name
Returns:
A list of keyword arguments found in any special functions | Below is the the instruction that describes the task:
### Input:
check special functions for kwargs
Checks the content of the special functions (%methodname) for
any keyword arguments referenced within
Parameters:
name (str):
A path key name
Returns:
A list of keyword arguments found in any special functions
### Response:
def _check_special_kwargs(self, name):
''' check special functions for kwargs
Checks the content of the special functions (%methodname) for
any keyword arguments referenced within
Parameters:
name (str):
A path key name
Returns:
A list of keyword arguments found in any special functions
'''
keys = []
# find any %method names in the template string
functions = re.findall(r"\%\w+", self.templates[name])
if not functions:
return keys
# loop over special method names and extract keywords
for function in functions:
method = getattr(self, function[1:])
# get source code of special method
source = self._find_source(method)
fkeys = re.findall(r'kwargs\[(.*?)\]', source)
if fkeys:
# evaluate to proper string
fkeys = [ast.literal_eval(k) for k in fkeys]
keys.extend(fkeys)
return keys |
def cmd(send, msg, args):
"""Nukes somebody.
Syntax: {command} <target>
"""
c, nick = args['handler'].connection, args['nick']
channel = args['target'] if args['target'] != 'private' else args['config']['core']['channel']
if not msg:
send("Nuke who?")
return
with args['handler'].data_lock:
users = args['handler'].channels[channel].users()
if msg in users:
do_nuke(c, nick, msg, channel)
elif msg == args['botnick']:
send("Sorry, Self-Nuking is disabled pending aquisition of a Lead-Lined Fridge.")
else:
send("I'm sorry. Anonymous Nuking is not allowed") | Nukes somebody.
Syntax: {command} <target> | Below is the the instruction that describes the task:
### Input:
Nukes somebody.
Syntax: {command} <target>
### Response:
def cmd(send, msg, args):
"""Nukes somebody.
Syntax: {command} <target>
"""
c, nick = args['handler'].connection, args['nick']
channel = args['target'] if args['target'] != 'private' else args['config']['core']['channel']
if not msg:
send("Nuke who?")
return
with args['handler'].data_lock:
users = args['handler'].channels[channel].users()
if msg in users:
do_nuke(c, nick, msg, channel)
elif msg == args['botnick']:
send("Sorry, Self-Nuking is disabled pending aquisition of a Lead-Lined Fridge.")
else:
send("I'm sorry. Anonymous Nuking is not allowed") |
def psit(t, x, xf):
""" A function of t, X_t and X_{t+1} (f=future) """
if t == 0:
return psi0(x) + psit(1, x, xf)
else:
return -0.5 / sigma0**2 + (0.5 / sigma0**4) * ((xf - mu0) -
phi0 * (x - mu0))**2 | A function of t, X_t and X_{t+1} (f=future) | Below is the the instruction that describes the task:
### Input:
A function of t, X_t and X_{t+1} (f=future)
### Response:
def psit(t, x, xf):
""" A function of t, X_t and X_{t+1} (f=future) """
if t == 0:
return psi0(x) + psit(1, x, xf)
else:
return -0.5 / sigma0**2 + (0.5 / sigma0**4) * ((xf - mu0) -
phi0 * (x - mu0))**2 |
def remove_keywords_from_list(self, keyword_list):
"""To remove keywords present in list
Args:
keyword_list (list(str)): List of keywords to remove
Examples:
>>> keyword_processor.remove_keywords_from_list(["java", "python"]})
Raises:
AttributeError: If `keyword_list` is not a list.
"""
if not isinstance(keyword_list, list):
raise AttributeError("keyword_list should be a list")
for keyword in keyword_list:
self.remove_keyword(keyword) | To remove keywords present in list
Args:
keyword_list (list(str)): List of keywords to remove
Examples:
>>> keyword_processor.remove_keywords_from_list(["java", "python"]})
Raises:
AttributeError: If `keyword_list` is not a list. | Below is the the instruction that describes the task:
### Input:
To remove keywords present in list
Args:
keyword_list (list(str)): List of keywords to remove
Examples:
>>> keyword_processor.remove_keywords_from_list(["java", "python"]})
Raises:
AttributeError: If `keyword_list` is not a list.
### Response:
def remove_keywords_from_list(self, keyword_list):
"""To remove keywords present in list
Args:
keyword_list (list(str)): List of keywords to remove
Examples:
>>> keyword_processor.remove_keywords_from_list(["java", "python"]})
Raises:
AttributeError: If `keyword_list` is not a list.
"""
if not isinstance(keyword_list, list):
raise AttributeError("keyword_list should be a list")
for keyword in keyword_list:
self.remove_keyword(keyword) |
def set_user_password(environment, parameter, password):
"""
Sets a user's password in the keyring storage
"""
username = '%s:%s' % (environment, parameter)
return password_set(username, password) | Sets a user's password in the keyring storage | Below is the the instruction that describes the task:
### Input:
Sets a user's password in the keyring storage
### Response:
def set_user_password(environment, parameter, password):
"""
Sets a user's password in the keyring storage
"""
username = '%s:%s' % (environment, parameter)
return password_set(username, password) |
def writeMNIST(sc, input_images, input_labels, output, format, num_partitions):
"""Writes MNIST image/label vectors into parallelized files on HDFS"""
# load MNIST gzip into memory
with open(input_images, 'rb') as f:
images = numpy.array(mnist.extract_images(f))
with open(input_labels, 'rb') as f:
if format == "csv2":
labels = numpy.array(mnist.extract_labels(f, one_hot=False))
else:
labels = numpy.array(mnist.extract_labels(f, one_hot=True))
shape = images.shape
print("images.shape: {0}".format(shape)) # 60000 x 28 x 28
print("labels.shape: {0}".format(labels.shape)) # 60000 x 10
# create RDDs of vectors
imageRDD = sc.parallelize(images.reshape(shape[0], shape[1] * shape[2]), num_partitions)
labelRDD = sc.parallelize(labels, num_partitions)
output_images = output + "/images"
output_labels = output + "/labels"
# save RDDs as specific format
if format == "pickle":
imageRDD.saveAsPickleFile(output_images)
labelRDD.saveAsPickleFile(output_labels)
elif format == "csv":
imageRDD.map(toCSV).saveAsTextFile(output_images)
labelRDD.map(toCSV).saveAsTextFile(output_labels)
elif format == "csv2":
imageRDD.map(toCSV).zip(labelRDD).map(lambda x: str(x[1]) + "|" + x[0]).saveAsTextFile(output)
else: # format == "tfr":
tfRDD = imageRDD.zip(labelRDD).map(lambda x: (bytearray(toTFExample(x[0], x[1])), None))
# requires: --jars tensorflow-hadoop-1.0-SNAPSHOT.jar
tfRDD.saveAsNewAPIHadoopFile(output, "org.tensorflow.hadoop.io.TFRecordFileOutputFormat",
keyClass="org.apache.hadoop.io.BytesWritable",
valueClass="org.apache.hadoop.io.NullWritable") | Writes MNIST image/label vectors into parallelized files on HDFS | Below is the the instruction that describes the task:
### Input:
Writes MNIST image/label vectors into parallelized files on HDFS
### Response:
def writeMNIST(sc, input_images, input_labels, output, format, num_partitions):
"""Writes MNIST image/label vectors into parallelized files on HDFS"""
# load MNIST gzip into memory
with open(input_images, 'rb') as f:
images = numpy.array(mnist.extract_images(f))
with open(input_labels, 'rb') as f:
if format == "csv2":
labels = numpy.array(mnist.extract_labels(f, one_hot=False))
else:
labels = numpy.array(mnist.extract_labels(f, one_hot=True))
shape = images.shape
print("images.shape: {0}".format(shape)) # 60000 x 28 x 28
print("labels.shape: {0}".format(labels.shape)) # 60000 x 10
# create RDDs of vectors
imageRDD = sc.parallelize(images.reshape(shape[0], shape[1] * shape[2]), num_partitions)
labelRDD = sc.parallelize(labels, num_partitions)
output_images = output + "/images"
output_labels = output + "/labels"
# save RDDs as specific format
if format == "pickle":
imageRDD.saveAsPickleFile(output_images)
labelRDD.saveAsPickleFile(output_labels)
elif format == "csv":
imageRDD.map(toCSV).saveAsTextFile(output_images)
labelRDD.map(toCSV).saveAsTextFile(output_labels)
elif format == "csv2":
imageRDD.map(toCSV).zip(labelRDD).map(lambda x: str(x[1]) + "|" + x[0]).saveAsTextFile(output)
else: # format == "tfr":
tfRDD = imageRDD.zip(labelRDD).map(lambda x: (bytearray(toTFExample(x[0], x[1])), None))
# requires: --jars tensorflow-hadoop-1.0-SNAPSHOT.jar
tfRDD.saveAsNewAPIHadoopFile(output, "org.tensorflow.hadoop.io.TFRecordFileOutputFormat",
keyClass="org.apache.hadoop.io.BytesWritable",
valueClass="org.apache.hadoop.io.NullWritable") |
def mtf_transformer_lm_baseline():
"""Small language model to run on 1 TPU.
Run this on 2x2 on languagemodel_lm1b32k_packed for 272000 steps (10 epochs)
Results:
params/10^9 log-ppl(per-token)
0.14 3.202
Returns:
a hparams
"""
hparams = mtf_transformer_paper_lm(-1)
hparams.batch_size = 128
hparams.learning_rate_decay_steps = 27200 # one epoch on lm1b
hparams.mesh_shape = "batch:8"
return hparams | Small language model to run on 1 TPU.
Run this on 2x2 on languagemodel_lm1b32k_packed for 272000 steps (10 epochs)
Results:
params/10^9 log-ppl(per-token)
0.14 3.202
Returns:
a hparams | Below is the the instruction that describes the task:
### Input:
Small language model to run on 1 TPU.
Run this on 2x2 on languagemodel_lm1b32k_packed for 272000 steps (10 epochs)
Results:
params/10^9 log-ppl(per-token)
0.14 3.202
Returns:
a hparams
### Response:
def mtf_transformer_lm_baseline():
"""Small language model to run on 1 TPU.
Run this on 2x2 on languagemodel_lm1b32k_packed for 272000 steps (10 epochs)
Results:
params/10^9 log-ppl(per-token)
0.14 3.202
Returns:
a hparams
"""
hparams = mtf_transformer_paper_lm(-1)
hparams.batch_size = 128
hparams.learning_rate_decay_steps = 27200 # one epoch on lm1b
hparams.mesh_shape = "batch:8"
return hparams |
def bench(client, n):
""" Benchmark n requests """
items = list(range(n))
# Time client publish operations
# ------------------------------
started = time.time()
msg = b'x'
for i in items:
client.socket.send(msg)
res = client.socket.recv()
assert msg == res
duration = time.time() - started
print('Raw REQ client stats:')
util.print_stats(n, duration) | Benchmark n requests | Below is the the instruction that describes the task:
### Input:
Benchmark n requests
### Response:
def bench(client, n):
""" Benchmark n requests """
items = list(range(n))
# Time client publish operations
# ------------------------------
started = time.time()
msg = b'x'
for i in items:
client.socket.send(msg)
res = client.socket.recv()
assert msg == res
duration = time.time() - started
print('Raw REQ client stats:')
util.print_stats(n, duration) |
def _gather_active_forms(self):
"""Collect all the active forms of each Agent in the Statements."""
for stmt in self.statements:
if isinstance(stmt, ActiveForm):
base_agent = self.agent_set.get_create_base_agent(stmt.agent)
# Handle the case where an activity flag is set
agent_to_add = stmt.agent
if stmt.agent.activity:
new_agent = fast_deepcopy(stmt.agent)
new_agent.activity = None
agent_to_add = new_agent
base_agent.add_activity_form(agent_to_add, stmt.is_active) | Collect all the active forms of each Agent in the Statements. | Below is the the instruction that describes the task:
### Input:
Collect all the active forms of each Agent in the Statements.
### Response:
def _gather_active_forms(self):
"""Collect all the active forms of each Agent in the Statements."""
for stmt in self.statements:
if isinstance(stmt, ActiveForm):
base_agent = self.agent_set.get_create_base_agent(stmt.agent)
# Handle the case where an activity flag is set
agent_to_add = stmt.agent
if stmt.agent.activity:
new_agent = fast_deepcopy(stmt.agent)
new_agent.activity = None
agent_to_add = new_agent
base_agent.add_activity_form(agent_to_add, stmt.is_active) |
def filter(self, networks):
"""
Returns a new experimental setup restricted to species present in the given list of networks
Parameters
----------
networks : :class:`caspo.core.logicalnetwork.LogicalNetworkList`
List of logical networks
Returns
-------
caspo.core.setup.Setup
The restricted experimental setup
"""
cues = self.stimuli + self.inhibitors
active_cues = set()
active_readouts = set()
for clause, var in networks.mappings:
active_cues = active_cues.union((l for (l, s) in clause if l in cues))
if var in self.readouts:
active_readouts.add(var)
return Setup(active_cues.intersection(self.stimuli), active_cues.intersection(self.inhibitors), active_readouts) | Returns a new experimental setup restricted to species present in the given list of networks
Parameters
----------
networks : :class:`caspo.core.logicalnetwork.LogicalNetworkList`
List of logical networks
Returns
-------
caspo.core.setup.Setup
The restricted experimental setup | Below is the the instruction that describes the task:
### Input:
Returns a new experimental setup restricted to species present in the given list of networks
Parameters
----------
networks : :class:`caspo.core.logicalnetwork.LogicalNetworkList`
List of logical networks
Returns
-------
caspo.core.setup.Setup
The restricted experimental setup
### Response:
def filter(self, networks):
"""
Returns a new experimental setup restricted to species present in the given list of networks
Parameters
----------
networks : :class:`caspo.core.logicalnetwork.LogicalNetworkList`
List of logical networks
Returns
-------
caspo.core.setup.Setup
The restricted experimental setup
"""
cues = self.stimuli + self.inhibitors
active_cues = set()
active_readouts = set()
for clause, var in networks.mappings:
active_cues = active_cues.union((l for (l, s) in clause if l in cues))
if var in self.readouts:
active_readouts.add(var)
return Setup(active_cues.intersection(self.stimuli), active_cues.intersection(self.inhibitors), active_readouts) |
def recvmsgs(sk, cb):
"""https://github.com/thom311/libnl/blob/libnl3_2_25/lib/nl.c#L775.
This is where callbacks are called.
Positional arguments:
sk -- Netlink socket (nl_sock class instance).
cb -- callbacks (nl_cb class instance).
Returns:
Number of bytes received or a negative error code.
"""
multipart = 0
interrupted = 0
nrecv = 0
buf = bytearray()
# nla is passed on to not only to nl_recv() but may also be passed to a function pointer provided by the caller
# which may or may not initialize the variable. Thomas Graf.
nla = sockaddr_nl()
creds = ucred()
while True: # This is the `goto continue_reading` implementation.
_LOGGER.debug('Attempting to read from 0x%x', id(sk))
n = c_int(cb.cb_recv_ow(sk, nla, buf, creds) if cb.cb_recv_ow else nl_recv(sk, nla, buf, creds))
if n.value <= 0:
return n.value
_LOGGER.debug('recvmsgs(0x%x): Read %d bytes', id(sk), n.value)
hdr = nlmsghdr(bytearray_ptr(buf))
while nlmsg_ok(hdr, n):
_LOGGER.debug('recvmsgs(0x%x): Processing valid message...', id(sk))
msg = nlmsg_convert(hdr)
nlmsg_set_proto(msg, sk.s_proto)
nlmsg_set_src(msg, nla)
if creds:
raise NotImplementedError # nlmsg_set_creds(msg, creds)
nrecv += 1
# Raw callback is the first, it gives the most control to the user and he can do his very own parsing.
if cb.cb_set[NL_CB_MSG_IN]:
err = nl_cb_call(cb, NL_CB_MSG_IN, msg) # NL_CB_CALL(cb, NL_CB_MSG_IN, msg)
if err == NL_OK:
pass
elif err == NL_SKIP:
hdr = nlmsg_next(hdr, n)
continue
elif err == NL_STOP:
return -NLE_DUMP_INTR if interrupted else nrecv
else:
return -NLE_DUMP_INTR if interrupted else (err or nrecv)
if cb.cb_set[NL_CB_SEQ_CHECK]:
# Sequence number checking. The check may be done by the user, otherwise a very simple check is applied
# enforcing strict ordering.
err = nl_cb_call(cb, NL_CB_SEQ_CHECK, msg) # NL_CB_CALL(cb, NL_CB_SEQ_CHECK, msg)
if err == NL_OK:
pass
elif err == NL_SKIP:
hdr = nlmsg_next(hdr, n)
continue
elif err == NL_STOP:
return -NLE_DUMP_INTR if interrupted else nrecv
else:
return -NLE_DUMP_INTR if interrupted else (err or nrecv)
elif not sk.s_flags & NL_NO_AUTO_ACK:
# Only do sequence checking if auto-ack mode is enabled.
if hdr.nlmsg_seq != sk.s_seq_expect:
if cb.cb_set[NL_CB_INVALID]:
err = nl_cb_call(cb, NL_CB_INVALID, msg) # NL_CB_CALL(cb, NL_CB_INVALID, msg)
if err == NL_OK:
pass
elif err == NL_SKIP:
hdr = nlmsg_next(hdr, n)
continue
elif err == NL_STOP:
return -NLE_DUMP_INTR if interrupted else nrecv
else:
return -NLE_DUMP_INTR if interrupted else (err or nrecv)
else:
return -NLE_SEQ_MISMATCH
if hdr.nlmsg_type in (NLMSG_DONE, NLMSG_ERROR, NLMSG_NOOP, NLMSG_OVERRUN):
# We can't check for !NLM_F_MULTI since some Netlink users in the kernel are broken.
sk.s_seq_expect += 1
_LOGGER.debug('recvmsgs(0x%x): Increased expected sequence number to %d', id(sk), sk.s_seq_expect)
if hdr.nlmsg_flags & NLM_F_MULTI:
multipart = 1
if hdr.nlmsg_flags & NLM_F_DUMP_INTR:
if cb.cb_set[NL_CB_DUMP_INTR]:
err = nl_cb_call(cb, NL_CB_DUMP_INTR, msg) # NL_CB_CALL(cb, NL_CB_DUMP_INTR, msg)
if err == NL_OK:
pass
elif err == NL_SKIP:
hdr = nlmsg_next(hdr, n)
continue
elif err == NL_STOP:
return -NLE_DUMP_INTR if interrupted else nrecv
else:
return -NLE_DUMP_INTR if interrupted else (err or nrecv)
else:
# We have to continue reading to clear all messages until a NLMSG_DONE is received and report the
# inconsistency.
interrupted = 1
if hdr.nlmsg_flags & NLM_F_ACK:
# Other side wishes to see an ack for this message.
if cb.cb_set[NL_CB_SEND_ACK]:
err = nl_cb_call(cb, NL_CB_SEND_ACK, msg) # NL_CB_CALL(cb, NL_CB_SEND_ACK, msg)
if err == NL_OK:
pass
elif err == NL_SKIP:
hdr = nlmsg_next(hdr, n)
continue
elif err == NL_STOP:
return -NLE_DUMP_INTR if interrupted else nrecv
else:
return -NLE_DUMP_INTR if interrupted else (err or nrecv)
if hdr.nlmsg_type == NLMSG_DONE:
# Messages terminates a multipart message, this is usually the end of a message and therefore we slip
# out of the loop by default. the user may overrule this action by skipping this packet.
multipart = 0
if cb.cb_set[NL_CB_FINISH]:
err = nl_cb_call(cb, NL_CB_FINISH, msg) # NL_CB_CALL(cb, NL_CB_FINISH, msg)
if err == NL_OK:
pass
elif err == NL_SKIP:
hdr = nlmsg_next(hdr, n)
continue
elif err == NL_STOP:
return -NLE_DUMP_INTR if interrupted else nrecv
else:
return -NLE_DUMP_INTR if interrupted else (err or nrecv)
elif hdr.nlmsg_type == NLMSG_NOOP:
# Message to be ignored, the default action is to skip this message if no callback is specified. The
# user may overrule this action by returning NL_PROCEED.
if cb.cb_set[NL_CB_SKIPPED]:
err = nl_cb_call(cb, NL_CB_SKIPPED, msg) # NL_CB_CALL(cb, NL_CB_SKIPPED, msg)
if err == NL_OK:
pass
elif err == NL_SKIP:
hdr = nlmsg_next(hdr, n)
continue
elif err == NL_STOP:
return -NLE_DUMP_INTR if interrupted else nrecv
else:
return -NLE_DUMP_INTR if interrupted else (err or nrecv)
else:
hdr = nlmsg_next(hdr, n)
continue
elif hdr.nlmsg_type == NLMSG_OVERRUN:
# Data got lost, report back to user. The default action is to quit parsing. The user may overrule this
# action by retuning NL_SKIP or NL_PROCEED (dangerous).
if cb.cb_set[NL_CB_OVERRUN]:
err = nl_cb_call(cb, NL_CB_OVERRUN, msg) # NL_CB_CALL(cb, NL_CB_OVERRUN, msg)
if err == NL_OK:
pass
elif err == NL_SKIP:
hdr = nlmsg_next(hdr, n)
continue
elif err == NL_STOP:
return -NLE_DUMP_INTR if interrupted else nrecv
else:
return -NLE_DUMP_INTR if interrupted else (err or nrecv)
else:
return -NLE_DUMP_INTR if interrupted else -NLE_MSG_OVERFLOW
elif hdr.nlmsg_type == NLMSG_ERROR:
# Message carries a nlmsgerr.
e = nlmsgerr(nlmsg_data(hdr))
if hdr.nlmsg_len < nlmsg_size(e.SIZEOF):
# Truncated error message, the default action is to stop parsing. The user may overrule this action
# by returning NL_SKIP or NL_PROCEED (dangerous).
if cb.cb_set[NL_CB_INVALID]:
err = nl_cb_call(cb, NL_CB_INVALID, msg) # NL_CB_CALL(cb, NL_CB_INVALID, msg)
if err == NL_OK:
pass
elif err == NL_SKIP:
hdr = nlmsg_next(hdr, n)
continue
elif err == NL_STOP:
return -NLE_DUMP_INTR if interrupted else nrecv
else:
return -NLE_DUMP_INTR if interrupted else (err or nrecv)
else:
return -NLE_DUMP_INTR if interrupted else -NLE_MSG_TRUNC
elif e.error:
# Error message reported back from kernel.
if cb.cb_err:
err = cb.cb_err(nla, e, cb.cb_err_arg)
if err < 0:
return -NLE_DUMP_INTR if interrupted else err
elif err == NL_SKIP:
hdr = nlmsg_next(hdr, n)
continue
elif err == NL_STOP:
return -NLE_DUMP_INTR if interrupted else -nl_syserr2nlerr(e.error)
else:
return -NLE_DUMP_INTR if interrupted else -nl_syserr2nlerr(e.error)
elif cb.cb_set[NL_CB_ACK]:
err = nl_cb_call(cb, NL_CB_ACK, msg) # NL_CB_CALL(cb, NL_CB_ACK, msg)
if err == NL_OK:
pass
elif err == NL_SKIP:
hdr = nlmsg_next(hdr, n)
continue
elif err == NL_STOP:
return -NLE_DUMP_INTR if interrupted else nrecv
else:
return -NLE_DUMP_INTR if interrupted else (err or nrecv)
else:
# Valid message (not checking for MULTIPART bit to get along with broken kernels. NL_SKIP has no effect
# on this.
if cb.cb_set[NL_CB_VALID]:
err = nl_cb_call(cb, NL_CB_VALID, msg) # NL_CB_CALL(cb, NL_CB_VALID, msg)
if err == NL_OK:
pass
elif err == NL_SKIP:
hdr = nlmsg_next(hdr, n)
continue
elif err == NL_STOP:
return -NLE_DUMP_INTR if interrupted else nrecv
else:
return -NLE_DUMP_INTR if interrupted else (err or nrecv)
hdr = nlmsg_next(hdr, n)
del buf[:]
creds = None
if multipart:
# Multipart message not yet complete, continue reading.
continue
err = 0
if interrupted:
return -NLE_DUMP_INTR
if not err:
err = nrecv
return err | https://github.com/thom311/libnl/blob/libnl3_2_25/lib/nl.c#L775.
This is where callbacks are called.
Positional arguments:
sk -- Netlink socket (nl_sock class instance).
cb -- callbacks (nl_cb class instance).
Returns:
Number of bytes received or a negative error code. | Below is the the instruction that describes the task:
### Input:
https://github.com/thom311/libnl/blob/libnl3_2_25/lib/nl.c#L775.
This is where callbacks are called.
Positional arguments:
sk -- Netlink socket (nl_sock class instance).
cb -- callbacks (nl_cb class instance).
Returns:
Number of bytes received or a negative error code.
### Response:
def recvmsgs(sk, cb):
"""https://github.com/thom311/libnl/blob/libnl3_2_25/lib/nl.c#L775.
This is where callbacks are called.
Positional arguments:
sk -- Netlink socket (nl_sock class instance).
cb -- callbacks (nl_cb class instance).
Returns:
Number of bytes received or a negative error code.
"""
multipart = 0
interrupted = 0
nrecv = 0
buf = bytearray()
# nla is passed on to not only to nl_recv() but may also be passed to a function pointer provided by the caller
# which may or may not initialize the variable. Thomas Graf.
nla = sockaddr_nl()
creds = ucred()
while True: # This is the `goto continue_reading` implementation.
_LOGGER.debug('Attempting to read from 0x%x', id(sk))
n = c_int(cb.cb_recv_ow(sk, nla, buf, creds) if cb.cb_recv_ow else nl_recv(sk, nla, buf, creds))
if n.value <= 0:
return n.value
_LOGGER.debug('recvmsgs(0x%x): Read %d bytes', id(sk), n.value)
hdr = nlmsghdr(bytearray_ptr(buf))
while nlmsg_ok(hdr, n):
_LOGGER.debug('recvmsgs(0x%x): Processing valid message...', id(sk))
msg = nlmsg_convert(hdr)
nlmsg_set_proto(msg, sk.s_proto)
nlmsg_set_src(msg, nla)
if creds:
raise NotImplementedError # nlmsg_set_creds(msg, creds)
nrecv += 1
# Raw callback is the first, it gives the most control to the user and he can do his very own parsing.
if cb.cb_set[NL_CB_MSG_IN]:
err = nl_cb_call(cb, NL_CB_MSG_IN, msg) # NL_CB_CALL(cb, NL_CB_MSG_IN, msg)
if err == NL_OK:
pass
elif err == NL_SKIP:
hdr = nlmsg_next(hdr, n)
continue
elif err == NL_STOP:
return -NLE_DUMP_INTR if interrupted else nrecv
else:
return -NLE_DUMP_INTR if interrupted else (err or nrecv)
if cb.cb_set[NL_CB_SEQ_CHECK]:
# Sequence number checking. The check may be done by the user, otherwise a very simple check is applied
# enforcing strict ordering.
err = nl_cb_call(cb, NL_CB_SEQ_CHECK, msg) # NL_CB_CALL(cb, NL_CB_SEQ_CHECK, msg)
if err == NL_OK:
pass
elif err == NL_SKIP:
hdr = nlmsg_next(hdr, n)
continue
elif err == NL_STOP:
return -NLE_DUMP_INTR if interrupted else nrecv
else:
return -NLE_DUMP_INTR if interrupted else (err or nrecv)
elif not sk.s_flags & NL_NO_AUTO_ACK:
# Only do sequence checking if auto-ack mode is enabled.
if hdr.nlmsg_seq != sk.s_seq_expect:
if cb.cb_set[NL_CB_INVALID]:
err = nl_cb_call(cb, NL_CB_INVALID, msg) # NL_CB_CALL(cb, NL_CB_INVALID, msg)
if err == NL_OK:
pass
elif err == NL_SKIP:
hdr = nlmsg_next(hdr, n)
continue
elif err == NL_STOP:
return -NLE_DUMP_INTR if interrupted else nrecv
else:
return -NLE_DUMP_INTR if interrupted else (err or nrecv)
else:
return -NLE_SEQ_MISMATCH
if hdr.nlmsg_type in (NLMSG_DONE, NLMSG_ERROR, NLMSG_NOOP, NLMSG_OVERRUN):
# We can't check for !NLM_F_MULTI since some Netlink users in the kernel are broken.
sk.s_seq_expect += 1
_LOGGER.debug('recvmsgs(0x%x): Increased expected sequence number to %d', id(sk), sk.s_seq_expect)
if hdr.nlmsg_flags & NLM_F_MULTI:
multipart = 1
if hdr.nlmsg_flags & NLM_F_DUMP_INTR:
if cb.cb_set[NL_CB_DUMP_INTR]:
err = nl_cb_call(cb, NL_CB_DUMP_INTR, msg) # NL_CB_CALL(cb, NL_CB_DUMP_INTR, msg)
if err == NL_OK:
pass
elif err == NL_SKIP:
hdr = nlmsg_next(hdr, n)
continue
elif err == NL_STOP:
return -NLE_DUMP_INTR if interrupted else nrecv
else:
return -NLE_DUMP_INTR if interrupted else (err or nrecv)
else:
# We have to continue reading to clear all messages until a NLMSG_DONE is received and report the
# inconsistency.
interrupted = 1
if hdr.nlmsg_flags & NLM_F_ACK:
# Other side wishes to see an ack for this message.
if cb.cb_set[NL_CB_SEND_ACK]:
err = nl_cb_call(cb, NL_CB_SEND_ACK, msg) # NL_CB_CALL(cb, NL_CB_SEND_ACK, msg)
if err == NL_OK:
pass
elif err == NL_SKIP:
hdr = nlmsg_next(hdr, n)
continue
elif err == NL_STOP:
return -NLE_DUMP_INTR if interrupted else nrecv
else:
return -NLE_DUMP_INTR if interrupted else (err or nrecv)
if hdr.nlmsg_type == NLMSG_DONE:
# Messages terminates a multipart message, this is usually the end of a message and therefore we slip
# out of the loop by default. the user may overrule this action by skipping this packet.
multipart = 0
if cb.cb_set[NL_CB_FINISH]:
err = nl_cb_call(cb, NL_CB_FINISH, msg) # NL_CB_CALL(cb, NL_CB_FINISH, msg)
if err == NL_OK:
pass
elif err == NL_SKIP:
hdr = nlmsg_next(hdr, n)
continue
elif err == NL_STOP:
return -NLE_DUMP_INTR if interrupted else nrecv
else:
return -NLE_DUMP_INTR if interrupted else (err or nrecv)
elif hdr.nlmsg_type == NLMSG_NOOP:
# Message to be ignored, the default action is to skip this message if no callback is specified. The
# user may overrule this action by returning NL_PROCEED.
if cb.cb_set[NL_CB_SKIPPED]:
err = nl_cb_call(cb, NL_CB_SKIPPED, msg) # NL_CB_CALL(cb, NL_CB_SKIPPED, msg)
if err == NL_OK:
pass
elif err == NL_SKIP:
hdr = nlmsg_next(hdr, n)
continue
elif err == NL_STOP:
return -NLE_DUMP_INTR if interrupted else nrecv
else:
return -NLE_DUMP_INTR if interrupted else (err or nrecv)
else:
hdr = nlmsg_next(hdr, n)
continue
elif hdr.nlmsg_type == NLMSG_OVERRUN:
# Data got lost, report back to user. The default action is to quit parsing. The user may overrule this
# action by retuning NL_SKIP or NL_PROCEED (dangerous).
if cb.cb_set[NL_CB_OVERRUN]:
err = nl_cb_call(cb, NL_CB_OVERRUN, msg) # NL_CB_CALL(cb, NL_CB_OVERRUN, msg)
if err == NL_OK:
pass
elif err == NL_SKIP:
hdr = nlmsg_next(hdr, n)
continue
elif err == NL_STOP:
return -NLE_DUMP_INTR if interrupted else nrecv
else:
return -NLE_DUMP_INTR if interrupted else (err or nrecv)
else:
return -NLE_DUMP_INTR if interrupted else -NLE_MSG_OVERFLOW
elif hdr.nlmsg_type == NLMSG_ERROR:
# Message carries a nlmsgerr.
e = nlmsgerr(nlmsg_data(hdr))
if hdr.nlmsg_len < nlmsg_size(e.SIZEOF):
# Truncated error message, the default action is to stop parsing. The user may overrule this action
# by returning NL_SKIP or NL_PROCEED (dangerous).
if cb.cb_set[NL_CB_INVALID]:
err = nl_cb_call(cb, NL_CB_INVALID, msg) # NL_CB_CALL(cb, NL_CB_INVALID, msg)
if err == NL_OK:
pass
elif err == NL_SKIP:
hdr = nlmsg_next(hdr, n)
continue
elif err == NL_STOP:
return -NLE_DUMP_INTR if interrupted else nrecv
else:
return -NLE_DUMP_INTR if interrupted else (err or nrecv)
else:
return -NLE_DUMP_INTR if interrupted else -NLE_MSG_TRUNC
elif e.error:
# Error message reported back from kernel.
if cb.cb_err:
err = cb.cb_err(nla, e, cb.cb_err_arg)
if err < 0:
return -NLE_DUMP_INTR if interrupted else err
elif err == NL_SKIP:
hdr = nlmsg_next(hdr, n)
continue
elif err == NL_STOP:
return -NLE_DUMP_INTR if interrupted else -nl_syserr2nlerr(e.error)
else:
return -NLE_DUMP_INTR if interrupted else -nl_syserr2nlerr(e.error)
elif cb.cb_set[NL_CB_ACK]:
err = nl_cb_call(cb, NL_CB_ACK, msg) # NL_CB_CALL(cb, NL_CB_ACK, msg)
if err == NL_OK:
pass
elif err == NL_SKIP:
hdr = nlmsg_next(hdr, n)
continue
elif err == NL_STOP:
return -NLE_DUMP_INTR if interrupted else nrecv
else:
return -NLE_DUMP_INTR if interrupted else (err or nrecv)
else:
# Valid message (not checking for MULTIPART bit to get along with broken kernels. NL_SKIP has no effect
# on this.
if cb.cb_set[NL_CB_VALID]:
err = nl_cb_call(cb, NL_CB_VALID, msg) # NL_CB_CALL(cb, NL_CB_VALID, msg)
if err == NL_OK:
pass
elif err == NL_SKIP:
hdr = nlmsg_next(hdr, n)
continue
elif err == NL_STOP:
return -NLE_DUMP_INTR if interrupted else nrecv
else:
return -NLE_DUMP_INTR if interrupted else (err or nrecv)
hdr = nlmsg_next(hdr, n)
del buf[:]
creds = None
if multipart:
# Multipart message not yet complete, continue reading.
continue
err = 0
if interrupted:
return -NLE_DUMP_INTR
if not err:
err = nrecv
return err |
def get_params_and_defaults(param_list, db):
"""
Deduce [parameter, default] pairs from simulations available in the db.
Args:
param_list (list): List of parameters to query for.
db (DatabaseManager): Database where to query for defaults.
"""
return [[p, d] for p, d in db.get_all_values_of_all_params().items()] | Deduce [parameter, default] pairs from simulations available in the db.
Args:
param_list (list): List of parameters to query for.
        db (DatabaseManager): Database where to query for defaults. | Below is the instruction that describes the task:
### Input:
Deduce [parameter, default] pairs from simulations available in the db.
Args:
param_list (list): List of parameters to query for.
db (DatabaseManager): Database where to query for defaults.
### Response:
def get_params_and_defaults(param_list, db):
    """
    Build [parameter, default] pairs from the simulations available in the db.

    Args:
        param_list (list): List of parameters to query for.
            NOTE(review): currently unused -- every parameter known to the
            db is returned regardless; confirm whether filtering by
            param_list was intended.
        db (DatabaseManager): Database where to query for defaults.
    """
    pairs = []
    for param, default in db.get_all_values_of_all_params().items():
        pairs.append([param, default])
    return pairs
def create_auth_manifest(**kwargs):
"""
Creates a basic authentication manifest for logging in, logging out and
registering new accounts.
"""
class AuthProgram(Program):
pre_input_middleware = [AuthenticationMiddleware]
def register(username, password, password2):
"""
Decorated version of basic_register with a callback added.
"""
result = basic_register(username, password, password2)
callback = kwargs.get('post_register_callback', None)
if callback:
user = User.objects.get(username=username)
callback(user)
return result
return Manifest({
'login': [
AuthProgram(
"""
Prints out the HTML form for logging in.
""",
name="Login (form)",
input_middleware=[NotAuthenticatedOrRedirect('/')],
view=BasicView(
html=jinja_template('login.html'),
),
),
AuthProgram(
"""
Matches up the username/password against the database, and adds the auth cookies.
""",
name="Login (post)",
input_middleware=[NotAuthenticatedOrDie],
controllers=['http-post', 'cmd'],
model=[create_session, {'username': 'mock_user', 'session_key': 'XXXXXXXXXXXXXXX'}],
view=BasicView(
persist=lambda m: {'giotto_session': m['session_key']},
html=lambda m: Redirection('/'),
),
),
],
'logout': AuthProgram(
"""
Send the user here to log them out. Removes their cookies and deletes the auth session.
""",
name="Logout",
view=BasicView(
html=Redirection('/'),
),
output_middleware=[LogoutMiddleware],
),
'register': [
AuthProgram(
"""
This program returns the HTML page with the form for registering a new account.
HTTP-get only.
""",
name="Register (form)",
input_middleware=[NotAuthenticatedOrRedirect('/')],
view=BasicView(
html=jinja_template('register.html'),
),
),
AuthProgram(
"""
When you POST the register form, this program handles creating the new user, then redirecting you to '/'
""",
name="Register (post)",
controllers=['http-post'],
model=[register],
view=BasicView(
persist=lambda m: {'giotto_session': m['session_key']},
html=lambda m: Redirection('/'),
),
),
],
}) | Creates a basic authentication manifest for logging in, logging out and
registering new accounts. | Below is the instruction that describes the task:
### Input:
Creates a basic authentication manifest for logging in, logging out and
registering new accounts.
### Response:
def create_auth_manifest(**kwargs):
    """
    Creates a basic authentication manifest for logging in, logging out and
    registering new accounts.

    Keyword Args:
        post_register_callback (callable, optional): If given, called with
            the freshly created ``User`` instance after a successful
            registration.

    Returns:
        Manifest: Maps the names 'login', 'logout' and 'register' to their
        auth programs.
    """
    # Shared base: every auth program resolves the current user via the
    # authentication middleware before its input handling runs.
    class AuthProgram(Program):
        pre_input_middleware = [AuthenticationMiddleware]
    # Wraps basic_register so a caller-supplied hook can react to the new
    # account (the 'register' POST program below uses this as its model).
    def register(username, password, password2):
        """
        Decorated version of basic_register with a callback added.
        """
        result = basic_register(username, password, password2)
        callback = kwargs.get('post_register_callback', None)
        if callback:
            # NOTE(review): assumes basic_register persisted the user;
            # the hook receives the re-fetched User instance.
            user = User.objects.get(username=username)
            callback(user)
        return result
    return Manifest({
        # Two-entry list: GET renders the form, POST performs the login.
        'login': [
            AuthProgram(
                """
                Prints out the HTML form for logging in.
                """,
                name="Login (form)",
                input_middleware=[NotAuthenticatedOrRedirect('/')],
                view=BasicView(
                    html=jinja_template('login.html'),
                ),
            ),
            AuthProgram(
                """
                Matches up the username/password against the database, and adds the auth cookies.
                """,
                name="Login (post)",
                input_middleware=[NotAuthenticatedOrDie],
                controllers=['http-post', 'cmd'],
                # Second element is the mock model used in place of
                # create_session (e.g. for the cmd controller / testing).
                model=[create_session, {'username': 'mock_user', 'session_key': 'XXXXXXXXXXXXXXX'}],
                view=BasicView(
                    # Persist the session key as the auth cookie, then
                    # bounce the now-logged-in user to the root page.
                    persist=lambda m: {'giotto_session': m['session_key']},
                    html=lambda m: Redirection('/'),
                ),
            ),
        ],
        'logout': AuthProgram(
            """
            Send the user here to log them out. Removes their cookies and deletes the auth session.
            """,
            name="Logout",
            view=BasicView(
                html=Redirection('/'),
            ),
            # Cookie/session teardown happens on the way out.
            output_middleware=[LogoutMiddleware],
        ),
        # Same GET-form / POST-action split as 'login'.
        'register': [
            AuthProgram(
                """
                This program returns the HTML page with the form for registering a new account.
                HTTP-get only.
                """,
                name="Register (form)",
                input_middleware=[NotAuthenticatedOrRedirect('/')],
                view=BasicView(
                    html=jinja_template('register.html'),
                ),
            ),
            AuthProgram(
                """
                When you POST the register form, this program handles creating the new user, then redirecting you to '/'
                """,
                name="Register (post)",
                controllers=['http-post'],
                # Uses the callback-aware register wrapper defined above.
                model=[register],
                view=BasicView(
                    persist=lambda m: {'giotto_session': m['session_key']},
                    html=lambda m: Redirection('/'),
                ),
            ),
        ],
    })
def get_out_seg_vlan(cls, tenant_id):
"""Retrieves the OUT Seg, VLAN, mob domain. """
if tenant_id not in cls.serv_obj_dict:
LOG.error("Fabric not prepared for tenant %s", tenant_id)
return None, None
tenant_obj = cls.serv_obj_dict.get(tenant_id)
    return tenant_obj.get_out_seg_vlan() | Retrieves the OUT Seg, VLAN, mob domain. | Below is the instruction that describes the task:
### Input:
Retrieves the OUT Seg, VLAN, mob domain.
### Response:
def get_out_seg_vlan(cls, tenant_id):
    """Retrieve the OUT Seg, VLAN, mob domain for a tenant's service object."""
    if tenant_id in cls.serv_obj_dict:
        return cls.serv_obj_dict.get(tenant_id).get_out_seg_vlan()
    LOG.error("Fabric not prepared for tenant %s", tenant_id)
    # NOTE(review): the docstring names three values, yet this error path
    # returns only two -- verify against tenant_obj.get_out_seg_vlan()
    # and its callers.
    return None, None
def __extract_modules(self, loader, name, is_pkg):
""" if module found load module and save all attributes in the module found """
mod = loader.find_module(name).load_module(name)
""" find the attribute method on each module """
if hasattr(mod, '__method__'):
""" register to the blueprint if method attribute found """
module_router = ModuleRouter(mod,
ignore_names=self.__serialize_module_paths()
).register_route(app=self.application, name=name)
self.__routers.extend(module_router.routers)
self.__modules.append(mod)
else:
""" prompt not found notification """
# print('{} has no module attribute method'.format(mod))
            pass | if module found load module and save all attributes in the module found | Below is the instruction that describes the task:
### Input:
if module found load module and save all attributes in the module found
### Response:
def __extract_modules(self, loader, name, is_pkg):
    """
    Load the module found by *loader* and register its routes if routable.

    A module is considered routable when it defines a ``__method__``
    attribute; its routes are then registered on the application and the
    router/module are recorded on ``self``.  Non-routable modules are
    silently ignored.

    Args:
        loader: Importer whose ``find_module``/``load_module`` pair yields
            the module object.
        name (str): Dotted module name to load and register under.
        is_pkg (bool): Package flag supplied by the walker (unused here).
    """
    # NOTE(review): loader.find_module/load_module are deprecated
    # importlib APIs (removed in Python 3.12) -- consider migrating to
    # loader.exec_module/importlib.util.
    mod = loader.find_module(name).load_module(name)
    # Only modules declaring a ``__method__`` attribute are routable;
    # everything else is skipped without notification.
    if hasattr(mod, '__method__'):
        # Register the module's routes on the application, then keep
        # references so they can be inspected later.
        module_router = ModuleRouter(
            mod, ignore_names=self.__serialize_module_paths()
        ).register_route(app=self.application, name=name)
        self.__routers.extend(module_router.routers)
        self.__modules.append(mod)
def remove_optional(annotation: type):
"""
Optional[X] annotations are actually represented as Union[X, NoneType].
For our purposes, the "Optional" part is not interesting, so here we
throw it away.
"""
origin = getattr(annotation, '__origin__', None)
args = getattr(annotation, '__args__', ())
if origin == Union and len(args) == 2 and args[1] == type(None):
return args[0]
else:
return annotation | Optional[X] annotations are actually represented as Union[X, NoneType].
For our purposes, the "Optional" part is not interesting, so here we
    throw it away. | Below is the instruction that describes the task:
### Input:
Optional[X] annotations are actually represented as Union[X, NoneType].
For our purposes, the "Optional" part is not interesting, so here we
throw it away.
### Response:
def remove_optional(annotation: type):
    """
    Strip the ``Optional`` wrapper from a type annotation.

    ``Optional[X]`` is really ``Union[X, NoneType]``; the "Optional" part
    carries no information we need, so return the wrapped ``X``.  Any
    annotation that is not of that exact shape comes back unchanged.
    """
    if getattr(annotation, '__origin__', None) != Union:
        return annotation
    args = getattr(annotation, '__args__', ())
    if len(args) == 2 and args[1] == type(None):
        return args[0]
    return annotation
def hash(self):
"""Generate a hash value."""
h = hash_pandas_object(self, index=True)
        return hashlib.md5(h.values.tobytes()).hexdigest() | Generate a hash value. | Below is the instruction that describes the task:
### Input:
Generate a hash value.
### Response:
def hash(self):
    """Generate a hash value: the MD5 hex digest of the per-row pandas hashes (index included)."""
    digest = hashlib.md5()
    digest.update(hash_pandas_object(self, index=True).values.tobytes())
    return digest.hexdigest()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.