code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def f_df_nc(self,x):
'''
Wrapper of the derivative of *f*: takes an input x with size of the not
fixed dimensions expands it and evaluates the gradient of the entire function.
'''
x = np.atleast_2d(x)
xx = self.context_manager._expand_vector(x)
f_nocontext_xx , df_nocontext_xx = self.f_df(xx)
df_nocontext_xx = df_nocontext_xx[:,np.array(self.context_manager.noncontext_index)]
return f_nocontext_xx, df_nocontext_xx | Wrapper of the derivative of *f*: takes an input x with size of the not
fixed dimensions expands it and evaluates the gradient of the entire function. | Below is the instruction that describes the task:
### Input:
Wrapper of the derivative of *f*: takes an input x with size of the not
fixed dimensions expands it and evaluates the gradient of the entire function.
### Response:
def f_df_nc(self, x):
    """Evaluate *f* and its gradient over the non-fixed dimensions.

    The input ``x`` covers only the non-context (not fixed) dimensions;
    it is expanded to the full input space before ``self.f_df`` is
    evaluated, and the returned gradient is restricted back to the
    non-context columns.
    """
    expanded = self.context_manager._expand_vector(np.atleast_2d(x))
    f_full, df_full = self.f_df(expanded)
    # Keep only the gradient columns for the non-fixed dimensions.
    keep = np.array(self.context_manager.noncontext_index)
    return f_full, df_full[:, keep]
def transmit(self, payload, **kwargs):
"""
Transmit content metadata items to the integrated channel.
"""
items_to_create, items_to_update, items_to_delete, transmission_map = self._partition_items(payload)
self._prepare_items_for_delete(items_to_delete)
prepared_items = {}
prepared_items.update(items_to_create)
prepared_items.update(items_to_update)
prepared_items.update(items_to_delete)
skip_metadata_transmission = False
for chunk in chunks(prepared_items, self.enterprise_configuration.transmission_chunk_size):
chunked_items = list(chunk.values())
if skip_metadata_transmission:
# Remove the failed items from the create/update/delete dictionaries,
# so ContentMetadataItemTransmission objects are not synchronized for
# these items below.
self._remove_failed_items(chunked_items, items_to_create, items_to_update, items_to_delete)
else:
try:
self.client.update_content_metadata(self._serialize_items(chunked_items))
except ClientError as exc:
LOGGER.error(
'Failed to update [%s] content metadata items for integrated channel [%s] [%s]',
len(chunked_items),
self.enterprise_configuration.enterprise_customer.name,
self.enterprise_configuration.channel_code,
)
LOGGER.error(exc)
# Remove the failed items from the create/update/delete dictionaries,
# so ContentMetadataItemTransmission objects are not synchronized for
# these items below.
self._remove_failed_items(chunked_items, items_to_create, items_to_update, items_to_delete)
# SAP servers throttle incoming traffic, If a request fails than the subsequent would fail too,
# So, no need to keep trying and failing. We should stop here and retry later.
skip_metadata_transmission = True
self._create_transmissions(items_to_create)
self._update_transmissions(items_to_update, transmission_map)
self._delete_transmissions(items_to_delete.keys()) | Transmit content metadata items to the integrated channel. | Below is the the instruction that describes the task:
### Input:
Transmit content metadata items to the integrated channel.
### Response:
def transmit(self, payload, **kwargs):
"""
Transmit content metadata items to the integrated channel.

The payload is partitioned into create/update/delete buckets, all
buckets are pushed to the remote client in chunks, and afterwards the
local ContentMetadataItemTransmission records are synchronized with
whatever was successfully transmitted. Failed chunks (and every chunk
after the first failure) are removed from the buckets so no records
are written for them.
"""
items_to_create, items_to_update, items_to_delete, transmission_map = self._partition_items(payload)
self._prepare_items_for_delete(items_to_delete)
# Merge every bucket so a single chunked pass covers all operations.
prepared_items = {}
prepared_items.update(items_to_create)
prepared_items.update(items_to_update)
prepared_items.update(items_to_delete)
skip_metadata_transmission = False
for chunk in chunks(prepared_items, self.enterprise_configuration.transmission_chunk_size):
chunked_items = list(chunk.values())
if skip_metadata_transmission:
# Remove the failed items from the create/update/delete dictionaries,
# so ContentMetadataItemTransmission objects are not synchronized for
# these items below.
self._remove_failed_items(chunked_items, items_to_create, items_to_update, items_to_delete)
else:
try:
self.client.update_content_metadata(self._serialize_items(chunked_items))
except ClientError as exc:
LOGGER.error(
'Failed to update [%s] content metadata items for integrated channel [%s] [%s]',
len(chunked_items),
self.enterprise_configuration.enterprise_customer.name,
self.enterprise_configuration.channel_code,
)
LOGGER.error(exc)
# Remove the failed items from the create/update/delete dictionaries,
# so ContentMetadataItemTransmission objects are not synchronized for
# these items below.
self._remove_failed_items(chunked_items, items_to_create, items_to_update, items_to_delete)
# SAP servers throttle incoming traffic, If a request fails than the subsequent would fail too,
# So, no need to keep trying and failing. We should stop here and retry later.
skip_metadata_transmission = True
# Synchronize local transmission records with what was actually sent.
self._create_transmissions(items_to_create)
self._update_transmissions(items_to_update, transmission_map)
self._delete_transmissions(items_to_delete.keys()) |
def rebuild_auth(self, prepared_request, response):
"""
When being redirected we should always strip Authorization
header, since nonce may not be reused as per OAuth spec.
"""
if "Authorization" in prepared_request.headers:
# If we get redirected to a new host, we should strip out
# any authentication headers.
prepared_request.headers.pop("Authorization", True)
prepared_request.prepare_auth(self.auth)
return | When being redirected we should always strip Authorization
header, since nonce may not be reused as per OAuth spec. | Below is the instruction that describes the task:
### Input:
When being redirected we should always strip Authorization
header, since nonce may not be reused as per OAuth spec.
### Response:
def rebuild_auth(self, prepared_request, response):
    """Strip the Authorization header when following a redirect.

    Per the OAuth spec a nonce may not be reused, so instead of carrying
    the old header to the new location the request is re-signed with
    ``self.auth``.
    """
    headers = prepared_request.headers
    if "Authorization" in headers:
        # Drop the stale credentials and re-sign for the redirect target.
        headers.pop("Authorization", True)
        prepared_request.prepare_auth(self.auth)
def _delete_forever_values(self, forever_key):
"""
Delete all of the keys that have been stored forever.
:type forever_key: str
"""
forever = self._store.connection().lrange(forever_key, 0, -1)
if len(forever) > 0:
self._store.connection().delete(*forever) | Delete all of the keys that have been stored forever.
:type forever_key: str | Below is the the instruction that describes the task:
### Input:
Delete all of the keys that have been stored forever.
:type forever_key: str
### Response:
def _delete_forever_values(self, forever_key):
"""
Delete all of the keys that have been stored forever.
:type forever_key: str
"""
forever = self._store.connection().lrange(forever_key, 0, -1)
if len(forever) > 0:
self._store.connection().delete(*forever) |
def calculate_average_diameter(self, **kwargs):
"""
Return the average diamension of a molecule.
Returns
-------
:class:`float`
The average dimension of the molecule.
"""
self.average_diameter = find_average_diameter(
self.elements, self.coordinates, **kwargs)
return self.average_diameter | Return the average diamension of a molecule.
Returns
-------
:class:`float`
The average dimension of the molecule. | Below is the the instruction that describes the task:
### Input:
Return the average diamension of a molecule.
Returns
-------
:class:`float`
The average dimension of the molecule.
### Response:
def calculate_average_diameter(self, **kwargs):
    """Compute, cache and return the average dimension of the molecule.

    Returns
    -------
    :class:`float`
        The average dimension of the molecule.
    """
    result = find_average_diameter(
        self.elements, self.coordinates, **kwargs)
    # Cache the value on the instance before returning it.
    self.average_diameter = result
    return result
def create_message(payload, key=None):
"""
Construct a :class:`Message`
:param payload: The payload to send to Kafka.
:type payload: :class:`bytes` or ``None``
:param key: A key used to route the message when partitioning and to
determine message identity on a compacted topic.
:type key: :class:`bytes` or ``None``
"""
assert payload is None or isinstance(payload, bytes), 'payload={!r} should be bytes or None'.format(payload)
assert key is None or isinstance(key, bytes), 'key={!r} should be bytes or None'.format(key)
return Message(0, 0, key, payload) | Construct a :class:`Message`
:param payload: The payload to send to Kafka.
:type payload: :class:`bytes` or ``None``
:param key: A key used to route the message when partitioning and to
determine message identity on a compacted topic.
:type key: :class:`bytes` or ``None`` | Below is the the instruction that describes the task:
### Input:
Construct a :class:`Message`
:param payload: The payload to send to Kafka.
:type payload: :class:`bytes` or ``None``
:param key: A key used to route the message when partitioning and to
determine message identity on a compacted topic.
:type key: :class:`bytes` or ``None``
### Response:
def create_message(payload, key=None):
    """Construct a :class:`Message`.

    :param payload: The payload to send to Kafka.
    :type payload: :class:`bytes` or ``None``
    :param key: A key used to route the message when partitioning and to
        determine message identity on a compacted topic.
    :type key: :class:`bytes` or ``None``
    """
    # Both fields must be raw bytes (or absent) before wrapping.
    for name, value in (("payload", payload), ("key", key)):
        assert value is None or isinstance(value, bytes), \
            '{}={!r} should be bytes or None'.format(name, value)
    return Message(0, 0, key, payload)
def filter_ribo_counts(counts, orf_start=None, orf_stop=None):
"""Filter read counts and return only upstream of orf_start or downstream
of orf_stop.
Keyword arguments:
counts -- Ribo-Seq read counts obtained from get_ribo_counts.
orf_start -- Start position of the longest ORF.
orf_stop -- Stop position of the longest ORF.
"""
filtered_counts = dict.copy(counts)
for position in counts:
if orf_start and orf_stop:
# if only upstream and downstream reads are required, check if
# current position is upstream or downstream of the ORF start/stop
# if not, remove from counts
if (position > orf_start and position < orf_stop):
filtered_counts.pop(position)
elif orf_start:
# check if current position is upstream of ORF start. if not, remove
if position >= orf_start:
filtered_counts.pop(position)
elif orf_stop:
# check if current position is downstream of ORF stop. If not,
# remove
if position <= orf_stop:
filtered_counts.pop(position)
# calculate total reads for this transcript
total_reads = sum(sum(item.values()) for item in filtered_counts.values())
return filtered_counts, total_reads | Filter read counts and return only upstream of orf_start or downstream
of orf_stop.
Keyword arguments:
counts -- Ribo-Seq read counts obtained from get_ribo_counts.
orf_start -- Start position of the longest ORF.
orf_stop -- Stop position of the longest ORF. | Below is the the instruction that describes the task:
### Input:
Filter read counts and return only upstream of orf_start or downstream
of orf_stop.
Keyword arguments:
counts -- Ribo-Seq read counts obtained from get_ribo_counts.
orf_start -- Start position of the longest ORF.
orf_stop -- Stop position of the longest ORF.
### Response:
def filter_ribo_counts(counts, orf_start=None, orf_stop=None):
    """Filter read counts, keeping only positions upstream of orf_start or
    downstream of orf_stop.

    Keyword arguments:
    counts -- Ribo-Seq read counts obtained from get_ribo_counts.
    orf_start -- Start position of the longest ORF.
    orf_stop -- Stop position of the longest ORF.
    """
    filtered_counts = dict.copy(counts)
    for position in counts:
        if orf_start and orf_stop:
            # Both bounds given: drop positions strictly inside the ORF.
            remove = orf_start < position < orf_stop
        elif orf_start:
            # Only the start given: keep strictly upstream positions.
            remove = position >= orf_start
        elif orf_stop:
            # Only the stop given: keep strictly downstream positions.
            remove = position <= orf_stop
        else:
            remove = False
        if remove:
            filtered_counts.pop(position)
    # Total reads remaining for this transcript.
    total_reads = sum(
        sum(frame_counts.values()) for frame_counts in filtered_counts.values()
    )
    return filtered_counts, total_reads
def func_args(func)->bool:
"Return the arguments of `func`."
code = func.__code__
return code.co_varnames[:code.co_argcount] | Return the arguments of `func`. | Below is the the instruction that describes the task:
### Input:
Return the arguments of `func`.
### Response:
def func_args(func) -> tuple:
    """Return the positional-argument names of `func` as a tuple of str.

    Fix: the original annotated the return type as ``bool`` although the
    function returns a slice of ``co_varnames`` (a tuple of strings).
    """
    code = func.__code__
    # co_varnames lists arguments first, then locals; co_argcount bounds
    # the argument portion.
    return code.co_varnames[:code.co_argcount]
async def _load_tuple(self, reader, elem_type, params=None, elem=None):
"""
Loads tuple of elements from the reader. Supports the tuple ref.
Returns loaded tuple.
:param reader:
:param elem_type:
:param params:
:param container:
:return:
"""
c_len = await load_uvarint(reader)
if elem and c_len != len(elem):
raise ValueError("Size mismatch")
if c_len != len(elem_type.f_specs()):
raise ValueError("Tuple size mismatch")
elem_fields = params[0] if params else None
if elem_fields is None:
elem_fields = elem_type.f_specs()
res = elem if elem else []
for i in range(c_len):
try:
self.tracker.push_index(i)
fvalue = await self.load_field(
reader,
elem_fields[i],
params[1:] if params else None,
eref(res, i) if elem else None,
)
self.tracker.pop()
except Exception as e:
raise helpers.ArchiveException(e, tracker=self.tracker) from e
if not elem:
res.append(fvalue)
return res | Loads tuple of elements from the reader. Supports the tuple ref.
Returns loaded tuple.
:param reader:
:param elem_type:
:param params:
:param container:
:return: | Below is the the instruction that describes the task:
### Input:
Loads tuple of elements from the reader. Supports the tuple ref.
Returns loaded tuple.
:param reader:
:param elem_type:
:param params:
:param container:
:return:
### Response:
async def _load_tuple(self, reader, elem_type, params=None, elem=None):
"""
Loads tuple of elements from the reader. Supports the tuple ref.
Returns loaded tuple.

:param reader: async reader the serialized tuple is consumed from
:param elem_type: tuple type descriptor; ``f_specs()`` gives per-field specs
:param params: optional; ``params[0]`` overrides the field specs and the
    remainder is forwarded to ``load_field``
:param elem: optional pre-sized destination; when given, fields are
    written in place via ``eref`` instead of appended
:return: the loaded tuple (``elem`` if it was provided)
"""
# The serialized element count must agree with both the destination
# container (if any) and the type's field specification.
c_len = await load_uvarint(reader)
if elem and c_len != len(elem):
raise ValueError("Size mismatch")
if c_len != len(elem_type.f_specs()):
raise ValueError("Tuple size mismatch")
elem_fields = params[0] if params else None
if elem_fields is None:
elem_fields = elem_type.f_specs()
res = elem if elem else []
for i in range(c_len):
try:
# Track the index being loaded so failures report their position.
self.tracker.push_index(i)
fvalue = await self.load_field(
reader,
elem_fields[i],
params[1:] if params else None,
eref(res, i) if elem else None,
)
self.tracker.pop()
# NOTE(review): tracker.pop() is skipped when load_field raises, so
# the tracker still holds the failing index when ArchiveException is
# built below -- presumably intentional; confirm.
except Exception as e:
raise helpers.ArchiveException(e, tracker=self.tracker) from e
if not elem:
res.append(fvalue)
return res |
def clean_tarinfo(cls, tar_info):
"""
Clean metadata from a TarInfo object to make it more reproducible.
- Set uid & gid to 0
- Set uname and gname to ""
- Normalise permissions to 644 or 755
- Set mtime if not None
"""
ti = copy(tar_info)
ti.uid = 0
ti.gid = 0
ti.uname = ""
ti.gname = ""
ti.mode = normalize_file_permissions(ti.mode)
return ti | Clean metadata from a TarInfo object to make it more reproducible.
- Set uid & gid to 0
- Set uname and gname to ""
- Normalise permissions to 644 or 755
- Set mtime if not None | Below is the the instruction that describes the task:
### Input:
Clean metadata from a TarInfo object to make it more reproducible.
- Set uid & gid to 0
- Set uname and gname to ""
- Normalise permissions to 644 or 755
- Set mtime if not None
### Response:
def clean_tarinfo(cls, tar_info):
    """Return a copy of *tar_info* with metadata normalised so archives
    are reproducible.

    - uid and gid are zeroed
    - uname and gname are blanked
    - permissions are normalised to 644 or 755
    """
    # Work on a copy; the caller's TarInfo is left untouched.
    cleaned = copy(tar_info)
    cleaned.uid = 0
    cleaned.gid = 0
    cleaned.uname = ""
    cleaned.gname = ""
    cleaned.mode = normalize_file_permissions(cleaned.mode)
    return cleaned
def sort_rows(self, rows, section):
"""Sort the rows, as appropriate for the section.
:param rows: List of tuples (all same length, same values in each position)
:param section: Name of section, should match const in Differ class
:return: None; rows are sorted in-place
"""
#print("@@ SORT ROWS:\n{}".format(rows))
# Section-specific determination of sort key
if section.lower() == Differ.CHANGED.lower():
sort_key = Differ.CHANGED_DELTA
else:
sort_key = None
if sort_key is not None:
rows.sort(key=itemgetter(sort_key)) | Sort the rows, as appropriate for the section.
:param rows: List of tuples (all same length, same values in each position)
:param section: Name of section, should match const in Differ class
:return: None; rows are sorted in-place | Below is the the instruction that describes the task:
### Input:
Sort the rows, as appropriate for the section.
:param rows: List of tuples (all same length, same values in each position)
:param section: Name of section, should match const in Differ class
:return: None; rows are sorted in-place
### Response:
def sort_rows(self, rows, section):
    """Sort the rows in-place, as appropriate for the section.

    :param rows: List of tuples (all same length, same values in each position)
    :param section: Name of section, should match const in Differ class
    :return: None; rows are sorted in-place
    """
    # Only the "changed" section has a meaningful sort key (the delta
    # column); every other section is left in its original order.
    if section.lower() != Differ.CHANGED.lower():
        return
    rows.sort(key=itemgetter(Differ.CHANGED_DELTA))
def actually_possibly_award(self, **state):
"""
Does the actual work of possibly awarding a badge.
"""
user = state["user"]
force_timestamp = state.pop("force_timestamp", None)
awarded = self.award(**state)
if awarded is None:
return
if awarded.level is None:
assert len(self.levels) == 1
awarded.level = 1
# awarded levels are 1 indexed, for conveineince
awarded = awarded.level - 1
assert awarded < len(self.levels)
if (
not self.multiple and
BadgeAward.objects.filter(user=user, slug=self.slug, level=awarded)
):
return
extra_kwargs = {}
if force_timestamp is not None:
extra_kwargs["awarded_at"] = force_timestamp
badge = BadgeAward.objects.create(
user=user,
slug=self.slug,
level=awarded,
**extra_kwargs
)
self.send_badge_messages(badge)
badge_awarded.send(sender=self, badge_award=badge) | Does the actual work of possibly awarding a badge. | Below is the the instruction that describes the task:
### Input:
Does the actual work of possibly awarding a badge.
### Response:
def actually_possibly_award(self, **state):
"""
Does the actual work of possibly awarding a badge.

Expects ``state["user"]`` and optionally ``state["force_timestamp"]``;
the remaining state is passed to ``self.award()``. When a badge is
earned, a BadgeAward record is created (unless a duplicate exists and
``self.multiple`` is false), badge messages are sent and the
``badge_awarded`` signal is fired.
"""
user = state["user"]
force_timestamp = state.pop("force_timestamp", None)
awarded = self.award(**state)
if awarded is None:
# award() decided no badge is earned for this state.
return
if awarded.level is None:
assert len(self.levels) == 1
awarded.level = 1
# awarded levels are 1-indexed, for convenience
awarded = awarded.level - 1
assert awarded < len(self.levels)
if (
not self.multiple and
BadgeAward.objects.filter(user=user, slug=self.slug, level=awarded)
):
# Badge already awarded at this level and duplicates are disabled.
return
extra_kwargs = {}
if force_timestamp is not None:
extra_kwargs["awarded_at"] = force_timestamp
badge = BadgeAward.objects.create(
user=user,
slug=self.slug,
level=awarded,
**extra_kwargs
)
self.send_badge_messages(badge)
badge_awarded.send(sender=self, badge_award=badge) |
def walk(self):
"""
Default file path retrieval function.
sprinter() - Generates file path list using pool processing and Queues
crawler() - Generates file path list using os.walk() in sequence
"""
if self.parallelize:
self.filepaths = Sprinter(self.directory, self.filters, self.full_paths, self.pool_size, self._printer).sprinter()
else:
self.filepaths = Crawler(self.directory, self.filters, self.full_paths, self.topdown, self._printer).crawler()
return self._get_filepaths() | Default file path retrieval function.
sprinter() - Generates file path list using pool processing and Queues
crawler() - Generates file path list using os.walk() in sequence | Below is the the instruction that describes the task:
### Input:
Default file path retrieval function.
sprinter() - Generates file path list using pool processing and Queues
crawler() - Generates file path list using os.walk() in sequence
### Response:
def walk(self):
    """Retrieve the list of file paths under ``self.directory``.

    Uses Sprinter (pool processing and Queues) when ``self.parallelize``
    is set, otherwise Crawler (sequential os.walk()).
    """
    if self.parallelize:
        walker = Sprinter(self.directory, self.filters, self.full_paths,
                          self.pool_size, self._printer)
        self.filepaths = walker.sprinter()
    else:
        walker = Crawler(self.directory, self.filters, self.full_paths,
                         self.topdown, self._printer)
        self.filepaths = walker.crawler()
    return self._get_filepaths()
def CheckEmptyBlockBody(filename, clean_lines, linenum, error):
"""Look for empty loop/conditional body with only a single semicolon.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Search for loop keywords at the beginning of the line. Because only
# whitespaces are allowed before the keywords, this will also ignore most
# do-while-loops, since those lines should start with closing brace.
#
# We also check "if" blocks here, since an empty conditional block
# is likely an error.
line = clean_lines.elided[linenum]
matched = Match(r'\s*(for|while|if)\s*\(', line)
if matched:
# Find the end of the conditional expression.
(end_line, end_linenum, end_pos) = CloseExpression(
clean_lines, linenum, line.find('('))
# Output warning if what follows the condition expression is a semicolon.
# No warning for all other cases, including whitespace or newline, since we
# have a separate check for semicolons preceded by whitespace.
if end_pos >= 0 and Match(r';', end_line[end_pos:]):
if matched.group(1) == 'if':
error(filename, end_linenum, 'whitespace/empty_conditional_body', 5,
'Empty conditional bodies should use {}')
else:
error(filename, end_linenum, 'whitespace/empty_loop_body', 5,
'Empty loop bodies should use {} or continue')
# Check for if statements that have completely empty bodies (no comments)
# and no else clauses.
if end_pos >= 0 and matched.group(1) == 'if':
# Find the position of the opening { for the if statement.
# Return without logging an error if it has no brackets.
opening_linenum = end_linenum
opening_line_fragment = end_line[end_pos:]
# Loop until EOF or find anything that's not whitespace or opening {.
while not Search(r'^\s*\{', opening_line_fragment):
if Search(r'^(?!\s*$)', opening_line_fragment):
# Conditional has no brackets.
return
opening_linenum += 1
if opening_linenum == len(clean_lines.elided):
# Couldn't find conditional's opening { or any code before EOF.
return
opening_line_fragment = clean_lines.elided[opening_linenum]
# Set opening_line (opening_line_fragment may not be entire opening line).
opening_line = clean_lines.elided[opening_linenum]
# Find the position of the closing }.
opening_pos = opening_line_fragment.find('{')
if opening_linenum == end_linenum:
# We need to make opening_pos relative to the start of the entire line.
opening_pos += end_pos
(closing_line, closing_linenum, closing_pos) = CloseExpression(
clean_lines, opening_linenum, opening_pos)
if closing_pos < 0:
return
# Now construct the body of the conditional. This consists of the portion
# of the opening line after the {, all lines until the closing line,
# and the portion of the closing line before the }.
if (clean_lines.raw_lines[opening_linenum] !=
CleanseComments(clean_lines.raw_lines[opening_linenum])):
# Opening line ends with a comment, so conditional isn't empty.
return
if closing_linenum > opening_linenum:
# Opening line after the {. Ignore comments here since we checked above.
bodylist = list(opening_line[opening_pos+1:])
# All lines until closing line, excluding closing line, with comments.
bodylist.extend(clean_lines.raw_lines[opening_linenum+1:closing_linenum])
# Closing line before the }. Won't (and can't) have comments.
bodylist.append(clean_lines.elided[closing_linenum][:closing_pos-1])
body = '\n'.join(bodylist)
else:
# If statement has brackets and fits on a single line.
body = opening_line[opening_pos+1:closing_pos-1]
# Check if the body is empty
if not _EMPTY_CONDITIONAL_BODY_PATTERN.search(body):
return
# The body is empty. Now make sure there's not an else clause.
current_linenum = closing_linenum
current_line_fragment = closing_line[closing_pos:]
# Loop until EOF or find anything that's not whitespace or else clause.
while Search(r'^\s*$|^(?=\s*else)', current_line_fragment):
if Search(r'^(?=\s*else)', current_line_fragment):
# Found an else clause, so don't log an error.
return
current_linenum += 1
if current_linenum == len(clean_lines.elided):
break
current_line_fragment = clean_lines.elided[current_linenum]
# The body is empty and there's no else clause until EOF or other code.
error(filename, end_linenum, 'whitespace/empty_if_body', 4,
('If statement had no body and no else clause')) | Look for empty loop/conditional body with only a single semicolon.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found. | Below is the the instruction that describes the task:
### Input:
Look for empty loop/conditional body with only a single semicolon.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
### Response:
def CheckEmptyBlockBody(filename, clean_lines, linenum, error):
"""Look for empty loop/conditional body with only a single semicolon.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Search for loop keywords at the beginning of the line. Because only
# whitespaces are allowed before the keywords, this will also ignore most
# do-while-loops, since those lines should start with closing brace.
#
# We also check "if" blocks here, since an empty conditional block
# is likely an error.
line = clean_lines.elided[linenum]
matched = Match(r'\s*(for|while|if)\s*\(', line)
if matched:
# Find the end of the conditional expression.
(end_line, end_linenum, end_pos) = CloseExpression(
clean_lines, linenum, line.find('('))
# Output warning if what follows the condition expression is a semicolon.
# No warning for all other cases, including whitespace or newline, since we
# have a separate check for semicolons preceded by whitespace.
if end_pos >= 0 and Match(r';', end_line[end_pos:]):
if matched.group(1) == 'if':
error(filename, end_linenum, 'whitespace/empty_conditional_body', 5,
'Empty conditional bodies should use {}')
else:
error(filename, end_linenum, 'whitespace/empty_loop_body', 5,
'Empty loop bodies should use {} or continue')
# Check for if statements that have completely empty bodies (no comments)
# and no else clauses.
if end_pos >= 0 and matched.group(1) == 'if':
# Find the position of the opening { for the if statement.
# Return without logging an error if it has no brackets.
opening_linenum = end_linenum
opening_line_fragment = end_line[end_pos:]
# Loop until EOF or find anything that's not whitespace or opening {.
while not Search(r'^\s*\{', opening_line_fragment):
if Search(r'^(?!\s*$)', opening_line_fragment):
# Conditional has no brackets.
return
opening_linenum += 1
if opening_linenum == len(clean_lines.elided):
# Couldn't find conditional's opening { or any code before EOF.
return
opening_line_fragment = clean_lines.elided[opening_linenum]
# Set opening_line (opening_line_fragment may not be entire opening line).
opening_line = clean_lines.elided[opening_linenum]
# Find the position of the closing }.
opening_pos = opening_line_fragment.find('{')
if opening_linenum == end_linenum:
# We need to make opening_pos relative to the start of the entire line.
opening_pos += end_pos
(closing_line, closing_linenum, closing_pos) = CloseExpression(
clean_lines, opening_linenum, opening_pos)
if closing_pos < 0:
return
# Now construct the body of the conditional. This consists of the portion
# of the opening line after the {, all lines until the closing line,
# and the portion of the closing line before the }.
if (clean_lines.raw_lines[opening_linenum] !=
CleanseComments(clean_lines.raw_lines[opening_linenum])):
# Opening line ends with a comment, so conditional isn't empty.
return
if closing_linenum > opening_linenum:
# Opening line after the {. Ignore comments here since we checked above.
# NOTE(review): list() splits this string fragment into single
# characters, which are then '\n'.join-ed below together with whole
# lines -- looks odd but matches the surrounding logic; confirm
# against upstream before changing.
bodylist = list(opening_line[opening_pos+1:])
# All lines until closing line, excluding closing line, with comments.
bodylist.extend(clean_lines.raw_lines[opening_linenum+1:closing_linenum])
# Closing line before the }. Won't (and can't) have comments.
bodylist.append(clean_lines.elided[closing_linenum][:closing_pos-1])
body = '\n'.join(bodylist)
else:
# If statement has brackets and fits on a single line.
body = opening_line[opening_pos+1:closing_pos-1]
# Check if the body is empty
if not _EMPTY_CONDITIONAL_BODY_PATTERN.search(body):
return
# The body is empty. Now make sure there's not an else clause.
current_linenum = closing_linenum
current_line_fragment = closing_line[closing_pos:]
# Loop until EOF or find anything that's not whitespace or else clause.
while Search(r'^\s*$|^(?=\s*else)', current_line_fragment):
if Search(r'^(?=\s*else)', current_line_fragment):
# Found an else clause, so don't log an error.
return
current_linenum += 1
if current_linenum == len(clean_lines.elided):
break
current_line_fragment = clean_lines.elided[current_linenum]
# The body is empty and there's no else clause until EOF or other code.
error(filename, end_linenum, 'whitespace/empty_if_body', 4,
('If statement had no body and no else clause')) |
def add_swagger_api_route(app, target_route, swagger_json_route):
"""
mount a swagger statics page.
app: the flask app object
target_route: the path to mount the statics page.
swagger_json_route: the path where the swagger json definitions is
expected to be.
"""
static_root = get_swagger_static_root()
swagger_body = generate_swagger_html(
STATIC_ROOT, swagger_json_route
).encode("utf-8")
def swagger_ui():
return Response(swagger_body, content_type="text/html")
blueprint = Blueprint('swagger', __name__,
static_url_path=STATIC_ROOT,
static_folder=static_root)
app.route(target_route)(swagger_ui)
app.register_blueprint(blueprint) | mount a swagger statics page.
app: the flask app object
target_route: the path to mount the statics page.
swagger_json_route: the path where the swagger json definitions is
expected to be. | Below is the the instruction that describes the task:
### Input:
mount a swagger statics page.
app: the flask app object
target_route: the path to mount the statics page.
swagger_json_route: the path where the swagger json definitions is
expected to be.
### Response:
def add_swagger_api_route(app, target_route, swagger_json_route):
    """Mount a swagger statics page on a flask app.

    app: the flask app object
    target_route: the path to mount the statics page.
    swagger_json_route: the path where the swagger json definitions is
    expected to be.
    """
    static_root = get_swagger_static_root()
    page = generate_swagger_html(STATIC_ROOT, swagger_json_route).encode("utf-8")

    def swagger_ui():
        # Serve the pre-rendered swagger UI page.
        return Response(page, content_type="text/html")

    statics = Blueprint('swagger', __name__,
                        static_url_path=STATIC_ROOT,
                        static_folder=static_root)
    app.route(target_route)(swagger_ui)
    app.register_blueprint(statics)
def log(self, message, level=logging.DEBUG):
"""
Logs the message in the root logger with the log level
@param message: Message to be logged
@type message: string
@param level: Log level, default DEBUG
@type level: integer
@return: 1 on success and 0 on error
@rtype: integer
"""
if _ldtp_debug:
print(message)
self.logger.log(level, str(message))
return 1 | Logs the message in the root logger with the log level
@param message: Message to be logged
@type message: string
@param level: Log level, default DEBUG
@type level: integer
@return: 1 on success and 0 on error
@rtype: integer | Below is the the instruction that describes the task:
### Input:
Logs the message in the root logger with the log level
@param message: Message to be logged
@type message: string
@param level: Log level, default DEBUG
@type level: integer
@return: 1 on success and 0 on error
@rtype: integer
### Response:
def log(self, message, level=logging.DEBUG):
"""
Logs the message in the root logger with the log level
@param message: Message to be logged
@type message: string
@param level: Log level, default DEBUG
@type level: integer
@return: 1 on success and 0 on error
@rtype: integer
"""
if _ldtp_debug:
print(message)
self.logger.log(level, str(message))
return 1 |
def set_property_value(self, name, value, dry_run=False):
"""Set or remove property value.
See DAVResource.set_property_value()
"""
raise DAVError(
HTTP_FORBIDDEN, err_condition=PRECONDITION_CODE_ProtectedProperty
) | Set or remove property value.
See DAVResource.set_property_value() | Below is the the instruction that describes the task:
### Input:
Set or remove property value.
See DAVResource.set_property_value()
### Response:
def set_property_value(self, name, value, dry_run=False):
"""Set or remove property value.
See DAVResource.set_property_value()
"""
raise DAVError(
HTTP_FORBIDDEN, err_condition=PRECONDITION_CODE_ProtectedProperty
) |
def _get_connection(self):
"""
Returns our cached LDAPObject, which may or may not be bound.
"""
if self._connection is None:
uri = self.settings.SERVER_URI
if callable(uri):
if func_supports_parameter(uri, "request"):
uri = uri(self._request)
else:
warnings.warn(
"Update AUTH_LDAP_SERVER_URI callable %s.%s to accept "
"a positional `request` argument. Support for callables "
"accepting no arguments will be removed in a future "
"version." % (uri.__module__, uri.__name__),
DeprecationWarning,
)
uri = uri()
self._connection = self.backend.ldap.initialize(uri, bytes_mode=False)
for opt, value in self.settings.CONNECTION_OPTIONS.items():
self._connection.set_option(opt, value)
if self.settings.START_TLS:
logger.debug("Initiating TLS")
self._connection.start_tls_s()
return self._connection | Returns our cached LDAPObject, which may or may not be bound. | Below is the the instruction that describes the task:
### Input:
Returns our cached LDAPObject, which may or may not be bound.
### Response:
def _get_connection(self):
"""
Returns our cached LDAPObject, which may or may not be bound.
"""
if self._connection is None:
uri = self.settings.SERVER_URI
if callable(uri):
if func_supports_parameter(uri, "request"):
uri = uri(self._request)
else:
warnings.warn(
"Update AUTH_LDAP_SERVER_URI callable %s.%s to accept "
"a positional `request` argument. Support for callables "
"accepting no arguments will be removed in a future "
"version." % (uri.__module__, uri.__name__),
DeprecationWarning,
)
uri = uri()
self._connection = self.backend.ldap.initialize(uri, bytes_mode=False)
for opt, value in self.settings.CONNECTION_OPTIONS.items():
self._connection.set_option(opt, value)
if self.settings.START_TLS:
logger.debug("Initiating TLS")
self._connection.start_tls_s()
return self._connection |
def start_tunnel(self, local_port, remote_address, remote_port):
"""
Start ssh forward tunnel
:type local_port: int
:param local_port: local port binding for ssh tunnel
:type remote_address: str
:param remote_address: remote tunnel endpoint bind address
:type remote_port: int
:param remote_port: remote tunnel endpoint bind port
"""
self.tunnel.start(local_port, remote_address, remote_port)
self.tunnel_port = local_port | Start ssh forward tunnel
:type local_port: int
:param local_port: local port binding for ssh tunnel
:type remote_address: str
:param remote_address: remote tunnel endpoint bind address
:type remote_port: int
:param remote_port: remote tunnel endpoint bind port | Below is the the instruction that describes the task:
### Input:
Start ssh forward tunnel
:type local_port: int
:param local_port: local port binding for ssh tunnel
:type remote_address: str
:param remote_address: remote tunnel endpoint bind address
:type remote_port: int
:param remote_port: remote tunnel endpoint bind port
### Response:
def start_tunnel(self, local_port, remote_address, remote_port):
"""
Start ssh forward tunnel
:type local_port: int
:param local_port: local port binding for ssh tunnel
:type remote_address: str
:param remote_address: remote tunnel endpoint bind address
:type remote_port: int
:param remote_port: remote tunnel endpoint bind port
"""
self.tunnel.start(local_port, remote_address, remote_port)
self.tunnel_port = local_port |
def reset(self):
"""Expand to the full scale"""
import ephem, MOPcoord
sun=ephem.Sun()
sun.compute(self.date.get())
self.sun=MOPcoord.coord((sun.ra,sun.dec))
doplot(kbos)
self.plot_pointings() | Expand to the full scale | Below is the the instruction that describes the task:
### Input:
Expand to the full scale
### Response:
def reset(self):
"""Expand to the full scale"""
import ephem, MOPcoord
sun=ephem.Sun()
sun.compute(self.date.get())
self.sun=MOPcoord.coord((sun.ra,sun.dec))
doplot(kbos)
self.plot_pointings() |
def get_schedules(profile='pagerduty', subdomain=None, api_key=None):
'''
List schedules belonging to this account
CLI Example:
salt myminion pagerduty.get_schedules
'''
return _list_items(
'schedules',
'id',
profile=profile,
subdomain=subdomain,
api_key=api_key,
) | List schedules belonging to this account
CLI Example:
salt myminion pagerduty.get_schedules | Below is the the instruction that describes the task:
### Input:
List schedules belonging to this account
CLI Example:
salt myminion pagerduty.get_schedules
### Response:
def get_schedules(profile='pagerduty', subdomain=None, api_key=None):
'''
List schedules belonging to this account
CLI Example:
salt myminion pagerduty.get_schedules
'''
return _list_items(
'schedules',
'id',
profile=profile,
subdomain=subdomain,
api_key=api_key,
) |
def get_shape(self, ds_id, ds_info):
"""Return data array shape for item specified.
"""
var_path = ds_info.get('file_key', '{}'.format(ds_id.name))
if var_path + '/shape' not in self:
# loading a scalar value
shape = 1
else:
shape = self[var_path + "/shape"]
if "index" in ds_info:
shape = shape[1:]
if "pressure_index" in ds_info:
shape = shape[:-1]
return shape | Return data array shape for item specified. | Below is the the instruction that describes the task:
### Input:
Return data array shape for item specified.
### Response:
def get_shape(self, ds_id, ds_info):
"""Return data array shape for item specified.
"""
var_path = ds_info.get('file_key', '{}'.format(ds_id.name))
if var_path + '/shape' not in self:
# loading a scalar value
shape = 1
else:
shape = self[var_path + "/shape"]
if "index" in ds_info:
shape = shape[1:]
if "pressure_index" in ds_info:
shape = shape[:-1]
return shape |
def autoconvert(s):
"""Convert input to a numerical type if possible.
1. A non-string object is returned as it is
2. Try conversion to int, float, str.
"""
if type(s) is not str:
return s
for converter in int, float, str: # try them in increasing order of lenience
try:
s = [converter(i) for i in s.split()]
if len(s) == 1:
return s[0]
else:
return numpy.array(s)
except (ValueError, AttributeError):
pass
raise ValueError("Failed to autoconvert {0!r}".format(s)) | Convert input to a numerical type if possible.
1. A non-string object is returned as it is
2. Try conversion to int, float, str. | Below is the the instruction that describes the task:
### Input:
Convert input to a numerical type if possible.
1. A non-string object is returned as it is
2. Try conversion to int, float, str.
### Response:
def autoconvert(s):
"""Convert input to a numerical type if possible.
1. A non-string object is returned as it is
2. Try conversion to int, float, str.
"""
if type(s) is not str:
return s
for converter in int, float, str: # try them in increasing order of lenience
try:
s = [converter(i) for i in s.split()]
if len(s) == 1:
return s[0]
else:
return numpy.array(s)
except (ValueError, AttributeError):
pass
raise ValueError("Failed to autoconvert {0!r}".format(s)) |
def delta(self):
"""
Returns a delta based on this widget's information.
:return <datetime.timedelta>
"""
number = self._numberSpinner.value()
unit = self._unitCombo.currentText()
direction = self._directionCombo.currentText()
# use past tense
if direction == 'ago':
number = -number
if unit == 'year(s)':
return datetime.timedelta(number * 365)
elif unit == 'month(s)':
return datetime.timedelta(number * 30)
elif unit == 'week(s)':
return datetime.timedelta(number * 7)
elif unit == 'day(s)':
return datetime.timedelta(number)
elif unit == 'hour(s)':
return datetime.timedelta(0, number * 3600)
elif unit == 'minute(s)':
return datetime.timedelta(0, number * 60)
else:
return datetime.timedelta(0, number) | Returns a delta based on this widget's information.
:return <datetime.timedelta> | Below is the the instruction that describes the task:
### Input:
Returns a delta based on this widget's information.
:return <datetime.timedelta>
### Response:
def delta(self):
"""
Returns a delta based on this widget's information.
:return <datetime.timedelta>
"""
number = self._numberSpinner.value()
unit = self._unitCombo.currentText()
direction = self._directionCombo.currentText()
# use past tense
if direction == 'ago':
number = -number
if unit == 'year(s)':
return datetime.timedelta(number * 365)
elif unit == 'month(s)':
return datetime.timedelta(number * 30)
elif unit == 'week(s)':
return datetime.timedelta(number * 7)
elif unit == 'day(s)':
return datetime.timedelta(number)
elif unit == 'hour(s)':
return datetime.timedelta(0, number * 3600)
elif unit == 'minute(s)':
return datetime.timedelta(0, number * 60)
else:
return datetime.timedelta(0, number) |
def t_text(self, t):
r':\s*<text>'
t.lexer.text_start = t.lexer.lexpos - len('<text>')
t.lexer.begin('text') | r':\s*<text> | Below is the the instruction that describes the task:
### Input:
r':\s*<text>
### Response:
def t_text(self, t):
r':\s*<text>'
t.lexer.text_start = t.lexer.lexpos - len('<text>')
t.lexer.begin('text') |
def create_incidence_matrix(self, weights=None, fmt='coo',
drop_zeros=False):
r"""
Creates a weighted incidence matrix in the desired sparse format
Parameters
----------
weights : array_like, optional
An array containing the throat values to enter into the matrix (In
graph theory these are known as the 'weights'). If omitted, ones
are used to create a standard incidence matrix representing
connectivity only.
fmt : string, optional
The sparse storage format to return. Options are:
**'coo'** : (default) This is the native format of OpenPNMs data
**'lil'** : Enables row-wise slice of the matrix
**'csr'** : Favored by most linear algebra routines
**'dok'** : Enables subscript access of locations
drop_zeros : boolean (default is ``False``)
If ``True``, applies the ``eliminate_zeros`` method of the sparse
array to remove all zero locations.
Returns
-------
An incidence matrix in the specified sparse format
Notes
-----
The incidence matrix is a cousin to the adjacency matrix, and used by
OpenPNM for finding the throats connected to a given pore or set of
pores. Specifically, an incidence matrix has Np rows and Nt columns,
and each row represents a pore, containing non-zero values at the
locations corresponding to the indices of the throats connected to that
pore. The ``weights`` argument indicates what value to place at each
location, with the default being 1's to simply indicate connections.
Another useful option is throat indices, such that the data values
on each row indicate which throats are connected to the pore, though
this is redundant as it is identical to the locations of non-zeros.
Examples
--------
>>> import openpnm as op
>>> pn = op.network.Cubic(shape=[5, 5, 5])
>>> weights = sp.rand(pn.num_throats(), ) < 0.5
>>> im = pn.create_incidence_matrix(weights=weights, fmt='csr')
"""
# Check if provided data is valid
if weights is None:
weights = sp.ones((self.Nt,), dtype=int)
elif sp.shape(weights)[0] != self.Nt:
raise Exception('Received dataset of incorrect length')
conn = self['throat.conns']
row = conn[:, 0]
row = sp.append(row, conn[:, 1])
col = sp.arange(self.Nt)
col = sp.append(col, col)
weights = sp.append(weights, weights)
temp = sprs.coo.coo_matrix((weights, (row, col)), (self.Np, self.Nt))
if drop_zeros:
temp.eliminate_zeros()
# Convert to requested format
if fmt == 'coo':
pass # temp is already in coo format
elif fmt == 'csr':
temp = temp.tocsr()
elif fmt == 'lil':
temp = temp.tolil()
elif fmt == 'dok':
temp = temp.todok()
return temp | r"""
Creates a weighted incidence matrix in the desired sparse format
Parameters
----------
weights : array_like, optional
An array containing the throat values to enter into the matrix (In
graph theory these are known as the 'weights'). If omitted, ones
are used to create a standard incidence matrix representing
connectivity only.
fmt : string, optional
The sparse storage format to return. Options are:
**'coo'** : (default) This is the native format of OpenPNMs data
**'lil'** : Enables row-wise slice of the matrix
**'csr'** : Favored by most linear algebra routines
**'dok'** : Enables subscript access of locations
drop_zeros : boolean (default is ``False``)
If ``True``, applies the ``eliminate_zeros`` method of the sparse
array to remove all zero locations.
Returns
-------
An incidence matrix in the specified sparse format
Notes
-----
The incidence matrix is a cousin to the adjacency matrix, and used by
OpenPNM for finding the throats connected to a given pore or set of
pores. Specifically, an incidence matrix has Np rows and Nt columns,
and each row represents a pore, containing non-zero values at the
locations corresponding to the indices of the throats connected to that
pore. The ``weights`` argument indicates what value to place at each
location, with the default being 1's to simply indicate connections.
Another useful option is throat indices, such that the data values
on each row indicate which throats are connected to the pore, though
this is redundant as it is identical to the locations of non-zeros.
Examples
--------
>>> import openpnm as op
>>> pn = op.network.Cubic(shape=[5, 5, 5])
>>> weights = sp.rand(pn.num_throats(), ) < 0.5
>>> im = pn.create_incidence_matrix(weights=weights, fmt='csr') | Below is the the instruction that describes the task:
### Input:
r"""
Creates a weighted incidence matrix in the desired sparse format
Parameters
----------
weights : array_like, optional
An array containing the throat values to enter into the matrix (In
graph theory these are known as the 'weights'). If omitted, ones
are used to create a standard incidence matrix representing
connectivity only.
fmt : string, optional
The sparse storage format to return. Options are:
**'coo'** : (default) This is the native format of OpenPNMs data
**'lil'** : Enables row-wise slice of the matrix
**'csr'** : Favored by most linear algebra routines
**'dok'** : Enables subscript access of locations
drop_zeros : boolean (default is ``False``)
If ``True``, applies the ``eliminate_zeros`` method of the sparse
array to remove all zero locations.
Returns
-------
An incidence matrix in the specified sparse format
Notes
-----
The incidence matrix is a cousin to the adjacency matrix, and used by
OpenPNM for finding the throats connected to a give pore or set of
pores. Specifically, an incidence matrix has Np rows and Nt columns,
and each row represents a pore, containing non-zero values at the
locations corresponding to the indices of the throats connected to that
pore. The ``weights`` argument indicates what value to place at each
location, with the default being 1's to simply indicate connections.
Another useful option is throat indices, such that the data values
on each row indicate which throats are connected to the pore, though
this is redundant as it is identical to the locations of non-zeros.
Examples
--------
>>> import openpnm as op
>>> pn = op.network.Cubic(shape=[5, 5, 5])
>>> weights = sp.rand(pn.num_throats(), ) < 0.5
>>> im = pn.create_incidence_matrix(weights=weights, fmt='csr')
### Response:
def create_incidence_matrix(self, weights=None, fmt='coo',
drop_zeros=False):
r"""
Creates a weighted incidence matrix in the desired sparse format
Parameters
----------
weights : array_like, optional
An array containing the throat values to enter into the matrix (In
graph theory these are known as the 'weights'). If omitted, ones
are used to create a standard incidence matrix representing
connectivity only.
fmt : string, optional
The sparse storage format to return. Options are:
**'coo'** : (default) This is the native format of OpenPNMs data
**'lil'** : Enables row-wise slice of the matrix
**'csr'** : Favored by most linear algebra routines
**'dok'** : Enables subscript access of locations
drop_zeros : boolean (default is ``False``)
If ``True``, applies the ``eliminate_zeros`` method of the sparse
array to remove all zero locations.
Returns
-------
An incidence matrix in the specified sparse format
Notes
-----
The incidence matrix is a cousin to the adjacency matrix, and used by
OpenPNM for finding the throats connected to a given pore or set of
pores. Specifically, an incidence matrix has Np rows and Nt columns,
and each row represents a pore, containing non-zero values at the
locations corresponding to the indices of the throats connected to that
pore. The ``weights`` argument indicates what value to place at each
location, with the default being 1's to simply indicate connections.
Another useful option is throat indices, such that the data values
on each row indicate which throats are connected to the pore, though
this is redundant as it is identical to the locations of non-zeros.
Examples
--------
>>> import openpnm as op
>>> pn = op.network.Cubic(shape=[5, 5, 5])
>>> weights = sp.rand(pn.num_throats(), ) < 0.5
>>> im = pn.create_incidence_matrix(weights=weights, fmt='csr')
"""
# Check if provided data is valid
if weights is None:
weights = sp.ones((self.Nt,), dtype=int)
elif sp.shape(weights)[0] != self.Nt:
raise Exception('Received dataset of incorrect length')
conn = self['throat.conns']
row = conn[:, 0]
row = sp.append(row, conn[:, 1])
col = sp.arange(self.Nt)
col = sp.append(col, col)
weights = sp.append(weights, weights)
temp = sprs.coo.coo_matrix((weights, (row, col)), (self.Np, self.Nt))
if drop_zeros:
temp.eliminate_zeros()
# Convert to requested format
if fmt == 'coo':
pass # temp is already in coo format
elif fmt == 'csr':
temp = temp.tocsr()
elif fmt == 'lil':
temp = temp.tolil()
elif fmt == 'dok':
temp = temp.todok()
return temp |
def paintEvent(self, event):
"""
Overloads the paint event to paint additional \
hint information if no text is set on the \
editor.
:param event | <QPaintEvent>
"""
super(XLineEdit, self).paintEvent(event)
# paint the hint text if not text is set
if self.text() and not (self.icon() and not self.icon().isNull()):
return
# paint the hint text
with XPainter(self) as painter:
painter.setPen(self.hintColor())
icon = self.icon()
left, top, right, bottom = self.getTextMargins()
w = self.width()
h = self.height() - 2
w -= (right + left)
h -= (bottom + top)
if icon and not icon.isNull():
size = icon.actualSize(self.iconSize())
x = self.cornerRadius() + 2
y = (self.height() - size.height()) / 2.0
painter.drawPixmap(x, y, icon.pixmap(size.width(), size.height()))
w -= size.width() - 2
else:
x = 6 + left
w -= self._buttonWidth
y = 2 + top
# create the elided hint
if not self.text() and self.hint():
rect = self.cursorRect()
metrics = QFontMetrics(self.font())
hint = metrics.elidedText(self.hint(), Qt.ElideRight, w)
align = self.alignment()
if align & Qt.AlignHCenter:
x = 0
else:
x = rect.center().x()
painter.drawText(x, y, w, h, align, hint) | Overloads the paint event to paint additional \
hint information if no text is set on the \
editor.
:param event | <QPaintEvent> | Below is the the instruction that describes the task:
### Input:
Overloads the paint event to paint additional \
hint information if no text is set on the \
editor.
:param event | <QPaintEvent>
### Response:
def paintEvent(self, event):
"""
Overloads the paint event to paint additional \
hint information if no text is set on the \
editor.
:param event | <QPaintEvent>
"""
super(XLineEdit, self).paintEvent(event)
# paint the hint text if not text is set
if self.text() and not (self.icon() and not self.icon().isNull()):
return
# paint the hint text
with XPainter(self) as painter:
painter.setPen(self.hintColor())
icon = self.icon()
left, top, right, bottom = self.getTextMargins()
w = self.width()
h = self.height() - 2
w -= (right + left)
h -= (bottom + top)
if icon and not icon.isNull():
size = icon.actualSize(self.iconSize())
x = self.cornerRadius() + 2
y = (self.height() - size.height()) / 2.0
painter.drawPixmap(x, y, icon.pixmap(size.width(), size.height()))
w -= size.width() - 2
else:
x = 6 + left
w -= self._buttonWidth
y = 2 + top
# create the elided hint
if not self.text() and self.hint():
rect = self.cursorRect()
metrics = QFontMetrics(self.font())
hint = metrics.elidedText(self.hint(), Qt.ElideRight, w)
align = self.alignment()
if align & Qt.AlignHCenter:
x = 0
else:
x = rect.center().x()
painter.drawText(x, y, w, h, align, hint) |
def to_dictionary(
self,
key_selector=None,
value_selector=None):
"""Build a dictionary from the source sequence.
Args:
key_selector: A unary callable to extract a key from each item or None.
If None, the default key selector produces a single dictionary key, which
is the key of this Grouping.
value_selector: A unary callable to extract a value from each item.
If None, the default value selector produces a list, which contains all
elements from this Grouping.
Note: This method uses immediate execution.
Raises:
ValueError: If the Queryable is closed.
TypeError: If key_selector is not callable.
TypeError: If value_selector is not callable.
"""
if key_selector is None:
key_selector = lambda _: self.key
if value_selector is None:
value_selector = lambda _: self.to_list()
return super(Grouping, self).to_dictionary(key_selector, value_selector) | Build a dictionary from the source sequence.
Args:
key_selector: A unary callable to extract a key from each item or None.
If None, the default key selector produces a single dictionary key, which
is the key of this Grouping.
value_selector: A unary callable to extract a value from each item.
If None, the default value selector produces a list, which contains all
elements from this Grouping.
Note: This method uses immediate execution.
Raises:
ValueError: If the Queryable is closed.
TypeError: If key_selector is not callable.
TypeError: If value_selector is not callable. | Below is the the instruction that describes the task:
### Input:
Build a dictionary from the source sequence.
Args:
key_selector: A unary callable to extract a key from each item or None.
If None, the default key selector produces a single dictionary key, which
is the key of this Grouping.
value_selector: A unary callable to extract a value from each item.
If None, the default value selector produces a list, which contains all
elements from this Grouping.
Note: This method uses immediate execution.
Raises:
ValueError: If the Queryable is closed.
TypeError: If key_selector is not callable.
TypeError: If value_selector is not callable.
### Response:
def to_dictionary(
self,
key_selector=None,
value_selector=None):
"""Build a dictionary from the source sequence.
Args:
key_selector: A unary callable to extract a key from each item or None.
If None, the default key selector produces a single dictionary key, which
is the key of this Grouping.
value_selector: A unary callable to extract a value from each item.
If None, the default value selector produces a list, which contains all
elements from this Grouping.
Note: This method uses immediate execution.
Raises:
ValueError: If the Queryable is closed.
TypeError: If key_selector is not callable.
TypeError: If value_selector is not callable.
"""
if key_selector is None:
key_selector = lambda _: self.key
if value_selector is None:
value_selector = lambda _: self.to_list()
return super(Grouping, self).to_dictionary(key_selector, value_selector) |
def _get_full_block(grouped_dicoms):
"""
Generate a full datablock containing all timepoints
"""
# For each slice / mosaic create a data volume block
data_blocks = []
for index in range(0, len(grouped_dicoms)):
logger.info('Creating block %s of %s' % (index + 1, len(grouped_dicoms)))
data_blocks.append(_timepoint_to_block(grouped_dicoms[index]))
# Add the data_blocks together to one 4d block
size_x = numpy.shape(data_blocks[0])[0]
size_y = numpy.shape(data_blocks[0])[1]
size_z = numpy.shape(data_blocks[0])[2]
size_t = len(data_blocks)
full_block = numpy.zeros((size_x, size_y, size_z, size_t), dtype=data_blocks[0].dtype)
for index in range(0, size_t):
if full_block[:, :, :, index].shape != data_blocks[index].shape:
logger.warning('Missing slices (slice count mismatch between timepoint %s and %s)' % (index - 1, index))
logger.warning('---------------------------------------------------------')
logger.warning(full_block[:, :, :, index].shape)
logger.warning(data_blocks[index].shape)
logger.warning('---------------------------------------------------------')
raise ConversionError("MISSING_DICOM_FILES")
full_block[:, :, :, index] = data_blocks[index]
return full_block | Generate a full datablock containing all timepoints | Below is the the instruction that describes the task:
### Input:
Generate a full datablock containing all timepoints
### Response:
def _get_full_block(grouped_dicoms):
"""
Generate a full datablock containing all timepoints
"""
# For each slice / mosaic create a data volume block
data_blocks = []
for index in range(0, len(grouped_dicoms)):
logger.info('Creating block %s of %s' % (index + 1, len(grouped_dicoms)))
data_blocks.append(_timepoint_to_block(grouped_dicoms[index]))
# Add the data_blocks together to one 4d block
size_x = numpy.shape(data_blocks[0])[0]
size_y = numpy.shape(data_blocks[0])[1]
size_z = numpy.shape(data_blocks[0])[2]
size_t = len(data_blocks)
full_block = numpy.zeros((size_x, size_y, size_z, size_t), dtype=data_blocks[0].dtype)
for index in range(0, size_t):
if full_block[:, :, :, index].shape != data_blocks[index].shape:
logger.warning('Missing slices (slice count mismatch between timepoint %s and %s)' % (index - 1, index))
logger.warning('---------------------------------------------------------')
logger.warning(full_block[:, :, :, index].shape)
logger.warning(data_blocks[index].shape)
logger.warning('---------------------------------------------------------')
raise ConversionError("MISSING_DICOM_FILES")
full_block[:, :, :, index] = data_blocks[index]
return full_block |
def reset_secret(self):
"""
Resets the client secret for this client.
"""
result = self._client.post("{}/reset_secret".format(OAuthClient.api_endpoint), model=self)
if not 'id' in result:
raise UnexpectedResponseError('Unexpected response when resetting secret!', json=result)
self._populate(result)
return self.secret | Resets the client secret for this client. | Below is the the instruction that describes the task:
### Input:
Resets the client secret for this client.
### Response:
def reset_secret(self):
"""
Resets the client secret for this client.
"""
result = self._client.post("{}/reset_secret".format(OAuthClient.api_endpoint), model=self)
if not 'id' in result:
raise UnexpectedResponseError('Unexpected response when resetting secret!', json=result)
self._populate(result)
return self.secret |
def _assert_validators(self, validators):
"""Asserts if all validators in the list are satisfied.
It asserts validators in the order they were created.
Args:
validators: Iterable(validators.Validator), validators to be
verified.
Raises:
AttributeError: Raised if validators work with a non-existing flag.
IllegalFlagValueError: Raised if validation fails for at least one
validator.
"""
for validator in sorted(
validators, key=lambda validator: validator.insertion_index):
try:
validator.verify(self)
except _exceptions.ValidationError as e:
message = validator.print_flags_with_values(self)
raise _exceptions.IllegalFlagValueError('%s: %s' % (message, str(e))) | Asserts if all validators in the list are satisfied.
It asserts validators in the order they were created.
Args:
validators: Iterable(validators.Validator), validators to be
verified.
Raises:
AttributeError: Raised if validators work with a non-existing flag.
IllegalFlagValueError: Raised if validation fails for at least one
validator. | Below is the the instruction that describes the task:
### Input:
Asserts if all validators in the list are satisfied.
It asserts validators in the order they were created.
Args:
validators: Iterable(validators.Validator), validators to be
verified.
Raises:
AttributeError: Raised if validators work with a non-existing flag.
IllegalFlagValueError: Raised if validation fails for at least one
validator.
### Response:
def _assert_validators(self, validators):
"""Asserts if all validators in the list are satisfied.
It asserts validators in the order they were created.
Args:
validators: Iterable(validators.Validator), validators to be
verified.
Raises:
AttributeError: Raised if validators work with a non-existing flag.
IllegalFlagValueError: Raised if validation fails for at least one
validator.
"""
for validator in sorted(
validators, key=lambda validator: validator.insertion_index):
try:
validator.verify(self)
except _exceptions.ValidationError as e:
message = validator.print_flags_with_values(self)
raise _exceptions.IllegalFlagValueError('%s: %s' % (message, str(e))) |
def _validate_args(env, args):
"""Raises an ArgumentError if the given arguments are not valid."""
if all([args['cpu'], args['flavor']]):
raise exceptions.ArgumentError(
'[-c | --cpu] not allowed with [-f | --flavor]')
if all([args['memory'], args['flavor']]):
raise exceptions.ArgumentError(
'[-m | --memory] not allowed with [-f | --flavor]')
if all([args['dedicated'], args['flavor']]):
raise exceptions.ArgumentError(
'[-d | --dedicated] not allowed with [-f | --flavor]')
if all([args['host_id'], args['flavor']]):
raise exceptions.ArgumentError(
'[-h | --host-id] not allowed with [-f | --flavor]')
if all([args['userdata'], args['userfile']]):
raise exceptions.ArgumentError(
'[-u | --userdata] not allowed with [-F | --userfile]')
image_args = [args['os'], args['image']]
if all(image_args):
raise exceptions.ArgumentError(
'[-o | --os] not allowed with [--image]')
while not any([args['os'], args['image']]):
args['os'] = env.input("Operating System Code", default="", show_default=False)
if not args['os']:
args['image'] = env.input("Image", default="", show_default=False) | Raises an ArgumentError if the given arguments are not valid. | Below is the the instruction that describes the task:
### Input:
Raises an ArgumentError if the given arguments are not valid.
### Response:
def _validate_args(env, args):
"""Raises an ArgumentError if the given arguments are not valid."""
if all([args['cpu'], args['flavor']]):
raise exceptions.ArgumentError(
'[-c | --cpu] not allowed with [-f | --flavor]')
if all([args['memory'], args['flavor']]):
raise exceptions.ArgumentError(
'[-m | --memory] not allowed with [-f | --flavor]')
if all([args['dedicated'], args['flavor']]):
raise exceptions.ArgumentError(
'[-d | --dedicated] not allowed with [-f | --flavor]')
if all([args['host_id'], args['flavor']]):
raise exceptions.ArgumentError(
'[-h | --host-id] not allowed with [-f | --flavor]')
if all([args['userdata'], args['userfile']]):
raise exceptions.ArgumentError(
'[-u | --userdata] not allowed with [-F | --userfile]')
image_args = [args['os'], args['image']]
if all(image_args):
raise exceptions.ArgumentError(
'[-o | --os] not allowed with [--image]')
while not any([args['os'], args['image']]):
args['os'] = env.input("Operating System Code", default="", show_default=False)
if not args['os']:
args['image'] = env.input("Image", default="", show_default=False) |
def smembers(self, key, *, encoding=_NOTSET):
"""Get all the members in a set."""
return self.execute(b'SMEMBERS', key, encoding=encoding) | Get all the members in a set. | Below is the the instruction that describes the task:
### Input:
Get all the members in a set.
### Response:
def smembers(self, key, *, encoding=_NOTSET):
    """Get all the members in a set.

    Issues the ``SMEMBERS`` command (presumably Redis) for *key*.

    :param key: name of the set to read.
    :param encoding: reply-decoding override; the module-level ``_NOTSET``
        sentinel means "use the connection's default encoding".
    :return: whatever ``self.execute`` yields — presumably a
        future/coroutine resolving to the set members (confirm against
        the connection's ``execute`` implementation).
    """
    return self.execute(b'SMEMBERS', key, encoding=encoding)
def saveToFile(self,imageObjectList):
""" Saves the static mask to a file
it uses the signatures associated with each
mask to contruct the filename for the output mask image.
"""
virtual = imageObjectList[0].inmemory
for key in self.masklist.keys():
#check to see if the file already exists on disk
filename = self.masknames[key]
#create a new fits image with the mask array and a standard header
#open a new header and data unit
newHDU = fits.PrimaryHDU()
newHDU.data = self.masklist[key]
if virtual:
for img in imageObjectList:
img.saveVirtualOutputs({filename:newHDU})
else:
try:
newHDU.writeto(filename, overwrite=True)
log.info("Saving static mask to disk: %s" % filename)
except IOError:
log.error("Problem saving static mask file: %s to "
"disk!\n" % filename)
raise IOError | Saves the static mask to a file
it uses the signatures associated with each
mask to contruct the filename for the output mask image. | Below is the the instruction that describes the task:
### Input:
Saves the static mask to a file
it uses the signatures associated with each
mask to contruct the filename for the output mask image.
### Response:
def saveToFile(self, imageObjectList):
    """Save each static mask to a file (or to in-memory virtual outputs).

    The output filename for every mask is taken from ``self.masknames``,
    keyed by the same signature used in ``self.masklist``.

    :param imageObjectList: list of image objects; the first one's
        ``inmemory`` flag selects virtual (in-memory) vs. on-disk output —
        assumes all images share the same mode.
    :raises IOError: if a mask file cannot be written to disk.
    """
    virtual = imageObjectList[0].inmemory
    for key in self.masklist:
        # Filename derived from the mask's signature.
        filename = self.masknames[key]
        # Build a new FITS image holding the mask array under a default
        # primary header.
        newHDU = fits.PrimaryHDU()
        newHDU.data = self.masklist[key]
        if virtual:
            # Attach the HDU to every image's virtual outputs instead of
            # touching the filesystem.
            for img in imageObjectList:
                img.saveVirtualOutputs({filename: newHDU})
        else:
            try:
                newHDU.writeto(filename, overwrite=True)
                log.info("Saving static mask to disk: %s" % filename)
            except IOError:
                log.error("Problem saving static mask file: %s to "
                          "disk!\n" % filename)
                # BUG FIX: the original ``raise IOError`` replaced the
                # caught exception with a fresh, message-less one; a bare
                # ``raise`` preserves the real error and traceback while
                # still raising IOError for existing callers.
                raise
def add_model(self, model):
""" Adds a PyFlux model to the aggregating algorithm
Parameters
----------
model : pf.[MODEL]
A PyFlux univariate model
Returns
----------
- Void (changes self.model_list)
"""
if model.model_type not in self.supported_models:
raise ValueError('Model type not supported for Aggregate! Apologies')
if not self.model_list:
self.model_list.append(model)
if model.model_type in ['EGARCH', 'EGARCHM', 'EGARCHMReg', 'GARCH', 'LMEGARCH', 'LMSEGARCH', 'SEGARCH', 'SEGARCHM']:
self.data = np.abs(model.data)
else:
self.data = model.data
self.index = model.index
else:
if model.model_type in ['EGARCH', 'EGARCHM', 'EGARCHMReg', 'GARCH', 'LMEGARCH', 'LMSEGARCH', 'SEGARCH', 'SEGARCHM']:
if np.isclose(np.abs(np.abs(model.data[-self.match_window:])-self.data[-self.match_window:]).sum(),0.0) or model.model_type=='GPNARX':
self.model_list.append(model)
else:
raise ValueError('Data entered is deemed different based on %s last values!' % (s))
else:
if np.isclose(np.abs(model.data[-self.match_window:]-self.data[-self.match_window:]).sum(),0.0) or model.model_type=='GPNARX':
self.model_list.append(model)
else:
raise ValueError('Data entered is deemed different based on %s last values!' % (s))
self.model_names = [i.model_name for i in self.model_list] | Adds a PyFlux model to the aggregating algorithm
Parameters
----------
model : pf.[MODEL]
A PyFlux univariate model
Returns
----------
- Void (changes self.model_list) | Below is the the instruction that describes the task:
### Input:
Adds a PyFlux model to the aggregating algorithm
Parameters
----------
model : pf.[MODEL]
A PyFlux univariate model
Returns
----------
- Void (changes self.model_list)
### Response:
def add_model(self, model):
    """ Adds a PyFlux model to the aggregating algorithm

    Parameters
    ----------
    model : pf.[MODEL]
        A PyFlux univariate model

    Returns
    ----------
    - Void (changes self.model_list)
    """
    # Volatility-family models store/compare their data as absolute values,
    # so both the cached reference data and later comparisons use np.abs().
    volatility_models = ('EGARCH', 'EGARCHM', 'EGARCHMReg', 'GARCH',
                         'LMEGARCH', 'LMSEGARCH', 'SEGARCH', 'SEGARCHM')
    if model.model_type not in self.supported_models:
        raise ValueError('Model type not supported for Aggregate! Apologies')

    if not self.model_list:
        # First model defines the reference data/index for later checks.
        self.model_list.append(model)
        if model.model_type in volatility_models:
            self.data = np.abs(model.data)
        else:
            self.data = model.data
        self.index = model.index
    else:
        # Subsequent models must be fit on (approximately) the same data,
        # compared over the trailing self.match_window observations.
        # GPNARX models transform their data, so they bypass the check.
        if model.model_type in volatility_models:
            candidate = np.abs(model.data[-self.match_window:])
        else:
            candidate = model.data[-self.match_window:]
        if np.isclose(np.abs(candidate - self.data[-self.match_window:]).sum(), 0.0) \
                or model.model_type == 'GPNARX':
            self.model_list.append(model)
        else:
            # BUG FIX: the original message used the undefined name ``s``
            # (``% (s)``), which raised NameError instead of the intended
            # ValueError with the window size.
            raise ValueError('Data entered is deemed different based on %s last values!'
                             % (self.match_window,))

    self.model_names = [i.model_name for i in self.model_list]
def shuffle(self, *args):
"""Shuffles all arguments and returns them.
ARG_1,...,ARG_N
%{SHUFFLE:A, B ,...,F} -> 'CDA B FE'
"""
call_args = list(args)
self.random.shuffle(call_args)
return ''.join(call_args) | Shuffles all arguments and returns them.
ARG_1,...,ARG_N
%{SHUFFLE:A, B ,...,F} -> 'CDA B FE' | Below is the the instruction that describes the task:
### Input:
Shuffles all arguments and returns them.
ARG_1,...,ARG_N
%{SHUFFLE:A, B ,...,F} -> 'CDA B FE'
### Response:
def shuffle(self, *args):
    """Return all arguments concatenated in a random order.

    ARG_1,...,ARG_N
    %{SHUFFLE:A, B ,...,F} -> 'CDA B FE'
    """
    items = list(args)
    self.random.shuffle(items)
    return ''.join(items)
def move_to_result(self, lst_idx):
"""Moves element from lst available at lst_idx."""
self.in_result_idx.add(lst_idx)
if lst_idx in self.not_in_result_root_match_idx:
self.not_in_result_root_match_idx.remove(lst_idx) | Moves element from lst available at lst_idx. | Below is the instruction that describes the task:
### Input:
Moves element from lst available at lst_idx.
### Response:
def move_to_result(self, lst_idx):
    """Record that the element of lst at lst_idx now belongs to the result."""
    self.in_result_idx.add(lst_idx)
    # Drop the index from the pending pool, if it is tracked there.
    pending = self.not_in_result_root_match_idx
    if lst_idx in pending:
        pending.remove(lst_idx)
def pretty_print(self, decimal_digits=2):
"""
Returns a string repr of the wavefunction, ignoring all outcomes with approximately zero
amplitude (up to a certain number of decimal digits) and rounding the amplitudes to
decimal_digits.
:param int decimal_digits: The number of digits to truncate to.
:return: A dict with outcomes as keys and complex amplitudes as values.
:rtype: str
"""
outcome_dict = {}
qubit_num = len(self)
pp_string = ""
for index, amplitude in enumerate(self.amplitudes):
outcome = get_bitstring_from_index(index, qubit_num)
amplitude = round(amplitude.real, decimal_digits) + \
round(amplitude.imag, decimal_digits) * 1.j
if amplitude != 0.:
outcome_dict[outcome] = amplitude
pp_string += str(amplitude) + "|{}> + ".format(outcome)
if len(pp_string) >= 3:
pp_string = pp_string[:-3] # remove the dangling + if it is there
return pp_string | Returns a string repr of the wavefunction, ignoring all outcomes with approximately zero
amplitude (up to a certain number of decimal digits) and rounding the amplitudes to
decimal_digits.
:param int decimal_digits: The number of digits to truncate to.
:return: A dict with outcomes as keys and complex amplitudes as values.
:rtype: str | Below is the the instruction that describes the task:
### Input:
Returns a string repr of the wavefunction, ignoring all outcomes with approximately zero
amplitude (up to a certain number of decimal digits) and rounding the amplitudes to
decimal_digits.
:param int decimal_digits: The number of digits to truncate to.
:return: A dict with outcomes as keys and complex amplitudes as values.
:rtype: str
### Response:
def pretty_print(self, decimal_digits=2):
    """
    Returns a string repr of the wavefunction, ignoring all outcomes with approximately zero
    amplitude (up to a certain number of decimal digits) and rounding the amplitudes to
    decimal_digits.
    :param int decimal_digits: The number of digits to truncate to.
    :return: A dict with outcomes as keys and complex amplitudes as values.
    :rtype: str
    """
    # NOTE(review): despite the ":return:" text above, the string is what is
    # returned; the outcome dict is built but never used — confirm whether it
    # was meant to be returned.
    outcome_dict = {}
    num_qubits = len(self)
    terms = []
    for index, amplitude in enumerate(self.amplitudes):
        bitstring = get_bitstring_from_index(index, num_qubits)
        rounded = (round(amplitude.real, decimal_digits)
                   + round(amplitude.imag, decimal_digits) * 1.j)
        if rounded != 0.:
            outcome_dict[bitstring] = rounded
            terms.append(str(rounded) + "|{}>".format(bitstring))
    return " + ".join(terms)
def save_json(obj, filename, **kwargs):
"""
Save an object as a JSON file.
Args:
obj: The object to save. Must be JSON-serializable.
filename: Path to the output file.
**kwargs: Additional arguments to `json.dump`.
"""
with open(filename, 'w', encoding='utf-8') as f:
json.dump(obj, f, **kwargs) | Save an object as a JSON file.
Args:
obj: The object to save. Must be JSON-serializable.
filename: Path to the output file.
**kwargs: Additional arguments to `json.dump`. | Below is the the instruction that describes the task:
### Input:
Save an object as a JSON file.
Args:
obj: The object to save. Must be JSON-serializable.
filename: Path to the output file.
**kwargs: Additional arguments to `json.dump`.
### Response:
def save_json(obj, filename, **kwargs):
    """
    Save an object as a JSON file.
    Args:
        obj: The object to save. Must be JSON-serializable.
        filename: Path to the output file.
        **kwargs: Additional arguments to `json.dump`.
    """
    # Serialize first so a serialization error cannot leave a half-written file.
    serialized = json.dumps(obj, **kwargs)
    with open(filename, 'w', encoding='utf-8') as fp:
        fp.write(serialized)
def transform_sparql_construct(rdf, construct_query):
"""Perform a SPARQL CONSTRUCT query on the RDF data and return a new graph."""
logging.debug("performing SPARQL CONSTRUCT transformation")
if construct_query[0] == '@': # actual query should be read from file
construct_query = file(construct_query[1:]).read()
logging.debug("CONSTRUCT query: %s", construct_query)
newgraph = Graph()
for triple in rdf.query(construct_query):
newgraph.add(triple)
return newgraph | Perform a SPARQL CONSTRUCT query on the RDF data and return a new graph. | Below is the the instruction that describes the task:
### Input:
Perform a SPARQL CONSTRUCT query on the RDF data and return a new graph.
### Response:
def transform_sparql_construct(rdf, construct_query):
    """Perform a SPARQL CONSTRUCT query on the RDF data and return a new graph.

    :param rdf: source graph; anything exposing ``.query()`` (presumably an
        rdflib Graph).
    :param construct_query: the CONSTRUCT query text, or ``@<path>`` to read
        the query from a file.
    :return: a new ``Graph`` containing the constructed triples.
    """
    logging.debug("performing SPARQL CONSTRUCT transformation")
    if construct_query[0] == '@':  # actual query should be read from file
        # BUG FIX: the original used the Python 2 ``file()`` builtin, which
        # does not exist on Python 3 (NameError), and never closed the handle.
        with open(construct_query[1:]) as query_file:
            construct_query = query_file.read()
    logging.debug("CONSTRUCT query: %s", construct_query)
    newgraph = Graph()
    for triple in rdf.query(construct_query):
        newgraph.add(triple)
    return newgraph
def _raw_write(self):
"""
Takes ciphertext from the memory bio and writes it to the
socket.
:return:
A byte string of ciphertext going to the socket. Used
for debugging the handshake only.
"""
data_available = libssl.BIO_ctrl_pending(self._wbio)
if data_available == 0:
return b''
to_read = min(self._buffer_size, data_available)
read = libssl.BIO_read(self._wbio, self._bio_write_buffer, to_read)
to_write = bytes_from_buffer(self._bio_write_buffer, read)
output = to_write
while len(to_write):
raise_disconnect = False
try:
sent = self._socket.send(to_write)
except (socket_.error) as e:
# Handle ECONNRESET and EPIPE
if e.errno == 104 or e.errno == 32:
raise_disconnect = True
else:
raise
if raise_disconnect:
raise_disconnection()
to_write = to_write[sent:]
if len(to_write):
self.select_write()
return output | Takes ciphertext from the memory bio and writes it to the
socket.
:return:
A byte string of ciphertext going to the socket. Used
for debugging the handshake only. | Below is the the instruction that describes the task:
### Input:
Takes ciphertext from the memory bio and writes it to the
socket.
:return:
A byte string of ciphertext going to the socket. Used
for debugging the handshake only.
### Response:
def _raw_write(self):
    """
    Takes ciphertext from the memory bio and writes it to the
    socket.
    :return:
        A byte string of ciphertext going to the socket. Used
        for debugging the handshake only.
    """
    # How many ciphertext bytes OpenSSL has queued in the write BIO.
    data_available = libssl.BIO_ctrl_pending(self._wbio)
    if data_available == 0:
        return b''
    # Never read more than the scratch buffer can hold.
    to_read = min(self._buffer_size, data_available)
    read = libssl.BIO_read(self._wbio, self._bio_write_buffer, to_read)
    to_write = bytes_from_buffer(self._bio_write_buffer, read)
    # Keep a copy of everything sent, for handshake debugging.
    output = to_write
    while len(to_write):
        raise_disconnect = False
        try:
            # send() may accept fewer bytes than requested; loop until done.
            sent = self._socket.send(to_write)
        except (socket_.error) as e:
            # Handle ECONNRESET and EPIPE
            if e.errno == 104 or e.errno == 32:
                raise_disconnect = True
            else:
                raise
        if raise_disconnect:
            # NOTE(review): assumes raise_disconnection() always raises;
            # if it ever returned, ``sent`` below would be unbound — confirm.
            raise_disconnection()
        to_write = to_write[sent:]
        if len(to_write):
            # Block until the socket is writable again before retrying.
            self.select_write()
    return output
def p_expression_lessthan(self, p):
'expression : expression LT expression'
p[0] = LessThan(p[1], p[3], lineno=p.lineno(1))
p.set_lineno(0, p.lineno(1)) | expression : expression LT expression | Below is the instruction that describes the task:
### Input:
expression : expression LT expression
### Response:
def p_expression_lessthan(self, p):
    'expression : expression LT expression'
    # The docstring above is the grammar production consumed by the parser
    # generator (PLY-style yacc) — do not edit it as prose.
    # p[1] is the left operand, p[3] the right; build the AST node carrying
    # the line number of the left expression, and propagate it to p[0].
    p[0] = LessThan(p[1], p[3], lineno=p.lineno(1))
    p.set_lineno(0, p.lineno(1))
def data(self, data: numpy.ndarray) -> None:
"""Set the data.
:param data: A numpy ndarray.
.. versionadded:: 1.0
Scriptable: Yes
"""
self.__data_item.set_data(numpy.copy(data)) | Set the data.
:param data: A numpy ndarray.
.. versionadded:: 1.0
Scriptable: Yes | Below is the the instruction that describes the task:
### Input:
Set the data.
:param data: A numpy ndarray.
.. versionadded:: 1.0
Scriptable: Yes
### Response:
def data(self, data: numpy.ndarray) -> None:
    """Set the data.

    :param data: A numpy ndarray.

    .. versionadded:: 1.0
    Scriptable: Yes
    """
    # Store a defensive copy so later caller-side mutation of the array
    # cannot change the item's data.
    snapshot = numpy.copy(data)
    self.__data_item.set_data(snapshot)
def validate_response(expected_responses):
""" Decorator to validate responses from QTM """
def internal_decorator(function):
@wraps(function)
async def wrapper(*args, **kwargs):
response = await function(*args, **kwargs)
for expected_response in expected_responses:
if response.startswith(expected_response):
return response
raise QRTCommandException(
"Expected %s but got %s" % (expected_responses, response)
)
return wrapper
return internal_decorator | Decorator to validate responses from QTM | Below is the instruction that describes the task:
### Input:
Decorator to validate responses from QTM
### Response:
def validate_response(expected_responses):
    """Decorator factory: ensure a QTM reply starts with an expected prefix.

    Wraps an async command so its reply is checked against
    *expected_responses*; a non-matching reply raises QRTCommandException.
    """
    def internal_decorator(function):
        @wraps(function)
        async def wrapper(*args, **kwargs):
            reply = await function(*args, **kwargs)
            if any(reply.startswith(prefix) for prefix in expected_responses):
                return reply
            raise QRTCommandException(
                "Expected %s but got %s" % (expected_responses, reply)
            )
        return wrapper
    return internal_decorator
def get_host_power_status(self):
"""Request the power state of the server.
:returns: Power State of the server, 'ON' or 'OFF'
:raises: IloError, on an error from iLO.
"""
sushy_system = self._get_sushy_system(PROLIANT_SYSTEM_ID)
return GET_POWER_STATE_MAP.get(sushy_system.power_state) | Request the power state of the server.
:returns: Power State of the server, 'ON' or 'OFF'
:raises: IloError, on an error from iLO. | Below is the the instruction that describes the task:
### Input:
Request the power state of the server.
:returns: Power State of the server, 'ON' or 'OFF'
:raises: IloError, on an error from iLO.
### Response:
def get_host_power_status(self):
    """Request the power state of the server.
    :returns: Power State of the server, 'ON' or 'OFF'
    :raises: IloError, on an error from iLO.
    """
    # Map the sushy/Redfish power-state constant onto the iLO-style value.
    power_state = self._get_sushy_system(PROLIANT_SYSTEM_ID).power_state
    return GET_POWER_STATE_MAP.get(power_state)
async def seek(self, pos, whence=sync_io.SEEK_SET):
"""Move to new file position.
Argument offset is a byte count. Optional argument whence defaults to
SEEK_SET or 0 (offset from start of file, offset should be >= 0); other
values are SEEK_CUR or 1 (move relative to current position, positive
or negative), and SEEK_END or 2 (move relative to end of file, usually
negative, although many platforms allow seeking beyond the end of a
file).
Note that not all file objects are seekable.
"""
return self._stream.seek(pos, whence) | Move to new file position.
Argument offset is a byte count. Optional argument whence defaults to
SEEK_SET or 0 (offset from start of file, offset should be >= 0); other
values are SEEK_CUR or 1 (move relative to current position, positive
or negative), and SEEK_END or 2 (move relative to end of file, usually
negative, although many platforms allow seeking beyond the end of a
file).
Note that not all file objects are seekable. | Below is the the instruction that describes the task:
### Input:
Move to new file position.
Argument offset is a byte count. Optional argument whence defaults to
SEEK_SET or 0 (offset from start of file, offset should be >= 0); other
values are SEEK_CUR or 1 (move relative to current position, positive
or negative), and SEEK_END or 2 (move relative to end of file, usually
negative, although many platforms allow seeking beyond the end of a
file).
Note that not all file objects are seekable.
### Response:
async def seek(self, pos, whence=sync_io.SEEK_SET):
    """Move to new file position.
    Argument offset is a byte count. Optional argument whence defaults to
    SEEK_SET or 0 (offset from start of file, offset should be >= 0); other
    values are SEEK_CUR or 1 (move relative to current position, positive
    or negative), and SEEK_END or 2 (move relative to end of file, usually
    negative, although many platforms allow seeking beyond the end of a
    file).
    Note that not all file objects are seekable.
    """
    # NOTE(review): delegates synchronously to the wrapped stream (the call
    # is not awaited), so it blocks the event loop for the duration of the
    # underlying seek — presumably acceptable for buffered/in-memory
    # streams; confirm for streams backed by real blocking I/O.
    return self._stream.seek(pos, whence)
def update_team(self, team_data, project_id, team_id):
"""UpdateTeam.
[Preview API] Update a team's name and/or description.
:param :class:`<WebApiTeam> <azure.devops.v5_1.core.models.WebApiTeam>` team_data:
:param str project_id: The name or ID (GUID) of the team project containing the team to update.
:param str team_id: The name of ID of the team to update.
:rtype: :class:`<WebApiTeam> <azure.devops.v5_1.core.models.WebApiTeam>`
"""
route_values = {}
if project_id is not None:
route_values['projectId'] = self._serialize.url('project_id', project_id, 'str')
if team_id is not None:
route_values['teamId'] = self._serialize.url('team_id', team_id, 'str')
content = self._serialize.body(team_data, 'WebApiTeam')
response = self._send(http_method='PATCH',
location_id='d30a3dd1-f8ba-442a-b86a-bd0c0c383e59',
version='5.1-preview.2',
route_values=route_values,
content=content)
return self._deserialize('WebApiTeam', response) | UpdateTeam.
[Preview API] Update a team's name and/or description.
:param :class:`<WebApiTeam> <azure.devops.v5_1.core.models.WebApiTeam>` team_data:
:param str project_id: The name or ID (GUID) of the team project containing the team to update.
:param str team_id: The name of ID of the team to update.
:rtype: :class:`<WebApiTeam> <azure.devops.v5_1.core.models.WebApiTeam>` | Below is the the instruction that describes the task:
### Input:
UpdateTeam.
[Preview API] Update a team's name and/or description.
:param :class:`<WebApiTeam> <azure.devops.v5_1.core.models.WebApiTeam>` team_data:
:param str project_id: The name or ID (GUID) of the team project containing the team to update.
:param str team_id: The name of ID of the team to update.
:rtype: :class:`<WebApiTeam> <azure.devops.v5_1.core.models.WebApiTeam>`
### Response:
def update_team(self, team_data, project_id, team_id):
    """UpdateTeam.
    [Preview API] Update a team's name and/or description.
    :param :class:`<WebApiTeam> <azure.devops.v5_1.core.models.WebApiTeam>` team_data:
    :param str project_id: The name or ID (GUID) of the team project containing the team to update.
    :param str team_id: The name of ID of the team to update.
    :rtype: :class:`<WebApiTeam> <azure.devops.v5_1.core.models.WebApiTeam>`
    """
    # Build the URL route parameters, skipping any that were not supplied.
    route_values = {}
    for route_key, param_name, value in (
            ('projectId', 'project_id', project_id),
            ('teamId', 'team_id', team_id)):
        if value is not None:
            route_values[route_key] = self._serialize.url(param_name, value, 'str')
    content = self._serialize.body(team_data, 'WebApiTeam')
    response = self._send(http_method='PATCH',
                          location_id='d30a3dd1-f8ba-442a-b86a-bd0c0c383e59',
                          version='5.1-preview.2',
                          route_values=route_values,
                          content=content)
    return self._deserialize('WebApiTeam', response)
def gradient(self, image, label):
"""Calculates the gradient of the cross-entropy loss w.r.t. the image.
The default implementation calls predictions_and_gradient.
Subclasses can provide more efficient implementations that
only calculate the gradient.
Parameters
----------
image : `numpy.ndarray`
Single input with shape as expected by the model
(without the batch dimension).
label : int
Reference label used to calculate the gradient.
Returns
-------
gradient : `numpy.ndarray`
The gradient of the cross-entropy loss w.r.t. the image. Will
have the same shape as the image.
See Also
--------
:meth:`gradient`
"""
_, gradient = self.predictions_and_gradient(image, label)
return gradient | Calculates the gradient of the cross-entropy loss w.r.t. the image.
The default implementation calls predictions_and_gradient.
Subclasses can provide more efficient implementations that
only calculate the gradient.
Parameters
----------
image : `numpy.ndarray`
Single input with shape as expected by the model
(without the batch dimension).
label : int
Reference label used to calculate the gradient.
Returns
-------
gradient : `numpy.ndarray`
The gradient of the cross-entropy loss w.r.t. the image. Will
have the same shape as the image.
See Also
--------
:meth:`gradient` | Below is the the instruction that describes the task:
### Input:
Calculates the gradient of the cross-entropy loss w.r.t. the image.
The default implementation calls predictions_and_gradient.
Subclasses can provide more efficient implementations that
only calculate the gradient.
Parameters
----------
image : `numpy.ndarray`
Single input with shape as expected by the model
(without the batch dimension).
label : int
Reference label used to calculate the gradient.
Returns
-------
gradient : `numpy.ndarray`
The gradient of the cross-entropy loss w.r.t. the image. Will
have the same shape as the image.
See Also
--------
:meth:`gradient`
### Response:
def gradient(self, image, label):
    """Calculates the gradient of the cross-entropy loss w.r.t. the image.

    The default implementation delegates to predictions_and_gradient and
    discards the predictions; subclasses can provide more efficient
    implementations that only calculate the gradient.

    Parameters
    ----------
    image : `numpy.ndarray`
        Single input with shape as expected by the model
        (without the batch dimension).
    label : int
        Reference label used to calculate the gradient.

    Returns
    -------
    gradient : `numpy.ndarray`
        The gradient of the cross-entropy loss w.r.t. the image. Will
        have the same shape as the image.

    See Also
    --------
    :meth:`predictions_and_gradient`
    """
    _predictions, grad = self.predictions_and_gradient(image, label)
    return grad
def get_frontend_data_dict_for_placeholders(placeholders, request, editable=False):
"""
Takes a list of placeholder instances and returns the data that is used by the frontend to render all contents.
The returned dict is grouped by placeholder slots.
"""
data_dict = {}
for placeholder in placeholders:
if placeholder:
plugins = []
# We don't use the helper method `placeholder.get_plugins()` because of the wrong order by path.
placeholder_plugins = placeholder.cmsplugin_set.filter(language=request.LANGUAGE_CODE).order_by(
settings.DJANGOCMS_SPA_PLUGIN_ORDER_FIELD)
for plugin in placeholder_plugins:
# We need the complete cascading structure of the plugins in the frontend. This is why we ignore the
# children here and add them later in the loop.
if not plugin.parent:
plugins.append(get_frontend_data_dict_for_plugin(
request=request,
plugin=plugin,
editable=editable)
)
if plugins or editable:
data_dict[placeholder.slot] = {
'type': 'cmp-%s' % placeholder.slot,
'plugins': plugins,
}
if editable:
# This is the structure of the template `cms/toolbar/placeholder.html` that is used to register
# the frontend editing.
from cms.plugin_pool import plugin_pool
plugin_types = [cls.__name__ for cls in plugin_pool.get_all_plugins(placeholder.slot, placeholder.page)]
allowed_plugins = plugin_types + plugin_pool.get_system_plugins()
data_dict[placeholder.slot]['cms'] = [
'cms-placeholder-{}'.format(placeholder.pk),
{
'type': 'placeholder',
'name': str(placeholder.get_label()),
'page_language': request.LANGUAGE_CODE,
'placeholder_id': placeholder.pk,
'plugin_language': request.LANGUAGE_CODE,
'plugin_restriction': [module for module in allowed_plugins],
'addPluginHelpTitle': 'Add plugin to placeholder {}'.format(placeholder.get_label()),
'urls': {
'add_plugin': placeholder.get_add_url(),
'copy_plugin': placeholder.get_copy_url()
}
}
]
return data_dict | Takes a list of placeholder instances and returns the data that is used by the frontend to render all contents.
The returned dict is grouped by placeholder slots. | Below is the the instruction that describes the task:
### Input:
Takes a list of placeholder instances and returns the data that is used by the frontend to render all contents.
The returned dict is grouped by placeholder slots.
### Response:
def get_frontend_data_dict_for_placeholders(placeholders, request, editable=False):
    """
    Takes a list of placeholder instances and returns the data that is used by the frontend to render all contents.
    The returned dict is grouped by placeholder slots.

    :param placeholders: iterable of CMS placeholder instances (falsy entries are skipped).
    :param request: current request; only ``LANGUAGE_CODE`` is read here.
    :param editable: when True, also emit the toolbar/editing metadata that
        django-CMS uses to register placeholders for frontend editing.
    :return: dict mapping placeholder slot name to its frontend data dict.
    """
    data_dict = {}
    for placeholder in placeholders:
        if placeholder:
            plugins = []
            # We don't use the helper method `placeholder.get_plugins()` because of the wrong order by path.
            placeholder_plugins = placeholder.cmsplugin_set.filter(language=request.LANGUAGE_CODE).order_by(
                settings.DJANGOCMS_SPA_PLUGIN_ORDER_FIELD)
            for plugin in placeholder_plugins:
                # We need the complete cascading structure of the plugins in the frontend. This is why we ignore the
                # children here and add them later in the loop.
                if not plugin.parent:
                    plugins.append(get_frontend_data_dict_for_plugin(
                        request=request,
                        plugin=plugin,
                        editable=editable)
                    )
            if plugins or editable:
                data_dict[placeholder.slot] = {
                    'type': 'cmp-%s' % placeholder.slot,
                    'plugins': plugins,
                }
            if editable:
                # This is the structure of the template `cms/toolbar/placeholder.html` that is used to register
                # the frontend editing.
                from cms.plugin_pool import plugin_pool
                plugin_types = [cls.__name__ for cls in plugin_pool.get_all_plugins(placeholder.slot, placeholder.page)]
                allowed_plugins = plugin_types + plugin_pool.get_system_plugins()
                # data_dict[placeholder.slot] is guaranteed to exist here,
                # because the `plugins or editable` branch above already ran.
                data_dict[placeholder.slot]['cms'] = [
                    'cms-placeholder-{}'.format(placeholder.pk),
                    {
                        'type': 'placeholder',
                        'name': str(placeholder.get_label()),
                        'page_language': request.LANGUAGE_CODE,
                        'placeholder_id': placeholder.pk,
                        'plugin_language': request.LANGUAGE_CODE,
                        'plugin_restriction': [module for module in allowed_plugins],
                        'addPluginHelpTitle': 'Add plugin to placeholder {}'.format(placeholder.get_label()),
                        'urls': {
                            'add_plugin': placeholder.get_add_url(),
                            'copy_plugin': placeholder.get_copy_url()
                        }
                    }
                ]
    return data_dict
def _maybe_apply_time_shift(da, time_offset=None, **DataAttrs):
"""Correct off-by-one error in GFDL instantaneous model data.
Instantaneous data that is outputted by GFDL models is generally off by
one timestep. For example, a netCDF file that is supposed to
correspond to 6 hourly data for the month of January, will have its
last time value be in February.
"""
if time_offset is not None:
time = times.apply_time_offset(da[TIME_STR], **time_offset)
da[TIME_STR] = time
else:
if DataAttrs['dtype_in_time'] == 'inst':
if DataAttrs['intvl_in'].endswith('hr'):
offset = -1 * int(DataAttrs['intvl_in'][0])
else:
offset = 0
time = times.apply_time_offset(da[TIME_STR], hours=offset)
da[TIME_STR] = time
return da | Correct off-by-one error in GFDL instantaneous model data.
Instantaneous data that is outputted by GFDL models is generally off by
one timestep. For example, a netCDF file that is supposed to
correspond to 6 hourly data for the month of January, will have its
last time value be in February. | Below is the the instruction that describes the task:
### Input:
Correct off-by-one error in GFDL instantaneous model data.
Instantaneous data that is outputted by GFDL models is generally off by
one timestep. For example, a netCDF file that is supposed to
correspond to 6 hourly data for the month of January, will have its
last time value be in February.
### Response:
def _maybe_apply_time_shift(da, time_offset=None, **DataAttrs):
    """Correct off-by-one error in GFDL instantaneous model data.

    Instantaneous data that is outputted by GFDL models is generally off by
    one timestep. For example, a netCDF file that is supposed to
    correspond to 6 hourly data for the month of January, will have its
    last time value be in February.
    """
    if time_offset is not None:
        # An explicitly requested offset always wins.
        da[TIME_STR] = times.apply_time_offset(da[TIME_STR], **time_offset)
    elif DataAttrs['dtype_in_time'] == 'inst':
        # Shift instantaneous data back by one sampling interval.
        # NOTE(review): only the first character of intvl_in is parsed, so
        # this presumably assumes single-digit hour intervals ('6hr');
        # confirm behavior for intervals like '12hr'.
        if DataAttrs['intvl_in'].endswith('hr'):
            shift_hours = -int(DataAttrs['intvl_in'][0])
        else:
            shift_hours = 0
        da[TIME_STR] = times.apply_time_offset(da[TIME_STR], hours=shift_hours)
    return da
def plot_histograms(ertobj, keys, **kwargs):
"""Generate histograms for one or more keys in the given container.
Parameters
----------
ertobj : container instance or :class:`pandas.DataFrame`
data object which contains the data.
keys : str or list of strings
which keys (column names) to plot
merge : bool, optional
if True, then generate only one figure with all key-plots as columns
(default True)
log10plot : bool, optional
default: True
extra_dims : list, optional
Examples
--------
>>> from reda.plotters import plot_histograms
>>> from reda.testing import ERTContainer
>>> figs_dict = plot_histograms(ERTContainer, "r", merge=False)
Generating histogram plot for key: r
Returns
-------
figures : dict
dictionary with the generated histogram figures
"""
# you can either provide a DataFrame or an ERT object
if isinstance(ertobj, pd.DataFrame):
df = ertobj
else:
df = ertobj.data
if df.shape[0] == 0:
raise Exception('No data present, cannot plot')
if isinstance(keys, str):
keys = [keys, ]
figures = {}
merge_figs = kwargs.get('merge', True)
if merge_figs:
nr_x = 2
nr_y = len(keys)
size_x = 15 / 2.54
size_y = 5 * nr_y / 2.54
fig, axes_all = plt.subplots(nr_y, nr_x, figsize=(size_x, size_y))
axes_all = np.atleast_2d(axes_all)
for row_nr, key in enumerate(keys):
print('Generating histogram plot for key: {0}'.format(key))
subdata_raw = df[key].values
subdata = subdata_raw[~np.isnan(subdata_raw)]
subdata = subdata[np.isfinite(subdata)]
subdata_log10_with_nan = np.log10(subdata[subdata > 0])
subdata_log10 = subdata_log10_with_nan[~np.isnan(
subdata_log10_with_nan)
]
subdata_log10 = subdata_log10[np.isfinite(subdata_log10)]
if merge_figs:
axes = axes_all[row_nr].squeeze()
else:
fig, axes = plt.subplots(1, 2, figsize=(10 / 2.54, 5 / 2.54))
ax = axes[0]
ax.hist(
subdata,
_get_nr_bins(subdata.size),
)
ax.set_xlabel(
units.get_label(key)
)
ax.set_ylabel('count')
ax.xaxis.set_major_locator(mpl.ticker.MaxNLocator(5))
ax.tick_params(axis='both', which='major', labelsize=6)
ax.tick_params(axis='both', which='minor', labelsize=6)
if subdata_log10.size > 0:
ax = axes[1]
ax.hist(
subdata_log10,
_get_nr_bins(subdata.size),
)
ax.set_xlabel(r'$log_{10}($' + units.get_label(key) + ')')
ax.set_ylabel('count')
ax.xaxis.set_major_locator(mpl.ticker.MaxNLocator(5))
else:
pass
# del(axes[1])
fig.tight_layout()
if not merge_figs:
figures[key] = fig
if merge_figs:
figures['all'] = fig
return figures | Generate histograms for one or more keys in the given container.
Parameters
----------
ertobj : container instance or :class:`pandas.DataFrame`
data object which contains the data.
keys : str or list of strings
which keys (column names) to plot
merge : bool, optional
if True, then generate only one figure with all key-plots as columns
(default True)
log10plot : bool, optional
default: True
extra_dims : list, optional
Examples
--------
>>> from reda.plotters import plot_histograms
>>> from reda.testing import ERTContainer
>>> figs_dict = plot_histograms(ERTContainer, "r", merge=False)
Generating histogram plot for key: r
Returns
-------
figures : dict
dictionary with the generated histogram figures | Below is the the instruction that describes the task:
### Input:
Generate histograms for one or more keys in the given container.
Parameters
----------
ertobj : container instance or :class:`pandas.DataFrame`
data object which contains the data.
keys : str or list of strings
which keys (column names) to plot
merge : bool, optional
if True, then generate only one figure with all key-plots as columns
(default True)
log10plot : bool, optional
default: True
extra_dims : list, optional
Examples
--------
>>> from reda.plotters import plot_histograms
>>> from reda.testing import ERTContainer
>>> figs_dict = plot_histograms(ERTContainer, "r", merge=False)
Generating histogram plot for key: r
Returns
-------
figures : dict
dictionary with the generated histogram figures
### Response:
def plot_histograms(ertobj, keys, **kwargs):
"""Generate histograms for one or more keys in the given container.
Parameters
----------
ertobj : container instance or :class:`pandas.DataFrame`
data object which contains the data.
keys : str or list of strings
which keys (column names) to plot
merge : bool, optional
if True, then generate only one figure with all key-plots as columns
(default True)
log10plot : bool, optional
default: True
extra_dims : list, optional
Examples
--------
>>> from reda.plotters import plot_histograms
>>> from reda.testing import ERTContainer
>>> figs_dict = plot_histograms(ERTContainer, "r", merge=False)
Generating histogram plot for key: r
Returns
-------
figures : dict
dictionary with the generated histogram figures
"""
# you can either provide a DataFrame or an ERT object
if isinstance(ertobj, pd.DataFrame):
df = ertobj
else:
df = ertobj.data
if df.shape[0] == 0:
raise Exception('No data present, cannot plot')
if isinstance(keys, str):
keys = [keys, ]
figures = {}
merge_figs = kwargs.get('merge', True)
if merge_figs:
nr_x = 2
nr_y = len(keys)
size_x = 15 / 2.54
size_y = 5 * nr_y / 2.54
fig, axes_all = plt.subplots(nr_y, nr_x, figsize=(size_x, size_y))
axes_all = np.atleast_2d(axes_all)
for row_nr, key in enumerate(keys):
print('Generating histogram plot for key: {0}'.format(key))
subdata_raw = df[key].values
subdata = subdata_raw[~np.isnan(subdata_raw)]
subdata = subdata[np.isfinite(subdata)]
subdata_log10_with_nan = np.log10(subdata[subdata > 0])
subdata_log10 = subdata_log10_with_nan[~np.isnan(
subdata_log10_with_nan)
]
subdata_log10 = subdata_log10[np.isfinite(subdata_log10)]
if merge_figs:
axes = axes_all[row_nr].squeeze()
else:
fig, axes = plt.subplots(1, 2, figsize=(10 / 2.54, 5 / 2.54))
ax = axes[0]
ax.hist(
subdata,
_get_nr_bins(subdata.size),
)
ax.set_xlabel(
units.get_label(key)
)
ax.set_ylabel('count')
ax.xaxis.set_major_locator(mpl.ticker.MaxNLocator(5))
ax.tick_params(axis='both', which='major', labelsize=6)
ax.tick_params(axis='both', which='minor', labelsize=6)
if subdata_log10.size > 0:
ax = axes[1]
ax.hist(
subdata_log10,
_get_nr_bins(subdata.size),
)
ax.set_xlabel(r'$log_{10}($' + units.get_label(key) + ')')
ax.set_ylabel('count')
ax.xaxis.set_major_locator(mpl.ticker.MaxNLocator(5))
else:
pass
# del(axes[1])
fig.tight_layout()
if not merge_figs:
figures[key] = fig
if merge_figs:
figures['all'] = fig
return figures |
def _check(cls, name, val, can_be_zero=False, val_type=float):
"""Check init arguments.
Args:
name: name of the argument. For logging purpose.
val: value. Value has to be non negative number.
can_be_zero: whether value can be zero.
val_type: Python type of the value.
Returns:
The value.
Raises:
ValueError: when invalid value is passed in.
TypeError: when invalid value type is passed in.
"""
valid_types = [val_type]
if val_type is float:
valid_types.append(int)
if type(val) not in valid_types:
raise TypeError(
'Expect type %s for parameter %s' % (val_type.__name__, name))
if val < 0:
raise ValueError(
'Value for parameter %s has to be greater than 0' % name)
if not can_be_zero and val == 0:
raise ValueError(
'Value for parameter %s can not be 0' % name)
return val | Check init arguments.
Args:
name: name of the argument. For logging purpose.
val: value. Value has to be non negative number.
can_be_zero: whether value can be zero.
val_type: Python type of the value.
Returns:
The value.
Raises:
ValueError: when invalid value is passed in.
TypeError: when invalid value type is passed in. | Below is the the instruction that describes the task:
### Input:
Check init arguments.
Args:
name: name of the argument. For logging purpose.
val: value. Value has to be non negative number.
can_be_zero: whether value can be zero.
val_type: Python type of the value.
Returns:
The value.
Raises:
ValueError: when invalid value is passed in.
TypeError: when invalid value type is passed in.
### Response:
def _check(cls, name, val, can_be_zero=False, val_type=float):
"""Check init arguments.
Args:
name: name of the argument. For logging purpose.
val: value. Value has to be non negative number.
can_be_zero: whether value can be zero.
val_type: Python type of the value.
Returns:
The value.
Raises:
ValueError: when invalid value is passed in.
TypeError: when invalid value type is passed in.
"""
valid_types = [val_type]
if val_type is float:
valid_types.append(int)
if type(val) not in valid_types:
raise TypeError(
'Expect type %s for parameter %s' % (val_type.__name__, name))
if val < 0:
raise ValueError(
'Value for parameter %s has to be greater than 0' % name)
if not can_be_zero and val == 0:
raise ValueError(
'Value for parameter %s can not be 0' % name)
return val |
def is_treshold_reached(self, scraped_request):
"""Check if similar requests to the given requests have already been crawled X times. Where X is the
minimum treshold amount from the options.
Args:
scraped_request (:class:`nyawc.http.Request`): The request that possibly reached the minimum treshold.
Returns:
bool: True if treshold reached, false otherwise.
"""
for route in self.__routing_options.routes:
if re.compile(route).match(scraped_request.url):
count_key = str(route) + scraped_request.method
if count_key in self.__routing_count.keys():
return self.__routing_count[count_key] >= self.__routing_options.minimum_threshold
return False | Check if similar requests to the given requests have already been crawled X times. Where X is the
minimum treshold amount from the options.
Args:
scraped_request (:class:`nyawc.http.Request`): The request that possibly reached the minimum treshold.
Returns:
bool: True if treshold reached, false otherwise. | Below is the the instruction that describes the task:
### Input:
Check if similar requests to the given requests have already been crawled X times. Where X is the
minimum treshold amount from the options.
Args:
scraped_request (:class:`nyawc.http.Request`): The request that possibly reached the minimum treshold.
Returns:
bool: True if treshold reached, false otherwise.
### Response:
def is_treshold_reached(self, scraped_request):
"""Check if similar requests to the given requests have already been crawled X times. Where X is the
minimum treshold amount from the options.
Args:
scraped_request (:class:`nyawc.http.Request`): The request that possibly reached the minimum treshold.
Returns:
bool: True if treshold reached, false otherwise.
"""
for route in self.__routing_options.routes:
if re.compile(route).match(scraped_request.url):
count_key = str(route) + scraped_request.method
if count_key in self.__routing_count.keys():
return self.__routing_count[count_key] >= self.__routing_options.minimum_threshold
return False |
def getMetadataId(self, metadata):
"""
Returns the id of a metadata
"""
return str(datamodel.VariantSetMetadataCompoundId(
self.getCompoundId(), 'metadata:' + metadata.key)) | Returns the id of a metadata | Below is the the instruction that describes the task:
### Input:
Returns the id of a metadata
### Response:
def getMetadataId(self, metadata):
"""
Returns the id of a metadata
"""
return str(datamodel.VariantSetMetadataCompoundId(
self.getCompoundId(), 'metadata:' + metadata.key)) |
def get_connection_id(self, conn_or_int_id):
"""Get the connection id.
Args:
conn_or_int_id (int, string): The external integer connection id or
and internal string connection id
Returns:
dict: The context data associated with that connection or None if it cannot
be found.
Raises:
ArgumentError: When the key is not found in the list of active connections
or is invalid.
"""
key = conn_or_int_id
if isinstance(key, str):
table = self._int_connections
elif isinstance(key, int):
table = self._connections
else:
raise ArgumentError("You must supply either an int connection id or a string internal id to _get_connection_state", id=key)
try:
data = table[key]
except KeyError:
raise ArgumentError("Could not find connection by id", id=key)
return data['conn_id'] | Get the connection id.
Args:
conn_or_int_id (int, string): The external integer connection id or
and internal string connection id
Returns:
dict: The context data associated with that connection or None if it cannot
be found.
Raises:
ArgumentError: When the key is not found in the list of active connections
or is invalid. | Below is the the instruction that describes the task:
### Input:
Get the connection id.
Args:
conn_or_int_id (int, string): The external integer connection id or
and internal string connection id
Returns:
dict: The context data associated with that connection or None if it cannot
be found.
Raises:
ArgumentError: When the key is not found in the list of active connections
or is invalid.
### Response:
def get_connection_id(self, conn_or_int_id):
"""Get the connection id.
Args:
conn_or_int_id (int, string): The external integer connection id or
and internal string connection id
Returns:
dict: The context data associated with that connection or None if it cannot
be found.
Raises:
ArgumentError: When the key is not found in the list of active connections
or is invalid.
"""
key = conn_or_int_id
if isinstance(key, str):
table = self._int_connections
elif isinstance(key, int):
table = self._connections
else:
raise ArgumentError("You must supply either an int connection id or a string internal id to _get_connection_state", id=key)
try:
data = table[key]
except KeyError:
raise ArgumentError("Could not find connection by id", id=key)
return data['conn_id'] |
def cudnnSetFilter4dDescriptor(wDesc, dataType, format, k, c, h, w):
""""
Initialize a filter descriptor.
This function initializes a previously created filter descriptor object into a 4D filter.
Filters layout must be contiguous in memory.
Parameters
----------
wDesc : cudnnFilterDescriptor
Handle to a previously created filter descriptor.
dataType : cudnnDataType
Data type.
format: cudnnTensorFormat
Tensor format
k : int
Number of output feature maps.
c : int
Number of input feature maps.
h : int
Height of each filter.
w : int
Width of each filter.
"""
status = _libcudnn.cudnnSetFilter4dDescriptor(wDesc, dataType, format, k, c, h, w)
cudnnCheckStatus(status) | Initialize a filter descriptor.
This function initializes a previously created filter descriptor object into a 4D filter.
Filters layout must be contiguous in memory.
Parameters
----------
wDesc : cudnnFilterDescriptor
Handle to a previously created filter descriptor.
dataType : cudnnDataType
Data type.
format: cudnnTensorFormat
Tensor format
k : int
Number of output feature maps.
c : int
Number of input feature maps.
h : int
Height of each filter.
w : int
Width of each filter. | Below is the the instruction that describes the task:
### Input:
Initialize a filter descriptor.
This function initializes a previously created filter descriptor object into a 4D filter.
Filters layout must be contiguous in memory.
Parameters
----------
wDesc : cudnnFilterDescriptor
Handle to a previously created filter descriptor.
dataType : cudnnDataType
Data type.
format: cudnnTensorFormat
Tensor format
k : int
Number of output feature maps.
c : int
Number of input feature maps.
h : int
Height of each filter.
w : int
Width of each filter.
### Response:
def cudnnSetFilter4dDescriptor(wDesc, dataType, format, k, c, h, w):
""""
Initialize a filter descriptor.
This function initializes a previously created filter descriptor object into a 4D filter.
Filters layout must be contiguous in memory.
Parameters
----------
wDesc : cudnnFilterDescriptor
Handle to a previously created filter descriptor.
dataType : cudnnDataType
Data type.
format: cudnnTensorFormat
Tensor format
k : int
Number of output feature maps.
c : int
Number of input feature maps.
h : int
Height of each filter.
w : int
Width of each filter.
"""
status = _libcudnn.cudnnSetFilter4dDescriptor(wDesc, dataType, format, k, c, h, w)
cudnnCheckStatus(status) |
def declare_internal(self, function):
"""Like declare(), but the registered function will also receive the
current namespace as its first argument. Useful for functions that
inspect the state of the compilation, like ``variable-exists()``.
Probably not so useful for anything else.
"""
function._pyscss_needs_namespace = True
self._auto_register_function(function, function.__name__, 1)
return function | Like declare(), but the registered function will also receive the
current namespace as its first argument. Useful for functions that
inspect the state of the compilation, like ``variable-exists()``.
Probably not so useful for anything else. | Below is the the instruction that describes the task:
### Input:
Like declare(), but the registered function will also receive the
current namespace as its first argument. Useful for functions that
inspect the state of the compilation, like ``variable-exists()``.
Probably not so useful for anything else.
### Response:
def declare_internal(self, function):
"""Like declare(), but the registered function will also receive the
current namespace as its first argument. Useful for functions that
inspect the state of the compilation, like ``variable-exists()``.
Probably not so useful for anything else.
"""
function._pyscss_needs_namespace = True
self._auto_register_function(function, function.__name__, 1)
return function |
def _nanmedian(array, axis=None):
"""Bottleneck nanmedian function that handle tuple axis."""
if isinstance(axis, tuple):
array = _move_tuple_axes_first(array, axis=axis)
axis = 0
return bottleneck.nanmedian(array, axis=axis) | Bottleneck nanmedian function that handle tuple axis. | Below is the the instruction that describes the task:
### Input:
Bottleneck nanmedian function that handle tuple axis.
### Response:
def _nanmedian(array, axis=None):
"""Bottleneck nanmedian function that handle tuple axis."""
if isinstance(axis, tuple):
array = _move_tuple_axes_first(array, axis=axis)
axis = 0
return bottleneck.nanmedian(array, axis=axis) |
def input_identity(interface = TerminalInterface()):
"""Get the full name, email address and SMTP information from the user."""
while True:
identity = interface.input_fields("""
In order to send your files via email, I need to get your name and
email address you will be using to send the files.""",
( 'name', 'Your full name', 'string' ),
( 'email', 'Your email address', 'string' ))
try:
(localpart, hostname) = identity['email'].split('@')
break
except ValueError:
interface.error("""
I couldn't understand the email address you entered, please try
again.""")
while True:
# Configure the SMTP information
smtp_details = interface.input_fields("""
I need details of the SMTP server used to send email for your email
address '%s'. These values can be obtained from the administrators of
your email account.
Most of the time, the default options should suffice if you are
using a free email provider such as GMail.""" % identity['email'],
( 'host', 'The SMTP server hostname', 'string', 'smtp.' + hostname),
( 'port', 'The SMTP server port', 'integer', 465),
( 'use_ssl', 'Use SSL to connect', 'boolean', True),
( 'use_tls', 'Use TLS after connecting', 'boolean', False),
( 'use_auth', 'Use a username/password to log in', 'boolean', True)
)
if smtp_details['use_auth']:
credentials = interface.input_fields("""
I need the username and password you use to log into the SMTP
server, if you provide a blank password, I'll assume you want me to
ask you each time I try to send an email for your password. This is
a more secure option but may be tiresome.""",
( 'username', 'Your username', 'string', localpart),
( 'password', 'Your password', 'password' ))
if credentials['password'] == '':
credentials['password'] = None
smtp_details['username'] = credentials['username']
smtp_details['password'] = credentials['password']
new_identity = Identity(identity['name'], identity['email'], **smtp_details)
# Ask if we want to send a test email.
interface.new_section()
interface.message("""I can try sending a test email to yourself with
all the SMTP settings you've given me. This is generally a good idea
because if we correct any mistakes now, you don't need to correct them
when you want to send a file.""")
if interface.input_boolean('Try sending a test email?', default=True):
if new_identity.send_test_email():
return new_identity
interface.message("""Sending the test email failed. You can go back
and try re-entering your SMTP server details now if you wish.""")
if not interface.input_boolean('Re-enter SMTP server details', default=True):
return new_identity | Get the full name, email address and SMTP information from the user. | Below is the the instruction that describes the task:
### Input:
Get the full name, email address and SMTP information from the user.
### Response:
def input_identity(interface = TerminalInterface()):
"""Get the full name, email address and SMTP information from the user."""
while True:
identity = interface.input_fields("""
In order to send your files via email, I need to get your name and
email address you will be using to send the files.""",
( 'name', 'Your full name', 'string' ),
( 'email', 'Your email address', 'string' ))
try:
(localpart, hostname) = identity['email'].split('@')
break
except ValueError:
interface.error("""
I couldn't understand the email address you entered, please try
again.""")
while True:
# Configure the SMTP information
smtp_details = interface.input_fields("""
I need details of the SMTP server used to send email for your email
address '%s'. These values can be obtained from the administrators of
your email account.
Most of the time, the default options should suffice if you are
using a free email provider such as GMail.""" % identity['email'],
( 'host', 'The SMTP server hostname', 'string', 'smtp.' + hostname),
( 'port', 'The SMTP server port', 'integer', 465),
( 'use_ssl', 'Use SSL to connect', 'boolean', True),
( 'use_tls', 'Use TLS after connecting', 'boolean', False),
( 'use_auth', 'Use a username/password to log in', 'boolean', True)
)
if smtp_details['use_auth']:
credentials = interface.input_fields("""
I need the username and password you use to log into the SMTP
server, if you provide a blank password, I'll assume you want me to
ask you each time I try to send an email for your password. This is
a more secure option but may be tiresome.""",
( 'username', 'Your username', 'string', localpart),
( 'password', 'Your password', 'password' ))
if credentials['password'] == '':
credentials['password'] = None
smtp_details['username'] = credentials['username']
smtp_details['password'] = credentials['password']
new_identity = Identity(identity['name'], identity['email'], **smtp_details)
# Ask if we want to send a test email.
interface.new_section()
interface.message("""I can try sending a test email to yourself with
all the SMTP settings you've given me. This is generally a good idea
because if we correct any mistakes now, you don't need to correct them
when you want to send a file.""")
if interface.input_boolean('Try sending a test email?', default=True):
if new_identity.send_test_email():
return new_identity
interface.message("""Sending the test email failed. You can go back
and try re-entering your SMTP server details now if you wish.""")
if not interface.input_boolean('Re-enter SMTP server details', default=True):
return new_identity |
def build_inside(input_method, input_args=None, substitutions=None):
"""
use requested input plugin to load configuration and then initiate build
"""
def process_keyvals(keyvals):
""" ["key=val", "x=y"] -> {"key": "val", "x": "y"} """
keyvals = keyvals or []
processed_keyvals = {}
for arg in keyvals:
key, value = arg.split("=", 1)
processed_keyvals[key] = value
return processed_keyvals
main = __name__.split('.', 1)[0]
log_encoding = get_logging_encoding(main)
logger.info("log encoding: %s", log_encoding)
if not input_method:
raise RuntimeError("No input method specified!")
logger.debug("getting build json from input %s", input_method)
cleaned_input_args = process_keyvals(input_args)
cleaned_input_args['substitutions'] = process_keyvals(substitutions)
input_runner = InputPluginsRunner([{'name': input_method,
'args': cleaned_input_args}])
build_json = input_runner.run()[input_method]
if isinstance(build_json, Exception):
raise RuntimeError("Input plugin raised exception: {}".format(build_json))
logger.debug("build json: %s", build_json)
if not build_json:
raise RuntimeError("No valid build json!")
if not isinstance(build_json, dict):
raise RuntimeError("Input plugin did not return valid build json: {}".format(build_json))
dbw = DockerBuildWorkflow(**build_json)
try:
build_result = dbw.build_docker_image()
except Exception as e:
logger.error('image build failed: %s', e)
raise
else:
if not build_result or build_result.is_failed():
raise RuntimeError("no image built")
else:
logger.info("build has finished successfully \\o/") | use requested input plugin to load configuration and then initiate build | Below is the the instruction that describes the task:
### Input:
use requested input plugin to load configuration and then initiate build
### Response:
def build_inside(input_method, input_args=None, substitutions=None):
"""
use requested input plugin to load configuration and then initiate build
"""
def process_keyvals(keyvals):
""" ["key=val", "x=y"] -> {"key": "val", "x": "y"} """
keyvals = keyvals or []
processed_keyvals = {}
for arg in keyvals:
key, value = arg.split("=", 1)
processed_keyvals[key] = value
return processed_keyvals
main = __name__.split('.', 1)[0]
log_encoding = get_logging_encoding(main)
logger.info("log encoding: %s", log_encoding)
if not input_method:
raise RuntimeError("No input method specified!")
logger.debug("getting build json from input %s", input_method)
cleaned_input_args = process_keyvals(input_args)
cleaned_input_args['substitutions'] = process_keyvals(substitutions)
input_runner = InputPluginsRunner([{'name': input_method,
'args': cleaned_input_args}])
build_json = input_runner.run()[input_method]
if isinstance(build_json, Exception):
raise RuntimeError("Input plugin raised exception: {}".format(build_json))
logger.debug("build json: %s", build_json)
if not build_json:
raise RuntimeError("No valid build json!")
if not isinstance(build_json, dict):
raise RuntimeError("Input plugin did not return valid build json: {}".format(build_json))
dbw = DockerBuildWorkflow(**build_json)
try:
build_result = dbw.build_docker_image()
except Exception as e:
logger.error('image build failed: %s', e)
raise
else:
if not build_result or build_result.is_failed():
raise RuntimeError("no image built")
else:
logger.info("build has finished successfully \\o/") |
def from_range(cls, range_list, register_flag=True):
""" core class method to create visible objects from a range (nested list) """
s = dict_from_range(range_list)
obj = cls.from_serializable(s, register_flag)
return obj | core class method to create visible objects from a range (nested list) | Below is the the instruction that describes the task:
### Input:
core class method to create visible objects from a range (nested list)
### Response:
def from_range(cls, range_list, register_flag=True):
""" core class method to create visible objects from a range (nested list) """
s = dict_from_range(range_list)
obj = cls.from_serializable(s, register_flag)
return obj |
def log_analyzer(path):
"""This procedure replaces every line which can't be parsed
with special object MalformedLogEntry.
"""
with handle(MalformedLogEntryError,
lambda (c):
invoke_restart('use_value',
MalformedLogEntry(c.text))):
for filename in find_all_logs(path):
analyze_log(filename) | This procedure replaces every line which can't be parsed
with special object MalformedLogEntry. | Below is the the instruction that describes the task:
### Input:
This procedure replaces every line which can't be parsed
with special object MalformedLogEntry.
### Response:
def log_analyzer(path):
"""This procedure replaces every line which can't be parsed
with special object MalformedLogEntry.
"""
with handle(MalformedLogEntryError,
lambda (c):
invoke_restart('use_value',
MalformedLogEntry(c.text))):
for filename in find_all_logs(path):
analyze_log(filename) |
def _get_blockade_id_from_cwd(self, cwd=None):
'''Generate a new blockade ID based on the CWD'''
if not cwd:
cwd = os.getcwd()
# this follows a similar pattern as docker-compose uses
parent_dir = os.path.abspath(cwd)
basename = os.path.basename(parent_dir).lower()
blockade_id = re.sub(r"[^a-z0-9]", "", basename)
if not blockade_id: # if we can't get a valid name from CWD, use "default"
blockade_id = "default"
return blockade_id | Generate a new blockade ID based on the CWD | Below is the the instruction that describes the task:
### Input:
Generate a new blockade ID based on the CWD
### Response:
def _get_blockade_id_from_cwd(self, cwd=None):
'''Generate a new blockade ID based on the CWD'''
if not cwd:
cwd = os.getcwd()
# this follows a similar pattern as docker-compose uses
parent_dir = os.path.abspath(cwd)
basename = os.path.basename(parent_dir).lower()
blockade_id = re.sub(r"[^a-z0-9]", "", basename)
if not blockade_id: # if we can't get a valid name from CWD, use "default"
blockade_id = "default"
return blockade_id |
def feature_distribution(self, featureset_name, feature, mode='counts',
**slice_kwargs):
"""
Calculates the distribution of a feature across slices of the corpus.
Examples
--------
.. code-block:: python
>>> corpus.feature_distribution(featureset_name='citations', \
... feature='DOLE RJ 1965 CELL', \
... step_size=1, window_size=1)
[2, 15, 25, 1]
Parameters
----------
featureset_name : str
Name of a :class:`.FeatureSet` in the :class:`.Corpus`\.
feature : str
Name of the specific feature of interest. E.g. if
``featureset_name='citations'``, then ``feature`` could be
something like ``'DOLE RJ 1965 CELL'``.
mode : str
(default: ``'counts'``) If set to ``'counts'``, values will be the
sum of all count values for the feature in each slice. If set to
``'documentCounts'``, values will be the number of papers in which
the feature occurs in each slice.
slice_kwargs : kwargs
Keyword arguments to be passed to :meth:`.Corpus.slice`\.
Returns
-------
list
"""
values = []
keys = []
fset = self.features[featureset_name]
for key, papers in self.slice(subcorpus=False, **slice_kwargs):
allfeatures = [v for v
in chain(*[fset.features[self._generate_index(p)]
for p in papers
if self._generate_index(p)
in fset.features])]
if len(allfeatures) < 1:
keys.append(key)
values.append(0.)
continue
count = 0.
for elem, v in allfeatures:
if elem != feature:
continue
if mode == 'counts':
count += v
else:
count += 1.
values.append(count)
keys.append(key)
return keys, values | Calculates the distribution of a feature across slices of the corpus.
Examples
--------
.. code-block:: python
>>> corpus.feature_distribution(featureset_name='citations', \
... feature='DOLE RJ 1965 CELL', \
... step_size=1, window_size=1)
[2, 15, 25, 1]
Parameters
----------
featureset_name : str
Name of a :class:`.FeatureSet` in the :class:`.Corpus`\.
feature : str
Name of the specific feature of interest. E.g. if
``featureset_name='citations'``, then ``feature`` could be
something like ``'DOLE RJ 1965 CELL'``.
mode : str
(default: ``'counts'``) If set to ``'counts'``, values will be the
sum of all count values for the feature in each slice. If set to
``'documentCounts'``, values will be the number of papers in which
the feature occurs in each slice.
slice_kwargs : kwargs
Keyword arguments to be passed to :meth:`.Corpus.slice`\.
Returns
-------
list | Below is the the instruction that describes the task:
### Input:
Calculates the distribution of a feature across slices of the corpus.
Examples
--------
.. code-block:: python
>>> corpus.feature_distribution(featureset_name='citations', \
... feature='DOLE RJ 1965 CELL', \
... step_size=1, window_size=1)
[2, 15, 25, 1]
Parameters
----------
featureset_name : str
Name of a :class:`.FeatureSet` in the :class:`.Corpus`\.
feature : str
Name of the specific feature of interest. E.g. if
``featureset_name='citations'``, then ``feature`` could be
something like ``'DOLE RJ 1965 CELL'``.
mode : str
(default: ``'counts'``) If set to ``'counts'``, values will be the
sum of all count values for the feature in each slice. If set to
``'documentCounts'``, values will be the number of papers in which
the feature occurs in each slice.
slice_kwargs : kwargs
Keyword arguments to be passed to :meth:`.Corpus.slice`\.
Returns
-------
list
### Response:
def feature_distribution(self, featureset_name, feature, mode='counts',
**slice_kwargs):
"""
Calculates the distribution of a feature across slices of the corpus.
Examples
--------
.. code-block:: python
>>> corpus.feature_distribution(featureset_name='citations', \
... feature='DOLE RJ 1965 CELL', \
... step_size=1, window_size=1)
[2, 15, 25, 1]
Parameters
----------
featureset_name : str
Name of a :class:`.FeatureSet` in the :class:`.Corpus`\.
feature : str
Name of the specific feature of interest. E.g. if
``featureset_name='citations'``, then ``feature`` could be
something like ``'DOLE RJ 1965 CELL'``.
mode : str
(default: ``'counts'``) If set to ``'counts'``, values will be the
sum of all count values for the feature in each slice. If set to
``'documentCounts'``, values will be the number of papers in which
the feature occurs in each slice.
slice_kwargs : kwargs
Keyword arguments to be passed to :meth:`.Corpus.slice`\.
Returns
-------
list
"""
values = []
keys = []
fset = self.features[featureset_name]
for key, papers in self.slice(subcorpus=False, **slice_kwargs):
allfeatures = [v for v
in chain(*[fset.features[self._generate_index(p)]
for p in papers
if self._generate_index(p)
in fset.features])]
if len(allfeatures) < 1:
keys.append(key)
values.append(0.)
continue
count = 0.
for elem, v in allfeatures:
if elem != feature:
continue
if mode == 'counts':
count += v
else:
count += 1.
values.append(count)
keys.append(key)
return keys, values |
def _get_value_error_message_for_invalid_prarameter(self, parameter, value):
"""Returns the ValueError message for the given parameter.
:param string parameter: Name of the parameter the message has to be created for.
:param numeric value: Value outside the parameters interval.
:return: Returns a string containing hte message.
:rtype: string
"""
# return if not interval is defined for the parameter
if parameter not in self._parameterIntervals:
return
interval = self._parameterIntervals[parameter]
return "%s has to be in %s%s, %s%s. Current value is %s." % (
parameter,
BaseMethod._interval_definitions[interval[2]][0],
interval[0], interval[1],
BaseMethod._interval_definitions[interval[3]][1],
value
) | Returns the ValueError message for the given parameter.
:param string parameter: Name of the parameter the message has to be created for.
:param numeric value: Value outside the parameters interval.
:return: Returns a string containing hte message.
:rtype: string | Below is the the instruction that describes the task:
### Input:
Returns the ValueError message for the given parameter.
:param string parameter: Name of the parameter the message has to be created for.
:param numeric value: Value outside the parameters interval.
:return: Returns a string containing hte message.
:rtype: string
### Response:
def _get_value_error_message_for_invalid_prarameter(self, parameter, value):
"""Returns the ValueError message for the given parameter.
:param string parameter: Name of the parameter the message has to be created for.
:param numeric value: Value outside the parameters interval.
:return: Returns a string containing hte message.
:rtype: string
"""
# return if not interval is defined for the parameter
if parameter not in self._parameterIntervals:
return
interval = self._parameterIntervals[parameter]
return "%s has to be in %s%s, %s%s. Current value is %s." % (
parameter,
BaseMethod._interval_definitions[interval[2]][0],
interval[0], interval[1],
BaseMethod._interval_definitions[interval[3]][1],
value
) |
def load_hvjs(cls, logo=False, bokeh_logo=False, mpl_logo=False, plotly_logo=False,
JS=True, message='HoloViewsJS successfully loaded.'):
"""
Displays javascript and CSS to initialize HoloViews widgets.
"""
import jinja2
# Evaluate load_notebook.html template with widgetjs code
if JS:
widgetjs, widgetcss = Renderer.html_assets(extras=False, backends=[], script=True)
else:
widgetjs, widgetcss = '', ''
# Add classic notebook MIME renderer
widgetjs += nb_mime_js
templateLoader = jinja2.FileSystemLoader(os.path.dirname(os.path.abspath(__file__)))
jinjaEnv = jinja2.Environment(loader=templateLoader)
template = jinjaEnv.get_template('load_notebook.html')
html = template.render({'widgetcss': widgetcss,
'logo': logo,
'bokeh_logo': bokeh_logo,
'mpl_logo': mpl_logo,
'plotly_logo': plotly_logo,
'message': message})
publish_display_data(data={'text/html': html})
# Vanilla JS mime type is only consumed by classic notebook
# Custom mime type is only consumed by JupyterLab
if JS:
mimebundle = {
MIME_TYPES['js'] : widgetjs,
MIME_TYPES['jlab-hv-load'] : widgetjs
}
if os.environ.get('HV_DOC_HTML', False):
mimebundle = {'text/html': mimebundle_to_html(mimebundle)}
publish_display_data(data=mimebundle) | Displays javascript and CSS to initialize HoloViews widgets. | Below is the the instruction that describes the task:
### Input:
Displays javascript and CSS to initialize HoloViews widgets.
### Response:
def load_hvjs(cls, logo=False, bokeh_logo=False, mpl_logo=False, plotly_logo=False,
JS=True, message='HoloViewsJS successfully loaded.'):
"""
Displays javascript and CSS to initialize HoloViews widgets.
"""
import jinja2
# Evaluate load_notebook.html template with widgetjs code
if JS:
widgetjs, widgetcss = Renderer.html_assets(extras=False, backends=[], script=True)
else:
widgetjs, widgetcss = '', ''
# Add classic notebook MIME renderer
widgetjs += nb_mime_js
templateLoader = jinja2.FileSystemLoader(os.path.dirname(os.path.abspath(__file__)))
jinjaEnv = jinja2.Environment(loader=templateLoader)
template = jinjaEnv.get_template('load_notebook.html')
html = template.render({'widgetcss': widgetcss,
'logo': logo,
'bokeh_logo': bokeh_logo,
'mpl_logo': mpl_logo,
'plotly_logo': plotly_logo,
'message': message})
publish_display_data(data={'text/html': html})
# Vanilla JS mime type is only consumed by classic notebook
# Custom mime type is only consumed by JupyterLab
if JS:
mimebundle = {
MIME_TYPES['js'] : widgetjs,
MIME_TYPES['jlab-hv-load'] : widgetjs
}
if os.environ.get('HV_DOC_HTML', False):
mimebundle = {'text/html': mimebundle_to_html(mimebundle)}
publish_display_data(data=mimebundle) |
def saveProfile(self, key, settings=None):
""" Writes the view settings to the persistent store
:param key: key where the setting will be read from
:param settings: optional QSettings object which can have a group already opened.
"""
#logger.debug("Writing view settings for: {}".format(key))
if settings is None:
settings = QtCore.QSettings()
settings.setValue(key, self.horizontalHeader().saveState()) | Writes the view settings to the persistent store
:param key: key where the setting will be read from
:param settings: optional QSettings object which can have a group already opened. | Below is the the instruction that describes the task:
### Input:
Writes the view settings to the persistent store
:param key: key where the setting will be read from
:param settings: optional QSettings object which can have a group already opened.
### Response:
def saveProfile(self, key, settings=None):
""" Writes the view settings to the persistent store
:param key: key where the setting will be read from
:param settings: optional QSettings object which can have a group already opened.
"""
#logger.debug("Writing view settings for: {}".format(key))
if settings is None:
settings = QtCore.QSettings()
settings.setValue(key, self.horizontalHeader().saveState()) |
def _loader(app, init_func, entry_points=None, modules=None):
"""Run generic loader.
Used to load and initialize entry points and modules using a custom
initialization function.
.. versionadded: 1.0.0
"""
if entry_points:
for entry_point in entry_points:
for ep in pkg_resources.iter_entry_points(entry_point):
try:
init_func(ep.load())
except Exception:
app.logger.error(
'Failed to initialize entry point: {0}'.format(ep))
raise
if modules:
for m in modules:
try:
init_func(m)
except Exception:
app.logger.error('Failed to initialize module: {0}'.format(m))
raise | Run generic loader.
Used to load and initialize entry points and modules using a custom
initialization function.
.. versionadded: 1.0.0 | Below is the the instruction that describes the task:
### Input:
Run generic loader.
Used to load and initialize entry points and modules using a custom
initialization function.
.. versionadded: 1.0.0
### Response:
def _loader(app, init_func, entry_points=None, modules=None):
"""Run generic loader.
Used to load and initialize entry points and modules using a custom
initialization function.
.. versionadded: 1.0.0
"""
if entry_points:
for entry_point in entry_points:
for ep in pkg_resources.iter_entry_points(entry_point):
try:
init_func(ep.load())
except Exception:
app.logger.error(
'Failed to initialize entry point: {0}'.format(ep))
raise
if modules:
for m in modules:
try:
init_func(m)
except Exception:
app.logger.error('Failed to initialize module: {0}'.format(m))
raise |
def _build_table_options(self, row):
""" Setup the mostly-non-schema table options, like caching settings """
return dict((o, row.get(o)) for o in self.recognized_table_options if o in row) | Setup the mostly-non-schema table options, like caching settings | Below is the the instruction that describes the task:
### Input:
Setup the mostly-non-schema table options, like caching settings
### Response:
def _build_table_options(self, row):
""" Setup the mostly-non-schema table options, like caching settings """
return dict((o, row.get(o)) for o in self.recognized_table_options if o in row) |
def wt(u, v, dfs_data):
"""The wt_u[v] function used in the paper."""
# Determine the edge_id
edge_id = dfs_data['graph'].get_first_edge_id_by_node_ids(u, v)
# Pull the weight of that edge
return dfs_data['edge_weights'][edge_id] | The wt_u[v] function used in the paper. | Below is the the instruction that describes the task:
### Input:
The wt_u[v] function used in the paper.
### Response:
def wt(u, v, dfs_data):
"""The wt_u[v] function used in the paper."""
# Determine the edge_id
edge_id = dfs_data['graph'].get_first_edge_id_by_node_ids(u, v)
# Pull the weight of that edge
return dfs_data['edge_weights'][edge_id] |
def run(self, repo: str, branch: str, task: Task, git_repo: Repo, repo_path: Path) -> Result:
""" Gets or builds an image for the repo, gets or starts a container for the image and runs the script.
:param repo: Repository URL
:param branch: Branch name
:param task: :class:`Task` to run.
:param git_repo: :class:`Repo <git.repo.base.Repo>` of the cloned repository.
:param repo_path: :class:`Path <pathlib.Path>` to the cloned location.
"""
self.check_docker_access()
container_name = self.get_container_name(repo, branch, git_repo)
container = self.container_running(container_name)
if container is None:
image = self.get_image_for_repo(repo, branch, git_repo, repo_path)
container = self.start_container(image, container_name, repo_path)
task_filename, task_json = self.serialized_task(task)
container.put_archive("/srv/scripts", self.tar_task_definition(task_filename, task_json))
res = None
try:
command = ["timeout"]
if self.inherit_image:
if self.alpine_inherited or b"Alpine" in container.exec_run(["cat", "/etc/issue"], tty=True).output:
self.alpine_inherited = True
command = ["timeout", "-t"]
command += [str(task.timeout),
"python",
"/srv/scripts/runner.py",
f"/srv/scripts/{task_filename}"]
logger.debug("Running command %s", " ".join(command))
res = container.exec_run(command, tty=True)
# 124 is the standard, 143 on alpine
if res.exit_code in {124, 143}:
raise BuildTimeoutError(f"The task timeouted after {task.timeout} seconds.")
return Result(res.output)
except BuildError: # can be raised by :meth:`Result.__init__`
raise
except Exception as e:
logger.exception(e)
if res is not None:
logger.warning(res.output)
raise BuildError("The build failed", extra_info={
"exception": e,
"output": res if res is None else res.output
})
finally:
if not self.keep_container_running:
container.kill(signal.SIGKILL)
else:
self._containers.add(container) | Gets or builds an image for the repo, gets or starts a container for the image and runs the script.
:param repo: Repository URL
:param branch: Branch name
:param task: :class:`Task` to run.
:param git_repo: :class:`Repo <git.repo.base.Repo>` of the cloned repository.
:param repo_path: :class:`Path <pathlib.Path>` to the cloned location. | Below is the the instruction that describes the task:
### Input:
Gets or builds an image for the repo, gets or starts a container for the image and runs the script.
:param repo: Repository URL
:param branch: Branch name
:param task: :class:`Task` to run.
:param git_repo: :class:`Repo <git.repo.base.Repo>` of the cloned repository.
:param repo_path: :class:`Path <pathlib.Path>` to the cloned location.
### Response:
def run(self, repo: str, branch: str, task: Task, git_repo: Repo, repo_path: Path) -> Result:
""" Gets or builds an image for the repo, gets or starts a container for the image and runs the script.
:param repo: Repository URL
:param branch: Branch name
:param task: :class:`Task` to run.
:param git_repo: :class:`Repo <git.repo.base.Repo>` of the cloned repository.
:param repo_path: :class:`Path <pathlib.Path>` to the cloned location.
"""
self.check_docker_access()
container_name = self.get_container_name(repo, branch, git_repo)
container = self.container_running(container_name)
if container is None:
image = self.get_image_for_repo(repo, branch, git_repo, repo_path)
container = self.start_container(image, container_name, repo_path)
task_filename, task_json = self.serialized_task(task)
container.put_archive("/srv/scripts", self.tar_task_definition(task_filename, task_json))
res = None
try:
command = ["timeout"]
if self.inherit_image:
if self.alpine_inherited or b"Alpine" in container.exec_run(["cat", "/etc/issue"], tty=True).output:
self.alpine_inherited = True
command = ["timeout", "-t"]
command += [str(task.timeout),
"python",
"/srv/scripts/runner.py",
f"/srv/scripts/{task_filename}"]
logger.debug("Running command %s", " ".join(command))
res = container.exec_run(command, tty=True)
# 124 is the standard, 143 on alpine
if res.exit_code in {124, 143}:
raise BuildTimeoutError(f"The task timeouted after {task.timeout} seconds.")
return Result(res.output)
except BuildError: # can be raised by :meth:`Result.__init__`
raise
except Exception as e:
logger.exception(e)
if res is not None:
logger.warning(res.output)
raise BuildError("The build failed", extra_info={
"exception": e,
"output": res if res is None else res.output
})
finally:
if not self.keep_container_running:
container.kill(signal.SIGKILL)
else:
self._containers.add(container) |
def _get_fault_type_hanging_wall(self, rake):
"""
Return fault type (F) and hanging wall (HW) flags depending on rake
angle.
The method assumes 'reverse' (F = 1) if 45 <= rake <= 135, 'other'
(F = 0) if otherwise. Hanging-wall flag is set to 1 if 'reverse',
and 0 if 'other'.
"""
F, HW = 0, 0
if 45 <= rake <= 135:
F, HW = 1, 1
return F, HW | Return fault type (F) and hanging wall (HW) flags depending on rake
angle.
The method assumes 'reverse' (F = 1) if 45 <= rake <= 135, 'other'
(F = 0) if otherwise. Hanging-wall flag is set to 1 if 'reverse',
and 0 if 'other'. | Below is the the instruction that describes the task:
### Input:
Return fault type (F) and hanging wall (HW) flags depending on rake
angle.
The method assumes 'reverse' (F = 1) if 45 <= rake <= 135, 'other'
(F = 0) if otherwise. Hanging-wall flag is set to 1 if 'reverse',
and 0 if 'other'.
### Response:
def _get_fault_type_hanging_wall(self, rake):
"""
Return fault type (F) and hanging wall (HW) flags depending on rake
angle.
The method assumes 'reverse' (F = 1) if 45 <= rake <= 135, 'other'
(F = 0) if otherwise. Hanging-wall flag is set to 1 if 'reverse',
and 0 if 'other'.
"""
F, HW = 0, 0
if 45 <= rake <= 135:
F, HW = 1, 1
return F, HW |
def runInBackground(self, pollInterval=.1, encoding=False):
'''
runInBackground - Create a background thread which will manage this process, automatically read from streams, and perform any cleanups
The object returned is a "BackgroundTaskInfo" object, and represents the state of the process. It is updated automatically as the program runs,
and if stdout or stderr are streams, they are automatically read from and populated into this object.
@see BackgroundTaskInfo for more info or http://pythonhosted.org/python-subprocess2/subprocess2.BackgroundTask.html
@param pollInterval - Amount of idle time between polling
@param encoding - Default False. If provided, data will be decoded using the value of this field as the codec name (e.g. "utf-8"). Otherwise, data will be stored as bytes.
'''
from .BackgroundTask import BackgroundTaskThread
taskInfo = BackgroundTaskInfo(encoding)
thread = BackgroundTaskThread(self, taskInfo, pollInterval, encoding)
thread.start()
#thread.run() # Uncomment to use pdb debug (will not run in background)
return taskInfo | runInBackground - Create a background thread which will manage this process, automatically read from streams, and perform any cleanups
The object returned is a "BackgroundTaskInfo" object, and represents the state of the process. It is updated automatically as the program runs,
and if stdout or stderr are streams, they are automatically read from and populated into this object.
@see BackgroundTaskInfo for more info or http://pythonhosted.org/python-subprocess2/subprocess2.BackgroundTask.html
@param pollInterval - Amount of idle time between polling
@param encoding - Default False. If provided, data will be decoded using the value of this field as the codec name (e.g. "utf-8"). Otherwise, data will be stored as bytes. | Below is the the instruction that describes the task:
### Input:
runInBackground - Create a background thread which will manage this process, automatically read from streams, and perform any cleanups
The object returned is a "BackgroundTaskInfo" object, and represents the state of the process. It is updated automatically as the program runs,
and if stdout or stderr are streams, they are automatically read from and populated into this object.
@see BackgroundTaskInfo for more info or http://pythonhosted.org/python-subprocess2/subprocess2.BackgroundTask.html
@param pollInterval - Amount of idle time between polling
@param encoding - Default False. If provided, data will be decoded using the value of this field as the codec name (e.g. "utf-8"). Otherwise, data will be stored as bytes.
### Response:
def runInBackground(self, pollInterval=.1, encoding=False):
'''
runInBackground - Create a background thread which will manage this process, automatically read from streams, and perform any cleanups
The object returned is a "BackgroundTaskInfo" object, and represents the state of the process. It is updated automatically as the program runs,
and if stdout or stderr are streams, they are automatically read from and populated into this object.
@see BackgroundTaskInfo for more info or http://pythonhosted.org/python-subprocess2/subprocess2.BackgroundTask.html
@param pollInterval - Amount of idle time between polling
@param encoding - Default False. If provided, data will be decoded using the value of this field as the codec name (e.g. "utf-8"). Otherwise, data will be stored as bytes.
'''
from .BackgroundTask import BackgroundTaskThread
taskInfo = BackgroundTaskInfo(encoding)
thread = BackgroundTaskThread(self, taskInfo, pollInterval, encoding)
thread.start()
#thread.run() # Uncomment to use pdb debug (will not run in background)
return taskInfo |
def uploadDeviceConfig(self):
"""Upload the device configuration of the fake device
selected in the __init__ method to the google account."""
upload = googleplay_pb2.UploadDeviceConfigRequest()
upload.deviceConfiguration.CopyFrom(self.deviceBuilder.getDeviceConfig())
headers = self.getHeaders(upload_fields=True)
stringRequest = upload.SerializeToString()
response = requests.post(UPLOAD_URL, data=stringRequest,
headers=headers,
verify=ssl_verify,
timeout=60,
proxies=self.proxies_config)
response = googleplay_pb2.ResponseWrapper.FromString(response.content)
try:
if response.payload.HasField('uploadDeviceConfigResponse'):
self.device_config_token = response.payload.uploadDeviceConfigResponse
self.device_config_token = self.device_config_token.uploadDeviceConfigToken
except ValueError:
pass | Upload the device configuration of the fake device
selected in the __init__ method to the google account. | Below is the the instruction that describes the task:
### Input:
Upload the device configuration of the fake device
selected in the __init__ method to the google account.
### Response:
def uploadDeviceConfig(self):
"""Upload the device configuration of the fake device
selected in the __init__ method to the google account."""
upload = googleplay_pb2.UploadDeviceConfigRequest()
upload.deviceConfiguration.CopyFrom(self.deviceBuilder.getDeviceConfig())
headers = self.getHeaders(upload_fields=True)
stringRequest = upload.SerializeToString()
response = requests.post(UPLOAD_URL, data=stringRequest,
headers=headers,
verify=ssl_verify,
timeout=60,
proxies=self.proxies_config)
response = googleplay_pb2.ResponseWrapper.FromString(response.content)
try:
if response.payload.HasField('uploadDeviceConfigResponse'):
self.device_config_token = response.payload.uploadDeviceConfigResponse
self.device_config_token = self.device_config_token.uploadDeviceConfigToken
except ValueError:
pass |
def tryCommit(self, prepare: Prepare):
"""
Try to commit if the Prepare message is ready to be passed into the
commit phase.
"""
rv, reason = self.canCommit(prepare)
if rv:
self.doCommit(prepare)
else:
self.logger.debug("{} cannot send COMMIT since {}".format(self, reason)) | Try to commit if the Prepare message is ready to be passed into the
commit phase. | Below is the the instruction that describes the task:
### Input:
Try to commit if the Prepare message is ready to be passed into the
commit phase.
### Response:
def tryCommit(self, prepare: Prepare):
"""
Try to commit if the Prepare message is ready to be passed into the
commit phase.
"""
rv, reason = self.canCommit(prepare)
if rv:
self.doCommit(prepare)
else:
self.logger.debug("{} cannot send COMMIT since {}".format(self, reason)) |
def random_rescale_to_mahalanobis(self, x):
"""change `x` like for injection, all on genotypic level"""
x -= self.mean
if any(x):
x *= sum(self.randn(len(x))**2)**0.5 / self.mahalanobis_norm(x)
x += self.mean
return x | change `x` like for injection, all on genotypic level | Below is the the instruction that describes the task:
### Input:
change `x` like for injection, all on genotypic level
### Response:
def random_rescale_to_mahalanobis(self, x):
"""change `x` like for injection, all on genotypic level"""
x -= self.mean
if any(x):
x *= sum(self.randn(len(x))**2)**0.5 / self.mahalanobis_norm(x)
x += self.mean
return x |
def distance(self, other):
"""
Distance between the center of this region and another.
Parameters
----------
other : one region, or array-like
Either another region, or the center of another region.
"""
from numpy.linalg import norm
if isinstance(other, one):
other = other.center
return norm(self.center - asarray(other), ord=2) | Distance between the center of this region and another.
Parameters
----------
other : one region, or array-like
Either another region, or the center of another region. | Below is the the instruction that describes the task:
### Input:
Distance between the center of this region and another.
Parameters
----------
other : one region, or array-like
Either another region, or the center of another region.
### Response:
def distance(self, other):
"""
Distance between the center of this region and another.
Parameters
----------
other : one region, or array-like
Either another region, or the center of another region.
"""
from numpy.linalg import norm
if isinstance(other, one):
other = other.center
return norm(self.center - asarray(other), ord=2) |
def _url_to_prefix(node, id):
'''_url_to_prefix
Low-level api: Convert an identifier from `{namespace}tagname` notation
to `prefix:tagname` notation by looking at nsmap of the node. If the
identifier does not have a namespace, the identifier is simply returned
without modification.
Parameters
----------
node : `str`
A config node. Its identifier will be converted.
id : `str`
Identifier in `{namespace}tagname` notation.
Returns
-------
str
Identifier in `prefix:tagname` notation.
'''
prefixes = {v: k for k, v in node.nsmap.items()}
ret = re.search('^{(.+)}(.+)$', id)
if ret:
if ret.group(1) in prefixes:
if prefixes[ret.group(1)] is None:
return ret.group(2)
else:
return prefixes[ret.group(1)] + ':' + ret.group(2)
return id | _url_to_prefix
Low-level api: Convert an identifier from `{namespace}tagname` notation
to `prefix:tagname` notation by looking at nsmap of the node. If the
identifier does not have a namespace, the identifier is simply returned
without modification.
Parameters
----------
node : `str`
A config node. Its identifier will be converted.
id : `str`
Identifier in `{namespace}tagname` notation.
Returns
-------
str
Identifier in `prefix:tagname` notation. | Below is the the instruction that describes the task:
### Input:
_url_to_prefix
Low-level api: Convert an identifier from `{namespace}tagname` notation
to `prefix:tagname` notation by looking at nsmap of the node. If the
identifier does not have a namespace, the identifier is simply returned
without modification.
Parameters
----------
node : `str`
A config node. Its identifier will be converted.
id : `str`
Identifier in `{namespace}tagname` notation.
Returns
-------
str
Identifier in `prefix:tagname` notation.
### Response:
def _url_to_prefix(node, id):
'''_url_to_prefix
Low-level api: Convert an identifier from `{namespace}tagname` notation
to `prefix:tagname` notation by looking at nsmap of the node. If the
identifier does not have a namespace, the identifier is simply returned
without modification.
Parameters
----------
node : `str`
A config node. Its identifier will be converted.
id : `str`
Identifier in `{namespace}tagname` notation.
Returns
-------
str
Identifier in `prefix:tagname` notation.
'''
prefixes = {v: k for k, v in node.nsmap.items()}
ret = re.search('^{(.+)}(.+)$', id)
if ret:
if ret.group(1) in prefixes:
if prefixes[ret.group(1)] is None:
return ret.group(2)
else:
return prefixes[ret.group(1)] + ':' + ret.group(2)
return id |
def splitall(path):
"""Split a path into a list of directory names (and optionally a filename).
Parameters
----------
path: str
The path (absolute or relative).
Returns
-------
allparts: list[str]
List of directory names (and optionally a filename)
Example
-------
"foo/bar/baz.py" => ["foo", "bar", "baz.py"]
"/absolute/path.py" => ["/", "absolute", "baz.py"]
Notes
-----
Credit to Trent Mick. Taken from
https://www.safaribooksonline.com/library/view/python-cookbook/0596001673/ch04s16.html
"""
allparts = []
while True:
parts = os.path.split(path)
if parts[0] == path: # sentinel for absolute paths
allparts.insert(0, parts[0])
break
elif parts[1] == path: # sentinel for relative paths
allparts.insert(0, parts[1])
break
else:
path = parts[0]
allparts.insert(0, parts[1])
return allparts | Split a path into a list of directory names (and optionally a filename).
Parameters
----------
path: str
The path (absolute or relative).
Returns
-------
allparts: list[str]
List of directory names (and optionally a filename)
Example
-------
"foo/bar/baz.py" => ["foo", "bar", "baz.py"]
"/absolute/path.py" => ["/", "absolute", "baz.py"]
Notes
-----
Credit to Trent Mick. Taken from
https://www.safaribooksonline.com/library/view/python-cookbook/0596001673/ch04s16.html | Below is the the instruction that describes the task:
### Input:
Split a path into a list of directory names (and optionally a filename).
Parameters
----------
path: str
The path (absolute or relative).
Returns
-------
allparts: list[str]
List of directory names (and optionally a filename)
Example
-------
"foo/bar/baz.py" => ["foo", "bar", "baz.py"]
"/absolute/path.py" => ["/", "absolute", "baz.py"]
Notes
-----
Credit to Trent Mick. Taken from
https://www.safaribooksonline.com/library/view/python-cookbook/0596001673/ch04s16.html
### Response:
def splitall(path):
"""Split a path into a list of directory names (and optionally a filename).
Parameters
----------
path: str
The path (absolute or relative).
Returns
-------
allparts: list[str]
List of directory names (and optionally a filename)
Example
-------
"foo/bar/baz.py" => ["foo", "bar", "baz.py"]
"/absolute/path.py" => ["/", "absolute", "baz.py"]
Notes
-----
Credit to Trent Mick. Taken from
https://www.safaribooksonline.com/library/view/python-cookbook/0596001673/ch04s16.html
"""
allparts = []
while True:
parts = os.path.split(path)
if parts[0] == path: # sentinel for absolute paths
allparts.insert(0, parts[0])
break
elif parts[1] == path: # sentinel for relative paths
allparts.insert(0, parts[1])
break
else:
path = parts[0]
allparts.insert(0, parts[1])
return allparts |
def _requeue_trial(self, trial):
"""Notification to TrialScheduler and requeue trial.
This does not notify the SearchAlgorithm because the function
evaluation is still in progress.
"""
self._scheduler_alg.on_trial_error(self, trial)
self.trial_executor.set_status(trial, Trial.PENDING)
with warn_if_slow("scheduler.on_trial_add"):
self._scheduler_alg.on_trial_add(self, trial) | Notification to TrialScheduler and requeue trial.
This does not notify the SearchAlgorithm because the function
evaluation is still in progress. | Below is the the instruction that describes the task:
### Input:
Notification to TrialScheduler and requeue trial.
This does not notify the SearchAlgorithm because the function
evaluation is still in progress.
### Response:
def _requeue_trial(self, trial):
"""Notification to TrialScheduler and requeue trial.
This does not notify the SearchAlgorithm because the function
evaluation is still in progress.
"""
self._scheduler_alg.on_trial_error(self, trial)
self.trial_executor.set_status(trial, Trial.PENDING)
with warn_if_slow("scheduler.on_trial_add"):
self._scheduler_alg.on_trial_add(self, trial) |
def _ss_matrices(self,beta):
""" Creates the state space matrices required
Parameters
----------
beta : np.array
Contains untransformed starting values for latent variables
Returns
----------
T, Z, R, Q : np.array
State space matrices used in KFS algorithm
"""
T = np.identity(self.state_no)
Z = self.X
R = np.identity(self.state_no)
Q = np.identity(self.state_no)
for i in range(0,self.state_no):
Q[i][i] = self.latent_variables.z_list[i].prior.transform(beta[i])
return T, Z, R, Q | Creates the state space matrices required
Parameters
----------
beta : np.array
Contains untransformed starting values for latent variables
Returns
----------
T, Z, R, Q : np.array
State space matrices used in KFS algorithm | Below is the the instruction that describes the task:
### Input:
Creates the state space matrices required
Parameters
----------
beta : np.array
Contains untransformed starting values for latent variables
Returns
----------
T, Z, R, Q : np.array
State space matrices used in KFS algorithm
### Response:
def _ss_matrices(self,beta):
""" Creates the state space matrices required
Parameters
----------
beta : np.array
Contains untransformed starting values for latent variables
Returns
----------
T, Z, R, Q : np.array
State space matrices used in KFS algorithm
"""
T = np.identity(self.state_no)
Z = self.X
R = np.identity(self.state_no)
Q = np.identity(self.state_no)
for i in range(0,self.state_no):
Q[i][i] = self.latent_variables.z_list[i].prior.transform(beta[i])
return T, Z, R, Q |
def loop(self):
"""
Start all the added processes and multiplex their output onto the bound
printer (which by default will print to STDOUT).
If one process terminates, all the others will be terminated by
Honcho, and :func:`~honcho.manager.Manager.loop` will return.
This method will block until all the processes have terminated.
"""
def _terminate(signum, frame):
self._system_print("%s received\n" % SIGNALS[signum]['name'])
self.returncode = SIGNALS[signum]['rc']
self.terminate()
signal.signal(signal.SIGTERM, _terminate)
signal.signal(signal.SIGINT, _terminate)
self._start()
exit = False
exit_start = None
while 1:
try:
msg = self.events.get(timeout=0.1)
except Empty:
if exit:
break
else:
if msg.type == 'line':
self._printer.write(msg)
elif msg.type == 'start':
self._processes[msg.name]['pid'] = msg.data['pid']
self._system_print("%s started (pid=%s)\n"
% (msg.name, msg.data['pid']))
elif msg.type == 'stop':
self._processes[msg.name]['returncode'] = msg.data['returncode']
self._system_print("%s stopped (rc=%s)\n"
% (msg.name, msg.data['returncode']))
if self.returncode is None:
self.returncode = msg.data['returncode']
if self._all_started() and self._all_stopped():
exit = True
if exit_start is None and self._all_started() and self._any_stopped():
exit_start = self._env.now()
self.terminate()
if exit_start is not None:
# If we've been in this loop for more than KILL_WAIT seconds,
# it's time to kill all remaining children.
waiting = self._env.now() - exit_start
if waiting > datetime.timedelta(seconds=KILL_WAIT):
self.kill() | Start all the added processes and multiplex their output onto the bound
printer (which by default will print to STDOUT).
If one process terminates, all the others will be terminated by
Honcho, and :func:`~honcho.manager.Manager.loop` will return.
This method will block until all the processes have terminated. | Below is the the instruction that describes the task:
### Input:
Start all the added processes and multiplex their output onto the bound
printer (which by default will print to STDOUT).
If one process terminates, all the others will be terminated by
Honcho, and :func:`~honcho.manager.Manager.loop` will return.
This method will block until all the processes have terminated.
### Response:
def loop(self):
"""
Start all the added processes and multiplex their output onto the bound
printer (which by default will print to STDOUT).
If one process terminates, all the others will be terminated by
Honcho, and :func:`~honcho.manager.Manager.loop` will return.
This method will block until all the processes have terminated.
"""
def _terminate(signum, frame):
self._system_print("%s received\n" % SIGNALS[signum]['name'])
self.returncode = SIGNALS[signum]['rc']
self.terminate()
signal.signal(signal.SIGTERM, _terminate)
signal.signal(signal.SIGINT, _terminate)
self._start()
exit = False
exit_start = None
while 1:
try:
msg = self.events.get(timeout=0.1)
except Empty:
if exit:
break
else:
if msg.type == 'line':
self._printer.write(msg)
elif msg.type == 'start':
self._processes[msg.name]['pid'] = msg.data['pid']
self._system_print("%s started (pid=%s)\n"
% (msg.name, msg.data['pid']))
elif msg.type == 'stop':
self._processes[msg.name]['returncode'] = msg.data['returncode']
self._system_print("%s stopped (rc=%s)\n"
% (msg.name, msg.data['returncode']))
if self.returncode is None:
self.returncode = msg.data['returncode']
if self._all_started() and self._all_stopped():
exit = True
if exit_start is None and self._all_started() and self._any_stopped():
exit_start = self._env.now()
self.terminate()
if exit_start is not None:
# If we've been in this loop for more than KILL_WAIT seconds,
# it's time to kill all remaining children.
waiting = self._env.now() - exit_start
if waiting > datetime.timedelta(seconds=KILL_WAIT):
self.kill() |
def get_rate_limits(self):
"""
Returns a dict with the current rate limit information for domain
and status requests.
"""
resp, body = self.method_get("/limits")
rate_limits = body.get("limits", {}).get("rate")
ret = []
for rate_limit in rate_limits:
limits = rate_limit["limit"]
uri_limits = {"uri": rate_limit["uri"],
"limits": limits}
ret.append(uri_limits)
return ret | Returns a dict with the current rate limit information for domain
and status requests. | Below is the the instruction that describes the task:
### Input:
Returns a dict with the current rate limit information for domain
and status requests.
### Response:
def get_rate_limits(self):
    """
    Return the current rate limit information for domain and status
    requests.

    Queries the API's ``/limits`` endpoint and extracts the ``rate``
    section of the response body.

    :returns: a list of dicts, one per rate-limited URI, each of the
        form ``{"uri": <uri>, "limits": <limit info>}``.  (Note: despite
        the original docstring, the return value is a list, not a dict.)
    """
    # Only the parsed body is needed; the raw response is discarded.
    _resp, body = self.method_get("/limits")
    rate_limits = body.get("limits", {}).get("rate")
    return [{"uri": rate_limit["uri"], "limits": rate_limit["limit"]}
            for rate_limit in rate_limits]
def render_latex_sub_super(
name, subs=None, supers=None, translate_symbols=True, sep=','):
r'''Assemble a string from the primary name and the given sub- and
superscripts::
>>> render_latex_sub_super(name='alpha', subs=['mu', 'nu'], supers=[2])
'\\alpha_{\\mu,\\nu}^{2}'
>>> render_latex_sub_super(
... name='alpha', subs=['1', '2'], supers=['(1)'], sep='')
'\\alpha_{12}^{(1)}'
Args:
name (str): the string without the subscript/superscript
subs (list or None): list of subscripts
supers (list or None): list of superscripts
translate_symbols (bool): If True, try to translate (Greek) symbols in
`name, `subs`, and `supers` to unicode
sep (str): Separator to use if there are multiple
subscripts/superscripts
'''
if subs is None:
subs = []
if supers is None:
supers = []
if translate_symbols:
supers = [_translate_symbols(str(sup)) for sup in supers]
subs = [_translate_symbols(str(sub)) for sub in subs]
name = _translate_symbols(name)
res = name
sub = sep.join(subs)
sup = sep.join(supers)
if len(sub) > 0:
res += "_{%s}" % sub
if len(sup) > 0:
res += "^{%s}" % sup
return res | r'''Assemble a string from the primary name and the given sub- and
superscripts::
>>> render_latex_sub_super(name='alpha', subs=['mu', 'nu'], supers=[2])
'\\alpha_{\\mu,\\nu}^{2}'
>>> render_latex_sub_super(
... name='alpha', subs=['1', '2'], supers=['(1)'], sep='')
'\\alpha_{12}^{(1)}'
Args:
name (str): the string without the subscript/superscript
subs (list or None): list of subscripts
supers (list or None): list of superscripts
translate_symbols (bool): If True, try to translate (Greek) symbols in
`name, `subs`, and `supers` to unicode
sep (str): Separator to use if there are multiple
subscripts/superscripts | Below is the the instruction that describes the task:
### Input:
r'''Assemble a string from the primary name and the given sub- and
superscripts::
>>> render_latex_sub_super(name='alpha', subs=['mu', 'nu'], supers=[2])
'\\alpha_{\\mu,\\nu}^{2}'
>>> render_latex_sub_super(
... name='alpha', subs=['1', '2'], supers=['(1)'], sep='')
'\\alpha_{12}^{(1)}'
Args:
name (str): the string without the subscript/superscript
subs (list or None): list of subscripts
supers (list or None): list of superscripts
translate_symbols (bool): If True, try to translate (Greek) symbols in
`name, `subs`, and `supers` to unicode
sep (str): Separator to use if there are multiple
subscripts/superscripts
### Response:
def render_latex_sub_super(
        name, subs=None, supers=None, translate_symbols=True, sep=','):
    r'''Build a LaTeX string by attaching sub- and superscripts to *name*::

        >>> render_latex_sub_super(name='alpha', subs=['mu', 'nu'], supers=[2])
        '\\alpha_{\\mu,\\nu}^{2}'
        >>> render_latex_sub_super(
        ...     name='alpha', subs=['1', '2'], supers=['(1)'], sep='')
        '\\alpha_{12}^{(1)}'

    Args:
        name (str): the base string, without subscripts/superscripts
        subs (list or None): subscripts to attach
        supers (list or None): superscripts to attach
        translate_symbols (bool): when True, try to translate (Greek)
            symbols in `name`, `subs`, and `supers` to unicode
        sep (str): separator used between multiple subscripts or
            superscripts
    '''
    subs = [] if subs is None else subs
    supers = [] if supers is None else supers
    if translate_symbols:
        name = _translate_symbols(name)
        subs = [_translate_symbols(str(s)) for s in subs]
        supers = [_translate_symbols(str(s)) for s in supers]
    parts = [name]
    joined_sub = sep.join(subs)
    if len(joined_sub) > 0:
        parts.append("_{%s}" % joined_sub)
    joined_sup = sep.join(supers)
    if len(joined_sup) > 0:
        parts.append("^{%s}" % joined_sup)
    return "".join(parts)
def healpix_plot(self, healpix_expression="source_id/34359738368", healpix_max_level=12, healpix_level=8, what="count(*)", selection=None,
grid=None,
healpix_input="equatorial", healpix_output="galactic", f=None,
colormap="afmhot", grid_limits=None, image_size=800, nest=True,
figsize=None, interactive=False, title="", smooth=None, show=False, colorbar=True,
rotation=(0, 0, 0), **kwargs):
"""Viz data in 2d using a healpix column.
:param healpix_expression: {healpix_max_level}
:param healpix_max_level: {healpix_max_level}
:param healpix_level: {healpix_level}
:param what: {what}
:param selection: {selection}
:param grid: {grid}
:param healpix_input: Specificy if the healpix index is in "equatorial", "galactic" or "ecliptic".
:param healpix_output: Plot in "equatorial", "galactic" or "ecliptic".
:param f: function to apply to the data
:param colormap: matplotlib colormap
:param grid_limits: Optional sequence [minvalue, maxvalue] that determine the min and max value that map to the colormap (values below and above these are clipped to the the min/max). (default is [min(f(grid)), max(f(grid)))
:param image_size: size for the image that healpy uses for rendering
:param nest: If the healpix data is in nested (True) or ring (False)
:param figsize: If given, modify the matplotlib figure size. Example (14,9)
:param interactive: (Experimental, uses healpy.mollzoom is True)
:param title: Title of figure
:param smooth: apply gaussian smoothing, in degrees
:param show: Call matplotlib's show (True) or not (False, defaut)
:param rotation: Rotatate the plot, in format (lon, lat, psi) such that (lon, lat) is the center, and rotate on the screen by angle psi. All angles are degrees.
:return:
"""
# plot_level = healpix_level #healpix_max_level-reduce_level
import healpy as hp
import pylab as plt
if grid is None:
reduce_level = healpix_max_level - healpix_level
NSIDE = 2**healpix_level
nmax = hp.nside2npix(NSIDE)
# print nmax, np.sqrt(nmax)
scaling = 4**reduce_level
# print nmax
epsilon = 1. / scaling / 2
grid = self._stat(what=what, binby="%s/%s" % (healpix_expression, scaling), limits=[-epsilon, nmax - epsilon], shape=nmax, selection=selection)
if grid_limits:
grid_min, grid_max = grid_limits
else:
grid_min = grid_max = None
f_org = f
f = _parse_f(f)
if smooth:
if nest:
grid = hp.reorder(grid, inp="NEST", out="RING")
nest = False
# grid[np.isnan(grid)] = np.nanmean(grid)
grid = hp.smoothing(grid, sigma=np.radians(smooth))
fgrid = f(grid)
coord_map = dict(equatorial='C', galactic='G', ecliptic="E")
fig = plt.gcf()
if figsize is not None:
fig.set_size_inches(*figsize)
what_label = what
if f_org:
what_label = f_org + " " + what_label
f = hp.mollzoom if interactive else hp.mollview
with warnings.catch_warnings():
warnings.simplefilter("ignore")
coord = coord_map[healpix_input], coord_map[healpix_output]
if coord_map[healpix_input] == coord_map[healpix_output]:
coord = None
f(fgrid, unit=what_label, rot=rotation, nest=nest, title=title, coord=coord,
cmap=colormap, hold=True, xsize=image_size, min=grid_min, max=grid_max, cbar=colorbar, **kwargs)
if show:
plt.show() | Viz data in 2d using a healpix column.
:param healpix_expression: {healpix_max_level}
:param healpix_max_level: {healpix_max_level}
:param healpix_level: {healpix_level}
:param what: {what}
:param selection: {selection}
:param grid: {grid}
:param healpix_input: Specificy if the healpix index is in "equatorial", "galactic" or "ecliptic".
:param healpix_output: Plot in "equatorial", "galactic" or "ecliptic".
:param f: function to apply to the data
:param colormap: matplotlib colormap
:param grid_limits: Optional sequence [minvalue, maxvalue] that determine the min and max value that map to the colormap (values below and above these are clipped to the the min/max). (default is [min(f(grid)), max(f(grid)))
:param image_size: size for the image that healpy uses for rendering
:param nest: If the healpix data is in nested (True) or ring (False)
:param figsize: If given, modify the matplotlib figure size. Example (14,9)
:param interactive: (Experimental, uses healpy.mollzoom is True)
:param title: Title of figure
:param smooth: apply gaussian smoothing, in degrees
:param show: Call matplotlib's show (True) or not (False, defaut)
:param rotation: Rotatate the plot, in format (lon, lat, psi) such that (lon, lat) is the center, and rotate on the screen by angle psi. All angles are degrees.
:return: | Below is the the instruction that describes the task:
### Input:
Viz data in 2d using a healpix column.
:param healpix_expression: {healpix_max_level}
:param healpix_max_level: {healpix_max_level}
:param healpix_level: {healpix_level}
:param what: {what}
:param selection: {selection}
:param grid: {grid}
:param healpix_input: Specificy if the healpix index is in "equatorial", "galactic" or "ecliptic".
:param healpix_output: Plot in "equatorial", "galactic" or "ecliptic".
:param f: function to apply to the data
:param colormap: matplotlib colormap
:param grid_limits: Optional sequence [minvalue, maxvalue] that determine the min and max value that map to the colormap (values below and above these are clipped to the the min/max). (default is [min(f(grid)), max(f(grid)))
:param image_size: size for the image that healpy uses for rendering
:param nest: If the healpix data is in nested (True) or ring (False)
:param figsize: If given, modify the matplotlib figure size. Example (14,9)
:param interactive: (Experimental, uses healpy.mollzoom is True)
:param title: Title of figure
:param smooth: apply gaussian smoothing, in degrees
:param show: Call matplotlib's show (True) or not (False, defaut)
:param rotation: Rotatate the plot, in format (lon, lat, psi) such that (lon, lat) is the center, and rotate on the screen by angle psi. All angles are degrees.
:return:
### Response:
def healpix_plot(self, healpix_expression="source_id/34359738368", healpix_max_level=12, healpix_level=8, what="count(*)", selection=None,
                 grid=None,
                 healpix_input="equatorial", healpix_output="galactic", f=None,
                 colormap="afmhot", grid_limits=None, image_size=800, nest=True,
                 figsize=None, interactive=False, title="", smooth=None, show=False, colorbar=True,
                 rotation=(0, 0, 0), **kwargs):
    """Viz data in 2d using a healpix column.
    :param healpix_expression: {healpix_max_level}
    :param healpix_max_level: {healpix_max_level}
    :param healpix_level: {healpix_level}
    :param what: {what}
    :param selection: {selection}
    :param grid: {grid}
    :param healpix_input: Specify if the healpix index is in "equatorial", "galactic" or "ecliptic".
    :param healpix_output: Plot in "equatorial", "galactic" or "ecliptic".
    :param f: function to apply to the data
    :param colormap: matplotlib colormap
    :param grid_limits: Optional sequence [minvalue, maxvalue] that determine the min and max value that map to the colormap (values below and above these are clipped to the min/max). (default is [min(f(grid)), max(f(grid)))
    :param image_size: size for the image that healpy uses for rendering
    :param nest: If the healpix data is in nested (True) or ring (False)
    :param figsize: If given, modify the matplotlib figure size. Example (14,9)
    :param interactive: (Experimental, uses healpy.mollzoom is True)
    :param title: Title of figure
    :param smooth: apply gaussian smoothing, in degrees
    :param show: Call matplotlib's show (True) or not (False, default)
    :param rotation: Rotate the plot, in format (lon, lat, psi) such that (lon, lat) is the center, and rotate on the screen by angle psi. All angles are degrees.
    :return:
    """
    # plot_level = healpix_level #healpix_max_level-reduce_level
    import healpy as hp
    import pylab as plt
    if grid is None:
        # Aggregate cells from the stored resolution (healpix_max_level)
        # down to the requested plot resolution (healpix_level): each
        # output cell covers 4**reduce_level input cells.
        reduce_level = healpix_max_level - healpix_level
        NSIDE = 2**healpix_level
        nmax = hp.nside2npix(NSIDE)
        # print nmax, np.sqrt(nmax)
        scaling = 4**reduce_level
        # print nmax
        # Half-cell offset so integer-division bin edges fall between cells.
        epsilon = 1. / scaling / 2
        grid = self._stat(what=what, binby="%s/%s" % (healpix_expression, scaling), limits=[-epsilon, nmax - epsilon], shape=nmax, selection=selection)
    if grid_limits:
        grid_min, grid_max = grid_limits
    else:
        # None lets healpy autoscale the color range.
        grid_min = grid_max = None
    f_org = f
    f = _parse_f(f)
    if smooth:
        # healpy's smoothing works on RING ordering, so convert first.
        if nest:
            grid = hp.reorder(grid, inp="NEST", out="RING")
            nest = False
        # grid[np.isnan(grid)] = np.nanmean(grid)
        grid = hp.smoothing(grid, sigma=np.radians(smooth))
    fgrid = f(grid)
    # healpy coordinate-system codes: C=celestial/equatorial, G=galactic, E=ecliptic.
    coord_map = dict(equatorial='C', galactic='G', ecliptic="E")
    fig = plt.gcf()
    if figsize is not None:
        fig.set_size_inches(*figsize)
    what_label = what
    if f_org:
        what_label = f_org + " " + what_label
    f = hp.mollzoom if interactive else hp.mollview
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        # A (from, to) pair asks healpy to rotate; identical systems need no rotation.
        coord = coord_map[healpix_input], coord_map[healpix_output]
        if coord_map[healpix_input] == coord_map[healpix_output]:
            coord = None
        f(fgrid, unit=what_label, rot=rotation, nest=nest, title=title, coord=coord,
          cmap=colormap, hold=True, xsize=image_size, min=grid_min, max=grid_max, cbar=colorbar, **kwargs)
    if show:
        plt.show()
def download_file(self, project, path):
"""
Read file of a project and download it
:param project: A project object
:param path: The path of the file in the project
:returns: A file stream
"""
url = self._getUrl("/projects/{}/files/{}".format(project.id, path))
response = yield from self._session().request("GET", url, auth=self._auth)
if response.status == 404:
raise aiohttp.web.HTTPNotFound(text="{} not found on compute".format(path))
return response | Read file of a project and download it
:param project: A project object
:param path: The path of the file in the project
:returns: A file stream | Below is the the instruction that describes the task:
### Input:
Read file of a project and download it
:param project: A project object
:param path: The path of the file in the project
:returns: A file stream
### Response:
def download_file(self, project, path):
    """
    Read file of a project and download it.

    Old-style (``yield from``) asyncio coroutine that fetches the file
    from the remote compute node via aiohttp.

    :param project: A project object
    :param path: The path of the file in the project
    :returns: A file stream (the raw aiohttp response, so the caller can
        stream the body; releasing the response is the caller's job)
    :raises aiohttp.web.HTTPNotFound: if the compute answers 404
    """
    url = self._getUrl("/projects/{}/files/{}".format(project.id, path))
    response = yield from self._session().request("GET", url, auth=self._auth)
    if response.status == 404:
        raise aiohttp.web.HTTPNotFound(text="{} not found on compute".format(path))
    return response
return response |
def readattr(path, name):
"""
Read attribute from sysfs and return as string
"""
try:
f = open(USB_SYS_PREFIX + path + "/" + name)
return f.readline().rstrip("\n")
except IOError:
return None | Read attribute from sysfs and return as string | Below is the the instruction that describes the task:
### Input:
Read attribute from sysfs and return as string
### Response:
def readattr(path, name):
    """
    Read attribute from sysfs and return as string.

    Reads the first line of ``<USB_SYS_PREFIX><path>/<name>`` with the
    trailing newline stripped.

    :param path: device path relative to ``USB_SYS_PREFIX``
    :param name: attribute file name
    :returns: the attribute value, or ``None`` if the file cannot be read
    """
    try:
        # "with" guarantees the descriptor is closed; the original left
        # the file open on success and when readline() failed.
        with open(USB_SYS_PREFIX + path + "/" + name) as f:
            return f.readline().rstrip("\n")
    except IOError:
        return None
def add_mappings(self, defn: Definition, target: Dict) -> None:
""" Process any mappings in defn, adding all of the mappings prefixes to the namespace map and
add a link to the first mapping to the target
@param defn: Class or Slot definition
@param target: context target
"""
self.add_id_prefixes(defn)
for mapping in defn.mappings:
if '://' in mapping:
target['@id'] = mapping
else:
if ':' not in mapping or len(mapping.split(':')) != 2:
raise ValueError(f"Definition {defn.name} = unrecognized mapping: {mapping}")
ns = mapping.split(':')[0]
self.add_prefix(ns)
target['@id'] = defn.mappings[0] | Process any mappings in defn, adding all of the mappings prefixes to the namespace map and
add a link to the first mapping to the target
@param defn: Class or Slot definition
@param target: context target | Below is the the instruction that describes the task:
### Input:
Process any mappings in defn, adding all of the mappings prefixes to the namespace map and
add a link to the first mapping to the target
@param defn: Class or Slot definition
@param target: context target
### Response:
def add_mappings(self, defn: Definition, target: Dict) -> None:
    """ Register the prefix of every mapping in *defn* with the namespace
    map and point the context target's ``@id`` at a mapping.

    @param defn: Class or Slot definition
    @param target: context target
    """
    self.add_id_prefixes(defn)
    for mapping in defn.mappings:
        if '://' in mapping:
            # Full URI: use it directly as the @id.
            target['@id'] = mapping
            continue
        if ':' not in mapping or len(mapping.split(':')) != 2:
            raise ValueError(f"Definition {defn.name} = unrecognized mapping: {mapping}")
        prefix = mapping.split(':')[0]
        self.add_prefix(prefix)
        # CURIE mappings always link the *first* mapping, not the current one.
        target['@id'] = defn.mappings[0]
def iterGrid(self, minZoom, maxZoom):
"Yields the tileBounds, zoom, tileCol and tileRow"
assert minZoom in range(0, len(self.RESOLUTIONS))
assert maxZoom in range(0, len(self.RESOLUTIONS))
assert minZoom <= maxZoom
for zoom in xrange(minZoom, maxZoom + 1):
[minRow, minCol, maxRow, maxCol] = self.getExtentAddress(zoom)
for row in xrange(minRow, maxRow + 1):
for col in xrange(minCol, maxCol + 1):
tileBounds = self.tileBounds(zoom, col, row)
yield (tileBounds, zoom, col, row) | Yields the tileBounds, zoom, tileCol and tileRow | Below is the the instruction that describes the task:
### Input:
Yields the tileBounds, zoom, tileCol and tileRow
### Response:
def iterGrid(self, minZoom, maxZoom):
    "Yield (tileBounds, zoom, tileCol, tileRow) for every tile between minZoom and maxZoom (inclusive)."
    nbLevels = len(self.RESOLUTIONS)
    assert minZoom in range(0, nbLevels)
    assert maxZoom in range(0, nbLevels)
    assert minZoom <= maxZoom
    for zoom in xrange(minZoom, maxZoom + 1):
        minRow, minCol, maxRow, maxCol = self.getExtentAddress(zoom)
        for row in xrange(minRow, maxRow + 1):
            for col in xrange(minCol, maxCol + 1):
                yield (self.tileBounds(zoom, col, row), zoom, col, row)
def _format_lat(self, lat):
''' Format latitude to fit the image name '''
if self.ppd in [4, 8, 16, 32, 64]:
latcenter = '000N'
elif self.ppd in [128]:
if lat < 0:
latcenter = '450S'
else:
latcenter = '450N'
return latcenter | Format latitude to fit the image name | Below is the the instruction that describes the task:
### Input:
Format latitude to fit the image name
### Response:
def _format_lat(self, lat):
    '''Format latitude to fit the image name.

    For the low resolutions (4--64 ppd) there is a single tile centred on
    the equator; at 128 ppd tiles are centred at 45 degrees N/S.

    :param lat: latitude in degrees; its sign selects N/S at 128 ppd
    :returns: the latitude part of the image name, e.g. ``'000N'``
    :raises ValueError: if ``self.ppd`` is not a supported resolution
    '''
    if self.ppd in (4, 8, 16, 32, 64):
        return '000N'
    if self.ppd == 128:
        return '450S' if lat < 0 else '450N'
    # The original fell through to an UnboundLocalError here; fail with
    # an explicit message instead.
    raise ValueError('Unsupported resolution: {} ppd'.format(self.ppd))
def ask_password(*question: Token) -> str:
"""Ask the user to enter a password.
"""
tokens = get_ask_tokens(question)
info(*tokens)
answer = read_password()
return answer | Ask the user to enter a password. | Below is the the instruction that describes the task:
### Input:
Ask the user to enter a password.
### Response:
def ask_password(*question: Token) -> str:
    """Display *question* as a prompt and return the password the user types."""
    info(*get_ask_tokens(question))
    return read_password()
def write(models, base=None, graph=None, rdfsonly=False, prefixes=None, logger=logging):
'''
See the command line help
'''
prefixes = prefixes or {}
g = graph or rdflib.Graph()
#g.bind('bf', BFNS)
#g.bind('bfc', BFCNS)
#g.bind('bfd', BFDNS)
g.bind('v', VNS)
for k, v in prefixes.items():
g.bind(k, v)
for m in models:
base_out = m.base
process(m, g, rdfsonly, base=base_out, logger=logger)
return g | See the command line help | Below is the the instruction that describes the task:
### Input:
See the command line help
### Response:
def write(models, base=None, graph=None, rdfsonly=False, prefixes=None, logger=logging):
    '''
    Render *models* into an RDF graph; see the command line help.
    '''
    # NOTE: ``base`` is accepted for interface compatibility; each
    # model's own base is what gets used below.
    g = graph or rdflib.Graph()
    g.bind('v', VNS)
    for prefix, ns in (prefixes or {}).items():
        g.bind(prefix, ns)
    for model in models:
        process(model, g, rdfsonly, base=model.base, logger=logger)
    return g
def _start_machine(machine, session):
'''
Helper to try and start machines
@param machine:
@type machine: IMachine
@param session:
@type session: ISession
@return:
@rtype: IProgress or None
'''
try:
return machine.launchVMProcess(session, '', '')
except Exception as e:
log.debug(e.message, exc_info=True)
return None | Helper to try and start machines
@param machine:
@type machine: IMachine
@param session:
@type session: ISession
@return:
@rtype: IProgress or None | Below is the the instruction that describes the task:
### Input:
Helper to try and start machines
@param machine:
@type machine: IMachine
@param session:
@type session: ISession
@return:
@rtype: IProgress or None
### Response:
def _start_machine(machine, session):
    '''
    Helper to try and start machines
    @param machine:
    @type machine: IMachine
    @param session:
    @type session: ISession
    @return: the launch progress object, or None if starting failed
    @rtype: IProgress or None
    '''
    try:
        return machine.launchVMProcess(session, '', '')
    except Exception as e:
        # ``e.message`` was removed in Python 3 (and is not set on every
        # exception in Python 2); str(e) is safe everywhere.
        log.debug(str(e), exc_info=True)
        return None
def getIfStats(self):
"""Return dictionary of Traffic Stats for Network Interfaces.
@return: Nested dictionary of statistics for each interface.
"""
info_dict = {}
try:
fp = open(ifaceStatsFile, 'r')
data = fp.read()
fp.close()
except:
raise IOError('Failed reading interface stats from file: %s'
% ifaceStatsFile)
for line in data.splitlines():
mobj = re.match('^\s*([\w\d:]+):\s*(.*\S)\s*$', line)
if mobj:
iface = mobj.group(1)
statline = mobj.group(2)
info_dict[iface] = dict(zip(
('rxbytes', 'rxpackets', 'rxerrs', 'rxdrop', 'rxfifo',
'rxframe', 'rxcompressed', 'rxmulticast',
'txbytes', 'txpackets', 'txerrs', 'txdrop', 'txfifo',
'txcolls', 'txcarrier', 'txcompressed'),
[int(x) for x in statline.split()]))
return info_dict | Return dictionary of Traffic Stats for Network Interfaces.
@return: Nested dictionary of statistics for each interface. | Below is the the instruction that describes the task:
### Input:
Return dictionary of Traffic Stats for Network Interfaces.
@return: Nested dictionary of statistics for each interface.
### Response:
def getIfStats(self):
    """Return dictionary of Traffic Stats for Network Interfaces.

    Parses the /proc/net/dev-style file referenced by the module-level
    ``ifaceStatsFile``.

    @return: Nested dictionary of statistics for each interface.
    @raises IOError: if the stats file cannot be read.
    """
    try:
        # "with" closes the file even if read() fails; the original
        # leaked the descriptor in that case.
        with open(ifaceStatsFile, 'r') as fp:
            data = fp.read()
    except (IOError, OSError):
        # Only trap I/O failures; the original bare "except:" also hid
        # programming errors such as NameError.
        raise IOError('Failed reading interface stats from file: %s'
                      % ifaceStatsFile)
    fields = ('rxbytes', 'rxpackets', 'rxerrs', 'rxdrop', 'rxfifo',
              'rxframe', 'rxcompressed', 'rxmulticast',
              'txbytes', 'txpackets', 'txerrs', 'txdrop', 'txfifo',
              'txcolls', 'txcarrier', 'txcompressed')
    info_dict = {}
    # Each data line looks like "  eth0: 1234 5 0 ..." with 16 counters.
    # Raw string fixes the invalid escape sequences of the original.
    for line in data.splitlines():
        mobj = re.match(r'^\s*([\w\d:]+):\s*(.*\S)\s*$', line)
        if mobj:
            iface = mobj.group(1)
            counters = [int(x) for x in mobj.group(2).split()]
            info_dict[iface] = dict(zip(fields, counters))
    return info_dict
def set(self, key, value):
"""Set a configuration property."""
# Try to set self._jconf first if JVM is created, set self._conf if JVM is not created yet.
if self._jconf is not None:
self._jconf.set(key, unicode(value))
else:
self._conf[key] = unicode(value)
return self | Set a configuration property. | Below is the the instruction that describes the task:
### Input:
Set a configuration property.
### Response:
def set(self, key, value):
    """Set a configuration property.

    :param key: property name
    :param value: property value; coerced to ``unicode`` (Python 2 text)
        before being stored
    :return: ``self``, so calls can be chained
    """
    # Try to set self._jconf first if JVM is created, set self._conf if JVM is not created yet.
    if self._jconf is not None:
        self._jconf.set(key, unicode(value))
    else:
        self._conf[key] = unicode(value)
    return self
def add_external_reference(self,markable_id, external_ref):
"""
Adds an external reference for the given markable
@type markable_id: string
@param markable_id: the markable identifier
@type external_ref: L{CexternalReference}
@param external_ref: the external reference object
"""
if markable_id in self.idx:
markable_obj = Cterm(self.idx[markable_id],self.type)
markable_obj.add_external_reference(external_ref)
else:
print('{markable_id} not in self.idx'.format(**locals())) | Adds an external reference for the given markable
@type markable_id: string
@param markable_id: the markable identifier
@type external_ref: L{CexternalReference}
@param external_ref: the external reference object | Below is the the instruction that describes the task:
### Input:
Adds an external reference for the given markable
@type markable_id: string
@param markable_id: the markable identifier
@type external_ref: L{CexternalReference}
@param external_ref: the external reference object
### Response:
def add_external_reference(self, markable_id, external_ref):
    """
    Adds an external reference for the given markable.
    @type markable_id: string
    @param markable_id: the markable identifier
    @type external_ref: L{CexternalReference}
    @param external_ref: the external reference object
    """
    if markable_id not in self.idx:
        # Unknown markable: report it rather than raising.
        print('{markable_id} not in self.idx'.format(**locals()))
        return
    term = Cterm(self.idx[markable_id], self.type)
    term.add_external_reference(external_ref)
def visit_For(self, node):
"""
OUT = (node,) + last body statements
RAISES = body's that are not break or continue
"""
currs = (node,)
break_currs = tuple()
raises = ()
# handle body
for n in node.body:
self.result.add_node(n)
for curr in currs:
self.result.add_edge(curr, n)
currs, nraises = self.visit(n)
for nraise in nraises:
if isinstance(nraise, ast.Break):
break_currs += (nraise,)
elif isinstance(nraise, ast.Continue):
self.result.add_edge(nraise, node)
else:
raises += (nraise,)
# add the backward loop
for curr in currs:
self.result.add_edge(curr, node)
# the else statement if needed
if node.orelse:
for n in node.orelse:
self.result.add_node(n)
for curr in currs:
self.result.add_edge(curr, n)
currs, nraises = self.visit(n)
# while only
if hasattr(node, 'test') and is_true_predicate(node.test):
return break_currs, raises
return break_currs + currs, raises | OUT = (node,) + last body statements
RAISES = body's that are not break or continue | Below is the the instruction that describes the task:
### Input:
OUT = (node,) + last body statements
RAISES = body's that are not break or continue
### Response:
def visit_For(self, node):
    """
    Build the control-flow edges for a ``for``/``while`` loop node.

    OUT = (node,) + last body statements
    RAISES = body's that are not break or continue
    """
    currs = (node,)
    break_currs = tuple()
    raises = ()
    # handle body: thread an edge from every current exit into each
    # statement, then classify anything the statement "raises"
    for n in node.body:
        self.result.add_node(n)
        for curr in currs:
            self.result.add_edge(curr, n)
        currs, nraises = self.visit(n)
        for nraise in nraises:
            if isinstance(nraise, ast.Break):
                # break exits the loop: becomes an OUT of this node
                break_currs += (nraise,)
            elif isinstance(nraise, ast.Continue):
                # continue jumps back to the loop header
                self.result.add_edge(nraise, node)
            else:
                raises += (nraise,)
    # add the backward loop
    for curr in currs:
        self.result.add_edge(curr, node)
    # the else statement if needed
    # NOTE(review): nraises from the orelse body is discarded here —
    # confirm that exceptions raised in orelse are handled elsewhere.
    if node.orelse:
        for n in node.orelse:
            self.result.add_node(n)
            for curr in currs:
                self.result.add_edge(curr, n)
            currs, nraises = self.visit(n)
    # while only: a "while True" loop can only be left via break,
    # so the normal exits are not part of OUT
    if hasattr(node, 'test') and is_true_predicate(node.test):
        return break_currs, raises
    return break_currs + currs, raises
def expand(self, info=b"", length=32):
'''
Generate output key material based on an `info` value
Arguments:
- info - context to generate the OKM
- length - length in bytes of the key to generate
See the HKDF draft RFC for guidance.
'''
return hkdf_expand(self._prk, info, length, self._hash) | Generate output key material based on an `info` value
Arguments:
- info - context to generate the OKM
- length - length in bytes of the key to generate
See the HKDF draft RFC for guidance. | Below is the the instruction that describes the task:
### Input:
Generate output key material based on an `info` value
Arguments:
- info - context to generate the OKM
- length - length in bytes of the key to generate
See the HKDF draft RFC for guidance.
### Response:
def expand(self, info=b"", length=32):
    '''
    Derive output key material (OKM) for the given ``info`` context.

    Arguments:
    - info - context to generate the OKM
    - length - length in bytes of the key to generate

    See the HKDF draft RFC for guidance.
    '''
    return hkdf_expand(self._prk, info, length, self._hash)
def logging_active_formatter(self, value):
"""
Setter for **self.__logging_active_formatter** attribute.
:param value: Attribute value.
:type value: unicode or QString
"""
if value is not None:
assert type(value) in (
unicode, QString), "'{0}' attribute: '{1}' type is not 'unicode' or 'QString'!".format(
"logging_active_formatter", value)
self.__logging_active_formatter = value | Setter for **self.__logging_active_formatter** attribute.
:param value: Attribute value.
:type value: unicode or QString | Below is the the instruction that describes the task:
### Input:
Setter for **self.__logging_active_formatter** attribute.
:param value: Attribute value.
:type value: unicode or QString
### Response:
def logging_active_formatter(self, value):
    """
    Setter for **self.__logging_active_formatter** attribute.

    :param value: Attribute value.
    :type value: unicode or QString
    """
    if value is not None:
        # Only unicode / QString values are accepted (Python 2 / PyQt
        # code); note that assert is stripped under ``python -O``.
        assert type(value) in (
            unicode, QString), "'{0}' attribute: '{1}' type is not 'unicode' or 'QString'!".format(
            "logging_active_formatter", value)
    self.__logging_active_formatter = value
def append_code_expr(self, code):
"""Compile argument and adds it to the list of code objects."""
# expects a string.
if isinstance(code, str) and not isinstance(code, unicode):
code = unicode(code)
if not isinstance(code, unicode):
raise TypeError("string expected")
log.debug("compiling code %s...", code)
try:
code_obj = compile(code, '<string>', 'eval')
self.code_objs[code] = code_obj
except SyntaxError as syntax_err:
log.error("cannot compile %s: %s", code, syntax_err)
raise
log.debug("compiled code %s", code) | Compile argument and adds it to the list of code objects. | Below is the the instruction that describes the task:
### Input:
Compile argument and adds it to the list of code objects.
### Response:
def append_code_expr(self, code):
    """Compile *code* and cache the resulting code object.

    :param code: a Python expression as a (unicode) string; byte strings
        are decoded first; anything else raises ``TypeError``.
    :raises SyntaxError: if the expression does not compile (logged and
        re-raised).
    """
    # expects a string; Python 2: coerce byte strings to unicode first.
    if isinstance(code, str) and not isinstance(code, unicode):
        code = unicode(code)
    if not isinstance(code, unicode):
        raise TypeError("string expected")
    log.debug("compiling code %s...", code)
    try:
        code_obj = compile(code, '<string>', 'eval')
        # Cache the compiled object keyed by its source text.
        self.code_objs[code] = code_obj
    except SyntaxError as syntax_err:
        log.error("cannot compile %s: %s", code, syntax_err)
        raise
    log.debug("compiled code %s", code)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.