| code (string, 75 to 104k chars) | docstring (string, 1 to 46.9k chars) | text (string, 164 to 112k chars) |
|---|---|---|
def get_minimum_size(self, data):
"""Returns the rotated minimum size."""
size = self.element.get_minimum_size(data)
if self.angle in (RotateLM.NORMAL, RotateLM.UPSIDE_DOWN):
return size
else:
return datatypes.Point(size.y, size.x) | Returns the rotated minimum size. | Below is the instruction that describes the task:
### Input:
Returns the rotated minimum size.
### Response:
def get_minimum_size(self, data):
"""Returns the rotated minimum size."""
size = self.element.get_minimum_size(data)
if self.angle in (RotateLM.NORMAL, RotateLM.UPSIDE_DOWN):
return size
else:
return datatypes.Point(size.y, size.x) |
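For illustration, a minimal sketch of the swap this method performs; `Point` is a stand-in namedtuple for the library's `datatypes.Point`, and the 90/270-degree case is the `else` branch above:

```python
from collections import namedtuple

Point = namedtuple("Point", ["x", "y"])  # stand-in for datatypes.Point

size = Point(x=200, y=50)        # minimum size of the wrapped element
rotated = Point(size.y, size.x)  # a 90- or 270-degree rotation swaps the axes
print(rotated)                   # Point(x=50, y=200)
```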
def _bind(self):
"""
Create socket and bind
"""
credentials = pika.PlainCredentials(self.user, self.password)
params = pika.ConnectionParameters(credentials=credentials,
host=self.server,
virtual_host=self.vhost,
port=self.port)
self.connection = pika.BlockingConnection(params)
self.channel = self.connection.channel()
# NOTE : PIKA version uses 'exchange_type' instead of 'type'
self.channel.exchange_declare(exchange=self.topic_exchange,
exchange_type="topic") | Create socket and bind | Below is the the instruction that describes the task:
### Input:
Create socket and bind
### Response:
def _bind(self):
"""
Create socket and bind
"""
credentials = pika.PlainCredentials(self.user, self.password)
params = pika.ConnectionParameters(credentials=credentials,
host=self.server,
virtual_host=self.vhost,
port=self.port)
self.connection = pika.BlockingConnection(params)
self.channel = self.connection.channel()
# NOTE : PIKA version uses 'exchange_type' instead of 'type'
self.channel.exchange_declare(exchange=self.topic_exchange,
exchange_type="topic") |
def read_channels(self, fid):
"""Read channels from an acclaim file."""
bones = [[] for i in self.vertices]
num_channels = 0
for vertex in self.vertices:
num_channels = num_channels + len(vertex.meta['channels'])
lin = self.read_line(fid)
while lin != ':DEGREES':
lin = self.read_line(fid)
if lin == '':
raise ValueError('Could not find :DEGREES in ' + fid.name)
counter = 0
lin = self.read_line(fid)
while lin:
parts = lin.split()
if len(parts)==1:
frame_no = int(parts[0])
if frame_no:
counter += 1
if counter != frame_no:
raise ValueError('Unexpected frame number.')
else:
raise ValueError('Single bone name ...')
else:
ind = self.get_index_by_name(parts[0])
bones[ind].append(np.array([float(channel) for channel in parts[1:]]))
lin = self.read_line(fid)
num_frames = counter
channels = np.zeros((num_frames, num_channels))
end_val = 0
for i in range(len(self.vertices)):
vertex = self.vertices[i]
if len(vertex.meta['channels'])>0:
start_val = end_val
end_val = end_val + len(vertex.meta['channels'])
for j in range(num_frames):
channels[j, start_val:end_val] = bones[i][j]
self.resolve_indices(i, start_val)
self.smooth_angle_channels(channels)
return channels | Read channels from an acclaim file. | Below is the instruction that describes the task:
### Input:
Read channels from an acclaim file.
### Response:
def read_channels(self, fid):
"""Read channels from an acclaim file."""
bones = [[] for i in self.vertices]
num_channels = 0
for vertex in self.vertices:
num_channels = num_channels + len(vertex.meta['channels'])
lin = self.read_line(fid)
while lin != ':DEGREES':
lin = self.read_line(fid)
if lin == '':
raise ValueError('Could not find :DEGREES in ' + fid.name)
counter = 0
lin = self.read_line(fid)
while lin:
parts = lin.split()
if len(parts)==1:
frame_no = int(parts[0])
if frame_no:
counter += 1
if counter != frame_no:
raise ValueError('Unexpected frame number.')
else:
raise ValueError('Single bone name ...')
else:
ind = self.get_index_by_name(parts[0])
bones[ind].append(np.array([float(channel) for channel in parts[1:]]))
lin = self.read_line(fid)
num_frames = counter
channels = np.zeros((num_frames, num_channels))
end_val = 0
for i in range(len(self.vertices)):
vertex = self.vertices[i]
if len(vertex.meta['channels'])>0:
start_val = end_val
end_val = end_val + len(vertex.meta['channels'])
for j in range(num_frames):
channels[j, start_val:end_val] = bones[i][j]
self.resolve_indices(i, start_val)
self.smooth_angle_channels(channels)
return channels |
def get_default_qubit_mapping(program):
"""
Takes a program which contains qubit placeholders and provides a mapping to the integers
0 through N-1.
The output of this function is suitable for input to :py:func:`address_qubits`.
:param program: A program containing qubit placeholders
:return: A dictionary mapping qubit placeholder to an addressed qubit from 0 through N-1.
"""
fake_qubits, real_qubits, qubits = _what_type_of_qubit_does_it_use(program)
if real_qubits:
warnings.warn("This program contains integer qubits, "
"so getting a mapping doesn't make sense.")
return {q: q for q in qubits}
return {qp: Qubit(i) for i, qp in enumerate(qubits)} | Takes a program which contains qubit placeholders and provides a mapping to the integers
0 through N-1.
The output of this function is suitable for input to :py:func:`address_qubits`.
:param program: A program containing qubit placeholders
:return: A dictionary mapping qubit placeholder to an addressed qubit from 0 through N-1. | Below is the instruction that describes the task:
### Input:
Takes a program which contains qubit placeholders and provides a mapping to the integers
0 through N-1.
The output of this function is suitable for input to :py:func:`address_qubits`.
:param program: A program containing qubit placeholders
:return: A dictionary mapping qubit placeholder to an addressed qubit from 0 through N-1.
### Response:
def get_default_qubit_mapping(program):
"""
Takes a program which contains qubit placeholders and provides a mapping to the integers
0 through N-1.
The output of this function is suitable for input to :py:func:`address_qubits`.
:param program: A program containing qubit placeholders
:return: A dictionary mapping qubit placeholder to an addressed qubit from 0 through N-1.
"""
fake_qubits, real_qubits, qubits = _what_type_of_qubit_does_it_use(program)
if real_qubits:
warnings.warn("This program contains integer qubits, "
"so getting a mapping doesn't make sense.")
return {q: q for q in qubits}
return {qp: Qubit(i) for i, qp in enumerate(qubits)} |
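A hedged usage sketch, assuming this is the `get_default_qubit_mapping` from pyQuil's `pyquil.quil` module:

```python
from pyquil import Program
from pyquil.gates import CNOT, H
from pyquil.quil import address_qubits, get_default_qubit_mapping
from pyquil.quilatom import QubitPlaceholder

q0, q1 = QubitPlaceholder(), QubitPlaceholder()
prog = Program(H(q0), CNOT(q0, q1))

mapping = get_default_qubit_mapping(prog)  # {placeholder: Qubit(0), ...}
print(address_qubits(prog, mapping))       # H 0 / CNOT 0 1
```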
def parse_log(self, bowtie_log):
"""Parse a bowtie log file.
This is a bowtie log parsing method that populates the
:py:attr:`self.n_reads, self.align_0x, self.align_1x, self.align_mt1x and self.overall_rate` attributes with
data from the log file.
Disclamer: THIS METHOD IS HORRIBLE BECAUSE THE BOWTIE LOG IS HORRIBLE.
The insertion of data on the attribytes is done by the
:py:meth:`set_attribute method.
Parameters
----------
bowtie_log : str
Path to the boetie log file.
"""
print("is here!")
# Regexes - thanks to https://github.com/ewels/MultiQC/blob/master/multiqc/modules/bowtie2/bowtie2.py
regexes = {
'unpaired': {
'unpaired_aligned_none': r"(\\d+) \\([\\d\\.]+%\\) aligned 0 times",
'unpaired_aligned_one': r"(\\d+) \\([\\d\\.]+%\\) aligned exactly 1 time",
'unpaired_aligned_multi': r"(\\d+) \\([\\d\\.]+%\\) aligned >1 times"
},
'paired': {
'paired_aligned_none': r"(\\d+) \\([\\d\\.]+%\\) aligned concordantly 0 times",
'paired_aligned_one': r"(\\d+) \\([\\d\\.]+%\\) aligned concordantly exactly 1 time",
'paired_aligned_multi': r"(\\d+) \\([\\d\\.]+%\\) aligned concordantly >1 times",
'paired_aligned_discord_one': r"(\\d+) \\([\\d\\.]+%\\) aligned discordantly 1 time",
'paired_aligned_discord_multi': r"(\\d+) \\([\\d\\.]+%\\) aligned discordantly >1 times",
'paired_aligned_mate_one': r"(\\d+) \\([\\d\\.]+%\\) aligned exactly 1 time",
'paired_aligned_mate_multi': r"(\\d+) \\([\\d\\.]+%\\) aligned >1 times",
'paired_aligned_mate_none': r"(\\d+) \\([\\d\\.]+%\\) aligned 0 times"
}
}
#Missing parser for unpaired (not implemented in flowcraft yet)
with open(bowtie_log, "r") as f:
#Go through log file line by line
for l in f:
print(l)
#total reads
total = re.search(r"(\\d+) reads; of these:", l)
print(total)
if total:
print(total)
self.set_n_reads(total.group(1))
# Paired end reads aka the pain
paired = re.search(r"(\\d+) \\([\\d\\.]+%\\) were paired; of these:", l)
if paired:
paired_total = int(paired.group(1))
paired_numbers = {}
# Do nested loop whilst we have this level of indentation
l = f.readline()
while l.startswith(' '):
for k, r in regexes['paired'].items():
match = re.search(r, l)
if match:
paired_numbers[k] = int(match.group(1))
l = f.readline()
align_zero_times = paired_numbers['paired_aligned_none'] + paired_numbers['paired_aligned_mate_none']
if align_zero_times:
self.set_align_0x(align_zero_times)
align_one_time = paired_numbers['paired_aligned_one'] + paired_numbers['paired_aligned_mate_one']
if align_one_time:
self.set_align_1x(align_one_time)
align_more_than_one_time = paired_numbers['paired_aligned_multi'] + paired_numbers['paired_aligned_mate_multi']
if align_more_than_one_time:
self.set_align_mt1x(align_more_than_one_time)
# Overall alignment rate
overall = re.search(r"([\\d\\.]+)% overall alignment rate", l)
if overall:
self.overall_rate = float(overall.group(1)) | Parse a bowtie log file.
This is a bowtie log parsing method that populates the
:py:attr:`self.n_reads, self.align_0x, self.align_1x, self.align_mt1x and self.overall_rate` attributes with
data from the log file.
Disclaimer: THIS METHOD IS HORRIBLE BECAUSE THE BOWTIE LOG IS HORRIBLE.
The insertion of data on the attributes is done by the
:py:meth:`set_attribute` method.
Parameters
----------
bowtie_log : str
Path to the bowtie log file. | Below is the instruction that describes the task:
### Input:
Parse a bowtie log file.
This is a bowtie log parsing method that populates the
:py:attr:`self.n_reads, self.align_0x, self.align_1x, self.align_mt1x and self.overall_rate` attributes with
data from the log file.
Disclaimer: THIS METHOD IS HORRIBLE BECAUSE THE BOWTIE LOG IS HORRIBLE.
The insertion of data on the attributes is done by the
:py:meth:`set_attribute` method.
Parameters
----------
bowtie_log : str
Path to the bowtie log file.
### Response:
def parse_log(self, bowtie_log):
"""Parse a bowtie log file.
This is a bowtie log parsing method that populates the
:py:attr:`self.n_reads, self.align_0x, self.align_1x, self.align_mt1x and self.overall_rate` attributes with
data from the log file.
Disclamer: THIS METHOD IS HORRIBLE BECAUSE THE BOWTIE LOG IS HORRIBLE.
The insertion of data on the attribytes is done by the
:py:meth:`set_attribute method.
Parameters
----------
bowtie_log : str
Path to the boetie log file.
"""
print("is here!")
# Regexes - thanks to https://github.com/ewels/MultiQC/blob/master/multiqc/modules/bowtie2/bowtie2.py
regexes = {
'unpaired': {
'unpaired_aligned_none': r"(\\d+) \\([\\d\\.]+%\\) aligned 0 times",
'unpaired_aligned_one': r"(\\d+) \\([\\d\\.]+%\\) aligned exactly 1 time",
'unpaired_aligned_multi': r"(\\d+) \\([\\d\\.]+%\\) aligned >1 times"
},
'paired': {
'paired_aligned_none': r"(\\d+) \\([\\d\\.]+%\\) aligned concordantly 0 times",
'paired_aligned_one': r"(\\d+) \\([\\d\\.]+%\\) aligned concordantly exactly 1 time",
'paired_aligned_multi': r"(\\d+) \\([\\d\\.]+%\\) aligned concordantly >1 times",
'paired_aligned_discord_one': r"(\\d+) \\([\\d\\.]+%\\) aligned discordantly 1 time",
'paired_aligned_discord_multi': r"(\\d+) \\([\\d\\.]+%\\) aligned discordantly >1 times",
'paired_aligned_mate_one': r"(\\d+) \\([\\d\\.]+%\\) aligned exactly 1 time",
'paired_aligned_mate_multi': r"(\\d+) \\([\\d\\.]+%\\) aligned >1 times",
'paired_aligned_mate_none': r"(\\d+) \\([\\d\\.]+%\\) aligned 0 times"
}
}
#Missing parser for unpaired (not implemented in flowcraft yet)
with open(bowtie_log, "r") as f:
#Go through log file line by line
for l in f:
print(l)
#total reads
total = re.search(r"(\\d+) reads; of these:", l)
print(total)
if total:
print(total)
self.set_n_reads(total.group(1))
# Paired end reads aka the pain
paired = re.search(r"(\\d+) \\([\\d\\.]+%\\) were paired; of these:", l)
if paired:
paired_total = int(paired.group(1))
paired_numbers = {}
# Do nested loop whilst we have this level of indentation
l = f.readline()
while l.startswith(' '):
for k, r in regexes['paired'].items():
match = re.search(r, l)
if match:
paired_numbers[k] = int(match.group(1))
l = f.readline()
align_zero_times = paired_numbers['paired_aligned_none'] + paired_numbers['paired_aligned_mate_none']
if align_zero_times:
self.set_align_0x(align_zero_times)
align_one_time = paired_numbers['paired_aligned_one'] + paired_numbers['paired_aligned_mate_one']
if align_one_time:
self.set_align_1x(align_one_time)
align_more_than_one_time = paired_numbers['paired_aligned_multi'] + paired_numbers['paired_aligned_mate_multi']
if align_more_than_one_time:
self.set_align_mt1x(align_more_than_one_time)
# Overall alignment rate
overall = re.search(r"([\\d\\.]+)% overall alignment rate", l)
if overall:
self.overall_rate = float(overall.group(1)) |
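To see why the raw strings above must use `\d` rather than a doubled backslash, here is a quick check of the "total reads" pattern against a made-up line in the shape bowtie2 prints (real logs may differ slightly):

```python
import re

line = "10000 reads; of these:"  # fabricated sample, bowtie2-style
match = re.search(r"(\d+) reads; of these:", line)
if match:
    print(int(match.group(1)))   # 10000
# With r"(\\d+)" the pattern would look for a literal backslash and never match.
```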
def raw_corpus_rouge1(hypotheses: Iterable[str], references: Iterable[str]) -> float:
"""
Simple wrapper around ROUGE-1 implementation.
:param hypotheses: Hypotheses stream.
:param references: Reference stream.
:return: ROUGE-1 score as float between 0 and 1.
"""
return rouge.rouge_1(hypotheses, references) | Simple wrapper around ROUGE-1 implementation.
:param hypotheses: Hypotheses stream.
:param references: Reference stream.
:return: ROUGE-1 score as float between 0 and 1. | Below is the instruction that describes the task:
### Input:
Simple wrapper around ROUGE-1 implementation.
:param hypotheses: Hypotheses stream.
:param references: Reference stream.
:return: ROUGE-1 score as float between 0 and 1.
### Response:
def raw_corpus_rouge1(hypotheses: Iterable[str], references: Iterable[str]) -> float:
"""
Simple wrapper around ROUGE-1 implementation.
:param hypotheses: Hypotheses stream.
:param references: Reference stream.
:return: ROUGE-1 score as float between 0 and 1.
"""
return rouge.rouge_1(hypotheses, references) |
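A usage sketch; it assumes the surrounding module's `rouge` helper (as in Sockeye's evaluation utilities) is importable alongside this wrapper:

```python
hypotheses = ["the cat sat on the mat"]
references = ["a cat sat on the mat"]

score = raw_corpus_rouge1(hypotheses, references)
print(0.0 <= score <= 1.0)  # True: ROUGE-1 is bounded between 0 and 1
```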
def info_data(self, data_info=None, completo=True, key=None, verbose=True):
"""Show some info."""
def _info_dataframe(data_frame):
if completo:
print('\n', data_frame.info(), '\n', data_frame.describe(), '\n')
print(data_frame.head())
print(data_frame.tail())
if verbose:
if data_info is None:
_info_dataframe(self.data[key or self.masterkey])
elif type(data_info) is dict:
[_info_dataframe(df) for df in data_info.values()]
else:
_info_dataframe(data_info) | Show some info. | Below is the instruction that describes the task:
### Input:
Show some info.
### Response:
def info_data(self, data_info=None, completo=True, key=None, verbose=True):
"""Show some info."""
def _info_dataframe(data_frame):
if completo:
print('\n', data_frame.info(), '\n', data_frame.describe(), '\n')
print(data_frame.head())
print(data_frame.tail())
if verbose:
if data_info is None:
_info_dataframe(self.data[key or self.masterkey])
elif type(data_info) is dict:
[_info_dataframe(df) for df in data_info.values()]
else:
_info_dataframe(data_info) |
def import_vmdk(self):
"""
All actions necessary to import vmdk (calls s3 upload, and import to aws ec2)
:param vmdk_location: location of vmdk to import. Can be provided as a string, or the result output of fabric
execution
:return:
"""
# Set the initial upload to be the first region in the list
first_upload_region = self.aws_regions[0]
print("Initial AMI will be created in: {}".format(first_upload_region))
self.upload_to_s3(region=first_upload_region)
# If the upload was successful, the name to reference for import is now the basename
description = "AMI upload of: {}".format(os.path.basename(self.upload_file))
temp_fd, file_location = self.create_config_file(os.path.basename(self.upload_file), description)
import_id = self.run_ec2_import(file_location, description, first_upload_region)
self.wait_for_import_to_complete(import_id)
self.rename_image(import_id, self.ami_name, source_region=first_upload_region)
return import_id | All actions necessary to import vmdk (calls s3 upload, and import to aws ec2)
:param vmdk_location: location of vmdk to import. Can be provided as a string, or the result output of fabric
execution
:return: | Below is the instruction that describes the task:
### Input:
All actions necessary to import vmdk (calls s3 upload, and import to aws ec2)
:param vmdk_location: location of vmdk to import. Can be provided as a string, or the result output of fabric
execution
:return:
### Response:
def import_vmdk(self):
"""
All actions necessary to import vmdk (calls s3 upload, and import to aws ec2)
:param vmdk_location: location of vmdk to import. Can be provided as a string, or the result output of fabric
execution
:return:
"""
# Set the initial upload to be the first region in the list
first_upload_region = self.aws_regions[0]
print("Initial AMI will be created in: {}".format(first_upload_region))
self.upload_to_s3(region=first_upload_region)
# If the upload was successful, the name to reference for import is now the basename
description = "AMI upload of: {}".format(os.path.basename(self.upload_file))
temp_fd, file_location = self.create_config_file(os.path.basename(self.upload_file), description)
import_id = self.run_ec2_import(file_location, description, first_upload_region)
self.wait_for_import_to_complete(import_id)
self.rename_image(import_id, self.ami_name, source_region=first_upload_region)
return import_id |
def topil(self):
"""Returns a PIL.Image version of this Pix"""
from PIL import Image
# Leptonica manages data in words, so it implicitly does an endian
# swap. Tell Pillow about this when it reads the data.
pix = self
if sys.byteorder == 'little':
if self.mode == 'RGB':
raw_mode = 'XBGR'
elif self.mode == 'RGBA':
raw_mode = 'ABGR'
elif self.mode == '1':
raw_mode = '1;I'
pix = Pix(lept.pixEndianByteSwapNew(pix._cdata))
else:
raw_mode = self.mode
pix = Pix(lept.pixEndianByteSwapNew(pix._cdata))
else:
raw_mode = self.mode # no endian swap needed
size = (pix._cdata.w, pix._cdata.h)
bytecount = pix._cdata.wpl * 4 * pix._cdata.h
buf = ffi.buffer(pix._cdata.data, bytecount)
stride = pix._cdata.wpl * 4
im = Image.frombytes(self.mode, size, buf, 'raw', raw_mode, stride)
return im | Returns a PIL.Image version of this Pix | Below is the instruction that describes the task:
### Input:
Returns a PIL.Image version of this Pix
### Response:
def topil(self):
"""Returns a PIL.Image version of this Pix"""
from PIL import Image
# Leptonica manages data in words, so it implicitly does an endian
# swap. Tell Pillow about this when it reads the data.
pix = self
if sys.byteorder == 'little':
if self.mode == 'RGB':
raw_mode = 'XBGR'
elif self.mode == 'RGBA':
raw_mode = 'ABGR'
elif self.mode == '1':
raw_mode = '1;I'
pix = Pix(lept.pixEndianByteSwapNew(pix._cdata))
else:
raw_mode = self.mode
pix = Pix(lept.pixEndianByteSwapNew(pix._cdata))
else:
raw_mode = self.mode # no endian swap needed
size = (pix._cdata.w, pix._cdata.h)
bytecount = pix._cdata.wpl * 4 * pix._cdata.h
buf = ffi.buffer(pix._cdata.data, bytecount)
stride = pix._cdata.wpl * 4
im = Image.frombytes(self.mode, size, buf, 'raw', raw_mode, stride)
return im |
def create_namespaced_ingress(self, namespace, body, **kwargs): # noqa: E501
"""create_namespaced_ingress # noqa: E501
create an Ingress # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_ingress(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1beta1Ingress body: (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1beta1Ingress
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_namespaced_ingress_with_http_info(namespace, body, **kwargs) # noqa: E501
else:
(data) = self.create_namespaced_ingress_with_http_info(namespace, body, **kwargs) # noqa: E501
return data | create_namespaced_ingress # noqa: E501
create an Ingress # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_ingress(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1beta1Ingress body: (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1beta1Ingress
If the method is called asynchronously,
returns the request thread. | Below is the instruction that describes the task:
### Input:
create_namespaced_ingress # noqa: E501
create an Ingress # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_ingress(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1beta1Ingress body: (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1beta1Ingress
If the method is called asynchronously,
returns the request thread.
### Response:
def create_namespaced_ingress(self, namespace, body, **kwargs): # noqa: E501
"""create_namespaced_ingress # noqa: E501
create an Ingress # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_ingress(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1beta1Ingress body: (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1beta1Ingress
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_namespaced_ingress_with_http_info(namespace, body, **kwargs) # noqa: E501
else:
(data) = self.create_namespaced_ingress_with_http_info(namespace, body, **kwargs) # noqa: E501
return data |
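This is generated-client code for the old `extensions/v1beta1` Ingress API of the official Kubernetes Python client. A hedged usage sketch follows; the names `demo-ingress` and `demo-svc` are placeholders, and current clusters would use `NetworkingV1Api` instead:

```python
from kubernetes import client, config

config.load_kube_config()            # or load_incluster_config() inside a pod
api = client.ExtensionsV1beta1Api()

body = client.V1beta1Ingress(
    metadata=client.V1ObjectMeta(name="demo-ingress"),
    spec=client.V1beta1IngressSpec(
        backend=client.V1beta1IngressBackend(
            service_name="demo-svc", service_port=80)))

ingress = api.create_namespaced_ingress(namespace="default", body=body)
print(ingress.metadata.name)
```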
def next_rise(self, latitude, longitude, altitude=None):
"""The next rise of the ISS.
:param latitude: latitude in degrees of location you want iss pass
above
:type latitude: float
:param longitude: longitude in degrees of location you want iss pass
above
:type longitude: float
:param altitude: altitude in meters of location you want iss pass
above, default is 100 when not given
:type altitude: float
:return: The next date when the ISS will be over 10 degrees above the
horizon
:rtype: datetime
"""
rise = self.pass_times(latitude, longitude, altitude,
2)
timestamp = rise[0]['risetime']
return datetime.fromtimestamp(timestamp) | The next rise of the ISS.
:param latitude: latitude in degrees of location you want iss pass
above
:type latitude: float
:param longitude: longitude in degrees of location you want iss pass
above
:type longitude: float
:param altitude: altitude in meters of location you want iss pass
above, default is 100 when not given
:type altitude: float
:return: The next date when the ISS will be over 10 degrees above the
horizon
:rtype: datetime | Below is the instruction that describes the task:
### Input:
The next rise of the ISS.
:param latitude: latitude in degrees of location you want iss pass
above
:type latitude: float
:param longitude: longitude in degrees of location you want iss pass
above
:type longitude: float
:param altitude: altitude in meters of location you want iss pass
above, default is 100 when not given
:type altitude: float
:return: The next date when the ISS will be over 10 degrees above the
horizon
:rtype: datetime
### Response:
def next_rise(self, latitude, longitude, altitude=None):
"""The next rise of the ISS.
:param latitude: latitude in degrees of location you want iss pass
above
:type latitude: float
:param longitude: longitude in degrees of location you want iss pass
above
:type longitude: float
:param altitude: altitude in meters of location you want iss pass
above, default is 100 when not given
:type altitude: float
:return: The next date when the ISS will be over 10 degrees above the
horizon
:rtype: datetime
"""
rise = self.pass_times(latitude, longitude, altitude,
2)
timestamp = rise[0]['risetime']
return datetime.fromtimestamp(timestamp) |
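The real work happens in `pass_times` (not shown), which wraps an Open Notify-style passes endpoint; this method only converts the first returned Unix timestamp. A sketch of that conversion, with a fabricated response shape:

```python
from datetime import datetime

rise = [{"risetime": 1700000000, "duration": 600}]  # assumed pass_times() shape
print(datetime.fromtimestamp(rise[0]["risetime"]))  # local-time datetime of next rise
```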
def list_files_courses(self, course_id, content_types=None, include=None, only=None, order=None, search_term=None, sort=None):
"""
List files.
Returns the paginated list of files for the folder or course.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# OPTIONAL - content_types
"""Filter results by content-type. You can specify type/subtype pairs (e.g.,
'image/jpeg'), or simply types (e.g., 'image', which will match
'image/gif', 'image/jpeg', etc.)."""
if content_types is not None:
params["content_types"] = content_types
# OPTIONAL - search_term
"""The partial name of the files to match and return."""
if search_term is not None:
params["search_term"] = search_term
# OPTIONAL - include
"""Array of additional information to include.
"user":: the user who uploaded the file or last edited its content
"usage_rights":: copyright and license information for the file (see UsageRights)"""
if include is not None:
self._validate_enum(include, ["user"])
params["include"] = include
# OPTIONAL - only
"""Array of information to restrict to. Overrides include[]
"names":: only returns file name information"""
if only is not None:
params["only"] = only
# OPTIONAL - sort
"""Sort results by this field. Defaults to 'name'. Note that `sort=user` implies `include[]=user`."""
if sort is not None:
self._validate_enum(sort, ["name", "size", "created_at", "updated_at", "content_type", "user"])
params["sort"] = sort
# OPTIONAL - order
"""The sorting order. Defaults to 'asc'."""
if order is not None:
self._validate_enum(order, ["asc", "desc"])
params["order"] = order
self.logger.debug("GET /api/v1/courses/{course_id}/files with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/courses/{course_id}/files".format(**path), data=data, params=params, all_pages=True) | List files.
Returns the paginated list of files for the folder or course. | Below is the instruction that describes the task:
### Input:
List files.
Returns the paginated list of files for the folder or course.
### Response:
def list_files_courses(self, course_id, content_types=None, include=None, only=None, order=None, search_term=None, sort=None):
"""
List files.
Returns the paginated list of files for the folder or course.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# OPTIONAL - content_types
"""Filter results by content-type. You can specify type/subtype pairs (e.g.,
'image/jpeg'), or simply types (e.g., 'image', which will match
'image/gif', 'image/jpeg', etc.)."""
if content_types is not None:
params["content_types"] = content_types
# OPTIONAL - search_term
"""The partial name of the files to match and return."""
if search_term is not None:
params["search_term"] = search_term
# OPTIONAL - include
"""Array of additional information to include.
"user":: the user who uploaded the file or last edited its content
"usage_rights":: copyright and license information for the file (see UsageRights)"""
if include is not None:
self._validate_enum(include, ["user"])
params["include"] = include
# OPTIONAL - only
"""Array of information to restrict to. Overrides include[]
"names":: only returns file name information"""
if only is not None:
params["only"] = only
# OPTIONAL - sort
"""Sort results by this field. Defaults to 'name'. Note that `sort=user` implies `include[]=user`."""
if sort is not None:
self._validate_enum(sort, ["name", "size", "created_at", "updated_at", "content_type", "user"])
params["sort"] = sort
# OPTIONAL - order
"""The sorting order. Defaults to 'asc'."""
if order is not None:
self._validate_enum(order, ["asc", "desc"])
params["order"] = order
self.logger.debug("GET /api/v1/courses/{course_id}/files with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/courses/{course_id}/files".format(**path), data=data, params=params, all_pages=True) |
def get_string_at_rva(self, rva):
"""Get an ASCII string located at the given address."""
s = self.get_section_by_rva(rva)
if not s:
return self.get_string_from_data(0, self.__data__[rva:rva+MAX_STRING_LENGTH])
return self.get_string_from_data(0, s.get_data(rva, length=MAX_STRING_LENGTH)) | Get an ASCII string located at the given address. | Below is the instruction that describes the task:
### Input:
Get an ASCII string located at the given address.
### Response:
def get_string_at_rva(self, rva):
"""Get an ASCII string located at the given address."""
s = self.get_section_by_rva(rva)
if not s:
return self.get_string_from_data(0, self.__data__[rva:rva+MAX_STRING_LENGTH])
return self.get_string_from_data(0, s.get_data(rva, length=MAX_STRING_LENGTH))
def ols_matrix(A, norm_func=None):
"""
Generate the matrix used to solve OLS regression.
Parameters
----------
A: float array
The design matrix
norm_func: callable, optional
A normalization function to apply to the matrix, before extracting the
OLS matrix.
Notes
-----
The matrix needed for OLS regression for the equation:
.. math::
y = A \beta
is given by:
.. math::
\hat{\beta} = (A' A)^{-1} A' y
See also
--------
http://en.wikipedia.org/wiki/Ordinary_least_squares#Estimation
"""
A = np.asarray(A)
if norm_func is not None:
X = np.matrix(unit_vector(A.copy(), norm_func=norm_func))
else:
X = np.matrix(A.copy())
return la.pinv(X.T * X) * X.T | Generate the matrix used to solve OLS regression.
Parameters
----------
A: float array
The design matrix
norm_func: callable, optional
A normalization function to apply to the matrix, before extracting the
OLS matrix.
Notes
-----
The matrix needed for OLS regression for the equation:
.. math::
y = A \beta
is given by:
.. math::
\hat{\beta} = (A' A)^{-1} A' y
See also
--------
http://en.wikipedia.org/wiki/Ordinary_least_squares#Estimation | Below is the instruction that describes the task:
### Input:
Generate the matrix used to solve OLS regression.
Parameters
----------
A: float array
The design matrix
norm_func: callable, optional
A normalization function to apply to the matrix, before extracting the
OLS matrix.
Notes
-----
The matrix needed for OLS regression for the equation:
.. math::
y = A \beta
is given by:
.. math::
\hat{\beta} = (A' A)^{-1} A' y
See also
--------
http://en.wikipedia.org/wiki/Ordinary_least_squares#Estimation
### Response:
def ols_matrix(A, norm_func=None):
"""
Generate the matrix used to solve OLS regression.
Parameters
----------
A: float array
The design matrix
norm_func: callable, optional
A normalization function to apply to the matrix, before extracting the
OLS matrix.
Notes
-----
The matrix needed for OLS regression for the equation:
.. math::
y = A \beta
is given by:
.. math::
\hat{\beta} = (A' A)^{-1} A' y
See also
--------
http://en.wikipedia.org/wiki/Ordinary_least_squares#Estimation
"""
A = np.asarray(A)
if norm_func is not None:
X = np.matrix(unit_vector(A.copy(), norm_func=norm_func))
else:
X = np.matrix(A.copy())
return la.pinv(X.T * X) * X.T |
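A quick numerical check of the formula: for data generated from y = 2 + 3t, the OLS matrix applied to y should recover [2, 3]. This sketch calls `ols_matrix` as defined above (note that `np.matrix` is deprecated in modern NumPy):

```python
import numpy as np

t = np.arange(5.0)
A = np.column_stack([np.ones_like(t), t])  # design matrix: intercept + slope
y = 2 + 3 * t

# ols_matrix(A) is (A'A)^{-1} A', so multiplying by y gives beta-hat.
beta = np.asarray(ols_matrix(A) @ y.reshape(-1, 1)).ravel()
print(beta)  # approximately [2. 3.]
```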
def joint_distribution(dataframe, rownames, colnames):
"""Joint Distribution Table
- The Contingency Table normalized by the total number of observations
Args:
rownames: the column name or list of column names that make the keys of the rows
colnames: the column name or list of column names that make the keys of the columns
"""
cont_table = contingency_table(dataframe, rownames=rownames, colnames=colnames, margins=True)
total_observations = cont_table['All']['All']
return cont_table/total_observations | Joint Distribution Table
- The Contingency Table normalized by the total number of observations
Args:
rownames: the column name or list of column names that make the keys of the rows
colnames: the column name or list of column names that make the keys of the columns | Below is the instruction that describes the task:
### Input:
Joint Distribution Table
- The Contingency Table normalized by the total number of observations
Args:
rownames: the column name or list of column names that make the keys of the rows
colnames: the column name or list of column names that make the keys of the columns
### Response:
def joint_distribution(dataframe, rownames, colnames):
"""Joint Distribution Table
- The Contingency Table normalized by the total number of observations
Args:
rownames: the column name or list of column names that make the keys of the rows
colnames: the column name or list of column names that make the keys of the columns
"""
cont_table = contingency_table(dataframe, rownames=rownames, colnames=colnames, margins=True)
total_observations = cont_table['All']['All']
return cont_table/total_observations |
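The `contingency_table` helper is not shown here; `pd.crosstab` with `margins=True` is a reasonable stand-in for sketching what the normalization does:

```python
import pandas as pd

df = pd.DataFrame({"sex": ["M", "F", "F", "M", "F"],
                   "smoker": ["yes", "no", "no", "no", "yes"]})

cont = pd.crosstab(index=df["sex"], columns=df["smoker"], margins=True)
joint = cont / cont.loc["All", "All"]  # divide every cell by the total count
print(joint)  # cells (excluding margins) now sum to 1
```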
def get_orm_column_names(cls: Type, sort: bool = False) -> List[str]:
"""
Gets column names (that is, database column names) from an SQLAlchemy
ORM class.
"""
colnames = [col.name for col in get_orm_columns(cls)]
return sorted(colnames) if sort else colnames | Gets column names (that is, database column names) from an SQLAlchemy
ORM class. | Below is the instruction that describes the task:
### Input:
Gets column names (that is, database column names) from an SQLAlchemy
ORM class.
### Response:
def get_orm_column_names(cls: Type, sort: bool = False) -> List[str]:
"""
Gets column names (that is, database column names) from an SQLAlchemy
ORM class.
"""
colnames = [col.name for col in get_orm_columns(cls)]
return sorted(colnames) if sort else colnames |
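A usage sketch with SQLAlchemy 1.4+; it assumes the companion `get_orm_columns` helper from the same module is importable. Note the function returns *database* column names, so a mapped attribute whose column is renamed reports the column name:

```python
from sqlalchemy import Column, Integer, String
from sqlalchemy.orm import declarative_base

Base = declarative_base()

class User(Base):
    __tablename__ = "user"
    id = Column(Integer, primary_key=True)
    full_name = Column("name", String(50))  # attribute name != column name

print(get_orm_column_names(User, sort=True))  # ['id', 'name'], not 'full_name'
```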
def process_priority(self, process_priority):
"""
Sets the process priority.
:param process_priority: string
"""
log.info('QEMU VM "{name}" [{id}] has set the process priority to {priority}'.format(name=self._name,
id=self._id,
priority=process_priority))
self._process_priority = process_priority | Sets the process priority.
:param process_priority: string | Below is the instruction that describes the task:
### Input:
Sets the process priority.
:param process_priority: string
### Response:
def process_priority(self, process_priority):
"""
Sets the process priority.
:param process_priority: string
"""
log.info('QEMU VM "{name}" [{id}] has set the process priority to {priority}'.format(name=self._name,
id=self._id,
priority=process_priority))
self._process_priority = process_priority |
def trimsquants(self, col: str, sup: float):
"""
Remove superior quantiles from the dataframe
:param col: column name
:type col: str
:param sup: superior quantile
:type sup: float
:example: ``ds.trimsquants("Col 1", 0.99)``
"""
try:
self.df = self._trimquants(col, None, sup)
except Exception as e:
self.err(e, self.trimsquants, "Can not trim superior quantiles") | Remove superior quantiles from the dataframe
:param col: column name
:type col: str
:param sup: superior quantile
:type sup: float
:example: ``ds.trimsquants("Col 1", 0.99)`` | Below is the the instruction that describes the task:
### Input:
Remove superior quantiles from the dataframe
:param col: column name
:type col: str
:param sup: superior quantile
:type sup: float
:example: ``ds.trimsquants("Col 1", 0.99)``
### Response:
def trimsquants(self, col: str, sup: float):
"""
Remove superior quantiles from the dataframe
:param col: column name
:type col: str
:param sup: superior quantile
:type sup: float
:example: ``ds.trimsquants("Col 1", 0.99)``
"""
try:
self.df = self._trimquants(col, None, sup)
except Exception as e:
self.err(e, self.trimsquants, "Can not trim superior quantiles") |
def _set_path(self, v, load=False):
"""
Setter method for path, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/path (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_path is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_path() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("path_name",path.path, yang_name="path", rest_name="path", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='path-name', extensions={u'tailf-common': {u'info': u'Define Path', u'cli-suppress-list-no': None, u'cli-no-key-completion': None, u'callpoint': u'MplsPath', u'cli-mode-name': u'config-router-mpls-path-$(path-name)'}}), is_container='list', yang_name="path", rest_name="path", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Define Path', u'cli-suppress-list-no': None, u'cli-no-key-completion': None, u'callpoint': u'MplsPath', u'cli-mode-name': u'config-router-mpls-path-$(path-name)'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """path must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("path_name",path.path, yang_name="path", rest_name="path", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='path-name', extensions={u'tailf-common': {u'info': u'Define Path', u'cli-suppress-list-no': None, u'cli-no-key-completion': None, u'callpoint': u'MplsPath', u'cli-mode-name': u'config-router-mpls-path-$(path-name)'}}), is_container='list', yang_name="path", rest_name="path", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Define Path', u'cli-suppress-list-no': None, u'cli-no-key-completion': None, u'callpoint': u'MplsPath', u'cli-mode-name': u'config-router-mpls-path-$(path-name)'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='list', is_config=True)""",
})
self.__path = t
if hasattr(self, '_set'):
self._set() | Setter method for path, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/path (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_path is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_path() directly. | Below is the instruction that describes the task:
### Input:
Setter method for path, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/path (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_path is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_path() directly.
### Response:
def _set_path(self, v, load=False):
"""
Setter method for path, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/path (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_path is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_path() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("path_name",path.path, yang_name="path", rest_name="path", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='path-name', extensions={u'tailf-common': {u'info': u'Define Path', u'cli-suppress-list-no': None, u'cli-no-key-completion': None, u'callpoint': u'MplsPath', u'cli-mode-name': u'config-router-mpls-path-$(path-name)'}}), is_container='list', yang_name="path", rest_name="path", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Define Path', u'cli-suppress-list-no': None, u'cli-no-key-completion': None, u'callpoint': u'MplsPath', u'cli-mode-name': u'config-router-mpls-path-$(path-name)'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """path must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("path_name",path.path, yang_name="path", rest_name="path", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='path-name', extensions={u'tailf-common': {u'info': u'Define Path', u'cli-suppress-list-no': None, u'cli-no-key-completion': None, u'callpoint': u'MplsPath', u'cli-mode-name': u'config-router-mpls-path-$(path-name)'}}), is_container='list', yang_name="path", rest_name="path", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Define Path', u'cli-suppress-list-no': None, u'cli-no-key-completion': None, u'callpoint': u'MplsPath', u'cli-mode-name': u'config-router-mpls-path-$(path-name)'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='list', is_config=True)""",
})
self.__path = t
if hasattr(self, '_set'):
self._set() |
def discharge(self, username, macaroon):
"""Discharge the macarooon for the identity.
Raise a ServerError if an error occurs in the request process.
@param username The logged in user.
@param macaroon The macaroon returned from the charm store.
@return The resulting base64 encoded macaroon.
@raises ServerError when making request to the discharge endpoint
InvalidMacaroon when the macaroon passed in or discharged is invalid
"""
caveats = macaroon.third_party_caveats()
if len(caveats) != 1:
raise InvalidMacaroon(
'Invalid number of third party caveats (1 != {})'
''.format(len(caveats)))
url = '{}discharger/discharge?discharge-for-user={}&id={}'.format(
self.url, quote(username), caveats[0][1])
logging.debug('Sending identity info to {}'.format(url))
logging.debug('data is {}'.format(caveats[0][1]))
response = make_request(url, method='POST', timeout=self.timeout)
try:
macaroon = response['Macaroon']
json_macaroon = json.dumps(macaroon)
except (KeyError, UnicodeDecodeError) as err:
raise InvalidMacaroon(
'Invalid macaroon from discharger: {}'.format(err))
return base64.urlsafe_b64encode(json_macaroon.encode('utf-8')) | Discharge the macaroon for the identity.
Raise a ServerError if an error occurs in the request process.
@param username The logged in user.
@param macaroon The macaroon returned from the charm store.
@return The resulting base64 encoded macaroon.
@raises ServerError when making request to the discharge endpoint
InvalidMacaroon when the macaroon passed in or discharged is invalid | Below is the instruction that describes the task:
### Input:
Discharge the macaroon for the identity.
Raise a ServerError if an error occurs in the request process.
@param username The logged in user.
@param macaroon The macaroon returned from the charm store.
@return The resulting base64 encoded macaroon.
@raises ServerError when making request to the discharge endpoint
InvalidMacaroon when the macaroon passed in or discharged is invalid
### Response:
def discharge(self, username, macaroon):
"""Discharge the macarooon for the identity.
Raise a ServerError if an error occurs in the request process.
@param username The logged in user.
@param macaroon The macaroon returned from the charm store.
@return The resulting base64 encoded macaroon.
@raises ServerError when making request to the discharge endpoint
InvalidMacaroon when the macaroon passed in or discharged is invalid
"""
caveats = macaroon.third_party_caveats()
if len(caveats) != 1:
raise InvalidMacaroon(
'Invalid number of third party caveats (1 != {})'
''.format(len(caveats)))
url = '{}discharger/discharge?discharge-for-user={}&id={}'.format(
self.url, quote(username), caveats[0][1])
logging.debug('Sending identity info to {}'.format(url))
logging.debug('data is {}'.format(caveats[0][1]))
response = make_request(url, method='POST', timeout=self.timeout)
try:
macaroon = response['Macaroon']
json_macaroon = json.dumps(macaroon)
except (KeyError, UnicodeDecodeError) as err:
raise InvalidMacaroon(
'Invalid macaroon from discharger: {}'.format(err))
return base64.urlsafe_b64encode(json_macaroon.encode('utf-8')) |
def load_csv(ctx, model, path, header=None, header_exclude=None, **fmtparams):
"""Load a CSV from a file path.
:param ctx: Anthem context
:param model: Odoo model name or model klass from env
:param path: absolute or relative path to CSV file.
If a relative path is given you must provide a value for
`ODOO_DATA_PATH` in your environment
or set `--odoo-data-path` option.
:param header: whitelist of CSV columns to load
:param header_exclude: blacklist of CSV columns to not load
:param fmtparams: keyword params for `csv_unireader`
Usage example::
from pkg_resources import Requirement, resource_string
req = Requirement.parse('my-project')
load_csv(ctx, ctx.env['res.users'],
resource_string(req, 'data/users.csv'),
delimiter=',')
"""
if not os.path.isabs(path):
if ctx.options.odoo_data_path:
path = os.path.join(ctx.options.odoo_data_path, path)
else:
raise AnthemError(
'Got a relative path. '
'Please, provide a value for `ODOO_DATA_PATH` '
'in your environment or set `--odoo-data-path` option.'
)
with open(path, 'rb') as data:
load_csv_stream(ctx, model, data,
header=header, header_exclude=header_exclude,
**fmtparams) | Load a CSV from a file path.
:param ctx: Anthem context
:param model: Odoo model name or model klass from env
:param path: absolute or relative path to CSV file.
If a relative path is given you must provide a value for
`ODOO_DATA_PATH` in your environment
or set `--odoo-data-path` option.
:param header: whitelist of CSV columns to load
:param header_exclude: blacklist of CSV columns to not load
:param fmtparams: keyword params for `csv_unireader`
Usage example::
from pkg_resources import Requirement, resource_string
req = Requirement.parse('my-project')
load_csv(ctx, ctx.env['res.users'],
resource_string(req, 'data/users.csv'),
delimiter=',') | Below is the instruction that describes the task:
### Input:
Load a CSV from a file path.
:param ctx: Anthem context
:param model: Odoo model name or model klass from env
:param path: absolute or relative path to CSV file.
If a relative path is given you must provide a value for
`ODOO_DATA_PATH` in your environment
or set `--odoo-data-path` option.
:param header: whitelist of CSV columns to load
:param header_exclude: blacklist of CSV columns to not load
:param fmtparams: keyword params for `csv_unireader`
Usage example::
from pkg_resources import Requirement, resource_string
req = Requirement.parse('my-project')
load_csv(ctx, ctx.env['res.users'],
resource_string(req, 'data/users.csv'),
delimiter=',')
### Response:
def load_csv(ctx, model, path, header=None, header_exclude=None, **fmtparams):
"""Load a CSV from a file path.
:param ctx: Anthem context
:param model: Odoo model name or model klass from env
:param path: absolute or relative path to CSV file.
If a relative path is given you must provide a value for
`ODOO_DATA_PATH` in your environment
or set `--odoo-data-path` option.
:param header: whitelist of CSV columns to load
:param header_exclude: blacklist of CSV columns to not load
:param fmtparams: keyword params for `csv_unireader`
Usage example::
from pkg_resources import Requirement, resource_string
req = Requirement.parse('my-project')
load_csv(ctx, ctx.env['res.users'],
resource_string(req, 'data/users.csv'),
delimiter=',')
"""
if not os.path.isabs(path):
if ctx.options.odoo_data_path:
path = os.path.join(ctx.options.odoo_data_path, path)
else:
raise AnthemError(
'Got a relative path. '
'Please, provide a value for `ODOO_DATA_PATH` '
'in your environment or set `--odoo-data-path` option.'
)
with open(path, 'rb') as data:
load_csv_stream(ctx, model, data,
header=header, header_exclude=header_exclude,
**fmtparams) |
def create_record_sets(self, record_set_dicts):
"""Accept list of record_set dicts.
Return list of record_set objects."""
record_set_objects = []
for record_set_dict in record_set_dicts:
# pop removes the 'Enabled' key and tests if True.
if record_set_dict.pop('Enabled', True):
record_set_objects.append(
self.create_record_set(record_set_dict)
)
return record_set_objects | Accept list of record_set dicts.
Return list of record_set objects. | Below is the instruction that describes the task:
### Input:
Accept list of record_set dicts.
Return list of record_set objects.
### Response:
def create_record_sets(self, record_set_dicts):
"""Accept list of record_set dicts.
Return list of record_set objects."""
record_set_objects = []
for record_set_dict in record_set_dicts:
# pop removes the 'Enabled' key and tests if True.
if record_set_dict.pop('Enabled', True):
record_set_objects.append(
self.create_record_set(record_set_dict)
)
return record_set_objects |
def scopeMatch(assumedScopes, requiredScopeSets):
"""
Take a list of a assumed scopes, and a list of required scope sets on
disjunctive normal form, and check if any of the required scope sets are
satisfied.
Example:
requiredScopeSets = [
["scopeA", "scopeB"],
["scopeC"]
]
In this case assumedScopes must contain either:
"scopeA" AND "scopeB", OR just "scopeC".
"""
for scopeSet in requiredScopeSets:
for requiredScope in scopeSet:
for scope in assumedScopes:
if scope == requiredScope:
# requiredScope satisfied, no need to check more scopes
break
if scope.endswith("*") and requiredScope.startswith(scope[:-1]):
# requiredScope satisfied, no need to check more scopes
break
else:
# requiredScope not satisfied, stop checking scopeSet
break
else:
# scopeSet satisfied, so we're happy
return True
# none of the requiredScopeSets were satisfied
return False | Take a list of assumed scopes and a list of required scope sets in
disjunctive normal form, and check if any of the required scope sets are
satisfied.
Example:
requiredScopeSets = [
["scopeA", "scopeB"],
["scopeC"]
]
In this case assumedScopes must contain either:
"scopeA" AND "scopeB", OR just "scopeC". | Below is the the instruction that describes the task:
### Input:
Take a list of assumed scopes and a list of required scope sets in
disjunctive normal form, and check if any of the required scope sets are
satisfied.
Example:
requiredScopeSets = [
["scopeA", "scopeB"],
["scopeC"]
]
In this case assumedScopes must contain either:
"scopeA" AND "scopeB", OR just "scopeC".
### Response:
def scopeMatch(assumedScopes, requiredScopeSets):
"""
Take a list of a assumed scopes, and a list of required scope sets on
disjunctive normal form, and check if any of the required scope sets are
satisfied.
Example:
requiredScopeSets = [
["scopeA", "scopeB"],
["scopeC"]
]
In this case assumedScopes must contain either:
"scopeA" AND "scopeB", OR just "scopeC".
"""
for scopeSet in requiredScopeSets:
for requiredScope in scopeSet:
for scope in assumedScopes:
if scope == requiredScope:
# requiredScope satisfied, no need to check more scopes
break
if scope.endswith("*") and requiredScope.startswith(scope[:-1]):
# requiredScope satisfied, no need to check more scopes
break
else:
# requiredScope not satisfied, stop checking scopeSet
break
else:
# scopeSet satisfied, so we're happy
return True
# none of the requiredScopeSets were satisfied
return False |
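A few concrete calls showing both the AND-within-set/OR-across-sets logic and the trailing-star prefix match:

```python
required = [["scopeA", "scopeB"], ["scopeC"]]

print(scopeMatch(["scopeA", "scopeB"], required))  # True: first set satisfied
print(scopeMatch(["scopeC"], required))            # True: second set satisfied
print(scopeMatch(["scopeA"], required))            # False: no set fully satisfied
print(scopeMatch(["scope*"], required))            # True: star prefix-matches "scopeC"
```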
def creator(entry, config):
"""Creator function for creating an instance of an Ansible script."""
ansible_playbook = "ansible.playbook.dry.run.see.comment"
ansible_inventory = "ansible.inventory.dry.run.see.comment"
ansible_playbook_content = render(config.script, model=config.model, env=config.env,
variables=config.variables, item=config.item)
ansible_inventory_content = render(entry['inventory'], model=config.model, env=config.env,
variables=config.variables, item=config.item)
if not config.dry_run:
ansible_playbook = write_temporary_file(ansible_playbook_content, 'ansible-play-', '.yaml')
ansible_playbook_content = ''
ansible_inventory = write_temporary_file(ansible_inventory_content, prefix='ansible-inventory-')
ansible_inventory_content = ''
# rendering the Bash script for running the Ansible playbook
template_file = os.path.join(os.path.dirname(__file__), 'templates/ansible.sh.j2')
with open(template_file) as handle:
template = handle.read()
config.script = render(template, debug=config.debug,
ansible_playbook_content=ansible_playbook_content,
ansible_playbook=ansible_playbook,
ansible_inventory_content=ansible_inventory_content,
ansible_inventory=ansible_inventory,
limit=entry['limit'])
return Ansible(config) | Creator function for creating an instance of an Ansible script. | Below is the instruction that describes the task:
### Input:
Creator function for creating an instance of an Ansible script.
### Response:
def creator(entry, config):
"""Creator function for creating an instance of an Ansible script."""
ansible_playbook = "ansible.playbook.dry.run.see.comment"
ansible_inventory = "ansible.inventory.dry.run.see.comment"
ansible_playbook_content = render(config.script, model=config.model, env=config.env,
variables=config.variables, item=config.item)
ansible_inventory_content = render(entry['inventory'], model=config.model, env=config.env,
variables=config.variables, item=config.item)
if not config.dry_run:
ansible_playbook = write_temporary_file(ansible_playbook_content, 'ansible-play-', '.yaml')
ansible_playbook_content = ''
ansible_inventory = write_temporary_file(ansible_inventory_content, prefix='ansible-inventory-')
ansible_inventory_content = ''
# rendering the Bash script for running the Ansible playbook
template_file = os.path.join(os.path.dirname(__file__), 'templates/ansible.sh.j2')
with open(template_file) as handle:
template = handle.read()
config.script = render(template, debug=config.debug,
ansible_playbook_content=ansible_playbook_content,
ansible_playbook=ansible_playbook,
ansible_inventory_content=ansible_inventory_content,
ansible_inventory=ansible_inventory,
limit=entry['limit'])
return Ansible(config) |
def option(self, key, value=None, **kwargs):
"""Creates a new option inside a section
Args:
key (str): key of the option
value (str or None): value of the option
**kwargs: are passed to the constructor of :class:`Option`
Returns:
self for chaining
"""
if not isinstance(self._container, Section):
raise ValueError("Options can only be added inside a section!")
option = Option(key, value, container=self._container, **kwargs)
option.value = value
self._container.structure.insert(self._idx, option)
self._idx += 1
return self | Creates a new option inside a section
Args:
key (str): key of the option
value (str or None): value of the option
**kwargs: are passed to the constructor of :class:`Option`
Returns:
self for chaining | Below is the instruction that describes the task:
### Input:
Creates a new option inside a section
Args:
key (str): key of the option
value (str or None): value of the option
**kwargs: are passed to the constructor of :class:`Option`
Returns:
self for chaining
### Response:
def option(self, key, value=None, **kwargs):
"""Creates a new option inside a section
Args:
key (str): key of the option
value (str or None): value of the option
**kwargs: are passed to the constructor of :class:`Option`
Returns:
self for chaining
"""
if not isinstance(self._container, Section):
raise ValueError("Options can only be added inside a section!")
option = Option(key, value, container=self._container, **kwargs)
option.value = value
self._container.structure.insert(self._idx, option)
self._idx += 1
return self |
def smallest_signed_angle(source, target):
"""Find the smallest angle going from angle `source` to angle `target`."""
dth = target - source
dth = (dth + np.pi) % (2.0 * np.pi) - np.pi
    return dth | Find the smallest angle going from angle `source` to angle `target`. | Below is the instruction that describes the task:
### Input:
Find the smallest angle going from angle `source` to angle `target`.
### Response:
def smallest_signed_angle(source, target):
"""Find the smallest angle going from angle `source` to angle `target`."""
dth = target - source
dth = (dth + np.pi) % (2.0 * np.pi) - np.pi
return dth |
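A standalone sanity check of smallest_signed_angle (a sketch assuming only numpy; the body is repeated so the snippet runs on its own) — stepping from just above 0 to just below 2π should come out as a small negative rotation, not nearly a full turn:
```python
import numpy as np

def smallest_signed_angle(source, target):
    # Same body as the entry above, repeated so the snippet is standalone.
    dth = target - source
    dth = (dth + np.pi) % (2.0 * np.pi) - np.pi
    return dth

print(smallest_signed_angle(0.1, 2 * np.pi - 0.1))  # ~ -0.2, not ~ +6.08
print(smallest_signed_angle(3.0, -3.0))             # ~ +0.283, wraps through pi
```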
def get_file(path,
dest,
saltenv='base',
makedirs=False,
template=None,
gzip=None,
**kwargs):
'''
.. versionchanged:: 2018.3.0
``dest`` can now be a directory
Used to get a single file from the salt master
CLI Example:
.. code-block:: bash
salt '*' cp.get_file salt://path/to/file /minion/dest
Template rendering can be enabled on both the source and destination file
names like so:
.. code-block:: bash
salt '*' cp.get_file "salt://{{grains.os}}/vimrc" /etc/vimrc template=jinja
This example would instruct all Salt minions to download the vimrc from a
directory with the same name as their os grain and copy it to /etc/vimrc
For larger files, the cp.get_file module also supports gzip compression.
Because gzip is CPU-intensive, this should only be used in scenarios where
the compression ratio is very high (e.g. pretty-printed JSON or YAML
files).
Use the *gzip* named argument to enable it. Valid values are 1..9, where 1
is the lightest compression and 9 the heaviest. 1 uses the least CPU on
the master (and minion), 9 uses the most.
There are two ways of defining the fileserver environment (a.k.a.
``saltenv``) from which to retrieve the file. One is to use the ``saltenv``
parameter, and the other is to use a querystring syntax in the ``salt://``
URL. The below two examples are equivalent:
.. code-block:: bash
salt '*' cp.get_file salt://foo/bar.conf /etc/foo/bar.conf saltenv=config
salt '*' cp.get_file salt://foo/bar.conf?saltenv=config /etc/foo/bar.conf
.. note::
It may be necessary to quote the URL when using the querystring method,
depending on the shell being used to run the command.
'''
(path, dest) = _render_filenames(path, dest, saltenv, template, **kwargs)
path, senv = salt.utils.url.split_env(path)
if senv:
saltenv = senv
if not hash_file(path, saltenv):
return ''
else:
return _client().get_file(
path,
dest,
makedirs,
saltenv,
gzip) | .. versionchanged:: 2018.3.0
``dest`` can now be a directory
Used to get a single file from the salt master
CLI Example:
.. code-block:: bash
salt '*' cp.get_file salt://path/to/file /minion/dest
Template rendering can be enabled on both the source and destination file
names like so:
.. code-block:: bash
salt '*' cp.get_file "salt://{{grains.os}}/vimrc" /etc/vimrc template=jinja
This example would instruct all Salt minions to download the vimrc from a
directory with the same name as their os grain and copy it to /etc/vimrc
For larger files, the cp.get_file module also supports gzip compression.
Because gzip is CPU-intensive, this should only be used in scenarios where
the compression ratio is very high (e.g. pretty-printed JSON or YAML
files).
Use the *gzip* named argument to enable it. Valid values are 1..9, where 1
is the lightest compression and 9 the heaviest. 1 uses the least CPU on
the master (and minion), 9 uses the most.
There are two ways of defining the fileserver environment (a.k.a.
``saltenv``) from which to retrieve the file. One is to use the ``saltenv``
parameter, and the other is to use a querystring syntax in the ``salt://``
URL. The below two examples are equivalent:
.. code-block:: bash
salt '*' cp.get_file salt://foo/bar.conf /etc/foo/bar.conf saltenv=config
salt '*' cp.get_file salt://foo/bar.conf?saltenv=config /etc/foo/bar.conf
.. note::
It may be necessary to quote the URL when using the querystring method,
depending on the shell being used to run the command. | Below is the instruction that describes the task:
### Input:
.. versionchanged:: 2018.3.0
``dest`` can now be a directory
Used to get a single file from the salt master
CLI Example:
.. code-block:: bash
salt '*' cp.get_file salt://path/to/file /minion/dest
Template rendering can be enabled on both the source and destination file
names like so:
.. code-block:: bash
salt '*' cp.get_file "salt://{{grains.os}}/vimrc" /etc/vimrc template=jinja
This example would instruct all Salt minions to download the vimrc from a
directory with the same name as their os grain and copy it to /etc/vimrc
For larger files, the cp.get_file module also supports gzip compression.
Because gzip is CPU-intensive, this should only be used in scenarios where
the compression ratio is very high (e.g. pretty-printed JSON or YAML
files).
Use the *gzip* named argument to enable it. Valid values are 1..9, where 1
is the lightest compression and 9 the heaviest. 1 uses the least CPU on
the master (and minion), 9 uses the most.
There are two ways of defining the fileserver environment (a.k.a.
``saltenv``) from which to retrieve the file. One is to use the ``saltenv``
parameter, and the other is to use a querystring syntax in the ``salt://``
URL. The below two examples are equivalent:
.. code-block:: bash
salt '*' cp.get_file salt://foo/bar.conf /etc/foo/bar.conf saltenv=config
salt '*' cp.get_file salt://foo/bar.conf?saltenv=config /etc/foo/bar.conf
.. note::
It may be necessary to quote the URL when using the querystring method,
depending on the shell being used to run the command.
### Response:
def get_file(path,
dest,
saltenv='base',
makedirs=False,
template=None,
gzip=None,
**kwargs):
'''
.. versionchanged:: 2018.3.0
``dest`` can now be a directory
Used to get a single file from the salt master
CLI Example:
.. code-block:: bash
salt '*' cp.get_file salt://path/to/file /minion/dest
Template rendering can be enabled on both the source and destination file
names like so:
.. code-block:: bash
salt '*' cp.get_file "salt://{{grains.os}}/vimrc" /etc/vimrc template=jinja
This example would instruct all Salt minions to download the vimrc from a
directory with the same name as their os grain and copy it to /etc/vimrc
For larger files, the cp.get_file module also supports gzip compression.
Because gzip is CPU-intensive, this should only be used in scenarios where
the compression ratio is very high (e.g. pretty-printed JSON or YAML
files).
Use the *gzip* named argument to enable it. Valid values are 1..9, where 1
is the lightest compression and 9 the heaviest. 1 uses the least CPU on
the master (and minion), 9 uses the most.
There are two ways of defining the fileserver environment (a.k.a.
``saltenv``) from which to retrieve the file. One is to use the ``saltenv``
parameter, and the other is to use a querystring syntax in the ``salt://``
URL. The below two examples are equivalent:
.. code-block:: bash
salt '*' cp.get_file salt://foo/bar.conf /etc/foo/bar.conf saltenv=config
salt '*' cp.get_file salt://foo/bar.conf?saltenv=config /etc/foo/bar.conf
.. note::
It may be necessary to quote the URL when using the querystring method,
depending on the shell being used to run the command.
'''
(path, dest) = _render_filenames(path, dest, saltenv, template, **kwargs)
path, senv = salt.utils.url.split_env(path)
if senv:
saltenv = senv
if not hash_file(path, saltenv):
return ''
else:
return _client().get_file(
path,
dest,
makedirs,
saltenv,
gzip) |
def get_available_languages(self, obj):
"""
        Fetching the available languages as a queryset.
"""
if obj:
return obj.get_available_languages()
else:
            return self.model._parler_meta.root_model.objects.none() | Fetching the available languages as a queryset. | Below is the instruction that describes the task:
### Input:
Fetching the available languages as a queryset.
### Response:
def get_available_languages(self, obj):
"""
        Fetching the available languages as a queryset.
"""
if obj:
return obj.get_available_languages()
else:
return self.model._parler_meta.root_model.objects.none() |
def box_ids(creds: dict, cred_ids: list = None) -> dict:
"""
Given a credentials structure and an optional list of credential identifiers
(aka wallet cred-ids, referents; specify None to include all), return dict mapping each
credential identifier to a box ids structure (i.e., a dict specifying its corresponding
schema identifier, credential definition identifier, and revocation registry identifier,
the latter being None if cred def does not support revocation).
:param creds: creds structure returned by (HolderProver agent) get_creds()
:param cred_ids: list of credential identifiers for which to find corresponding schema identifiers, None for all
:return: dict mapping each credential identifier to its corresponding box ids (empty dict if
no matching credential identifiers present)
"""
rv = {}
for inner_creds in {**creds.get('attrs', {}), **creds.get('predicates', {})}.values():
for cred in inner_creds: # cred is a dict in a list of dicts
cred_info = cred['cred_info']
cred_id = cred_info['referent']
if (cred_id not in rv) and (not cred_ids or cred_id in cred_ids):
rv[cred_id] = {
'schema_id': cred_info['schema_id'],
'cred_def_id': cred_info['cred_def_id'],
'rev_reg_id': cred_info['rev_reg_id']
}
return rv | Given a credentials structure and an optional list of credential identifiers
(aka wallet cred-ids, referents; specify None to include all), return dict mapping each
credential identifier to a box ids structure (i.e., a dict specifying its corresponding
schema identifier, credential definition identifier, and revocation registry identifier,
the latter being None if cred def does not support revocation).
:param creds: creds structure returned by (HolderProver agent) get_creds()
:param cred_ids: list of credential identifiers for which to find corresponding schema identifiers, None for all
:return: dict mapping each credential identifier to its corresponding box ids (empty dict if
no matching credential identifiers present) | Below is the instruction that describes the task:
### Input:
Given a credentials structure and an optional list of credential identifiers
(aka wallet cred-ids, referents; specify None to include all), return dict mapping each
credential identifier to a box ids structure (i.e., a dict specifying its corresponding
schema identifier, credential definition identifier, and revocation registry identifier,
the latter being None if cred def does not support revocation).
:param creds: creds structure returned by (HolderProver agent) get_creds()
:param cred_ids: list of credential identifiers for which to find corresponding schema identifiers, None for all
:return: dict mapping each credential identifier to its corresponding box ids (empty dict if
no matching credential identifiers present)
### Response:
def box_ids(creds: dict, cred_ids: list = None) -> dict:
"""
Given a credentials structure and an optional list of credential identifiers
(aka wallet cred-ids, referents; specify None to include all), return dict mapping each
credential identifier to a box ids structure (i.e., a dict specifying its corresponding
schema identifier, credential definition identifier, and revocation registry identifier,
the latter being None if cred def does not support revocation).
:param creds: creds structure returned by (HolderProver agent) get_creds()
:param cred_ids: list of credential identifiers for which to find corresponding schema identifiers, None for all
:return: dict mapping each credential identifier to its corresponding box ids (empty dict if
no matching credential identifiers present)
"""
rv = {}
for inner_creds in {**creds.get('attrs', {}), **creds.get('predicates', {})}.values():
for cred in inner_creds: # cred is a dict in a list of dicts
cred_info = cred['cred_info']
cred_id = cred_info['referent']
if (cred_id not in rv) and (not cred_ids or cred_id in cred_ids):
rv[cred_id] = {
'schema_id': cred_info['schema_id'],
'cred_def_id': cred_info['cred_def_id'],
'rev_reg_id': cred_info['rev_reg_id']
}
return rv |
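A minimal, made-up creds structure showing the shape box_ids expects and the mapping it returns, assuming the box_ids above is in scope (the identifiers below are placeholders, not real ledger values):
```python
creds = {
    'attrs': {
        'attr::name::uuid': [
            {'cred_info': {
                'referent': 'wallet-cred-1',
                'schema_id': 'DID:2:sample-schema:1.0',
                'cred_def_id': 'DID:3:CL:17:tag',
                'rev_reg_id': None}},
        ],
    },
    'predicates': {},
}
print(box_ids(creds))
# {'wallet-cred-1': {'schema_id': 'DID:2:sample-schema:1.0',
#                    'cred_def_id': 'DID:3:CL:17:tag',
#                    'rev_reg_id': None}}
```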
def run(self, *args):
"""Endit profile information."""
uuid, kwargs = self.__parse_arguments(*args)
code = self.edit_profile(uuid, **kwargs)
        return code | Edit profile information. | Below is the instruction that describes the task:
### Input:
Edit profile information.
### Response:
def run(self, *args):
"""Endit profile information."""
uuid, kwargs = self.__parse_arguments(*args)
code = self.edit_profile(uuid, **kwargs)
return code |
def previous(self, day_of_week=None):
"""
Modify to the previous occurrence of a given day of the week.
If no day_of_week is provided, modify to the previous occurrence
of the current day of the week. Use the supplied consts
to indicate the desired day_of_week, ex. pendulum.MONDAY.
:param day_of_week: The previous day of week to reset to.
:type day_of_week: int or None
:rtype: Date
"""
if day_of_week is None:
day_of_week = self.day_of_week
if day_of_week < SUNDAY or day_of_week > SATURDAY:
raise ValueError("Invalid day of week")
dt = self.subtract(days=1)
while dt.day_of_week != day_of_week:
dt = dt.subtract(days=1)
return dt | Modify to the previous occurrence of a given day of the week.
If no day_of_week is provided, modify to the previous occurrence
of the current day of the week. Use the supplied consts
to indicate the desired day_of_week, ex. pendulum.MONDAY.
:param day_of_week: The previous day of week to reset to.
:type day_of_week: int or None
:rtype: Date | Below is the instruction that describes the task:
### Input:
Modify to the previous occurrence of a given day of the week.
If no day_of_week is provided, modify to the previous occurrence
of the current day of the week. Use the supplied consts
to indicate the desired day_of_week, ex. pendulum.MONDAY.
:param day_of_week: The previous day of week to reset to.
:type day_of_week: int or None
:rtype: Date
### Response:
def previous(self, day_of_week=None):
"""
Modify to the previous occurrence of a given day of the week.
If no day_of_week is provided, modify to the previous occurrence
of the current day of the week. Use the supplied consts
to indicate the desired day_of_week, ex. pendulum.MONDAY.
:param day_of_week: The previous day of week to reset to.
:type day_of_week: int or None
:rtype: Date
"""
if day_of_week is None:
day_of_week = self.day_of_week
if day_of_week < SUNDAY or day_of_week > SATURDAY:
raise ValueError("Invalid day of week")
dt = self.subtract(days=1)
while dt.day_of_week != day_of_week:
dt = dt.subtract(days=1)
return dt |
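A hedged usage sketch, assuming this previous() is pendulum's Date.previous (in pendulum 2.x the weekday constants such as pendulum.MONDAY are plain ints):
```python
import pendulum

d = pendulum.date(2024, 5, 15)       # a Wednesday
print(d.previous(pendulum.MONDAY))   # 2024-05-13
print(d.previous())                  # 2024-05-08, the previous Wednesday
```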
def matrix_decomp(self, cache=None):
"""Compute a Hermitian eigenbasis decomposition of the matrix.
Parameters
----------
cache : bool or None, optional
If ``True``, store the decomposition internally. For None,
the ``cache_mat_decomp`` from class initialization is used.
Returns
-------
eigval : `numpy.ndarray`
One-dimensional array of eigenvalues. Its length is equal
to the number of matrix rows.
eigvec : `numpy.ndarray`
Two-dimensional array of eigenvectors. It has the same shape
as the decomposed matrix.
See Also
--------
scipy.linalg.decomp.eigh :
Implementation of the decomposition. Standard parameters
are used here.
Raises
------
NotImplementedError
if the matrix is sparse (not supported by scipy 0.17)
"""
# Lazy import to improve `import odl` time
import scipy.linalg
import scipy.sparse
# TODO: fix dead link `scipy.linalg.decomp.eigh`
if scipy.sparse.isspmatrix(self.matrix):
raise NotImplementedError('sparse matrix not supported')
if cache is None:
cache = self._cache_mat_decomp
if self._eigval is None or self._eigvec is None:
eigval, eigvec = scipy.linalg.eigh(self.matrix)
if cache:
self._eigval = eigval
self._eigvec = eigvec
else:
eigval, eigvec = self._eigval, self._eigvec
return eigval, eigvec | Compute a Hermitian eigenbasis decomposition of the matrix.
Parameters
----------
cache : bool or None, optional
If ``True``, store the decomposition internally. For None,
the ``cache_mat_decomp`` from class initialization is used.
Returns
-------
eigval : `numpy.ndarray`
One-dimensional array of eigenvalues. Its length is equal
to the number of matrix rows.
eigvec : `numpy.ndarray`
Two-dimensional array of eigenvectors. It has the same shape
as the decomposed matrix.
See Also
--------
scipy.linalg.decomp.eigh :
Implementation of the decomposition. Standard parameters
are used here.
Raises
------
NotImplementedError
if the matrix is sparse (not supported by scipy 0.17) | Below is the instruction that describes the task:
### Input:
Compute a Hermitian eigenbasis decomposition of the matrix.
Parameters
----------
cache : bool or None, optional
If ``True``, store the decomposition internally. For None,
the ``cache_mat_decomp`` from class initialization is used.
Returns
-------
eigval : `numpy.ndarray`
One-dimensional array of eigenvalues. Its length is equal
to the number of matrix rows.
eigvec : `numpy.ndarray`
Two-dimensional array of eigenvectors. It has the same shape
as the decomposed matrix.
See Also
--------
scipy.linalg.decomp.eigh :
Implementation of the decomposition. Standard parameters
are used here.
Raises
------
NotImplementedError
if the matrix is sparse (not supported by scipy 0.17)
### Response:
def matrix_decomp(self, cache=None):
"""Compute a Hermitian eigenbasis decomposition of the matrix.
Parameters
----------
cache : bool or None, optional
If ``True``, store the decomposition internally. For None,
the ``cache_mat_decomp`` from class initialization is used.
Returns
-------
eigval : `numpy.ndarray`
One-dimensional array of eigenvalues. Its length is equal
to the number of matrix rows.
eigvec : `numpy.ndarray`
Two-dimensional array of eigenvectors. It has the same shape
as the decomposed matrix.
See Also
--------
scipy.linalg.decomp.eigh :
Implementation of the decomposition. Standard parameters
are used here.
Raises
------
NotImplementedError
if the matrix is sparse (not supported by scipy 0.17)
"""
# Lazy import to improve `import odl` time
import scipy.linalg
import scipy.sparse
# TODO: fix dead link `scipy.linalg.decomp.eigh`
if scipy.sparse.isspmatrix(self.matrix):
raise NotImplementedError('sparse matrix not supported')
if cache is None:
cache = self._cache_mat_decomp
if self._eigval is None or self._eigvec is None:
eigval, eigvec = scipy.linalg.eigh(self.matrix)
if cache:
self._eigval = eigval
self._eigvec = eigvec
else:
eigval, eigvec = self._eigval, self._eigvec
return eigval, eigvec |
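The underlying decomposition in isolation, independent of the caching wrapper: for a real symmetric matrix, scipy.linalg.eigh returns ascending eigenvalues and orthonormal eigenvectors that reconstruct the matrix.
```python
import numpy as np
import scipy.linalg

A = np.array([[2.0, 1.0],
              [1.0, 2.0]])           # Hermitian (here: real symmetric)
eigval, eigvec = scipy.linalg.eigh(A)
print(eigval)                        # [1. 3.]
# The eigenbasis reconstructs the original matrix: A = V diag(w) V^T
print(np.allclose(eigvec @ np.diag(eigval) @ eigvec.T, A))  # True
```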
def diff_sizes(a, b, progressbar=None):
"""Return list of tuples where sizes differ.
Tuple structure:
(identifier, size in a, size in b)
Assumes list of identifiers in a and b are identical.
:param a: first :class:`dtoolcore.DataSet`
:param b: second :class:`dtoolcore.DataSet`
:returns: list of tuples for all items with different sizes
"""
difference = []
for i in a.identifiers:
a_size = a.item_properties(i)["size_in_bytes"]
b_size = b.item_properties(i)["size_in_bytes"]
if a_size != b_size:
difference.append((i, a_size, b_size))
if progressbar:
progressbar.update(1)
return difference | Return list of tuples where sizes differ.
Tuple structure:
(identifier, size in a, size in b)
Assumes list of identifiers in a and b are identical.
:param a: first :class:`dtoolcore.DataSet`
:param b: second :class:`dtoolcore.DataSet`
:returns: list of tuples for all items with different sizes | Below is the instruction that describes the task:
### Input:
Return list of tuples where sizes differ.
Tuple structure:
(identifier, size in a, size in b)
Assumes list of identifiers in a and b are identical.
:param a: first :class:`dtoolcore.DataSet`
:param b: second :class:`dtoolcore.DataSet`
:returns: list of tuples for all items with different sizes
### Response:
def diff_sizes(a, b, progressbar=None):
"""Return list of tuples where sizes differ.
Tuple structure:
(identifier, size in a, size in b)
Assumes list of identifiers in a and b are identical.
:param a: first :class:`dtoolcore.DataSet`
:param b: second :class:`dtoolcore.DataSet`
:returns: list of tuples for all items with different sizes
"""
difference = []
for i in a.identifiers:
a_size = a.item_properties(i)["size_in_bytes"]
b_size = b.item_properties(i)["size_in_bytes"]
if a_size != b_size:
difference.append((i, a_size, b_size))
if progressbar:
progressbar.update(1)
return difference |
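A duck-typed stand-in for dtoolcore.DataSet (hypothetical, exposing only the two members diff_sizes touches) is enough to exercise the function, assuming the diff_sizes above is in scope:
```python
class FakeDataSet:
    """Exposes only identifiers and item_properties, which diff_sizes uses."""
    def __init__(self, sizes):
        self._sizes = sizes
        self.identifiers = list(sizes)

    def item_properties(self, identifier):
        return {"size_in_bytes": self._sizes[identifier]}

a = FakeDataSet({"id-1": 10, "id-2": 20})
b = FakeDataSet({"id-1": 10, "id-2": 25})
print(diff_sizes(a, b))  # [('id-2', 20, 25)]
```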
def nearest_base(cls, bases):
'''Returns the closest ancestor to cls in bases.
'''
if cls in bases:
return cls
dists = {base: index(mro(cls), base) for base in bases}
dists2 = {dist: base for base, dist in dists.items() if dist is not None}
if not dists2:
return None
    return dists2[min(dists2)] | Returns the closest ancestor to cls in bases. | Below is the instruction that describes the task:
### Input:
Returns the closest ancestor to cls in bases.
### Response:
def nearest_base(cls, bases):
'''Returns the closest ancestor to cls in bases.
'''
if cls in bases:
return cls
dists = {base: index(mro(cls), base) for base in bases}
dists2 = {dist: base for base, dist in dists.items() if dist is not None}
if not dists2:
return None
return dists2[min(dists2)] |
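nearest_base relies on mro and index helpers defined elsewhere in its library; the stand-ins below are an assumption about their behavior, just enough to make the idea runnable with the nearest_base above in scope:
```python
def mro(cls):
    return cls.__mro__                 # method resolution order, a tuple

def index(seq, item):
    try:
        return seq.index(item)         # position in the MRO, or None
    except ValueError:
        return None

class A: pass
class B(A): pass
class C(B): pass

print(nearest_base(C, [A, B]))         # B -- the closest ancestor of C
```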
def _responsive_sleep(self, seconds, wait_log_interval=0, wait_reason=''):
"""When there is litte work to do, the queuing thread sleeps a lot.
It can't sleep for too long without checking for the quit flag and/or
logging about why it is sleeping.
parameters:
seconds - the number of seconds to sleep
wait_log_interval - while sleeping, it is helpful if the thread
periodically announces itself so that we
know that it is still alive. This number is
the time in seconds between log entries.
            wait_reason - this is for the explanation of why the thread is
                          sleeping. This is likely to be a message like:
                          'there is no work to do'.
        This was also partially motivated by older versions of Python's inability
to KeyboardInterrupt out of a long sleep()."""
for x in xrange(int(seconds)):
self.quit_check()
if wait_log_interval and not x % wait_log_interval:
self.logger.info('%s: %dsec of %dsec',
wait_reason,
x,
seconds)
self.quit_check()
            time.sleep(1.0) | When there is little work to do, the queuing thread sleeps a lot.
It can't sleep for too long without checking for the quit flag and/or
logging about why it is sleeping.
parameters:
seconds - the number of seconds to sleep
wait_log_interval - while sleeping, it is helpful if the thread
periodically announces itself so that we
know that it is still alive. This number is
the time in seconds between log entries.
wait_reason - this is for the explanation of why the thread is
sleeping. This is likely to be a message like:
'there is no work to do'.
This was also partially motivated by older versions of Python's inability
to KeyboardInterrupt out of a long sleep(). | Below is the instruction that describes the task:
### Input:
When there is little work to do, the queuing thread sleeps a lot.
It can't sleep for too long without checking for the quit flag and/or
logging about why it is sleeping.
parameters:
seconds - the number of seconds to sleep
wait_log_interval - while sleeping, it is helpful if the thread
periodically announces itself so that we
know that it is still alive. This number is
the time in seconds between log entries.
wait_reason - this is for the explanation of why the thread is
sleeping. This is likely to be a message like:
'there is no work to do'.
This was also partially motivated by older versions of Python's inability
to KeyboardInterrupt out of a long sleep().
### Response:
def _responsive_sleep(self, seconds, wait_log_interval=0, wait_reason=''):
"""When there is litte work to do, the queuing thread sleeps a lot.
It can't sleep for too long without checking for the quit flag and/or
logging about why it is sleeping.
parameters:
seconds - the number of seconds to sleep
wait_log_interval - while sleeping, it is helpful if the thread
periodically announces itself so that we
know that it is still alive. This number is
the time in seconds between log entries.
            wait_reason - this is for the explanation of why the thread is
                          sleeping. This is likely to be a message like:
                          'there is no work to do'.
        This was also partially motivated by older versions of Python's inability
to KeyboardInterrupt out of a long sleep()."""
for x in xrange(int(seconds)):
self.quit_check()
if wait_log_interval and not x % wait_log_interval:
self.logger.info('%s: %dsec of %dsec',
wait_reason,
x,
seconds)
self.quit_check()
time.sleep(1.0) |
def repo_exists(self, auth, username, repo_name):
"""
Returns whether a repository with name ``repo_name`` owned by the user with username ``username`` exists.
:param auth.Authentication auth: authentication object
:param str username: username of owner of repository
:param str repo_name: name of repository
:return: whether the repository exists
:rtype: bool
:raises NetworkFailure: if there is an error communicating with the server
:raises ApiFailure: if the request cannot be serviced
"""
path = "/repos/{u}/{r}".format(u=username, r=repo_name)
return self._get(path, auth=auth).ok | Returns whether a repository with name ``repo_name`` owned by the user with username ``username`` exists.
:param auth.Authentication auth: authentication object
:param str username: username of owner of repository
:param str repo_name: name of repository
:return: whether the repository exists
:rtype: bool
:raises NetworkFailure: if there is an error communicating with the server
:raises ApiFailure: if the request cannot be serviced | Below is the instruction that describes the task:
### Input:
Returns whether a repository with name ``repo_name`` owned by the user with username ``username`` exists.
:param auth.Authentication auth: authentication object
:param str username: username of owner of repository
:param str repo_name: name of repository
:return: whether the repository exists
:rtype: bool
:raises NetworkFailure: if there is an error communicating with the server
:raises ApiFailure: if the request cannot be serviced
### Response:
def repo_exists(self, auth, username, repo_name):
"""
Returns whether a repository with name ``repo_name`` owned by the user with username ``username`` exists.
:param auth.Authentication auth: authentication object
:param str username: username of owner of repository
:param str repo_name: name of repository
:return: whether the repository exists
:rtype: bool
:raises NetworkFailure: if there is an error communicating with the server
:raises ApiFailure: if the request cannot be serviced
"""
path = "/repos/{u}/{r}".format(u=username, r=repo_name)
return self._get(path, auth=auth).ok |
def loadUnStructuredGrid(filename): # not tested
"""Load a ``vtkunStructuredGrid`` object from file and return a ``Actor(vtkActor)`` object."""
reader = vtk.vtkUnstructuredGridReader()
reader.SetFileName(filename)
reader.Update()
gf = vtk.vtkUnstructuredGridGeometryFilter()
gf.SetInputConnection(reader.GetOutputPort())
gf.Update()
    return Actor(gf.GetOutput()) | Load a ``vtkUnstructuredGrid`` object from file and return an ``Actor(vtkActor)`` object. | Below is the instruction that describes the task:
### Input:
Load a ``vtkUnstructuredGrid`` object from file and return an ``Actor(vtkActor)`` object.
### Response:
def loadUnStructuredGrid(filename): # not tested
"""Load a ``vtkunStructuredGrid`` object from file and return a ``Actor(vtkActor)`` object."""
reader = vtk.vtkUnstructuredGridReader()
reader.SetFileName(filename)
reader.Update()
gf = vtk.vtkUnstructuredGridGeometryFilter()
gf.SetInputConnection(reader.GetOutputPort())
gf.Update()
return Actor(gf.GetOutput()) |
def _create_parser(self, html_parser, current_url):
"""
Create the tinycss stylesheet.
:param html_parser: The HTML parser.
:type html_parser: hatemile.util.html.htmldomparser.HTMLDOMParser
:param current_url: The current URL of page.
:type current_url: str
"""
css_code = ''
elements = html_parser.find(
'style,link[rel="stylesheet"]'
).list_results()
for element in elements:
if element.get_tag_name() == 'STYLE':
css_code = css_code + element.get_text_content()
else:
css_code = css_code + requests.get(
urljoin(current_url, element.get_attribute('href'))
).text
self.stylesheet = tinycss.make_parser().parse_stylesheet(css_code) | Create the tinycss stylesheet.
:param html_parser: The HTML parser.
:type html_parser: hatemile.util.html.htmldomparser.HTMLDOMParser
:param current_url: The current URL of page.
:type current_url: str | Below is the instruction that describes the task:
### Input:
Create the tinycss stylesheet.
:param html_parser: The HTML parser.
:type html_parser: hatemile.util.html.htmldomparser.HTMLDOMParser
:param current_url: The current URL of page.
:type current_url: str
### Response:
def _create_parser(self, html_parser, current_url):
"""
Create the tinycss stylesheet.
:param html_parser: The HTML parser.
:type html_parser: hatemile.util.html.htmldomparser.HTMLDOMParser
:param current_url: The current URL of page.
:type current_url: str
"""
css_code = ''
elements = html_parser.find(
'style,link[rel="stylesheet"]'
).list_results()
for element in elements:
if element.get_tag_name() == 'STYLE':
css_code = css_code + element.get_text_content()
else:
css_code = css_code + requests.get(
urljoin(current_url, element.get_attribute('href'))
).text
self.stylesheet = tinycss.make_parser().parse_stylesheet(css_code) |
def os_tree(directory, enable_scandir=False):
"""
    Return a directory's contents as a dictionary hierarchy.
.. code:: python
reusables.os_tree(".")
# {'doc': {'build': {'doctrees': {},
# 'html': {'_sources': {}, '_static': {}}},
# 'source': {}},
# 'reusables': {'__pycache__': {}},
# 'test': {'__pycache__': {}, 'data': {}}}
    :param directory: path to the directory to create the tree of.
:param enable_scandir: on python < 3.5 enable external scandir package
:return: dictionary of the directory
"""
if not os.path.exists(directory):
raise OSError("Directory does not exist")
if not os.path.isdir(directory):
raise OSError("Path is not a directory")
full_list = []
for root, dirs, files in _walk(directory, enable_scandir=enable_scandir):
full_list.extend([os.path.join(root, d).lstrip(directory) + os.sep
for d in dirs])
tree = {os.path.basename(directory): {}}
for item in full_list:
separated = item.split(os.sep)
is_dir = separated[-1:] == ['']
if is_dir:
separated = separated[:-1]
parent = tree[os.path.basename(directory)]
for index, path in enumerate(separated):
if path in parent:
parent = parent[path]
continue
else:
parent[path] = dict()
parent = parent[path]
    return tree | Return a directory's contents as a dictionary hierarchy.
.. code:: python
reusables.os_tree(".")
# {'doc': {'build': {'doctrees': {},
# 'html': {'_sources': {}, '_static': {}}},
# 'source': {}},
# 'reusables': {'__pycache__': {}},
# 'test': {'__pycache__': {}, 'data': {}}}
:param directory: path to the directory to create the tree of.
:param enable_scandir: on python < 3.5 enable external scandir package
:return: dictionary of the directory | Below is the instruction that describes the task:
### Input:
Return a directory's contents as a dictionary hierarchy.
.. code:: python
reusables.os_tree(".")
# {'doc': {'build': {'doctrees': {},
# 'html': {'_sources': {}, '_static': {}}},
# 'source': {}},
# 'reusables': {'__pycache__': {}},
# 'test': {'__pycache__': {}, 'data': {}}}
:param directory: path to the directory to create the tree of.
:param enable_scandir: on python < 3.5 enable external scandir package
:return: dictionary of the directory
### Response:
def os_tree(directory, enable_scandir=False):
"""
    Return a directory's contents as a dictionary hierarchy.
.. code:: python
reusables.os_tree(".")
# {'doc': {'build': {'doctrees': {},
# 'html': {'_sources': {}, '_static': {}}},
# 'source': {}},
# 'reusables': {'__pycache__': {}},
# 'test': {'__pycache__': {}, 'data': {}}}
    :param directory: path to the directory to create the tree of.
:param enable_scandir: on python < 3.5 enable external scandir package
:return: dictionary of the directory
"""
if not os.path.exists(directory):
raise OSError("Directory does not exist")
if not os.path.isdir(directory):
raise OSError("Path is not a directory")
full_list = []
for root, dirs, files in _walk(directory, enable_scandir=enable_scandir):
full_list.extend([os.path.join(root, d).lstrip(directory) + os.sep
for d in dirs])
tree = {os.path.basename(directory): {}}
for item in full_list:
separated = item.split(os.sep)
is_dir = separated[-1:] == ['']
if is_dir:
separated = separated[:-1]
parent = tree[os.path.basename(directory)]
for index, path in enumerate(separated):
if path in parent:
parent = parent[path]
continue
else:
parent[path] = dict()
parent = parent[path]
return tree |
def create(cls, name, user_group=None, activation_date=None,
expiration_date=None, comment=None):
"""
Create an internal user.
Add a user example::
InternalUser.create(name='goog', comment='my comment')
:param str name: name of user that is displayed in SMC
:param list(str,InternalUserGroup) user_group: internal user groups
which to add this user to.
:param datetime activation_date: activation date as datetime object.
Activation date only supports year and month/day
:param datetime expiration_date: expiration date as datetime object.
Expiration date only supports year and month/day
:param str comment: optional comment
:raises ElementNotFound: thrown if group specified does not exist
:rtype: InternalUser
"""
json = {
'name': name,
'unique_id': 'cn={},{}'.format(name, InternalUserDomain.user_dn),
'comment': comment}
limits = {'activation_date': activation_date, 'expiration_date': expiration_date}
for attr, value in limits.items():
json[attr] = datetime_to_ms(value) if value else None
if user_group:
json.update(user_group=element_resolver(user_group))
return ElementCreator(cls, json) | Create an internal user.
Add a user example::
InternalUser.create(name='goog', comment='my comment')
:param str name: name of user that is displayed in SMC
:param list(str,InternalUserGroup) user_group: internal user groups
which to add this user to.
:param datetime activation_date: activation date as datetime object.
Activation date only supports year and month/day
:param datetime expiration_date: expiration date as datetime object.
Expiration date only supports year and month/day
:param str comment: optional comment
:raises ElementNotFound: thrown if group specified does not exist
:rtype: InternalUser | Below is the instruction that describes the task:
### Input:
Create an internal user.
Add a user example::
InternalUser.create(name='goog', comment='my comment')
:param str name: name of user that is displayed in SMC
:param list(str,InternalUserGroup) user_group: internal user groups
which to add this user to.
:param datetime activation_date: activation date as datetime object.
Activation date only supports year and month/day
:param datetime expiration_date: expiration date as datetime object.
Expiration date only supports year and month/day
:param str comment: optional comment
:raises ElementNotFound: thrown if group specified does not exist
:rtype: InternalUser
### Response:
def create(cls, name, user_group=None, activation_date=None,
expiration_date=None, comment=None):
"""
Create an internal user.
Add a user example::
InternalUser.create(name='goog', comment='my comment')
:param str name: name of user that is displayed in SMC
:param list(str,InternalUserGroup) user_group: internal user groups
which to add this user to.
:param datetime activation_date: activation date as datetime object.
Activation date only supports year and month/day
:param datetime expiration_date: expiration date as datetime object.
Expiration date only supports year and month/day
:param str comment: optional comment
:raises ElementNotFound: thrown if group specified does not exist
:rtype: InternalUser
"""
json = {
'name': name,
'unique_id': 'cn={},{}'.format(name, InternalUserDomain.user_dn),
'comment': comment}
limits = {'activation_date': activation_date, 'expiration_date': expiration_date}
for attr, value in limits.items():
json[attr] = datetime_to_ms(value) if value else None
if user_group:
json.update(user_group=element_resolver(user_group))
return ElementCreator(cls, json) |
def save(self):
"""
Save the entire configuration files
"""
self.projects.save()
self.experiments.save()
safe_dump(self.global_config, self._globals_file,
                  default_flow_style=False) | Save the entire configuration files | Below is the instruction that describes the task:
### Input:
Save the entire configuration files
### Response:
def save(self):
"""
Save the entire configuration files
"""
self.projects.save()
self.experiments.save()
safe_dump(self.global_config, self._globals_file,
default_flow_style=False) |
def _resolve_dtype(data_type):
"""Retrieve the corresponding NumPy's `dtype` for a given data type."""
if isinstance(data_type, _FIXED_ATOMIC):
out = _get_atomic_dtype(data_type)
elif isinstance(data_type, _FLEXIBLE_ATOMIC):
out = (_get_atomic_dtype(data_type), data_type.length)
elif isinstance(data_type, Array):
shape = data_type.shape
if isinstance(shape, _SEQUENCE_TYPES) and len(shape) == 1:
# Workaround the exception `ValueError: invalid itemsize in
# generic type tuple` when an `Array` of shape 0 or (0,) is nested
# within another `Array`.
shape = shape[0]
out = (_resolve_dtype(data_type.element_type), shape)
elif isinstance(data_type, Structure):
out = [(field.name, _resolve_dtype(field.type))
for field in data_type.fields]
    return out | Retrieve the corresponding NumPy `dtype` for a given data type. | Below is the instruction that describes the task:
### Input:
Retrieve the corresponding NumPy `dtype` for a given data type.
### Response:
def _resolve_dtype(data_type):
"""Retrieve the corresponding NumPy's `dtype` for a given data type."""
if isinstance(data_type, _FIXED_ATOMIC):
out = _get_atomic_dtype(data_type)
elif isinstance(data_type, _FLEXIBLE_ATOMIC):
out = (_get_atomic_dtype(data_type), data_type.length)
elif isinstance(data_type, Array):
shape = data_type.shape
if isinstance(shape, _SEQUENCE_TYPES) and len(shape) == 1:
# Workaround the exception `ValueError: invalid itemsize in
# generic type tuple` when an `Array` of shape 0 or (0,) is nested
# within another `Array`.
shape = shape[0]
out = (_resolve_dtype(data_type.element_type), shape)
elif isinstance(data_type, Structure):
out = [(field.name, _resolve_dtype(field.type))
for field in data_type.fields]
return out |
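The dtype specs the branches above build, written out by hand in plain numpy (the tuple forms are the subarray and structure syntaxes numpy accepts):
```python
import numpy as np

atomic = np.dtype(np.int32)                      # fixed-size atomic type
flexible = np.dtype(('S', 16))                   # flexible atomic + length
subarray = np.dtype((np.float64, (3,)))          # Array -> (element dtype, shape)
struct = np.dtype([('x', np.float32),
                   ('y', (np.int16, (2,)))])     # Structure -> list of fields
print(struct.fields['y'][0])                     # ('<i2', (2,)) subarray dtype
```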
def to_workspace_value(self, result, assets):
"""
Called with a column of the result of a pipeline. This needs to put
the data into a format that can be used in a workspace to continue
doing computations.
Parameters
----------
result : pd.Series
A multiindexed series with (dates, assets) whose values are the
results of running this pipeline term over the dates.
assets : pd.Index
All of the assets being requested. This allows us to correctly
shape the workspace value.
Returns
-------
workspace_value : array-like
            An array-like value that the engine can consume.
"""
return result.unstack().fillna(self.missing_value).reindex(
columns=assets,
fill_value=self.missing_value,
).values | Called with a column of the result of a pipeline. This needs to put
the data into a format that can be used in a workspace to continue
doing computations.
Parameters
----------
result : pd.Series
A multiindexed series with (dates, assets) whose values are the
results of running this pipeline term over the dates.
assets : pd.Index
All of the assets being requested. This allows us to correctly
shape the workspace value.
Returns
-------
workspace_value : array-like
An array-like value that the engine can consume. | Below is the instruction that describes the task:
### Input:
Called with a column of the result of a pipeline. This needs to put
the data into a format that can be used in a workspace to continue
doing computations.
Parameters
----------
result : pd.Series
A multiindexed series with (dates, assets) whose values are the
results of running this pipeline term over the dates.
assets : pd.Index
All of the assets being requested. This allows us to correctly
shape the workspace value.
Returns
-------
workspace_value : array-like
An array-like value that the engine can consume.
### Response:
def to_workspace_value(self, result, assets):
"""
Called with a column of the result of a pipeline. This needs to put
the data into a format that can be used in a workspace to continue
doing computations.
Parameters
----------
result : pd.Series
A multiindexed series with (dates, assets) whose values are the
results of running this pipeline term over the dates.
assets : pd.Index
All of the assets being requested. This allows us to correctly
shape the workspace value.
Returns
-------
workspace_value : array-like
            An array-like value that the engine can consume.
"""
return result.unstack().fillna(self.missing_value).reindex(
columns=assets,
fill_value=self.missing_value,
).values |
def observed_data_to_xarray(self):
"""Convert observed data to xarray."""
if self.observed is None:
return None
observed_data = {}
if isinstance(self.observed, self.tf.Tensor):
with self.tf.Session() as sess:
vals = sess.run(self.observed, feed_dict=self.feed_dict)
else:
vals = self.observed
if self.dims is None:
dims = {}
else:
dims = self.dims
name = "obs"
val_dims = dims.get(name)
vals = np.atleast_1d(vals)
val_dims, coords = generate_dims_coords(vals.shape, name, dims=val_dims, coords=self.coords)
# coords = {key: xr.IndexVariable((key,), data=coords[key]) for key in val_dims}
observed_data[name] = xr.DataArray(vals, dims=val_dims, coords=coords)
    return xr.Dataset(data_vars=observed_data, attrs=make_attrs(library=self.tfp)) | Convert observed data to xarray. | Below is the instruction that describes the task:
### Input:
Convert observed data to xarray.
### Response:
def observed_data_to_xarray(self):
"""Convert observed data to xarray."""
if self.observed is None:
return None
observed_data = {}
if isinstance(self.observed, self.tf.Tensor):
with self.tf.Session() as sess:
vals = sess.run(self.observed, feed_dict=self.feed_dict)
else:
vals = self.observed
if self.dims is None:
dims = {}
else:
dims = self.dims
name = "obs"
val_dims = dims.get(name)
vals = np.atleast_1d(vals)
val_dims, coords = generate_dims_coords(vals.shape, name, dims=val_dims, coords=self.coords)
# coords = {key: xr.IndexVariable((key,), data=coords[key]) for key in val_dims}
observed_data[name] = xr.DataArray(vals, dims=val_dims, coords=coords)
return xr.Dataset(data_vars=observed_data, attrs=make_attrs(library=self.tfp)) |
def create_app(*, debug=False, threads=1, bigchaindb_factory=None):
"""Return an instance of the Flask application.
Args:
debug (bool): a flag to activate the debug mode for the app
(default: False).
threads (int): number of threads to use
Return:
an instance of the Flask application.
"""
if not bigchaindb_factory:
bigchaindb_factory = BigchainDB
app = Flask(__name__)
app.wsgi_app = StripContentTypeMiddleware(app.wsgi_app)
CORS(app)
app.debug = debug
app.config['bigchain_pool'] = utils.pool(bigchaindb_factory, size=threads)
add_routes(app)
return app | Return an instance of the Flask application.
Args:
debug (bool): a flag to activate the debug mode for the app
(default: False).
threads (int): number of threads to use
Return:
an instance of the Flask application. | Below is the instruction that describes the task:
### Input:
Return an instance of the Flask application.
Args:
debug (bool): a flag to activate the debug mode for the app
(default: False).
threads (int): number of threads to use
Return:
an instance of the Flask application.
### Response:
def create_app(*, debug=False, threads=1, bigchaindb_factory=None):
"""Return an instance of the Flask application.
Args:
debug (bool): a flag to activate the debug mode for the app
(default: False).
threads (int): number of threads to use
Return:
an instance of the Flask application.
"""
if not bigchaindb_factory:
bigchaindb_factory = BigchainDB
app = Flask(__name__)
app.wsgi_app = StripContentTypeMiddleware(app.wsgi_app)
CORS(app)
app.debug = debug
app.config['bigchain_pool'] = utils.pool(bigchaindb_factory, size=threads)
add_routes(app)
return app |
def get_signature(payment_request):
"""
Returns the signature for the transaction.
To compute the signature, first you have to get the value of all
    the fields that start with 'vads_', ordering them alphabetically.
All the values are separated by the '+' character. Then you add
the value of the payzen certificate.
    Finally you hash the string using sha1."""
vads_args = {}
for field in payment_request._meta.fields:
if field.name[:5] == 'vads_':
field_value = field.value_from_object(payment_request)
if field_value:
vads_args.update({
field.name: field_value
})
base_str = ''
for key in sorted(vads_args):
base_str += str(vads_args[key]) + '+'
base_str += app_settings.VADS_CERTIFICATE
return hashlib.sha1(base_str.encode("utf-8")).hexdigest() | Returns the signature for the transaction.
To compute the signature, first you have to get the value of all
the fields that start with 'vads_', ordering them alphabetically.
All the values are separated by the '+' character. Then you add
the value of the payzen certificate.
Finally you hash the string using sha1. | Below is the instruction that describes the task:
### Input:
Returns the signature for the transaction.
To compute the signature, first you have to get the value of all
the fields that start with 'vads_', ordering them alphabetically.
All the values are separated by the '+' character. Then you add
the value of the payzen certificate.
Finally you hash the string using sha1.
### Response:
def get_signature(payment_request):
"""
Returns the signature for the transaction.
To compute the signature, first you have to get the value of all
    the fields that start with 'vads_', ordering them alphabetically.
All the values are separated by the '+' character. Then you add
the value of the payzen certificate.
    Finally you hash the string using sha1."""
vads_args = {}
for field in payment_request._meta.fields:
if field.name[:5] == 'vads_':
field_value = field.value_from_object(payment_request)
if field_value:
vads_args.update({
field.name: field_value
})
base_str = ''
for key in sorted(vads_args):
base_str += str(vads_args[key]) + '+'
base_str += app_settings.VADS_CERTIFICATE
return hashlib.sha1(base_str.encode("utf-8")).hexdigest() |
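The signing scheme worked through by hand, with made-up field values and a placeholder certificate (not real credentials):
```python
import hashlib

vads_args = {'vads_amount': '1000',
             'vads_currency': '978',
             'vads_trans_id': '000001'}
certificate = 'TESTCERTIFICATE'        # placeholder for VADS_CERTIFICATE

base_str = ''
for key in sorted(vads_args):          # alphabetical field order
    base_str += str(vads_args[key]) + '+'
base_str += certificate                # '1000+978+000001+TESTCERTIFICATE'
print(hashlib.sha1(base_str.encode('utf-8')).hexdigest())
```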
def cable_from_html(html, reference_id=None):
"""\
Returns a cable from the provided HTML page.
`html`
The HTML page of the cable
`reference_id`
The reference identifier of the cable. If the reference_id is ``None``
this function tries to detect it.
"""
if not html:
raise ValueError('The HTML page of the cable must be provided, got: "%r"' % html)
if not reference_id:
reference_id = reader.reference_id_from_html(html)
cable = Cable(reference_id)
reader.parse_meta(html, cable)
cable.header = reader.get_header_as_text(html, reference_id)
cable.content = reader.get_content_as_text(html, reference_id)
return cable | \
Returns a cable from the provided HTML page.
`html`
The HTML page of the cable
`reference_id`
The reference identifier of the cable. If the reference_id is ``None``
this function tries to detect it. | Below is the instruction that describes the task:
### Input:
\
Returns a cable from the provided HTML page.
`html`
The HTML page of the cable
`reference_id`
The reference identifier of the cable. If the reference_id is ``None``
this function tries to detect it.
### Response:
def cable_from_html(html, reference_id=None):
"""\
Returns a cable from the provided HTML page.
`html`
The HTML page of the cable
`reference_id`
The reference identifier of the cable. If the reference_id is ``None``
this function tries to detect it.
"""
if not html:
raise ValueError('The HTML page of the cable must be provided, got: "%r"' % html)
if not reference_id:
reference_id = reader.reference_id_from_html(html)
cable = Cable(reference_id)
reader.parse_meta(html, cable)
cable.header = reader.get_header_as_text(html, reference_id)
cable.content = reader.get_content_as_text(html, reference_id)
return cable |
def render_arrow(self, label, start, end, direction, i):
"""Render individual arrow.
label (unicode): Dependency label.
start (int): Index of start word.
end (int): Index of end word.
direction (unicode): Arrow direction, 'left' or 'right'.
i (int): Unique ID, typically arrow index.
RETURNS (unicode): Rendered SVG markup.
"""
level = self.levels.index(end - start) + 1
x_start = self.offset_x + start * self.distance + self.arrow_spacing
if self.direction == "rtl":
x_start = self.width - x_start
y = self.offset_y
x_end = (
self.offset_x
+ (end - start) * self.distance
+ start * self.distance
- self.arrow_spacing * (self.highest_level - level) / 4
)
if self.direction == "rtl":
x_end = self.width - x_end
y_curve = self.offset_y - level * self.distance / 2
if self.compact:
y_curve = self.offset_y - level * self.distance / 6
if y_curve == 0 and len(self.levels) > 5:
y_curve = -self.distance
arrowhead = self.get_arrowhead(direction, x_start, y, x_end)
arc = self.get_arc(x_start, y, y_curve, x_end)
label_side = "right" if self.direction == "rtl" else "left"
return TPL_DEP_ARCS.format(
id=self.id,
i=i,
stroke=self.arrow_stroke,
head=arrowhead,
label=label,
label_side=label_side,
arc=arc,
) | Render individual arrow.
label (unicode): Dependency label.
start (int): Index of start word.
end (int): Index of end word.
direction (unicode): Arrow direction, 'left' or 'right'.
i (int): Unique ID, typically arrow index.
RETURNS (unicode): Rendered SVG markup. | Below is the instruction that describes the task:
### Input:
Render individual arrow.
label (unicode): Dependency label.
start (int): Index of start word.
end (int): Index of end word.
direction (unicode): Arrow direction, 'left' or 'right'.
i (int): Unique ID, typically arrow index.
RETURNS (unicode): Rendered SVG markup.
### Response:
def render_arrow(self, label, start, end, direction, i):
"""Render individual arrow.
label (unicode): Dependency label.
start (int): Index of start word.
end (int): Index of end word.
direction (unicode): Arrow direction, 'left' or 'right'.
i (int): Unique ID, typically arrow index.
RETURNS (unicode): Rendered SVG markup.
"""
level = self.levels.index(end - start) + 1
x_start = self.offset_x + start * self.distance + self.arrow_spacing
if self.direction == "rtl":
x_start = self.width - x_start
y = self.offset_y
x_end = (
self.offset_x
+ (end - start) * self.distance
+ start * self.distance
- self.arrow_spacing * (self.highest_level - level) / 4
)
if self.direction == "rtl":
x_end = self.width - x_end
y_curve = self.offset_y - level * self.distance / 2
if self.compact:
y_curve = self.offset_y - level * self.distance / 6
if y_curve == 0 and len(self.levels) > 5:
y_curve = -self.distance
arrowhead = self.get_arrowhead(direction, x_start, y, x_end)
arc = self.get_arc(x_start, y, y_curve, x_end)
label_side = "right" if self.direction == "rtl" else "left"
return TPL_DEP_ARCS.format(
id=self.id,
i=i,
stroke=self.arrow_stroke,
head=arrowhead,
label=label,
label_side=label_side,
arc=arc,
) |
def list_tables(self):
"""
Runs the ``\\dt`` command and returns a list of column values with
information about all tables in the database.
"""
lines = output_lines(self.exec_psql('\\dt'))
return [line.split('|') for line in lines] | Runs the ``\\dt`` command and returns a list of column values with
information about all tables in the database. | Below is the instruction that describes the task:
### Input:
Runs the ``\\dt`` command and returns a list of column values with
information about all tables in the database.
### Response:
def list_tables(self):
"""
Runs the ``\\dt`` command and returns a list of column values with
information about all tables in the database.
"""
lines = output_lines(self.exec_psql('\\dt'))
return [line.split('|') for line in lines] |
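What the split does, shown on a hand-written sample of psql's \dt output (schema, name, type, owner columns):
```python
lines = [
    ' public | users  | table | postgres ',
    ' public | orders | table | postgres ',
]
tables = [line.split('|') for line in lines]
print(tables[0])  # [' public ', ' users  ', ' table ', ' postgres ']
```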
def create_index(self, table_name, attr_name):
"""
:param str table_name:
Table name that contains the attribute to be indexed.
:param str attr_name: Attribute name to create index.
:raises IOError: |raises_write_permission|
:raises simplesqlite.NullDatabaseConnectionError:
|raises_check_connection|
:raises simplesqlite.TableNotFoundError:
|raises_verify_table_existence|
"""
self.verify_table_existence(table_name)
self.validate_access_permission(["w", "a"])
query_format = "CREATE INDEX IF NOT EXISTS {index:s} ON {table}({attr})"
query = query_format.format(
index=make_index_name(table_name, attr_name),
table=Table(table_name),
attr=Attr(attr_name),
)
logger.debug(query)
self.execute_query(query, logging.getLogger().findCaller()) | :param str table_name:
Table name that contains the attribute to be indexed.
:param str attr_name: Attribute name to create index.
:raises IOError: |raises_write_permission|
:raises simplesqlite.NullDatabaseConnectionError:
|raises_check_connection|
:raises simplesqlite.TableNotFoundError:
|raises_verify_table_existence| | Below is the instruction that describes the task:
### Input:
:param str table_name:
Table name that contains the attribute to be indexed.
:param str attr_name: Attribute name to create index.
:raises IOError: |raises_write_permission|
:raises simplesqlite.NullDatabaseConnectionError:
|raises_check_connection|
:raises simplesqlite.TableNotFoundError:
|raises_verify_table_existence|
### Response:
def create_index(self, table_name, attr_name):
"""
:param str table_name:
Table name that contains the attribute to be indexed.
:param str attr_name: Attribute name to create index.
:raises IOError: |raises_write_permission|
:raises simplesqlite.NullDatabaseConnectionError:
|raises_check_connection|
:raises simplesqlite.TableNotFoundError:
|raises_verify_table_existence|
"""
self.verify_table_existence(table_name)
self.validate_access_permission(["w", "a"])
query_format = "CREATE INDEX IF NOT EXISTS {index:s} ON {table}({attr})"
query = query_format.format(
index=make_index_name(table_name, attr_name),
table=Table(table_name),
attr=Attr(attr_name),
)
logger.debug(query)
self.execute_query(query, logging.getLogger().findCaller()) |
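The SQL this helper ends up issuing, run directly through the standard sqlite3 module (table and column names below are made up for the demo):
```python
import sqlite3

con = sqlite3.connect(':memory:')
con.execute('CREATE TABLE sampletable (attr_a INTEGER, attr_b TEXT)')
con.execute('CREATE INDEX IF NOT EXISTS sampletable_attr_a_index '
            'ON sampletable(attr_a)')
print([row[1] for row in con.execute('PRAGMA index_list(sampletable)')])
# ['sampletable_attr_a_index']
```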
def _oai_to_xml(marc_oai): # TODO: move this to MARC XML parser?
"""
Convert OAI to MARC XML.
Args:
marc_oai (str): String with either OAI or MARC XML.
Returns:
str: String with MARC XML.
"""
record = MARCXMLRecord(marc_oai)
record.oai_marc = False
return record.to_XML() | Convert OAI to MARC XML.
Args:
marc_oai (str): String with either OAI or MARC XML.
Returns:
str: String with MARC XML. | Below is the instruction that describes the task:
### Input:
Convert OAI to MARC XML.
Args:
marc_oai (str): String with either OAI or MARC XML.
Returns:
str: String with MARC XML.
### Response:
def _oai_to_xml(marc_oai): # TODO: move this to MARC XML parser?
"""
Convert OAI to MARC XML.
Args:
marc_oai (str): String with either OAI or MARC XML.
Returns:
str: String with MARC XML.
"""
record = MARCXMLRecord(marc_oai)
record.oai_marc = False
return record.to_XML() |
def get_connection(self, address):
"""Create or retrieve a muxed connection
Arguments:
address -- a peer endpoint in IPv4/v6 address format; None refers
to the connection for unknown peers
Return:
a bound, connected datagram socket instance
"""
if self.connections.has_key(address):
return self.connections[address]
# We need a new datagram socket on a dynamically assigned ephemeral port
conn = socket.socket(self._forwarding_socket.family,
self._forwarding_socket.type,
self._forwarding_socket.proto)
conn.bind((self._forwarding_socket.getsockname()[0], 0))
conn.connect(self._forwarding_socket.getsockname())
if not address:
conn.setblocking(0)
self.connections[address] = conn
_logger.debug("Created new connection for address: %s", address)
return conn | Create or retrieve a muxed connection
Arguments:
address -- a peer endpoint in IPv4/v6 address format; None refers
to the connection for unknown peers
Return:
a bound, connected datagram socket instance | Below is the instruction that describes the task:
### Input:
Create or retrieve a muxed connection
Arguments:
address -- a peer endpoint in IPv4/v6 address format; None refers
to the connection for unknown peers
Return:
a bound, connected datagram socket instance
### Response:
def get_connection(self, address):
"""Create or retrieve a muxed connection
Arguments:
address -- a peer endpoint in IPv4/v6 address format; None refers
to the connection for unknown peers
Return:
a bound, connected datagram socket instance
"""
if self.connections.has_key(address):
return self.connections[address]
# We need a new datagram socket on a dynamically assigned ephemeral port
conn = socket.socket(self._forwarding_socket.family,
self._forwarding_socket.type,
self._forwarding_socket.proto)
conn.bind((self._forwarding_socket.getsockname()[0], 0))
conn.connect(self._forwarding_socket.getsockname())
if not address:
conn.setblocking(0)
self.connections[address] = conn
_logger.debug("Created new connection for address: %s", address)
return conn |
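A hypothetical call pattern for the muxer above (the peer address is made up; a falsy address yields the shared non-blocking socket kept for unknown peers):

    conn = mux.get_connection(("192.0.2.7", 5000))  # per-peer socket, created on first use
    conn.send(b"payload")                           # travels over the bound/connected pair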
def http_auth(self):
"""
Returns ``True`` if valid http auth credentials are found in the
request header.
"""
if 'HTTP_AUTHORIZATION' in self.request.META:
authmeth, auth = self.request.META['HTTP_AUTHORIZATION'].split(
' ', 1)
if authmeth.lower() == 'basic':
import base64  # str.decode('base64') is Python 2 only
auth = base64.b64decode(auth.strip()).decode('utf-8')
identifier, password = auth.split(':', 1)
username = get_username(identifier)
user = authenticate(username=username, password=password)
if user:
login(self.request, user)
return True | Returns ``True`` if valid http auth credentials are found in the
request header. | Below is the instruction that describes the task:
### Input:
Returns ``True`` if valid http auth credentials are found in the
request header.
### Response:
def http_auth(self):
"""
Returns ``True`` if valid http auth credentials are found in the
request header.
"""
if 'HTTP_AUTHORIZATION' in self.request.META:
authmeth, auth = self.request.META['HTTP_AUTHORIZATION'].split(
' ', 1)
if authmeth.lower() == 'basic':
import base64  # str.decode('base64') is Python 2 only
auth = base64.b64decode(auth.strip()).decode('utf-8')
identifier, password = auth.split(':', 1)
username = get_username(identifier)
user = authenticate(username=username, password=password)
if user:
login(self.request, user)
return True |
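To see what the Basic-auth decode step does in isolation, a standalone sketch (the credential string is a made-up example):

    import base64

    authmeth, auth = "Basic dXNlcjpwYXNz".split(' ', 1)
    assert authmeth.lower() == 'basic'
    identifier, password = base64.b64decode(auth.strip()).decode('utf-8').split(':', 1)
    print(identifier, password)  # user pass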
def process(self, tup):
"""Process steps:
1. Stream third positional value from input into Kafka topic.
"""
status_seq = self.iter_using_shelf(tup.values[2], self.tweet_shelf)
# This could be more efficient by passing the result from twitter
# straight through to the producer, instead of deserializing and
# reserializing json.
self.producer.produce(json.dumps(status) for status in status_seq) | Process steps:
1. Stream third positional value from input into Kafka topic. | Below is the instruction that describes the task:
### Input:
Process steps:
1. Stream third positional value from input into Kafka topic.
### Response:
def process(self, tup):
"""Process steps:
1. Stream third positional value from input into Kafka topic.
"""
status_seq = self.iter_using_shelf(tup.values[2], self.tweet_shelf)
# This could be more efficient by passing the result from twitter
# straight through to the producer, instead of deserializing and
# reserializing json.
self.producer.produce(json.dumps(status) for status in status_seq) |
def _from_dict(cls, _dict):
"""Initialize a Face object from a json dictionary."""
args = {}
if 'age' in _dict:
args['age'] = FaceAge._from_dict(_dict.get('age'))
if 'gender' in _dict:
args['gender'] = FaceGender._from_dict(_dict.get('gender'))
if 'face_location' in _dict:
args['face_location'] = FaceLocation._from_dict(
_dict.get('face_location'))
return cls(**args) | Initialize a Face object from a json dictionary. | Below is the instruction that describes the task:
### Input:
Initialize a Face object from a json dictionary.
### Response:
def _from_dict(cls, _dict):
"""Initialize a Face object from a json dictionary."""
args = {}
if 'age' in _dict:
args['age'] = FaceAge._from_dict(_dict.get('age'))
if 'gender' in _dict:
args['gender'] = FaceGender._from_dict(_dict.get('gender'))
if 'face_location' in _dict:
args['face_location'] = FaceLocation._from_dict(
_dict.get('face_location'))
return cls(**args) |
def last_instr(self, start, end, instr, target=None, exact=True):
"""
Find the last <instr> in the block from start to end.
<instr> is any python bytecode instruction or a list of opcodes
If <instr> is an opcode with a target (like a jump), a target
destination can be specified which must match precisely if exact
is True, or if exact is False, the instruction which has a target
closest to <target> will be returned.
Return index to it or None if not found.
"""
code = self.code
# Make sure requested positions do not go out of
# code bounds
if not (start >= 0 and end <= len(code)):
return None
try:
None in instr
except TypeError:  # instr is a single opcode, not a collection
instr = [instr]
result_offset = None
current_distance = self.insts[-1].offset - self.insts[0].offset
extended_arg = 0
# FIXME: use self.insts rather than code[]
for offset in self.op_range(start, end):
op = code[offset]
if op == self.opc.EXTENDED_ARG:
arg = code2num(code, offset+1) | extended_arg
extended_arg = extended_arg_val(self.opc, arg)
continue
if op in instr:
if target is None:
result_offset = offset
else:
dest = self.get_target(offset, extended_arg)
if dest == target:
current_distance = 0
result_offset = offset
elif not exact:
new_distance = abs(target - dest)
if new_distance <= current_distance:
current_distance = new_distance
result_offset = offset
pass
pass
pass
pass
extended_arg = 0
pass
return result_offset | Find the last <instr> in the block from start to end.
<instr> is any python bytecode instruction or a list of opcodes
If <instr> is an opcode with a target (like a jump), a target
destination can be specified which must match precisely if exact
is True, or if exact is False, the instruction which has a target
closest to <target> will be returned.
Return index to it or None if not found. | Below is the instruction that describes the task:
### Input:
Find the last <instr> in the block from start to end.
<instr> is any python bytecode instruction or a list of opcodes
If <instr> is an opcode with a target (like a jump), a target
destination can be specified which must match precisely if exact
is True, or if exact is False, the instruction which has a target
closest to <target> will be returned.
Return index to it or None if not found.
### Response:
def last_instr(self, start, end, instr, target=None, exact=True):
"""
Find the last <instr> in the block from start to end.
<instr> is any python bytecode instruction or a list of opcodes
If <instr> is an opcode with a target (like a jump), a target
destination can be specified which must match precisely if exact
is True, or if exact is False, the instruction which has a target
closest to <target> will be returned.
Return index to it or None if not found.
"""
code = self.code
# Make sure requested positions do not go out of
# code bounds
if not (start >= 0 and end <= len(code)):
return None
try:
None in instr
except TypeError:  # instr is a single opcode, not a collection
instr = [instr]
result_offset = None
current_distance = self.insts[-1].offset - self.insts[0].offset
extended_arg = 0
# FIXME: use self.insts rather than code[]
for offset in self.op_range(start, end):
op = code[offset]
if op == self.opc.EXTENDED_ARG:
arg = code2num(code, offset+1) | extended_arg
extended_arg = extended_arg_val(self.opc, arg)
continue
if op in instr:
if target is None:
result_offset = offset
else:
dest = self.get_target(offset, extended_arg)
if dest == target:
current_distance = 0
result_offset = offset
elif not exact:
new_distance = abs(target - dest)
if new_distance <= current_distance:
current_distance = new_distance
result_offset = offset
pass
pass
pass
pass
extended_arg = 0
pass
return result_offset |
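A hedged usage sketch (the scanner instance, offsets, and opcode are illustrative):

    # Find the last absolute jump in [start, end) whose target is nearest loop_start.
    idx = scanner.last_instr(start, end, scanner.opc.JUMP_ABSOLUTE,
                             target=loop_start, exact=False)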
def align_rasters(raster, alignraster, how=np.ma.mean, cxsize=None, cysize=None, masked=False):
'''
Align two rasters so that data overlaps by geographical location
Usage:
(alignedraster_o, alignedraster_a, geot_a) = AlignRasters(raster, alignraster, how=np.mean)
where:
raster: string with location of raster to be aligned
alignraster: string with location of raster to which raster will be aligned
how: function used to aggregate cells (if the rasters have different sizes)
It is assumed that both rasters have the same size
'''
ndv1, xsize1, ysize1, geot1, projection1, datatype1 = get_geo_info(raster)
ndv2, xsize2, ysize2, geot2, projection2, datatype2 = get_geo_info(alignraster)
if projection1.ExportToMICoordSys() == projection2.ExportToMICoordSys():
blocksize = (np.round(geot2[1]/geot1[1]).astype(int), np.round(geot2[-1]/geot1[-1]).astype(int))
mraster = gdalnumeric.LoadFile(raster)
mraster = np.ma.masked_array(mraster, mask=mraster == ndv1, fill_value=ndv1)
mmin = mraster.min()
mraster = block_reduce(mraster, blocksize, func=how)
araster = gdalnumeric.LoadFile(alignraster)
araster = np.ma.masked_array(araster, mask=araster == ndv2, fill_value=ndv2)
amin = araster.min()
if geot1[0] <= geot2[0]:
row3, mcol = map_pixel(geot2[0], geot2[3], geot1[1] *blocksize[0],
geot1[-1]*blocksize[1], geot1[0], geot1[3])
acol = 0
else:
row3, acol = map_pixel(geot1[0], geot1[3], geot2[1], geot2[-1], geot2[0], geot2[3])
mcol = 0
if geot1[3] <= geot2[3]:
arow, col3 = map_pixel(geot1[0], geot1[3], geot2[1], geot2[-1], geot2[0], geot2[3])
mrow = 0
else:
mrow, col3 = map_pixel(geot2[0], geot2[3], geot1[1] *blocksize[0],
geot1[-1]*blocksize[1], geot1[0], geot1[3])
arow = 0
'''
col3,row3 = map_pixel(geot1[0], geot1[3], geot2[1],geot2[-1], geot2[0], geot2[3])
col3 = max(0,col3)
row3 = max(0,row3)
araster = araster[row3:,col3:]
col3,row3 = map_pixel(geot2[0], geot2[3], geot1[1] *blocksize[0],
geot1[-1]*blocksize[1], geot1[0], geot1[3])
col3 = max(0,abs(col3))
row3 = max(0,np.abs(row3))
mraster = mraster[row3:,col3:]
'''
mraster = mraster[mrow:, mcol:]
araster = araster[arow:, acol:]
if cxsize and cysize:
araster = araster[:cysize, :cxsize]
mraster = mraster[:cysize, :cxsize]
else:
rows = min(araster.shape[0], mraster.shape[0])
cols = min(araster.shape[1], mraster.shape[1])
araster = araster[:rows, :cols]
mraster = mraster[:rows, :cols]
#mraster = mraster[row3:rows+row3,col3:cols+col3]
if masked:
mraster = np.ma.masked_array(mraster, mask=mraster < mmin, fill_value=ndv1)
araster = np.ma.masked_array(araster, mask=araster < amin, fill_value=ndv2)
geot = (max(geot1[0], geot2[0]), geot1[1]*blocksize[0], geot1[2],
min(geot1[3], geot2[3]), geot1[4], geot1[-1]*blocksize[1])
return (mraster, araster, geot)
else:
print("Rasters need to be in same projection")
return (-1, -1, -1) | Align two rasters so that data overlaps by geographical location
Usage:
(alignedraster_o, alignedraster_a, geot_a) = AlignRasters(raster, alignraster, how=np.mean)
where:
raster: string with location of raster to be aligned
alignraster: string with location of raster to which raster will be aligned
how: function used to aggregate cells (if the rasters have different sizes)
It is assumed that both rasters have the same size | Below is the instruction that describes the task:
### Input:
Align two rasters so that data overlaps by geographical location
Usage:
(alignedraster_o, alignedraster_a, geot_a) = AlignRasters(raster, alignraster, how=np.mean)
where:
raster: string with location of raster to be aligned
alignraster: string with location of raster to which raster will be aligned
how: function used to aggregate cells (if the rasters have different sizes)
It is assumed that both rasters have the same size
### Response:
def align_rasters(raster, alignraster, how=np.ma.mean, cxsize=None, cysize=None, masked=False):
'''
Align two rasters so that data overlaps by geographical location
Usage:
(alignedraster_o, alignedraster_a, geot_a) = AlignRasters(raster, alignraster, how=np.mean)
where:
raster: string with location of raster to be aligned
alignraster: string with location of raster to which raster will be aligned
how: function used to aggregate cells (if the rasters have different sizes)
It is assumed that both rasters have the same size
'''
ndv1, xsize1, ysize1, geot1, projection1, datatype1 = get_geo_info(raster)
ndv2, xsize2, ysize2, geot2, projection2, datatype2 = get_geo_info(alignraster)
if projection1.ExportToMICoordSys() == projection2.ExportToMICoordSys():
blocksize = (np.round(geot2[1]/geot1[1]).astype(int), np.round(geot2[-1]/geot1[-1]).astype(int))
mraster = gdalnumeric.LoadFile(raster)
mraster = np.ma.masked_array(mraster, mask=mraster == ndv1, fill_value=ndv1)
mmin = mraster.min()
mraster = block_reduce(mraster, blocksize, func=how)
araster = gdalnumeric.LoadFile(alignraster)
araster = np.ma.masked_array(araster, mask=araster == ndv2, fill_value=ndv2)
amin = araster.min()
if geot1[0] <= geot2[0]:
row3, mcol = map_pixel(geot2[0], geot2[3], geot1[1] *blocksize[0],
geot1[-1]*blocksize[1], geot1[0], geot1[3])
acol = 0
else:
row3, acol = map_pixel(geot1[0], geot1[3], geot2[1], geot2[-1], geot2[0], geot2[3])
mcol = 0
if geot1[3] <= geot2[3]:
arow, col3 = map_pixel(geot1[0], geot1[3], geot2[1], geot2[-1], geot2[0], geot2[3])
mrow = 0
else:
mrow, col3 = map_pixel(geot2[0], geot2[3], geot1[1] *blocksize[0],
geot1[-1]*blocksize[1], geot1[0], geot1[3])
arow = 0
'''
col3,row3 = map_pixel(geot1[0], geot1[3], geot2[1],geot2[-1], geot2[0], geot2[3])
col3 = max(0,col3)
row3 = max(0,row3)
araster = araster[row3:,col3:]
col3,row3 = map_pixel(geot2[0], geot2[3], geot1[1] *blocksize[0],
geot1[-1]*blocksize[1], geot1[0], geot1[3])
col3 = max(0,abs(col3))
row3 = max(0,np.abs(row3))
mraster = mraster[row3:,col3:]
'''
mraster = mraster[mrow:, mcol:]
araster = araster[arow:, acol:]
if cxsize and cysize:
araster = araster[:cysize, :cxsize]
mraster = mraster[:cysize, :cxsize]
else:
rows = min(araster.shape[0], mraster.shape[0])
cols = min(araster.shape[1], mraster.shape[1])
araster = araster[:rows, :cols]
mraster = mraster[:rows, :cols]
#mraster = mraster[row3:rows+row3,col3:cols+col3]
if masked:
mraster = np.ma.masked_array(mraster, mask=mraster < mmin, fill_value=ndv1)
araster = np.ma.masked_array(araster, mask=araster < amin, fill_value=ndv2)
geot = (max(geot1[0], geot2[0]), geot1[1]*blocksize[0], geot1[2],
min(geot1[3], geot2[3]), geot1[4], geot1[-1]*blocksize[1])
return (mraster, araster, geot)
else:
print("Rasters need to be in same projection")
return (-1, -1, -1) |
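A usage sketch (the file names are hypothetical; on a projection mismatch the function returns (-1, -1, -1)):

    mraster, araster, geot = align_rasters("fine_grid.tif", "coarse_grid.tif", masked=True)
    if geot != -1:
        print(mraster.shape == araster.shape)  # True: matching, overlapping extents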
def files(self, *args, **kwargs):
""" D.files() -> List of the files in this directory.
The elements of the list are Path objects.
This does not walk into subdirectories (see :meth:`walkfiles`).
Accepts parameters to :meth:`listdir`.
"""
return [p for p in self.listdir(*args, **kwargs) if p.isfile()] | D.files() -> List of the files in this directory.
The elements of the list are Path objects.
This does not walk into subdirectories (see :meth:`walkfiles`).
Accepts parameters to :meth:`listdir`. | Below is the instruction that describes the task:
### Input:
D.files() -> List of the files in this directory.
The elements of the list are Path objects.
This does not walk into subdirectories (see :meth:`walkfiles`).
Accepts parameters to :meth:`listdir`.
### Response:
def files(self, *args, **kwargs):
""" D.files() -> List of the files in this directory.
The elements of the list are Path objects.
This does not walk into subdirectories (see :meth:`walkfiles`).
Accepts parameters to :meth:`listdir`.
"""
return [p for p in self.listdir(*args, **kwargs) if p.isfile()] |
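A usage sketch, assuming the path.py library this method appears to come from:

    from path import Path

    d = Path('.')
    py_files = d.files('*.py')  # non-recursive; use walkfiles() for a deep walk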
def groups_pools_getGroups():
"""Get a list of groups the auth'd user can post photos to."""
method = 'flickr.groups.pools.getGroups'
data = _doget(method, auth=True)
groups = []
if isinstance(data.rsp.groups.group, list):
for group in data.rsp.groups.group:
groups.append(Group(group.id, name=group.name, \
privacy=group.privacy))
else:
group = data.rsp.groups.group
groups = [Group(group.id, name=group.name, privacy=group.privacy)]
return groups | Get a list of groups the auth'd user can post photos to. | Below is the instruction that describes the task:
### Input:
Get a list of groups the auth'd user can post photos to.
### Response:
def groups_pools_getGroups():
"""Get a list of groups the auth'd user can post photos to."""
method = 'flickr.groups.pools.getGroups'
data = _doget(method, auth=True)
groups = []
if isinstance(data.rsp.groups.group, list):
for group in data.rsp.groups.group:
groups.append(Group(group.id, name=group.name, \
privacy=group.privacy))
else:
group = data.rsp.groups.group
groups = [Group(group.id, name=group.name, privacy=group.privacy)]
return groups |
def receive(self, path, diffTo, diffFrom):
""" Receive a btrfs diff. """
diff = self.toObj.diff(diffTo, diffFrom)
self._open(self.butterStore.receive(diff, [path, ])) | Receive a btrfs diff. | Below is the instruction that describes the task:
### Input:
Receive a btrfs diff.
### Response:
def receive(self, path, diffTo, diffFrom):
""" Receive a btrfs diff. """
diff = self.toObj.diff(diffTo, diffFrom)
self._open(self.butterStore.receive(diff, [path, ])) |
def reduce(self, mapped_props, aggregated, value_type, visitor):
"""This reduction is called to combine the mapped slot and collection
item values into a single value for return.
The default implementation tries to behave naturally; you'll almost
always get a dict back when mapping over a record, and list or some
other collection when mapping over collections.
If the collection has additional properties which are not ignored (eg,
not extraneous, not filtered), then the result will be a dictionary
with the results of mapping the properties, and a 'values' key will be
added with the result of mapping the items in the collection.
args:
``mapped_props=``\ *generator*
Iterating over this generator will yield K, V pairs, where K is
**the Property object** and V is the mapped value.
``aggregated=``\ *object*
This contains whatever ``aggregate`` returned, normally a list.
``value_type=``\ *RecordType*
This is the type which is currently being reduced.
A :py:class:`normalize.record.Record` subclass
``visitor=``\ *Visitor*
Context/options object.
"""
reduced = None
if mapped_props:
reduced = dict((k.name, v) for k, v in mapped_props)
if issubclass(value_type, Collection) and aggregated is not None:
if all(visitor.is_filtered(prop) for prop in
value_type.properties.values()):
reduced = aggregated
else:
if reduced.get("values", False):
raise exc.VisitorTooSimple(
fs=visitor.field_selector,
value_type_name=value_type.__name__,
visitor=type(self).__name__,
)
else:
reduced['values'] = aggregated
return reduced | This reduction is called to combine the mapped slot and collection
item values into a single value for return.
The default implementation tries to behave naturally; you'll almost
always get a dict back when mapping over a record, and list or some
other collection when mapping over collections.
If the collection has additional properties which are not ignored (eg,
not extraneous, not filtered), then the result will be a dictionary
with the results of mapping the properties, and a 'values' key will be
added with the result of mapping the items in the collection.
args:
``mapped_props=``\ *generator*
Iterating over this generator will yield K, V pairs, where K is
**the Property object** and V is the mapped value.
``aggregated=``\ *object*
This contains whatever ``aggregate`` returned, normally a list.
``value_type=``\ *RecordType*
This is the type which is currently being reduced.
A :py:class:`normalize.record.Record` subclass
``visitor=``\ *Visitor*
Context/options object. | Below is the instruction that describes the task:
### Input:
This reduction is called to combine the mapped slot and collection
item values into a single value for return.
The default implementation tries to behave naturally; you'll almost
always get a dict back when mapping over a record, and list or some
other collection when mapping over collections.
If the collection has additional properties which are not ignored (eg,
not extraneous, not filtered), then the result will be a dictionary
with the results of mapping the properties, and a 'values' key will be
added with the result of mapping the items in the collection.
args:
``mapped_props=``\ *generator*
Iterating over this generator will yield K, V pairs, where K is
**the Property object** and V is the mapped value.
``aggregated=``\ *object*
This contains whatever ``aggregate`` returned, normally a list.
``value_type=``\ *RecordType*
This is the type which is currently being reduced.
A :py:class:`normalize.record.Record` subclass
``visitor=``\ *Visitor*
Context/options object.
### Response:
def reduce(self, mapped_props, aggregated, value_type, visitor):
"""This reduction is called to combine the mapped slot and collection
item values into a single value for return.
The default implementation tries to behave naturally; you'll almost
always get a dict back when mapping over a record, and list or some
other collection when mapping over collections.
If the collection has additional properties which are not ignored (eg,
not extraneous, not filtered), then the result will be a dictionary
with the results of mapping the properties, and a 'values' key will be
added with the result of mapping the items in the collection.
args:
``mapped_props=``\ *generator*
Iterating over this generator will yield K, V pairs, where K is
**the Property object** and V is the mapped value.
``aggregated=``\ *object*
This contains whatever ``aggregate`` returned, normally a list.
``value_type=``\ *RecordType*
This is the type which is currently being reduced.
A :py:class:`normalize.record.Record` subclass
``visitor=``\ *Visitor*
Contenxt/options object.
"""
reduced = None
if mapped_props:
reduced = dict((k.name, v) for k, v in mapped_props)
if issubclass(value_type, Collection) and aggregated is not None:
if all(visitor.is_filtered(prop) for prop in
value_type.properties.values()):
reduced = aggregated
else:
if reduced.get("values", False):
raise exc.VisitorTooSimple(
fs=visitor.field_selector,
value_type_name=value_type.__name__,
visitor=type(self).__name__,
)
else:
reduced['values'] = aggregated
return reduced |
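Three shapes this reduction can return, illustrated as a sketch (values are made up):

    record_case = {'id': 1, 'name': 'x'}                  # plain record: dict of mapped props
    pure_collection_case = ['item1', 'item2']             # collection, all props filtered
    mixed_case = {'id': 1, 'values': ['item1', 'item2']}  # surviving props + 'values' aggregate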
def send_PointerEvent(self, x, y, buttonmask=0):
"""Indicates either pointer movement or a pointer button press or
release. The pointer is now at (x-position, y-position),
and the current state of buttons 1 to 8 are represented by
bits 0 to 7 of button-mask respectively, 0 meaning up, 1
meaning down (pressed).
"""
self.sendMessage(struct.pack('!BBHH', 5, buttonmask, x, y)) | Indicates either pointer movement or a pointer button press or
release. The pointer is now at (x-position, y-position),
and the current state of buttons 1 to 8 are represented by
bits 0 to 7 of button-mask respectively, 0 meaning up, 1
meaning down (pressed). | Below is the instruction that describes the task:
### Input:
Indicates either pointer movement or a pointer button press or
release. The pointer is now at (x-position, y-position),
and the current state of buttons 1 to 8 are represented by
bits 0 to 7 of button-mask respectively, 0 meaning up, 1
meaning down (pressed).
### Response:
def send_PointerEvent(self, x, y, buttonmask=0):
"""Indicates either pointer movement or a pointer button press or
release. The pointer is now at (x-position, y-position),
and the current state of buttons 1 to 8 are represented by
bits 0 to 7 of button-mask respectively, 0 meaning up, 1
meaning down (pressed).
"""
self.sendMessage(struct.pack('!BBHH', 5, buttonmask, x, y)) |
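The wire format is easy to verify in isolation: message type 5, an 8-bit button mask, then two big-endian 16-bit coordinates, six bytes in total.

    import struct

    msg = struct.pack('!BBHH', 5, 0b00000001, 320, 240)  # left button down at (320, 240)
    assert len(msg) == 6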
def read_table(source, tablename=None, **kwargs):
"""Read a `Table` from one or more LIGO_LW XML documents
source : `file`, `str`, :class:`~ligo.lw.ligolw.Document`, `list`
one or more open files, file paths, or LIGO_LW `Document` objects
tablename : `str`, optional
the `Name` of the relevant `Table` to read, if not given a table will
be returned if only one exists in the document(s)
**kwargs
keyword arguments for the read, or conversion functions
See Also
--------
gwpy.io.ligolw.read_table
for details of keyword arguments for the read operation
gwpy.table.io.ligolw.to_astropy_table
for details of keyword arguments for the conversion operation
"""
from ligo.lw import table as ligolw_table
from ligo.lw.lsctables import TableByName
# -- keyword handling -----------------------
# separate keywords for reading and converting from LIGO_LW to Astropy
read_kw = kwargs # rename for readability
convert_kw = {
'rename': None,
'use_numpy_dtypes': False,
}
for key in filter(kwargs.__contains__, convert_kw):
convert_kw[key] = kwargs.pop(key)
if convert_kw['rename'] is None:
convert_kw['rename'] = {}
# allow user to specify LIGO_LW columns to read to provide the
# desired output columns
try:
columns = list(kwargs.pop('columns'))
except KeyError:
columns = None
try:
read_kw['columns'] = list(kwargs.pop('ligolw_columns'))
except KeyError:
read_kw['columns'] = columns
convert_kw['columns'] = columns or read_kw['columns']
if tablename:
tableclass = TableByName[ligolw_table.Table.TableName(tablename)]
# work out if fancy property columns are required
# means 'peak_time' and 'peak_time_ns' will get read if 'peak'
# is requested
if convert_kw['columns'] is not None:
readcols = set(read_kw['columns'])
propcols = _get_property_columns(tableclass, convert_kw['columns'])
for col in propcols:
try:
readcols.remove(col)
except KeyError:
continue
readcols.update(propcols[col])
read_kw['columns'] = list(readcols)
# -- read -----------------------------------
return Table(read_ligolw_table(source, tablename=tablename, **read_kw),
**convert_kw) | Read a `Table` from one or more LIGO_LW XML documents
source : `file`, `str`, :class:`~ligo.lw.ligolw.Document`, `list`
one or more open files, file paths, or LIGO_LW `Document` objects
tablename : `str`, optional
the `Name` of the relevant `Table` to read, if not given a table will
be returned if only one exists in the document(s)
**kwargs
keyword arguments for the read, or conversion functions
See Also
--------
gwpy.io.ligolw.read_table
for details of keyword arguments for the read operation
gwpy.table.io.ligolw.to_astropy_table
for details of keyword arguments for the conversion operation | Below is the instruction that describes the task:
### Input:
Read a `Table` from one or more LIGO_LW XML documents
source : `file`, `str`, :class:`~ligo.lw.ligolw.Document`, `list`
one or more open files, file paths, or LIGO_LW `Document` objects
tablename : `str`, optional
the `Name` of the relevant `Table` to read, if not given a table will
be returned if only one exists in the document(s)
**kwargs
keyword arguments for the read, or conversion functions
See Also
--------
gwpy.io.ligolw.read_table
for details of keyword arguments for the read operation
gwpy.table.io.ligolw.to_astropy_table
for details of keyword arguments for the conversion operation
### Response:
def read_table(source, tablename=None, **kwargs):
"""Read a `Table` from one or more LIGO_LW XML documents
source : `file`, `str`, :class:`~ligo.lw.ligolw.Document`, `list`
one or more open files, file paths, or LIGO_LW `Document` objects
tablename : `str`, optional
the `Name` of the relevant `Table` to read, if not given a table will
be returned if only one exists in the document(s)
**kwargs
keyword arguments for the read, or conversion functions
See Also
--------
gwpy.io.ligolw.read_table
for details of keyword arguments for the read operation
gwpy.table.io.ligolw.to_astropy_table
for details of keyword arguments for the conversion operation
"""
from ligo.lw import table as ligolw_table
from ligo.lw.lsctables import TableByName
# -- keyword handling -----------------------
# separate keywords for reading and converting from LIGO_LW to Astropy
read_kw = kwargs # rename for readability
convert_kw = {
'rename': None,
'use_numpy_dtypes': False,
}
for key in filter(kwargs.__contains__, convert_kw):
convert_kw[key] = kwargs.pop(key)
if convert_kw['rename'] is None:
convert_kw['rename'] = {}
# allow user to specify LIGO_LW columns to read to provide the
# desired output columns
try:
columns = list(kwargs.pop('columns'))
except KeyError:
columns = None
try:
read_kw['columns'] = list(kwargs.pop('ligolw_columns'))
except KeyError:
read_kw['columns'] = columns
convert_kw['columns'] = columns or read_kw['columns']
if tablename:
tableclass = TableByName[ligolw_table.Table.TableName(tablename)]
# work out if fancy property columns are required
# means 'peak_time' and 'peak_time_ns' will get read if 'peak'
# is requested
if convert_kw['columns'] is not None:
readcols = set(read_kw['columns'])
propcols = _get_property_columns(tableclass, convert_kw['columns'])
for col in propcols:
try:
readcols.remove(col)
except KeyError:
continue
readcols.update(propcols[col])
read_kw['columns'] = list(readcols)
# -- read -----------------------------------
return Table(read_ligolw_table(source, tablename=tablename, **read_kw),
**convert_kw) |
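A hedged usage sketch (the file name is hypothetical; per the docstring comment, asking for the 'peak' property column transparently reads peak_time and peak_time_ns):

    t = read_table('H1-TRIGGERS.xml.gz', tablename='sngl_burst',
                   columns=['peak', 'snr'])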
def main(nodes, edges):
"""Generate a random graph, submit jobs, then validate that the
dependency order was enforced.
Finally, plot the graph, with time on the x-axis, and
in-degree on the y (just for spread). All arrows must
point at least slightly to the right if the graph is valid.
"""
from matplotlib import pyplot as plt
from matplotlib.dates import date2num
from matplotlib.cm import gist_rainbow
print("building DAG")
G = random_dag(nodes, edges)
jobs = {}
pos = {}
colors = {}
for node in G:
jobs[node] = randomwait
client = parallel.Client()
view = client.load_balanced_view()
print("submitting %i tasks with %i dependencies"%(nodes,edges))
results = submit_jobs(view, G, jobs)
print("waiting for results")
view.wait()
print("done")
for node in G:
md = results[node].metadata
start = date2num(md.started)
runtime = date2num(md.completed) - start
pos[node] = (start, runtime)
colors[node] = md.engine_id
validate_tree(G, results)
nx.draw(G, pos, nodelist=list(colors.keys()), node_color=list(colors.values()),
cmap=gist_rainbow, with_labels=False)
x,y = zip(*pos.values())
xmin,ymin = map(min, (x,y))
xmax,ymax = map(max, (x,y))
xscale = xmax-xmin
yscale = ymax-ymin
plt.xlim(xmin-xscale*.1,xmax+xscale*.1)
plt.ylim(ymin-yscale*.1,ymax+yscale*.1)
return G,results | Generate a random graph, submit jobs, then validate that the
dependency order was enforced.
Finally, plot the graph, with time on the x-axis, and
in-degree on the y (just for spread). All arrows must
point at least slightly to the right if the graph is valid. | Below is the instruction that describes the task:
### Input:
Generate a random graph, submit jobs, then validate that the
dependency order was enforced.
Finally, plot the graph, with time on the x-axis, and
in-degree on the y (just for spread). All arrows must
point at least slightly to the right if the graph is valid.
### Response:
def main(nodes, edges):
"""Generate a random graph, submit jobs, then validate that the
dependency order was enforced.
Finally, plot the graph, with time on the x-axis, and
in-degree on the y (just for spread). All arrows must
point at least slightly to the right if the graph is valid.
"""
from matplotlib import pyplot as plt
from matplotlib.dates import date2num
from matplotlib.cm import gist_rainbow
print("building DAG")
G = random_dag(nodes, edges)
jobs = {}
pos = {}
colors = {}
for node in G:
jobs[node] = randomwait
client = parallel.Client()
view = client.load_balanced_view()
print("submitting %i tasks with %i dependencies"%(nodes,edges))
results = submit_jobs(view, G, jobs)
print("waiting for results")
view.wait()
print("done")
for node in G:
md = results[node].metadata
start = date2num(md.started)
runtime = date2num(md.completed) - start
pos[node] = (start, runtime)
colors[node] = md.engine_id
validate_tree(G, results)
nx.draw(G, pos, nodelist=list(colors.keys()), node_color=list(colors.values()),
cmap=gist_rainbow, with_labels=False)
x,y = zip(*pos.values())
xmin,ymin = map(min, (x,y))
xmax,ymax = map(max, (x,y))
xscale = xmax-xmin
yscale = ymax-ymin
plt.xlim(xmin-xscale*.1,xmax+xscale*.1)
plt.ylim(ymin-yscale*.1,ymax+yscale*.1)
return G,results |
def analysis_search(self, query):
"""
Lists the webids of the analyses that match the given query.
Searches in MD5, SHA1, SHA256, filename, cookbook name, comment, url and report id.
"""
response = self._post(self.apiurl + "/v2/analysis/search", data={'apikey': self.apikey, 'q': query})
return self._raise_or_extract(response) | Lists the webids of the analyses that match the given query.
Searches in MD5, SHA1, SHA256, filename, cookbook name, comment, url and report id. | Below is the instruction that describes the task:
### Input:
Lists the webids of the analyses that match the given query.
Searches in MD5, SHA1, SHA256, filename, cookbook name, comment, url and report id.
### Response:
def analysis_search(self, query):
"""
Lists the webids of the analyses that match the given query.
Searches in MD5, SHA1, SHA256, filename, cookbook name, comment, url and report id.
"""
response = self._post(self.apiurl + "/v2/analysis/search", data={'apikey': self.apikey, 'q': query})
return self._raise_or_extract(response) |
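A usage sketch (the client instance `jbx` and the query string are hypothetical):

    webids = jbx.analysis_search('invoice.exe')  # matches hashes, filenames, comments, ...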
def format(self, record):
"""Apply little arrow and colors to the record.
Arrow and colors are only applied to sphinxcontrib.versioning log statements.
:param logging.LogRecord record: The log record object to log.
"""
formatted = super(ColorFormatter, self).format(record)
if self.verbose or not record.name.startswith(self.SPECIAL_SCOPE):
return formatted
# Arrow.
formatted = '=> ' + formatted
# Colors.
if not self.colors:
return formatted
if record.levelno >= logging.ERROR:
formatted = str(colorclass.Color.red(formatted))
elif record.levelno >= logging.WARNING:
formatted = str(colorclass.Color.yellow(formatted))
else:
formatted = str(colorclass.Color.cyan(formatted))
return formatted | Apply little arrow and colors to the record.
Arrow and colors are only applied to sphinxcontrib.versioning log statements.
:param logging.LogRecord record: The log record object to log. | Below is the instruction that describes the task:
### Input:
Apply little arrow and colors to the record.
Arrow and colors are only applied to sphinxcontrib.versioning log statements.
:param logging.LogRecord record: The log record object to log.
### Response:
def format(self, record):
"""Apply little arrow and colors to the record.
Arrow and colors are only applied to sphinxcontrib.versioning log statements.
:param logging.LogRecord record: The log record object to log.
"""
formatted = super(ColorFormatter, self).format(record)
if self.verbose or not record.name.startswith(self.SPECIAL_SCOPE):
return formatted
# Arrow.
formatted = '=> ' + formatted
# Colors.
if not self.colors:
return formatted
if record.levelno >= logging.ERROR:
formatted = str(colorclass.Color.red(formatted))
elif record.levelno >= logging.WARNING:
formatted = str(colorclass.Color.yellow(formatted))
else:
formatted = str(colorclass.Color.cyan(formatted))
return formatted |
def get_google_service(service_type=None,version=None):
'''
get_url will use the requests library to get a url
:param service_type: the service to get (default is storage)
:param version: version to use (default is v1)
'''
if service_type is None:
service_type = "storage"
if version is None:
version = "v1"
credentials = GoogleCredentials.get_application_default()
return build(service_type, version, credentials=credentials) | get_url will use the requests library to get a url
:param service_type: the service to get (default is storage)
:param version: version to use (default is v1) | Below is the instruction that describes the task:
### Input:
get_url will use the requests library to get a url
:param service_type: the service to get (default is storage)
:param version: version to use (default is v1)
### Response:
def get_google_service(service_type=None,version=None):
'''
get_url will use the requests library to get a url
:param service_type: the service to get (default is storage)
:param version: version to use (default is v1)
'''
if service_type is None:
service_type = "storage"
if version is None:
version = "v1"
credentials = GoogleCredentials.get_application_default()
return build(service_type, version, credentials=credentials) |
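A usage sketch (the bucket name is hypothetical; the defaults give a storage v1 client):

    storage = get_google_service()
    resp = storage.objects().list(bucket='my-bucket').execute()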
def default_middlewares(web3):
"""
List the default middlewares for the request manager.
Leaving ens unspecified will prevent the middleware from resolving names.
"""
return [
(request_parameter_normalizer, 'request_param_normalizer'),
(gas_price_strategy_middleware, 'gas_price_strategy'),
(name_to_address_middleware(web3), 'name_to_address'),
(attrdict_middleware, 'attrdict'),
(pythonic_middleware, 'pythonic'),
(normalize_errors_middleware, 'normalize_errors'),
(validation_middleware, 'validation'),
(abi_middleware, 'abi'),
] | List the default middlewares for the request manager.
Leaving ens unspecified will prevent the middleware from resolving names. | Below is the instruction that describes the task:
### Input:
List the default middlewares for the request manager.
Leaving ens unspecified will prevent the middleware from resolving names.
### Response:
def default_middlewares(web3):
"""
List the default middlewares for the request manager.
Leaving ens unspecified will prevent the middleware from resolving names.
"""
return [
(request_parameter_normalizer, 'request_param_normalizer'),
(gas_price_strategy_middleware, 'gas_price_strategy'),
(name_to_address_middleware(web3), 'name_to_address'),
(attrdict_middleware, 'attrdict'),
(pythonic_middleware, 'pythonic'),
(normalize_errors_middleware, 'normalize_errors'),
(validation_middleware, 'validation'),
(abi_middleware, 'abi'),
] |
def render(self, name, value, attrs=None, multi=False, renderer=None):
"""
Django <= 1.10 variant.
"""
DJANGO_111_OR_UP = (VERSION[0] == 1 and VERSION[1] >= 11) or (
VERSION[0] >= 2
)
if DJANGO_111_OR_UP:
return super(DynamicRawIDWidget, self).render(
name, value, attrs, renderer=renderer
)
if attrs is None:
attrs = {}
related_url = reverse(
'admin:{0}_{1}_changelist'.format(
self.rel.to._meta.app_label,
self.rel.to._meta.object_name.lower(),
),
current_app=self.admin_site.name,
)
params = self.url_parameters()
if params:
url = u'?' + u'&'.join(
[u'{0}={1}'.format(k, v) for k, v in params.items()]
)
else:
url = u''
if "class" not in attrs:
attrs['class'] = 'vForeignKeyRawIdAdminField'  # The JavaScript looks for this hook.
app_name = self.rel.to._meta.app_label.strip()
model_name = self.rel.to._meta.object_name.lower().strip()
hidden_input = super(widgets.ForeignKeyRawIdWidget, self).render(
name, value, attrs
)
extra_context = {
'hidden_input': hidden_input,
'name': name,
'app_name': app_name,
'model_name': model_name,
'related_url': related_url,
'url': url,
}
return render_to_string(
'dynamic_raw_id/admin/widgets/dynamic_raw_id_field.html',
extra_context,
) | Django <= 1.10 variant. | Below is the instruction that describes the task:
### Input:
Django <= 1.10 variant.
### Response:
def render(self, name, value, attrs=None, multi=False, renderer=None):
"""
Django <= 1.10 variant.
"""
DJANGO_111_OR_UP = (VERSION[0] == 1 and VERSION[1] >= 11) or (
VERSION[0] >= 2
)
if DJANGO_111_OR_UP:
return super(DynamicRawIDWidget, self).render(
name, value, attrs, renderer=renderer
)
if attrs is None:
attrs = {}
related_url = reverse(
'admin:{0}_{1}_changelist'.format(
self.rel.to._meta.app_label,
self.rel.to._meta.object_name.lower(),
),
current_app=self.admin_site.name,
)
params = self.url_parameters()
if params:
url = u'?' + u'&'.join(
[u'{0}={1}'.format(k, v) for k, v in params.items()]
)
else:
url = u''
if "class" not in attrs:
attrs['class'] = 'vForeignKeyRawIdAdminField'  # The JavaScript looks for this hook.
app_name = self.rel.to._meta.app_label.strip()
model_name = self.rel.to._meta.object_name.lower().strip()
hidden_input = super(widgets.ForeignKeyRawIdWidget, self).render(
name, value, attrs
)
extra_context = {
'hidden_input': hidden_input,
'name': name,
'app_name': app_name,
'model_name': model_name,
'related_url': related_url,
'url': url,
}
return render_to_string(
'dynamic_raw_id/admin/widgets/dynamic_raw_id_field.html',
extra_context,
) |
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
"""
See :meth:`superclass method
<.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
for spec of input and result values.
"""
assert all(stddev_type in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES
for stddev_type in stddev_types)
# Compute SA with primed coeffs and PGA with both unprimed and
# primed coeffs
C = self.COEFFS_PRIMED[imt]
C_PGA = self.COEFFS_PRIMED[PGA()]
C_PGA_unprimed = self.COEFFS_UNPRIMED[PGA()]
SC = self.STRESS_COEFFS[imt]
# Get S term to determine if consider site term is applied
S = self._get_site_class(sites)
# Abrahamson and Silva (1997) hanging wall term. This is not used
# in the latest version of GMPE but is defined in functional form in
# the paper so we keep it here as a placeholder
f4HW = self._compute_f4(C, rup.mag, dists.rrup)
# Flags for rake angles
CN, CR = self._get_fault_mechanism_flags(rup.rake)
# Get volcanic path distance which Rvol=0 for current implementation
# of McVerry2006Asc, but kept here as placeholder for future use
rvol = self._get_volcanic_path_distance(dists.rrup)
# Get delta_C and delta_D terms for site class
delta_C, delta_D = self._get_deltas(sites)
# Get Atkinson and Boore (2006) stress drop factors or additional
# standard deviation adjustment. Only apply these factors to sources
# located within the boundaries of the CSHM.
in_cshm = self._check_in_cshm_polygon(rup)
if in_cshm is True:
stress_drop_factor = self._compute_stress_drop_adjustment(SC,
rup.mag)
additional_sigma = self._compute_additional_sigma()
else:
stress_drop_factor = 0
additional_sigma = 0
# Compute lnPGA_ABCD primed
lnPGAp_ABCD = self._compute_mean(C_PGA, S, rup.mag, dists.rrup, rvol,
rup.hypo_depth, CN, CR, f4HW,
delta_C, delta_D)
# Compute lnPGA_ABCD unprimed
lnPGA_ABCD = self._compute_mean(C_PGA_unprimed, S, rup.mag, dists.rrup,
rvol, rup.hypo_depth, CN, CR, f4HW,
delta_C, delta_D)
# Compute lnSA_ABCD
lnSAp_ABCD = self._compute_mean(C, S, rup.mag, dists.rrup, rvol,
rup.hypo_depth, CN, CR, f4HW,
delta_C, delta_D)
# Stage 3: Equation 6 SA_ABCD(T). This is lnSA_ABCD
# need to calculate final lnSA_ABCD from non-log values but return log
mean = np.log(np.exp(lnSAp_ABCD) *
(np.exp(lnPGA_ABCD) /
np.exp(lnPGAp_ABCD))) + stress_drop_factor
# Compute standard deviations
C_STD = self.COEFFS_STD[imt]
stddevs = self._get_stddevs_chch(
C_STD, rup.mag, stddev_types, sites, additional_sigma
)
return mean, stddevs | See :meth:`superclass method
<.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
for spec of input and result values. | Below is the instruction that describes the task:
### Input:
See :meth:`superclass method
<.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
for spec of input and result values.
### Response:
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
"""
See :meth:`superclass method
<.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
for spec of input and result values.
"""
assert all(stddev_type in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES
for stddev_type in stddev_types)
# Compute SA with primed coeffs and PGA with both unprimed and
# primed coeffs
C = self.COEFFS_PRIMED[imt]
C_PGA = self.COEFFS_PRIMED[PGA()]
C_PGA_unprimed = self.COEFFS_UNPRIMED[PGA()]
SC = self.STRESS_COEFFS[imt]
# Get S term to determine if consider site term is applied
S = self._get_site_class(sites)
# Abrahamson and Silva (1997) hanging wall term. This is not used
# in the latest version of GMPE but is defined in functional form in
# the paper so we keep it here as a placeholder
f4HW = self._compute_f4(C, rup.mag, dists.rrup)
# Flags for rake angles
CN, CR = self._get_fault_mechanism_flags(rup.rake)
# Get volcanic path distance which Rvol=0 for current implementation
# of McVerry2006Asc, but kept here as placeholder for future use
rvol = self._get_volcanic_path_distance(dists.rrup)
# Get delta_C and delta_D terms for site class
delta_C, delta_D = self._get_deltas(sites)
# Get Atkinson and Boore (2006) stress drop factors or additional
# standard deviation adjustment. Only apply these factors to sources
# located within the boundaries of the CSHM.
in_cshm = self._check_in_cshm_polygon(rup)
if in_cshm is True:
stress_drop_factor = self._compute_stress_drop_adjustment(SC,
rup.mag)
additional_sigma = self._compute_additional_sigma()
else:
stress_drop_factor = 0
additional_sigma = 0
# Compute lnPGA_ABCD primed
lnPGAp_ABCD = self._compute_mean(C_PGA, S, rup.mag, dists.rrup, rvol,
rup.hypo_depth, CN, CR, f4HW,
delta_C, delta_D)
# Compute lnPGA_ABCD unprimed
lnPGA_ABCD = self._compute_mean(C_PGA_unprimed, S, rup.mag, dists.rrup,
rvol, rup.hypo_depth, CN, CR, f4HW,
delta_C, delta_D)
# Compute lnSA_ABCD
lnSAp_ABCD = self._compute_mean(C, S, rup.mag, dists.rrup, rvol,
rup.hypo_depth, CN, CR, f4HW,
delta_C, delta_D)
# Stage 3: Equation 6 SA_ABCD(T). This is lnSA_ABCD
# need to calculate final lnSA_ABCD from non-log values but return log
mean = np.log(np.exp(lnSAp_ABCD) *
(np.exp(lnPGA_ABCD) /
np.exp(lnPGAp_ABCD))) + stress_drop_factor
# Compute standard deviations
C_STD = self.COEFFS_STD[imt]
stddevs = self._get_stddevs_chch(
C_STD, rup.mag, stddev_types, sites, additional_sigma
)
return mean, stddevs |
def observe(self, amount):
"""Observe the given amount."""
self._count.inc(1)
self._sum.inc(amount) | Observe the given amount. | Below is the instruction that describes the task:
### Input:
Observe the given amount.
### Response:
def observe(self, amount):
"""Observe the given amount."""
self._count.inc(1)
self._sum.inc(amount) |
def _EncodeParameters(self, parameters):
"""
Return a string in key=value&key=value form.
:param parameters: A dict of (key, value) tuples, where value is encoded as specified by self._encoding
:return: A URL-encoded string in "key=value&key=value" form
"""
if parameters is None:
return None
else:
return urlencode(dict([(k, self._Encode(v)) for k, v in list(parameters.items()) if v is not None])) | Return a string in key=value&key=value form.
:param parameters: A dict of (key, value) tuples, where value is encoded as specified by self._encoding
:return: A URL-encoded string in "key=value&key=value" form | Below is the instruction that describes the task:
### Input:
Return a string in key=value&key=value form.
:param parameters: A dict of (key, value) tuples, where value is encoded as specified by self._encoding
:return: A URL-encoded string in "key=value&key=value" form
### Response:
def _EncodeParameters(self, parameters):
"""
Return a string in key=value&key=value form.
:param parameters: A dict of (key, value) tuples, where value is encoded as specified by self._encoding
:return: A URL-encoded string in "key=value&key=value" form
"""
if parameters is None:
return None
else:
return urlencode(dict([(k, self._Encode(v)) for k, v in list(parameters.items()) if v is not None])) |
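What the filtering and encoding amount to, as a standalone sketch (assuming self._Encode yields UTF-8 bytes):

    from urllib.parse import urlencode

    params = {'q': 'café', 'page': None}
    print(urlencode({k: v.encode('utf-8') for k, v in params.items() if v is not None}))
    # q=caf%C3%A9 -- the None-valued key is dropped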
def addImage(self,name,rsrc):
"""
Adds an image to the internal registry.
``rsrc`` should be a 2-tuple of ``(resource_name,category)``\ .
"""
self.imgs[name]=self.widget.peng.resourceMgr.getTex(*rsrc) | Adds an image to the internal registry.
``rsrc`` should be a 2-tuple of ``(resource_name,category)``\ . | Below is the instruction that describes the task:
### Input:
Adds an image to the internal registry.
``rsrc`` should be a 2-tuple of ``(resource_name,category)``\ .
### Response:
def addImage(self,name,rsrc):
"""
Adds an image to the internal registry.
``rsrc`` should be a 2-tuple of ``(resource_name,category)``\ .
"""
self.imgs[name]=self.widget.peng.resourceMgr.getTex(*rsrc) |
def make_ctypes_convertor(self, _flags):
"""
Fix clang types to ctypes conversion for this parsing instance.
Some architecture-dependent size types have to be changed if the target
architecture is not the same as the local one.
"""
tu = util.get_tu('''
typedef short short_t;
typedef int int_t;
typedef long long_t;
typedef long long longlong_t;
typedef float float_t;
typedef double double_t;
typedef long double longdouble_t;
typedef void* pointer_t;''', flags=_flags)
size = util.get_cursor(tu, 'short_t').type.get_size() * 8
self.ctypes_typename[TypeKind.SHORT] = 'c_int%d' % (size)
self.ctypes_typename[TypeKind.USHORT] = 'c_uint%d' % (size)
self.ctypes_sizes[TypeKind.SHORT] = size
self.ctypes_sizes[TypeKind.USHORT] = size
size = util.get_cursor(tu, 'int_t').type.get_size() * 8
self.ctypes_typename[TypeKind.INT] = 'c_int%d' % (size)
self.ctypes_typename[TypeKind.UINT] = 'c_uint%d' % (size)
self.ctypes_sizes[TypeKind.INT] = size
self.ctypes_sizes[TypeKind.UINT] = size
size = util.get_cursor(tu, 'long_t').type.get_size() * 8
self.ctypes_typename[TypeKind.LONG] = 'c_int%d' % (size)
self.ctypes_typename[TypeKind.ULONG] = 'c_uint%d' % (size)
self.ctypes_sizes[TypeKind.LONG] = size
self.ctypes_sizes[TypeKind.ULONG] = size
size = util.get_cursor(tu, 'longlong_t').type.get_size() * 8
self.ctypes_typename[TypeKind.LONGLONG] = 'c_int%d' % (size)
self.ctypes_typename[TypeKind.ULONGLONG] = 'c_uint%d' % (size)
self.ctypes_sizes[TypeKind.LONGLONG] = size
self.ctypes_sizes[TypeKind.ULONGLONG] = size
# FIXME : Float && http://en.wikipedia.org/wiki/Long_double
size0 = util.get_cursor(tu, 'float_t').type.get_size() * 8
size1 = util.get_cursor(tu, 'double_t').type.get_size() * 8
size2 = util.get_cursor(tu, 'longdouble_t').type.get_size() * 8
# 2014-01 stop generating crap.
# 2015-01 reverse until better solution is found
# the idea is that you cannot assume a c_double will be the same format as a c_long_double.
# at least this passes the sizes from the TU
if size1 != size2:
self.ctypes_typename[TypeKind.LONGDOUBLE] = 'c_long_double_t'
else:
self.ctypes_typename[TypeKind.LONGDOUBLE] = 'c_double'
self.ctypes_sizes[TypeKind.FLOAT] = size0
self.ctypes_sizes[TypeKind.DOUBLE] = size1
self.ctypes_sizes[TypeKind.LONGDOUBLE] = size2
# save the target pointer size.
size = util.get_cursor(tu, 'pointer_t').type.get_size() * 8
self.ctypes_sizes[TypeKind.POINTER] = size
self.ctypes_sizes[TypeKind.NULLPTR] = size
log.debug('ARCH sizes: long:%s longdouble:%s',
self.ctypes_typename[TypeKind.LONG],
self.ctypes_typename[TypeKind.LONGDOUBLE])
return | Fix clang types to ctypes conversion for this parsing instance.
Some architecture-dependent size types have to be changed if the target
architecture is not the same as the local one. | Below is the instruction that describes the task:
### Input:
Fix clang types to ctypes conversion for this parsing instance.
Some architecture-dependent size types have to be changed if the target
architecture is not the same as the local one.
### Response:
def make_ctypes_convertor(self, _flags):
"""
Fix clang types to ctypes conversion for this parsing instance.
Some architecture-dependent size types have to be changed if the target
architecture is not the same as the local one.
"""
tu = util.get_tu('''
typedef short short_t;
typedef int int_t;
typedef long long_t;
typedef long long longlong_t;
typedef float float_t;
typedef double double_t;
typedef long double longdouble_t;
typedef void* pointer_t;''', flags=_flags)
size = util.get_cursor(tu, 'short_t').type.get_size() * 8
self.ctypes_typename[TypeKind.SHORT] = 'c_int%d' % (size)
self.ctypes_typename[TypeKind.USHORT] = 'c_uint%d' % (size)
self.ctypes_sizes[TypeKind.SHORT] = size
self.ctypes_sizes[TypeKind.USHORT] = size
size = util.get_cursor(tu, 'int_t').type.get_size() * 8
self.ctypes_typename[TypeKind.INT] = 'c_int%d' % (size)
self.ctypes_typename[TypeKind.UINT] = 'c_uint%d' % (size)
self.ctypes_sizes[TypeKind.INT] = size
self.ctypes_sizes[TypeKind.UINT] = size
size = util.get_cursor(tu, 'long_t').type.get_size() * 8
self.ctypes_typename[TypeKind.LONG] = 'c_int%d' % (size)
self.ctypes_typename[TypeKind.ULONG] = 'c_uint%d' % (size)
self.ctypes_sizes[TypeKind.LONG] = size
self.ctypes_sizes[TypeKind.ULONG] = size
size = util.get_cursor(tu, 'longlong_t').type.get_size() * 8
self.ctypes_typename[TypeKind.LONGLONG] = 'c_int%d' % (size)
self.ctypes_typename[TypeKind.ULONGLONG] = 'c_uint%d' % (size)
self.ctypes_sizes[TypeKind.LONGLONG] = size
self.ctypes_sizes[TypeKind.ULONGLONG] = size
# FIXME : Float && http://en.wikipedia.org/wiki/Long_double
size0 = util.get_cursor(tu, 'float_t').type.get_size() * 8
size1 = util.get_cursor(tu, 'double_t').type.get_size() * 8
size2 = util.get_cursor(tu, 'longdouble_t').type.get_size() * 8
# 2014-01 stop generating crap.
# 2015-01 reverse until better solution is found
# the idea is that you cannot assume a c_double will be the same format as a c_long_double.
# at least this passes the sizes from the TU
if size1 != size2:
self.ctypes_typename[TypeKind.LONGDOUBLE] = 'c_long_double_t'
else:
self.ctypes_typename[TypeKind.LONGDOUBLE] = 'c_double'
self.ctypes_sizes[TypeKind.FLOAT] = size0
self.ctypes_sizes[TypeKind.DOUBLE] = size1
self.ctypes_sizes[TypeKind.LONGDOUBLE] = size2
# save the target pointer size.
size = util.get_cursor(tu, 'pointer_t').type.get_size() * 8
self.ctypes_sizes[TypeKind.POINTER] = size
self.ctypes_sizes[TypeKind.NULLPTR] = size
log.debug('ARCH sizes: long:%s longdouble:%s',
self.ctypes_typename[TypeKind.LONG],
self.ctypes_typename[TypeKind.LONGDOUBLE])
return |
def get_description(self):
"""
Tries to get WF description from 'collaboration' or 'process' or 'participant'
Returns str: WF description
"""
paths = ['bpmn:collaboration/bpmn:participant/bpmn:documentation',
'bpmn:collaboration/bpmn:documentation',
'bpmn:process/bpmn:documentation']
for path in paths:
elm = self.root.find(path, NS)
if elm is not None and elm.text:
return elm.text | Tries to get WF description from 'collaboration' or 'process' or 'participant'
Returns str: WF description | Below is the instruction that describes the task:
### Input:
Tries to get WF description from 'collaboration' or 'process' or 'participant'
Returns str: WF description
### Response:
def get_description(self):
"""
Tries to get WF description from 'collaboration' or 'process' or 'participant'
Returns str: WF description
"""
paths = ['bpmn:collaboration/bpmn:participant/bpmn:documentation',
'bpmn:collaboration/bpmn:documentation',
'bpmn:process/bpmn:documentation']
for path in paths:
elm = self.root.find(path, NS)
if elm is not None and elm.text:
return elm.text |
def file_crc32(filePath):
"""计算文件的crc32检验码:
Args:
filePath: path of the file whose checksum is to be computed
Returns:
The crc32 checksum of the file contents.
"""
crc = 0
with open(filePath, 'rb') as f:
for block in _file_iter(f, _BLOCK_SIZE):
crc = binascii.crc32(block, crc) & 0xFFFFFFFF
return crc | Compute the crc32 checksum of a file:
Args:
filePath: path of the file whose checksum is to be computed
Returns:
The crc32 checksum of the file contents. | Below is the instruction that describes the task:
### Input:
Compute the crc32 checksum of a file:
Args:
filePath: path of the file whose checksum is to be computed
Returns:
The crc32 checksum of the file contents.
### Response:
def file_crc32(filePath):
"""计算文件的crc32检验码:
Args:
filePath: path of the file whose checksum is to be computed
Returns:
The crc32 checksum of the file contents.
"""
crc = 0
with open(filePath, 'rb') as f:
for block in _file_iter(f, _BLOCK_SIZE):
crc = binascii.crc32(block, crc) & 0xFFFFFFFF
return crc |
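An equivalence check, as a sketch (the file name is hypothetical): the block-wise running CRC equals a one-shot crc32 over the whole file.

    import binascii

    with open('data.bin', 'rb') as f:
        assert binascii.crc32(f.read()) & 0xFFFFFFFF == file_crc32('data.bin')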
def remote_archive(class_obj: type) -> type:
"""
Decorator to annotate the RemoteArchive class. Registers the decorated class
as the RemoteArchive known type.
"""
assert isinstance(class_obj, type), "class_obj is not a Class"
global _remote_archive_resource_type
_remote_archive_resource_type = class_obj
return class_obj | Decorator to annotate the RemoteArchive class. Registers the decorated class
as the RemoteArchive known type. | Below is the instruction that describes the task:
### Input:
Decorator to annotate the RemoteArchive class. Registers the decorated class
as the RemoteArchive known type.
### Response:
def remote_archive(class_obj: type) -> type:
"""
Decorator to annotate the RemoteArchive class. Registers the decorated class
as the RemoteArchive known type.
"""
assert isinstance(class_obj, type), "class_obj is not a Class"
global _remote_archive_resource_type
_remote_archive_resource_type = class_obj
return class_obj |
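Intended usage, per the decorator's contract (the class body is a placeholder):

    @remote_archive
    class RemoteArchive:
        """Placeholder; the real class lives elsewhere."""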
def add_state(self, state_name, initial_state, batch_size=None):
"""Adds a state to the state saver.
Args:
state_name: The name of this state.
initial_state: The initial state vector. Only zeros are supported.
batch_size: The batch_size or None for unknown.
"""
state_shape = initial_state.get_shape().as_list()
full_shape = [batch_size] + state_shape
if not batch_size:
# TODO(): -1 is now reserved for unknown, so this should be
# updated, but that requires coordination with the binary and is
# checkpoint incompatible.
# TODO(eiderman): When we make the above breaking change, we should make
# the C++ client use the initial state instead of passing in zeros.
shape_proto = self._as_shape_proto([0] + state_shape)
batch_size = 1
else:
shape_proto = self._as_shape_proto([batch_size] + state_shape)
# Add a constant tensor of zeros. At training time, this will initialize
# the state with the initial_state - at inference time,
# this node is replaced by a feed.
tiles = [batch_size] + ([1] * len(initial_state.get_shape()))
feed_op = tf.placeholder_with_default(
tf.tile(
tf.expand_dims(initial_state, [0]), tiles),
shape=full_shape,
name='%s_feed' % state_name)
s = {'feed_op': feed_op,
'feed_type': initial_state.dtype,
'feed_shape': shape_proto}
self._states[state_name] = s | Adds a state to the state saver.
Args:
state_name: The name of this state.
initial_state: The initial state vector. Only zeros are supported.
batch_size: The batch_size or None for unknown. | Below is the instruction that describes the task:
### Input:
Adds a state to the state saver.
Args:
state_name: The name of this state.
initial_state: The initial state vector. Only zeros are supported.
batch_size: The batch_size or None for unknown.
### Response:
def add_state(self, state_name, initial_state, batch_size=None):
"""Adds a state to the state saver.
Args:
state_name: The name of this state.
initial_state: The initial state vector. Only zeros are supported.
batch_size: The batch_size or None for unknown.
"""
state_shape = initial_state.get_shape().as_list()
full_shape = [batch_size] + state_shape
if not batch_size:
# TODO(): -1 is now reserved for unknown, so this should be
# updated, but that requires coordination with the binary and is
# checkpoint incompatible.
# TODO(eiderman): When we make the above breaking change, we should make
# the C++ client use the initial state instead of passing in zeros.
shape_proto = self._as_shape_proto([0] + state_shape)
batch_size = 1
else:
shape_proto = self._as_shape_proto([batch_size] + state_shape)
# Add a constant tensor of zeros. At training time, this will initialize
# the state with the initial_state - at inference time,
# this node is replaced by a feed.
tiles = [batch_size] + ([1] * len(initial_state.get_shape()))
feed_op = tf.placeholder_with_default(
tf.tile(
tf.expand_dims(initial_state, [0]), tiles),
shape=full_shape,
name='%s_feed' % state_name)
s = {'feed_op': feed_op,
'feed_type': initial_state.dtype,
'feed_shape': shape_proto}
self._states[state_name] = s |
def validate_name(self, name):
"""
Can the name be used as a python module or package?
Raises ``ValueError`` if the name is invalid.
:param name: the name to check
"""
if not name:
raise ValueError("Name cannot be empty")
# Can the name be used as an identifier in python (module or package name)
if not name.isidentifier():
raise ValueError("{} is not a valid identifier".format(name)) | Can the name be used as a python module or package?
Raises ``ValueError`` if the name is invalid.
:param name: the name to check | Below is the instruction that describes the task:
### Input:
Can the name be used as a python module or package?
Raises ``ValueError`` if the name is invalid.
:param name: the name to check
### Response:
def validate_name(self, name):
"""
Can the name be used as a python module or package?
Raises ``ValueError`` if the name is invalid.
:param name: the name to check
"""
if not name:
raise ValueError("Name cannot be empty")
# Can the name be used as an identifier in python (module or package name)
if not name.isidentifier():
raise ValueError("{} is not a valid identifier".format(name)) |
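Note that `str.isidentifier` checks syntax only, so reserved words such as `class` still pass it. A short sketch of the check in use, with an optional keyword guard that is an addition here, not part of the original method:

```python
import keyword

def validate_name(name):
    if not name:
        raise ValueError("Name cannot be empty")
    if not name.isidentifier():
        raise ValueError("{} is not a valid identifier".format(name))
    if keyword.iskeyword(name):  # extra guard, not in the original
        raise ValueError("{} is a reserved keyword".format(name))

validate_name("my_package")   # passes silently
# validate_name("2fast")      # would raise: not a valid identifier
# validate_name("class")      # would raise under the extra keyword guard
```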
def setup(app):
''' Required Sphinx extension setup function. '''
app.add_node(bokeh_palette_group, html=(html_visit_bokeh_palette_group, None))
app.add_directive('bokeh-palette-group', BokehPaletteGroupDirective) | Required Sphinx extension setup function. | Below is the instruction that describes the task:
### Input:
Required Sphinx extension setup function.
### Response:
def setup(app):
''' Required Sphinx extension setup function. '''
app.add_node(bokeh_palette_group, html=(html_visit_bokeh_palette_group, None))
app.add_directive('bokeh-palette-group', BokehPaletteGroupDirective) |
def _update_state(self, vals):
"""
Takes as input a list or tuple of two elements. First the
value returned by incrementing by 'stepsize' followed by the
value returned after a 'stepsize' decrement.
"""
self._steps_complete += 1
if self._steps_complete == self.max_steps:
self._termination_info = (False, self._best_val, self._arg)
return StopIteration
arg_inc, arg_dec = vals
best_val = min(arg_inc, arg_dec, self._best_val)
if best_val == self._best_val:
self._termination_info = (True, best_val, self._arg)
return StopIteration
self._arg += self.stepsize if (arg_dec > arg_inc) else -self.stepsize
self._best_val = best_val
return [{self.key:self._arg+self.stepsize},
{self.key:self._arg-self.stepsize}] | Takes as input a list or tuple of two elements. First the
value returned by incrementing by 'stepsize' followed by the
value returned after a 'stepsize' decrement. | Below is the instruction that describes the task:
### Input:
Takes as input a list or tuple of two elements. First the
value returned by incrementing by 'stepsize' followed by the
value returned after a 'stepsize' decrement.
### Response:
def _update_state(self, vals):
"""
Takes as input a list or tuple of two elements. First the
value returned by incrementing by 'stepsize' followed by the
value returned after a 'stepsize' decrement.
"""
self._steps_complete += 1
if self._steps_complete == self.max_steps:
self._termination_info = (False, self._best_val, self._arg)
return StopIteration
arg_inc, arg_dec = vals
best_val = min(arg_inc, arg_dec, self._best_val)
if best_val == self._best_val:
self._termination_info = (True, best_val, self._arg)
return StopIteration
self._arg += self.stepsize if (arg_dec > arg_inc) else -self.stepsize
self._best_val = best_val
return [{self.key:self._arg+self.stepsize},
{self.key:self._arg-self.stepsize}] |
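The update rule above is one step of a greedy 1-D descent: probe one `stepsize` up and one down, stop when neither probe beats the best value seen so far, otherwise move toward the better side. A self-contained sketch of the whole loop with an illustrative objective:

```python
def descend_1d(f, start, stepsize, max_steps):
    """Greedy 1-D descent mirroring the update rule above."""
    arg, best = start, f(start)
    for _ in range(max_steps):
        up, down = f(arg + stepsize), f(arg - stepsize)
        if min(up, down) >= best:        # neither neighbour improves: converged
            return arg, best
        arg += stepsize if up < down else -stepsize
        best = min(up, down)
    return arg, best                     # step budget exhausted

print(descend_1d(lambda x: (x - 3) ** 2, start=0.0, stepsize=0.5, max_steps=50))
# (3.0, 0.0)
```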
def setup(self):
"""Initialize the driver by setting up GPIO interrupts
and periodic statistics processing. """
# Initialize the statistics variables.
self.radiation_count = 0
self.noise_count = 0
self.count = 0
# Initialize count_history[].
self.count_history = [0] * HISTORY_LENGTH
self.history_index = 0
# Init measurement time.
self.previous_time = millis()
self.previous_history_time = millis()
self.duration = 0
# Init the GPIO context.
GPIO.setup(self.radiation_pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(self.noise_pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
# Register local callbacks.
GPIO.add_event_detect(
self.radiation_pin, GPIO.FALLING, callback=self._on_radiation
)
GPIO.add_event_detect(self.noise_pin, GPIO.FALLING, callback=self._on_noise)
# Enable the timer for processing the statistics periodically.
self._enable_timer()
return self | Initialize the driver by setting up GPIO interrupts
and periodic statistics processing. | Below is the instruction that describes the task:
### Input:
Initialize the driver by setting up GPIO interrupts
and periodic statistics processing.
### Response:
def setup(self):
"""Initialize the driver by setting up GPIO interrupts
and periodic statistics processing. """
# Initialize the statistics variables.
self.radiation_count = 0
self.noise_count = 0
self.count = 0
# Initialize count_history[].
self.count_history = [0] * HISTORY_LENGTH
self.history_index = 0
# Init measurement time.
self.previous_time = millis()
self.previous_history_time = millis()
self.duration = 0
# Init the GPIO context.
GPIO.setup(self.radiation_pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(self.noise_pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
# Register local callbacks.
GPIO.add_event_detect(
self.radiation_pin, GPIO.FALLING, callback=self._on_radiation
)
GPIO.add_event_detect(self.noise_pin, GPIO.FALLING, callback=self._on_noise)
# Enable the timer for processing the statistics periodically.
self._enable_timer()
return self |
def format(self, record):
"""
The formatting function
:param record: The record object
:return: The string representation of the record
"""
try:
n = record.n
except AttributeError:
n = 'default'
try:
message = record.message
except AttributeError:
message = record.msg
senml = OrderedDict(
uid="hyperstream",
bt=datetime.utcfromtimestamp(record.created).isoformat()[:-3] + 'Z',
e=[OrderedDict(n=n, v=message)]
)
formatted_json = json.dumps(senml)
return formatted_json | The formatting function
:param record: The record object
:return: The string representation of the record | Below is the instruction that describes the task:
### Input:
The formatting function
:param record: The record object
:return: The string representation of the record
### Response:
def format(self, record):
"""
The formatting function
:param record: The record object
:return: The string representation of the record
"""
try:
n = record.n
except AttributeError:
n = 'default'
try:
message = record.message
except AttributeError:
message = record.msg
senml = OrderedDict(
uid="hyperstream",
bt=datetime.utcfromtimestamp(record.created).isoformat()[:-3] + 'Z',
e=[OrderedDict(n=n, v=message)]
)
formatted_json = json.dumps(senml)
return formatted_json |
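The formatter serializes each record as a SenML-style JSON object with a base time (`bt`) and one event entry. A runnable sketch wiring an equivalent formatter into the stdlib `logging` module (the `hyperstream` uid is kept from the original; the `n` attribute is an optional per-record extra):

```python
import json
import logging
from collections import OrderedDict
from datetime import datetime

class SenmlFormatter(logging.Formatter):
    def format(self, record):
        n = getattr(record, 'n', 'default')  # optional per-record name attribute
        senml = OrderedDict(
            uid="hyperstream",
            bt=datetime.utcfromtimestamp(record.created).isoformat()[:-3] + 'Z',
            e=[OrderedDict(n=n, v=record.getMessage())],
        )
        return json.dumps(senml)

handler = logging.StreamHandler()
handler.setFormatter(SenmlFormatter())
logger = logging.getLogger("demo")
logger.addHandler(handler)
logger.warning("sensor offline")  # emits one JSON object per record
```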
def _parse_json(doc, exactly_one=True):
"""
    Parse a location name, latitude, and longitude from a JSON response.
"""
status_code = doc.get("statusCode", 200)
if status_code != 200:
err = doc.get("errorDetails", "")
if status_code == 401:
raise GeocoderAuthenticationFailure(err)
elif status_code == 403:
raise GeocoderInsufficientPrivileges(err)
elif status_code == 429:
raise GeocoderQuotaExceeded(err)
elif status_code == 503:
raise GeocoderUnavailable(err)
else:
raise GeocoderServiceError(err)
try:
resources = doc['Response']['View'][0]['Result']
except IndexError:
resources = None
if not resources:
return None
def parse_resource(resource):
"""
Parse each return object.
"""
stripchars = ", \n"
addr = resource['Location']['Address']
address = addr.get('Label', '').strip(stripchars)
city = addr.get('City', '').strip(stripchars)
state = addr.get('State', '').strip(stripchars)
zipcode = addr.get('PostalCode', '').strip(stripchars)
country = addr.get('Country', '').strip(stripchars)
city_state = join_filter(", ", [city, state])
place = join_filter(" ", [city_state, zipcode])
location = join_filter(", ", [address, place, country])
display_pos = resource['Location']['DisplayPosition']
latitude = float(display_pos['Latitude'])
longitude = float(display_pos['Longitude'])
return Location(location, (latitude, longitude), resource)
if exactly_one:
return parse_resource(resources[0])
else:
        return [parse_resource(resource) for resource in resources] | Parse a location name, latitude, and longitude from a JSON response. | Below is the instruction that describes the task:
### Input:
Parse a location name, latitude, and longitude from a JSON response.
### Response:
def _parse_json(doc, exactly_one=True):
"""
    Parse a location name, latitude, and longitude from a JSON response.
"""
status_code = doc.get("statusCode", 200)
if status_code != 200:
err = doc.get("errorDetails", "")
if status_code == 401:
raise GeocoderAuthenticationFailure(err)
elif status_code == 403:
raise GeocoderInsufficientPrivileges(err)
elif status_code == 429:
raise GeocoderQuotaExceeded(err)
elif status_code == 503:
raise GeocoderUnavailable(err)
else:
raise GeocoderServiceError(err)
try:
resources = doc['Response']['View'][0]['Result']
except IndexError:
resources = None
if not resources:
return None
def parse_resource(resource):
"""
Parse each return object.
"""
stripchars = ", \n"
addr = resource['Location']['Address']
address = addr.get('Label', '').strip(stripchars)
city = addr.get('City', '').strip(stripchars)
state = addr.get('State', '').strip(stripchars)
zipcode = addr.get('PostalCode', '').strip(stripchars)
country = addr.get('Country', '').strip(stripchars)
city_state = join_filter(", ", [city, state])
place = join_filter(" ", [city_state, zipcode])
location = join_filter(", ", [address, place, country])
display_pos = resource['Location']['DisplayPosition']
latitude = float(display_pos['Latitude'])
longitude = float(display_pos['Longitude'])
return Location(location, (latitude, longitude), resource)
if exactly_one:
return parse_resource(resources[0])
else:
return [parse_resource(resource) for resource in resources] |
def skip(self):
'''Mark the item as processed without download.'''
_logger.debug(__(_('Skipping ‘{url}’.'), url=self.url_record.url))
self.app_session.factory['URLTable'].check_in(self.url_record.url, Status.skipped)
    self._processed = True | Mark the item as processed without download. | Below is the instruction that describes the task:
### Input:
Mark the item as processed without download.
### Response:
def skip(self):
'''Mark the item as processed without download.'''
_logger.debug(__(_('Skipping ‘{url}’.'), url=self.url_record.url))
self.app_session.factory['URLTable'].check_in(self.url_record.url, Status.skipped)
self._processed = True |
def write_to_file(self, filename):
"""Write the molecular geometry to a file.
The file format is inferred from the extensions. Currently supported
formats are: ``*.xyz``, ``*.cml``
Argument:
| ``filename`` -- a filename
"""
# TODO: give all file format writers the same API
if filename.endswith('.cml'):
from molmod.io import dump_cml
dump_cml(filename, [self])
elif filename.endswith('.xyz'):
from molmod.io import XYZWriter
symbols = []
for n in self.numbers:
atom = periodic[n]
if atom is None:
symbols.append("X")
else:
symbols.append(atom.symbol)
xyz_writer = XYZWriter(filename, symbols)
xyz_writer.dump(self.title, self.coordinates)
del xyz_writer
else:
raise ValueError("Could not determine file format for %s." % filename) | Write the molecular geometry to a file.
The file format is inferred from the extensions. Currently supported
formats are: ``*.xyz``, ``*.cml``
Argument:
| ``filename`` -- a filename | Below is the instruction that describes the task:
### Input:
Write the molecular geometry to a file.
The file format is inferred from the extensions. Currently supported
formats are: ``*.xyz``, ``*.cml``
Argument:
| ``filename`` -- a filename
### Response:
def write_to_file(self, filename):
"""Write the molecular geometry to a file.
The file format is inferred from the extensions. Currently supported
formats are: ``*.xyz``, ``*.cml``
Argument:
| ``filename`` -- a filename
"""
# TODO: give all file format writers the same API
if filename.endswith('.cml'):
from molmod.io import dump_cml
dump_cml(filename, [self])
elif filename.endswith('.xyz'):
from molmod.io import XYZWriter
symbols = []
for n in self.numbers:
atom = periodic[n]
if atom is None:
symbols.append("X")
else:
symbols.append(atom.symbol)
xyz_writer = XYZWriter(filename, symbols)
xyz_writer.dump(self.title, self.coordinates)
del xyz_writer
else:
raise ValueError("Could not determine file format for %s." % filename) |
def query_by_id(self, id):
"""
Returns object with _id = id.
:param id: Value for ID. If not a string will be coerced to string.
:type id: ``string``
:return: Document with id
:rtype: ``dict``
"""
return json.loads(self._get(UrlEncoded(str(id))).body.read().decode('utf-8')) | Returns object with _id = id.
:param id: Value for ID. If not a string will be coerced to string.
:type id: ``string``
:return: Document with id
:rtype: ``dict`` | Below is the instruction that describes the task:
### Input:
Returns object with _id = id.
:param id: Value for ID. If not a string will be coerced to string.
:type id: ``string``
:return: Document with id
:rtype: ``dict``
### Response:
def query_by_id(self, id):
"""
Returns object with _id = id.
:param id: Value for ID. If not a string will be coerced to string.
:type id: ``string``
:return: Document with id
:rtype: ``dict``
"""
return json.loads(self._get(UrlEncoded(str(id))).body.read().decode('utf-8')) |
def reassign_label(self, label, new_label, relabel=False):
"""
Reassign a label number to a new number.
If ``new_label`` is already present in the segmentation image,
then it will be combined with the input ``label`` number.
Parameters
----------
labels : int
The label number to reassign.
new_label : int
The newly assigned label number.
relabel : bool, optional
If `True`, then the segmentation image will be relabeled
such that the labels are in consecutive order starting from
1.
Examples
--------
>>> from photutils import SegmentationImage
>>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4],
... [0, 0, 0, 0, 0, 4],
... [0, 0, 3, 3, 0, 0],
... [7, 0, 0, 0, 0, 5],
... [7, 7, 0, 5, 5, 5],
... [7, 7, 0, 0, 5, 5]])
>>> segm.reassign_label(label=1, new_label=2)
>>> segm.data
array([[2, 2, 0, 0, 4, 4],
[0, 0, 0, 0, 0, 4],
[0, 0, 3, 3, 0, 0],
[7, 0, 0, 0, 0, 5],
[7, 7, 0, 5, 5, 5],
[7, 7, 0, 0, 5, 5]])
>>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4],
... [0, 0, 0, 0, 0, 4],
... [0, 0, 3, 3, 0, 0],
... [7, 0, 0, 0, 0, 5],
... [7, 7, 0, 5, 5, 5],
... [7, 7, 0, 0, 5, 5]])
>>> segm.reassign_label(label=1, new_label=4)
>>> segm.data
array([[4, 4, 0, 0, 4, 4],
[0, 0, 0, 0, 0, 4],
[0, 0, 3, 3, 0, 0],
[7, 0, 0, 0, 0, 5],
[7, 7, 0, 5, 5, 5],
[7, 7, 0, 0, 5, 5]])
>>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4],
... [0, 0, 0, 0, 0, 4],
... [0, 0, 3, 3, 0, 0],
... [7, 0, 0, 0, 0, 5],
... [7, 7, 0, 5, 5, 5],
... [7, 7, 0, 0, 5, 5]])
>>> segm.reassign_label(label=1, new_label=4, relabel=True)
>>> segm.data
array([[2, 2, 0, 0, 2, 2],
[0, 0, 0, 0, 0, 2],
[0, 0, 1, 1, 0, 0],
[4, 0, 0, 0, 0, 3],
[4, 4, 0, 3, 3, 3],
[4, 4, 0, 0, 3, 3]])
"""
self.reassign_labels(label, new_label, relabel=relabel) | Reassign a label number to a new number.
If ``new_label`` is already present in the segmentation image,
then it will be combined with the input ``label`` number.
Parameters
----------
labels : int
The label number to reassign.
new_label : int
The newly assigned label number.
relabel : bool, optional
If `True`, then the segmentation image will be relabeled
such that the labels are in consecutive order starting from
1.
Examples
--------
>>> from photutils import SegmentationImage
>>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4],
... [0, 0, 0, 0, 0, 4],
... [0, 0, 3, 3, 0, 0],
... [7, 0, 0, 0, 0, 5],
... [7, 7, 0, 5, 5, 5],
... [7, 7, 0, 0, 5, 5]])
>>> segm.reassign_label(label=1, new_label=2)
>>> segm.data
array([[2, 2, 0, 0, 4, 4],
[0, 0, 0, 0, 0, 4],
[0, 0, 3, 3, 0, 0],
[7, 0, 0, 0, 0, 5],
[7, 7, 0, 5, 5, 5],
[7, 7, 0, 0, 5, 5]])
>>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4],
... [0, 0, 0, 0, 0, 4],
... [0, 0, 3, 3, 0, 0],
... [7, 0, 0, 0, 0, 5],
... [7, 7, 0, 5, 5, 5],
... [7, 7, 0, 0, 5, 5]])
>>> segm.reassign_label(label=1, new_label=4)
>>> segm.data
array([[4, 4, 0, 0, 4, 4],
[0, 0, 0, 0, 0, 4],
[0, 0, 3, 3, 0, 0],
[7, 0, 0, 0, 0, 5],
[7, 7, 0, 5, 5, 5],
[7, 7, 0, 0, 5, 5]])
>>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4],
... [0, 0, 0, 0, 0, 4],
... [0, 0, 3, 3, 0, 0],
... [7, 0, 0, 0, 0, 5],
... [7, 7, 0, 5, 5, 5],
... [7, 7, 0, 0, 5, 5]])
>>> segm.reassign_label(label=1, new_label=4, relabel=True)
>>> segm.data
array([[2, 2, 0, 0, 2, 2],
[0, 0, 0, 0, 0, 2],
[0, 0, 1, 1, 0, 0],
[4, 0, 0, 0, 0, 3],
[4, 4, 0, 3, 3, 3],
    [4, 4, 0, 0, 3, 3]]) | Below is the instruction that describes the task:
### Input:
Reassign a label number to a new number.
If ``new_label`` is already present in the segmentation image,
then it will be combined with the input ``label`` number.
Parameters
----------
labels : int
The label number to reassign.
new_label : int
The newly assigned label number.
relabel : bool, optional
If `True`, then the segmentation image will be relabeled
such that the labels are in consecutive order starting from
1.
Examples
--------
>>> from photutils import SegmentationImage
>>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4],
... [0, 0, 0, 0, 0, 4],
... [0, 0, 3, 3, 0, 0],
... [7, 0, 0, 0, 0, 5],
... [7, 7, 0, 5, 5, 5],
... [7, 7, 0, 0, 5, 5]])
>>> segm.reassign_label(label=1, new_label=2)
>>> segm.data
array([[2, 2, 0, 0, 4, 4],
[0, 0, 0, 0, 0, 4],
[0, 0, 3, 3, 0, 0],
[7, 0, 0, 0, 0, 5],
[7, 7, 0, 5, 5, 5],
[7, 7, 0, 0, 5, 5]])
>>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4],
... [0, 0, 0, 0, 0, 4],
... [0, 0, 3, 3, 0, 0],
... [7, 0, 0, 0, 0, 5],
... [7, 7, 0, 5, 5, 5],
... [7, 7, 0, 0, 5, 5]])
>>> segm.reassign_label(label=1, new_label=4)
>>> segm.data
array([[4, 4, 0, 0, 4, 4],
[0, 0, 0, 0, 0, 4],
[0, 0, 3, 3, 0, 0],
[7, 0, 0, 0, 0, 5],
[7, 7, 0, 5, 5, 5],
[7, 7, 0, 0, 5, 5]])
>>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4],
... [0, 0, 0, 0, 0, 4],
... [0, 0, 3, 3, 0, 0],
... [7, 0, 0, 0, 0, 5],
... [7, 7, 0, 5, 5, 5],
... [7, 7, 0, 0, 5, 5]])
>>> segm.reassign_label(label=1, new_label=4, relabel=True)
>>> segm.data
array([[2, 2, 0, 0, 2, 2],
[0, 0, 0, 0, 0, 2],
[0, 0, 1, 1, 0, 0],
[4, 0, 0, 0, 0, 3],
[4, 4, 0, 3, 3, 3],
[4, 4, 0, 0, 3, 3]])
### Response:
def reassign_label(self, label, new_label, relabel=False):
"""
Reassign a label number to a new number.
If ``new_label`` is already present in the segmentation image,
then it will be combined with the input ``label`` number.
Parameters
----------
labels : int
The label number to reassign.
new_label : int
The newly assigned label number.
relabel : bool, optional
If `True`, then the segmentation image will be relabeled
such that the labels are in consecutive order starting from
1.
Examples
--------
>>> from photutils import SegmentationImage
>>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4],
... [0, 0, 0, 0, 0, 4],
... [0, 0, 3, 3, 0, 0],
... [7, 0, 0, 0, 0, 5],
... [7, 7, 0, 5, 5, 5],
... [7, 7, 0, 0, 5, 5]])
>>> segm.reassign_label(label=1, new_label=2)
>>> segm.data
array([[2, 2, 0, 0, 4, 4],
[0, 0, 0, 0, 0, 4],
[0, 0, 3, 3, 0, 0],
[7, 0, 0, 0, 0, 5],
[7, 7, 0, 5, 5, 5],
[7, 7, 0, 0, 5, 5]])
>>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4],
... [0, 0, 0, 0, 0, 4],
... [0, 0, 3, 3, 0, 0],
... [7, 0, 0, 0, 0, 5],
... [7, 7, 0, 5, 5, 5],
... [7, 7, 0, 0, 5, 5]])
>>> segm.reassign_label(label=1, new_label=4)
>>> segm.data
array([[4, 4, 0, 0, 4, 4],
[0, 0, 0, 0, 0, 4],
[0, 0, 3, 3, 0, 0],
[7, 0, 0, 0, 0, 5],
[7, 7, 0, 5, 5, 5],
[7, 7, 0, 0, 5, 5]])
>>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4],
... [0, 0, 0, 0, 0, 4],
... [0, 0, 3, 3, 0, 0],
... [7, 0, 0, 0, 0, 5],
... [7, 7, 0, 5, 5, 5],
... [7, 7, 0, 0, 5, 5]])
>>> segm.reassign_label(label=1, new_label=4, relabel=True)
>>> segm.data
array([[2, 2, 0, 0, 2, 2],
[0, 0, 0, 0, 0, 2],
[0, 0, 1, 1, 0, 0],
[4, 0, 0, 0, 0, 3],
[4, 4, 0, 3, 3, 3],
[4, 4, 0, 0, 3, 3]])
"""
self.reassign_labels(label, new_label, relabel=relabel) |
def numToDigits(num, places):
"""
Helper for converting numbers to textual digits.
"""
s = str(num)
if len(s) < places:
return ("0" * (places - len(s))) + s
elif len(s) > places:
        return s[len(s) - places:]
else:
        return s | Helper for converting numbers to textual digits. | Below is the instruction that describes the task:
### Input:
Helper for converting numbers to textual digits.
### Response:
def numToDigits(num, places):
"""
    Helper for converting numbers to textual digits.
"""
s = str(num)
if len(s) < places:
return ("0" * (places - len(s))) + s
elif len(s) > places:
        return s[len(s) - places:]
else:
return s |
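The function left-pads short numbers with zeros and keeps the last `places` digits of long ones; in modern Python the same behaviour collapses to a `zfill` plus a slice:

```python
def num_to_digits(num, places):
    # zfill pads on the left; the slice keeps the last `places` characters.
    return str(num).zfill(places)[-places:]

assert num_to_digits(7, 3) == "007"      # padded
assert num_to_digits(12345, 3) == "345"  # truncated to the last 3 digits
assert num_to_digits(123, 3) == "123"    # unchanged
```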
def authorize_host_to_volume(self, volume_id,
hardware_ids=None,
virtual_guest_ids=None,
ip_address_ids=None,
**kwargs):
"""Authorizes hosts to Block Storage Volumes
:param volume_id: The Block volume to authorize hosts to
:param hardware_ids: A List of SoftLayer_Hardware ids
:param virtual_guest_ids: A List of SoftLayer_Virtual_Guest ids
:param ip_address_ids: A List of SoftLayer_Network_Subnet_IpAddress ids
:return: Returns an array of
SoftLayer_Network_Storage_Allowed_Host objects
which now have access to the given Block volume
"""
host_templates = []
storage_utils.populate_host_templates(host_templates,
hardware_ids,
virtual_guest_ids,
ip_address_ids,
None)
return self.client.call('Network_Storage', 'allowAccessFromHostList',
host_templates, id=volume_id, **kwargs) | Authorizes hosts to Block Storage Volumes
:param volume_id: The Block volume to authorize hosts to
:param hardware_ids: A List of SoftLayer_Hardware ids
:param virtual_guest_ids: A List of SoftLayer_Virtual_Guest ids
:param ip_address_ids: A List of SoftLayer_Network_Subnet_IpAddress ids
:return: Returns an array of
SoftLayer_Network_Storage_Allowed_Host objects
which now have access to the given Block volume | Below is the instruction that describes the task:
### Input:
Authorizes hosts to Block Storage Volumes
:param volume_id: The Block volume to authorize hosts to
:param hardware_ids: A List of SoftLayer_Hardware ids
:param virtual_guest_ids: A List of SoftLayer_Virtual_Guest ids
:param ip_address_ids: A List of SoftLayer_Network_Subnet_IpAddress ids
:return: Returns an array of
SoftLayer_Network_Storage_Allowed_Host objects
which now have access to the given Block volume
### Response:
def authorize_host_to_volume(self, volume_id,
hardware_ids=None,
virtual_guest_ids=None,
ip_address_ids=None,
**kwargs):
"""Authorizes hosts to Block Storage Volumes
:param volume_id: The Block volume to authorize hosts to
:param hardware_ids: A List of SoftLayer_Hardware ids
:param virtual_guest_ids: A List of SoftLayer_Virtual_Guest ids
:param ip_address_ids: A List of SoftLayer_Network_Subnet_IpAddress ids
:return: Returns an array of
SoftLayer_Network_Storage_Allowed_Host objects
which now have access to the given Block volume
"""
host_templates = []
storage_utils.populate_host_templates(host_templates,
hardware_ids,
virtual_guest_ids,
ip_address_ids,
None)
return self.client.call('Network_Storage', 'allowAccessFromHostList',
host_templates, id=volume_id, **kwargs) |
def grid_1d(self):
""" The arc second-grid of (y,x) coordinates of every pixel.
This is defined from the top-left corner, such that the first pixel at location [0, 0] will have a negative x \
value y value in arc seconds.
"""
return grid_util.regular_grid_1d_from_shape_pixel_scales_and_origin(shape=self.shape,
pixel_scales=self.pixel_scales,
origin=self.origin) | The arc-second grid of (y,x) coordinates of every pixel.
This is defined from the top-left corner, such that the first pixel at location [0, 0] will have a negative x \
value and a positive y value in arc seconds. | Below is the instruction that describes the task:
### Input:
The arc-second grid of (y,x) coordinates of every pixel.
This is defined from the top-left corner, such that the first pixel at location [0, 0] will have a negative x \
value and a positive y value in arc seconds.
### Response:
def grid_1d(self):
""" The arc second-grid of (y,x) coordinates of every pixel.
This is defined from the top-left corner, such that the first pixel at location [0, 0] will have a negative x \
value y value in arc seconds.
"""
return grid_util.regular_grid_1d_from_shape_pixel_scales_and_origin(shape=self.shape,
pixel_scales=self.pixel_scales,
origin=self.origin) |
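A numpy sketch of the convention the docstring describes: a flattened (y, x) grid of pixel centres in arc seconds, where row 0 is the top of the image, so the first pixel has a positive y and a negative x. This is an illustrative stand-in; the library's own `grid_util` routine may differ in details:

```python
import numpy as np

def regular_grid_1d(shape, pixel_scales, origin=(0.0, 0.0)):
    """Return an (N, 2) array of (y, x) arc-second pixel centres, row by row."""
    ys, xs = shape
    y_scale, x_scale = pixel_scales
    y = origin[0] + y_scale * ((ys - 1) / 2.0 - np.arange(ys))  # y decreases downward
    x = origin[1] + x_scale * (np.arange(xs) - (xs - 1) / 2.0)  # x increases rightward
    yy, xx = np.meshgrid(y, x, indexing='ij')
    return np.stack([yy.ravel(), xx.ravel()], axis=1)

grid = regular_grid_1d(shape=(2, 2), pixel_scales=(1.0, 1.0))
print(grid[0])  # [ 0.5 -0.5] -- top-left pixel: positive y, negative x
```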
def check(self):
"""Check that the entry has the required fields."""
# Make sure there is a schema key in dict
if self._KEYS.SCHEMA not in self:
self[self._KEYS.SCHEMA] = self.catalog.SCHEMA.URL
# Make sure there is a name key in dict
if (self._KEYS.NAME not in self or len(self[self._KEYS.NAME]) == 0):
raise ValueError("Entry name is empty:\n\t{}".format(
json.dumps(
self, indent=2)))
    return | Check that the entry has the required fields. | Below is the instruction that describes the task:
### Input:
Check that the entry has the required fields.
### Response:
def check(self):
"""Check that the entry has the required fields."""
# Make sure there is a schema key in dict
if self._KEYS.SCHEMA not in self:
self[self._KEYS.SCHEMA] = self.catalog.SCHEMA.URL
# Make sure there is a name key in dict
if (self._KEYS.NAME not in self or len(self[self._KEYS.NAME]) == 0):
raise ValueError("Entry name is empty:\n\t{}".format(
json.dumps(
self, indent=2)))
return |
def request_comments(self, id, **kwargs):
"https://developer.zendesk.com/rest_api/docs/core/requests#listing-comments"
api_path = "/api/v2/requests/{id}/comments.json"
api_path = api_path.format(id=id)
    return self.call(api_path, **kwargs) | https://developer.zendesk.com/rest_api/docs/core/requests#listing-comments | Below is the instruction that describes the task:
### Input:
https://developer.zendesk.com/rest_api/docs/core/requests#listing-comments
### Response:
def request_comments(self, id, **kwargs):
"https://developer.zendesk.com/rest_api/docs/core/requests#listing-comments"
api_path = "/api/v2/requests/{id}/comments.json"
api_path = api_path.format(id=id)
return self.call(api_path, **kwargs) |
def add_uppercase(table):
"""
Extend the table with uppercase options
>>> print("а" in add_uppercase({"а": "a"}))
True
>>> print(add_uppercase({"а": "a"})["а"] == "a")
True
>>> print("А" in add_uppercase({"а": "a"}))
True
>>> print(add_uppercase({"а": "a"})["А"] == "A")
True
>>> print(len(add_uppercase({"а": "a"}).keys()))
2
>>> print("Аа" in add_uppercase({"аа": "aa"}))
True
>>> print(add_uppercase({"аа": "aa"})["Аа"] == "Aa")
True
"""
orig = table.copy()
orig.update(
dict((k.capitalize(), v.capitalize()) for k, v in table.items()))
return orig | Extend the table with uppercase options
>>> print("а" in add_uppercase({"а": "a"}))
True
>>> print(add_uppercase({"а": "a"})["а"] == "a")
True
>>> print("А" in add_uppercase({"а": "a"}))
True
>>> print(add_uppercase({"а": "a"})["А"] == "A")
True
>>> print(len(add_uppercase({"а": "a"}).keys()))
2
>>> print("Аа" in add_uppercase({"аа": "aa"}))
True
>>> print(add_uppercase({"аа": "aa"})["Аа"] == "Aa")
True | Below is the instruction that describes the task:
### Input:
Extend the table with uppercase options
>>> print("а" in add_uppercase({"а": "a"}))
True
>>> print(add_uppercase({"а": "a"})["а"] == "a")
True
>>> print("А" in add_uppercase({"а": "a"}))
True
>>> print(add_uppercase({"а": "a"})["А"] == "A")
True
>>> print(len(add_uppercase({"а": "a"}).keys()))
2
>>> print("Аа" in add_uppercase({"аа": "aa"}))
True
>>> print(add_uppercase({"аа": "aa"})["Аа"] == "Aa")
True
### Response:
def add_uppercase(table):
"""
Extend the table with uppercase options
>>> print("а" in add_uppercase({"а": "a"}))
True
>>> print(add_uppercase({"а": "a"})["а"] == "a")
True
>>> print("А" in add_uppercase({"а": "a"}))
True
>>> print(add_uppercase({"а": "a"})["А"] == "A")
True
>>> print(len(add_uppercase({"а": "a"}).keys()))
2
>>> print("Аа" in add_uppercase({"аа": "aa"}))
True
>>> print(add_uppercase({"аа": "aa"})["Аа"] == "Aa")
True
"""
orig = table.copy()
orig.update(
dict((k.capitalize(), v.capitalize()) for k, v in table.items()))
return orig |
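A quick usage sketch with a small Cyrillic transliteration table, assuming `add_uppercase` as defined above:

```python
table = {"а": "a", "б": "b", "ш": "sh"}
full = add_uppercase(table)
# Lowercase entries survive; capitalized variants are added alongside them.
assert full["А"] == "A"
assert full["Ш"] == "Sh"   # capitalize() uppercases only the first letter
assert len(full) == 6
```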
def get_numeric_value(event_tags, logger=None):
"""
A smart getter of the numeric value from the event tags.
Args:
event_tags: A dictionary of event tags.
logger: Optional logger.
Returns:
A float numeric metric value is returned when the provided numeric
metric value is in the following format:
- A string (properly formatted, e.g., no commas)
- An integer
- A float or double
    None is returned when the provided numeric metric value is in
the following format:
- None
- A boolean
- inf, -inf, nan
- A string not properly formatted (e.g., '1,234')
- Any values that cannot be cast to a float (e.g., an array or dictionary)
"""
logger_message_debug = None
numeric_metric_value = None
if event_tags is None:
logger_message_debug = 'Event tags is undefined.'
elif not isinstance(event_tags, dict):
logger_message_debug = 'Event tags is not a dictionary.'
elif NUMERIC_METRIC_TYPE not in event_tags:
logger_message_debug = 'The numeric metric key is not in event tags.'
else:
numeric_metric_value = event_tags[NUMERIC_METRIC_TYPE]
try:
if isinstance(numeric_metric_value, (numbers.Integral, float, str)):
# Attempt to convert the numeric metric value to a float
# (if it isn't already a float).
cast_numeric_metric_value = float(numeric_metric_value)
# If not a float after casting, then make everything else a None.
# Other potential values are nan, inf, and -inf.
if not isinstance(cast_numeric_metric_value, float) \
or math.isnan(cast_numeric_metric_value) \
or math.isinf(cast_numeric_metric_value):
logger_message_debug = 'Provided numeric value {} is in an invalid format.'\
.format(numeric_metric_value)
numeric_metric_value = None
else:
# Handle booleans as a special case.
# They are treated like an integer in the cast, but we do not want to cast this.
if isinstance(numeric_metric_value, bool):
logger_message_debug = 'Provided numeric value is a boolean, which is an invalid format.'
numeric_metric_value = None
else:
numeric_metric_value = cast_numeric_metric_value
else:
logger_message_debug = 'Numeric metric value is not in integer, float, or string form.'
numeric_metric_value = None
except ValueError:
logger_message_debug = 'Value error while casting numeric metric value to a float.'
numeric_metric_value = None
# Log all potential debug messages while converting the numeric value to a float.
if logger and logger_message_debug:
logger.log(enums.LogLevels.DEBUG, logger_message_debug)
# Log the final numeric metric value
if numeric_metric_value is not None:
if logger:
logger.log(enums.LogLevels.INFO,
'The numeric metric value {} will be sent to results.'
.format(numeric_metric_value))
else:
if logger:
logger.log(enums.LogLevels.WARNING,
'The provided numeric metric value {} is in an invalid format and will not be sent to results.'
.format(numeric_metric_value))
return numeric_metric_value | A smart getter of the numeric value from the event tags.
Args:
event_tags: A dictionary of event tags.
logger: Optional logger.
Returns:
A float numeric metric value is returned when the provided numeric
metric value is in the following format:
- A string (properly formatted, e.g., no commas)
- An integer
- A float or double
None is returned when the provided numeric metric value is in
the following format:
- None
- A boolean
- inf, -inf, nan
- A string not properly formatted (e.g., '1,234')
- Any values that cannot be cast to a float (e.g., an array or dictionary) | Below is the instruction that describes the task:
### Input:
A smart getter of the numeric value from the event tags.
Args:
event_tags: A dictionary of event tags.
logger: Optional logger.
Returns:
A float numeric metric value is returned when the provided numeric
metric value is in the following format:
- A string (properly formatted, e.g., no commas)
- An integer
- A float or double
None is returned when the provided numeric metric value is in
the following format:
- None
- A boolean
- inf, -inf, nan
- A string not properly formatted (e.g., '1,234')
- Any values that cannot be cast to a float (e.g., an array or dictionary)
### Response:
def get_numeric_value(event_tags, logger=None):
"""
A smart getter of the numeric value from the event tags.
Args:
event_tags: A dictionary of event tags.
logger: Optional logger.
Returns:
A float numeric metric value is returned when the provided numeric
metric value is in the following format:
- A string (properly formatted, e.g., no commas)
- An integer
- A float or double
    None is returned when the provided numeric metric value is in
the following format:
- None
- A boolean
- inf, -inf, nan
- A string not properly formatted (e.g., '1,234')
- Any values that cannot be cast to a float (e.g., an array or dictionary)
"""
logger_message_debug = None
numeric_metric_value = None
if event_tags is None:
logger_message_debug = 'Event tags is undefined.'
elif not isinstance(event_tags, dict):
logger_message_debug = 'Event tags is not a dictionary.'
elif NUMERIC_METRIC_TYPE not in event_tags:
logger_message_debug = 'The numeric metric key is not in event tags.'
else:
numeric_metric_value = event_tags[NUMERIC_METRIC_TYPE]
try:
if isinstance(numeric_metric_value, (numbers.Integral, float, str)):
# Attempt to convert the numeric metric value to a float
# (if it isn't already a float).
cast_numeric_metric_value = float(numeric_metric_value)
# If not a float after casting, then make everything else a None.
# Other potential values are nan, inf, and -inf.
if not isinstance(cast_numeric_metric_value, float) \
or math.isnan(cast_numeric_metric_value) \
or math.isinf(cast_numeric_metric_value):
logger_message_debug = 'Provided numeric value {} is in an invalid format.'\
.format(numeric_metric_value)
numeric_metric_value = None
else:
# Handle booleans as a special case.
# They are treated like an integer in the cast, but we do not want to cast this.
if isinstance(numeric_metric_value, bool):
logger_message_debug = 'Provided numeric value is a boolean, which is an invalid format.'
numeric_metric_value = None
else:
numeric_metric_value = cast_numeric_metric_value
else:
logger_message_debug = 'Numeric metric value is not in integer, float, or string form.'
numeric_metric_value = None
except ValueError:
logger_message_debug = 'Value error while casting numeric metric value to a float.'
numeric_metric_value = None
# Log all potential debug messages while converting the numeric value to a float.
if logger and logger_message_debug:
logger.log(enums.LogLevels.DEBUG, logger_message_debug)
# Log the final numeric metric value
if numeric_metric_value is not None:
if logger:
logger.log(enums.LogLevels.INFO,
'The numeric metric value {} will be sent to results.'
.format(numeric_metric_value))
else:
if logger:
logger.log(enums.LogLevels.WARNING,
'The provided numeric metric value {} is in an invalid format and will not be sent to results.'
.format(numeric_metric_value))
return numeric_metric_value |
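Two Python quirks drive the special cases above: `bool` is a subclass of `int`, so `float(True)` returns `1.0` without complaint, and `float` happily parses the strings `'nan'` and `'inf'`, which then have to be rejected explicitly. A short demonstration:

```python
import math

print(isinstance(True, int))          # True -- why booleans need their own check
print(float(True))                    # 1.0
print(float('nan'), float('inf'))     # nan inf -- valid casts that must be filtered
print(math.isnan(float('nan')))       # True
print(math.isinf(float('-inf')))      # True
try:
    float('1,234')                    # comma-formatted strings fail the cast
except ValueError as err:
    print(err)                        # could not convert string to float: '1,234'
```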
def parse(source, world, jointgroup=None, density=1000, color=None):
'''Load and parse a source file.
Parameters
----------
source : file
A file-like object that contains text information describing bodies and
joints to add to the world.
world : :class:`pagoda.physics.World`
The world to add objects and joints to.
jointgroup : ode.JointGroup, optional
If provided, add all joints from this parse to the given group. The
default behavior adds joints to the world without an explicit group.
density : float, optional
Default density for bodies. This is overridden if the source provides a
density or mass value for a given body.
color : tuple of floats, optional
Default color for bodies from this source. Defaults to None, which does
not assign a color to parsed bodies.
'''
visitor = Visitor(world, jointgroup, density, color)
visitor.parse(re.sub(r'#.*', ' ', source.read()))
return visitor | Load and parse a source file.
Parameters
----------
source : file
A file-like object that contains text information describing bodies and
joints to add to the world.
world : :class:`pagoda.physics.World`
The world to add objects and joints to.
jointgroup : ode.JointGroup, optional
If provided, add all joints from this parse to the given group. The
default behavior adds joints to the world without an explicit group.
density : float, optional
Default density for bodies. This is overridden if the source provides a
density or mass value for a given body.
color : tuple of floats, optional
Default color for bodies from this source. Defaults to None, which does
not assign a color to parsed bodies. | Below is the instruction that describes the task:
### Input:
Load and parse a source file.
Parameters
----------
source : file
A file-like object that contains text information describing bodies and
joints to add to the world.
world : :class:`pagoda.physics.World`
The world to add objects and joints to.
jointgroup : ode.JointGroup, optional
If provided, add all joints from this parse to the given group. The
default behavior adds joints to the world without an explicit group.
density : float, optional
Default density for bodies. This is overridden if the source provides a
density or mass value for a given body.
color : tuple of floats, optional
Default color for bodies from this source. Defaults to None, which does
not assign a color to parsed bodies.
### Response:
def parse(source, world, jointgroup=None, density=1000, color=None):
'''Load and parse a source file.
Parameters
----------
source : file
A file-like object that contains text information describing bodies and
joints to add to the world.
world : :class:`pagoda.physics.World`
The world to add objects and joints to.
jointgroup : ode.JointGroup, optional
If provided, add all joints from this parse to the given group. The
default behavior adds joints to the world without an explicit group.
density : float, optional
Default density for bodies. This is overridden if the source provides a
density or mass value for a given body.
color : tuple of floats, optional
Default color for bodies from this source. Defaults to None, which does
not assign a color to parsed bodies.
'''
visitor = Visitor(world, jointgroup, density, color)
visitor.parse(re.sub(r'#.*', ' ', source.read()))
return visitor |
def _get_cache_dir():
'''
Get pillar cache directory. Initialize it if it does not exist.
'''
cache_dir = os.path.join(__opts__['cachedir'], 'pillar_s3fs')
if not os.path.isdir(cache_dir):
log.debug('Initializing S3 Pillar Cache')
os.makedirs(cache_dir)
    return cache_dir | Get pillar cache directory. Initialize it if it does not exist. | Below is the instruction that describes the task:
### Input:
Get pillar cache directory. Initialize it if it does not exist.
### Response:
def _get_cache_dir():
'''
Get pillar cache directory. Initialize it if it does not exist.
'''
cache_dir = os.path.join(__opts__['cachedir'], 'pillar_s3fs')
if not os.path.isdir(cache_dir):
log.debug('Initializing S3 Pillar Cache')
os.makedirs(cache_dir)
return cache_dir |
def paintEvent(self, event):
"""Override Qt method"""
painter = QPainter(self)
painter.setRenderHint(QPainter.Antialiasing)
# Decoration
painter.fillPath(self.path_current, QBrush(self.color))
painter.strokePath(self.path_decoration, QPen(self.color_decoration,
    self.stroke_decoration)) | Override Qt method | Below is the instruction that describes the task:
### Input:
Override Qt method
### Response:
def paintEvent(self, event):
"""Override Qt method"""
painter = QPainter(self)
painter.setRenderHint(QPainter.Antialiasing)
# Decoration
painter.fillPath(self.path_current, QBrush(self.color))
painter.strokePath(self.path_decoration, QPen(self.color_decoration,
self.stroke_decoration)) |
def computeRange(corners):
""" Determine the range spanned by an array of pixel positions. """
x = corners[:, 0]
y = corners[:, 1]
_xrange = (np.minimum.reduce(x), np.maximum.reduce(x))
_yrange = (np.minimum.reduce(y), np.maximum.reduce(y))
    return _xrange, _yrange | Determine the range spanned by an array of pixel positions. | Below is the instruction that describes the task:
### Input:
Determine the range spanned by an array of pixel positions.
### Response:
def computeRange(corners):
""" Determine the range spanned by an array of pixel positions. """
x = corners[:, 0]
y = corners[:, 1]
_xrange = (np.minimum.reduce(x), np.maximum.reduce(x))
_yrange = (np.minimum.reduce(y), np.maximum.reduce(y))
return _xrange, _yrange |
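`np.minimum.reduce` and `np.maximum.reduce` over a 1-D array are the ufunc-reduction spellings of `min`/`max`; a tiny check:

```python
import numpy as np

corners = np.array([[10.0, 5.0], [2.0, 7.0], [6.0, 1.0]])  # (x, y) pixel positions
x, y = corners[:, 0], corners[:, 1]
print(np.minimum.reduce(x), np.maximum.reduce(x))  # 2.0 10.0 -- same as x.min(), x.max()
print(np.minimum.reduce(y), np.maximum.reduce(y))  # 1.0 7.0
```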
def multidict(ordered_pairs):
"""Convert duplicate keys values to lists."""
# read all values into lists
d = defaultdict(list)
for k, v in ordered_pairs:
d[k].append(v)
# unpack lists that have only 1 item
dict_copy = deepcopy(d)
for k, v in iteritems(dict_copy):
if len(v) == 1:
d[k] = v[0]
    return dict(d) | Convert duplicate keys' values to lists. | Below is the instruction that describes the task:
### Input:
Convert duplicate keys' values to lists.
### Response:
def multidict(ordered_pairs):
"""Convert duplicate keys values to lists."""
# read all values into lists
d = defaultdict(list)
for k, v in ordered_pairs:
d[k].append(v)
# unpack lists that have only 1 item
dict_copy = deepcopy(d)
for k, v in iteritems(dict_copy):
if len(v) == 1:
d[k] = v[0]
return dict(d) |
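One natural use for this hook is `json.loads(..., object_pairs_hook=multidict)`, which collects repeated JSON keys into lists instead of silently keeping only the last one. A sketch assuming `multidict` as defined above:

```python
import json

doc = '{"tag": "a", "tag": "b", "name": "x"}'
print(json.loads(doc, object_pairs_hook=multidict))
# {'tag': ['a', 'b'], 'name': 'x'}
```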