code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def lock(self, connection, session):
"""Explicitly lock the specified connection
:type connection: psycopg2.extensions.connection
:param connection: The connection to lock
:param queries.Session session: The session to hold the lock
"""
cid = id(connection)
try:
self.connection_handle(connection).lock(session)
except KeyError:
raise ConnectionNotFoundError(self.id, cid)
else:
if self.idle_start:
with self._lock:
self.idle_start = None
LOGGER.debug('Pool %s locked connection %s', self.id, cid) | Explicitly lock the specified connection
:type connection: psycopg2.extensions.connection
:param connection: The connection to lock
:param queries.Session session: The session to hold the lock | Below is the instruction that describes the task:
### Input:
Explicitly lock the specified connection
:type connection: psycopg2.extensions.connection
:param connection: The connection to lock
:param queries.Session session: The session to hold the lock
### Response:
def lock(self, connection, session):
    """Explicitly lock the specified connection for a session.

    :type connection: psycopg2.extensions.connection
    :param connection: The connection to lock
    :param queries.Session session: The session to hold the lock
    :raises ConnectionNotFoundError: if the connection is not managed
        by this pool
    """
    conn_id = id(connection)
    try:
        self.connection_handle(connection).lock(session)
    except KeyError:
        raise ConnectionNotFoundError(self.id, conn_id)
    # A locked connection means the pool is active again, so stop any
    # running idle timer.
    if self.idle_start:
        with self._lock:
            self.idle_start = None
    LOGGER.debug('Pool %s locked connection %s', self.id, conn_id)
def get_setter(cls, prop_name, # @NoSelf
user_setter=None, setter_takes_name=False,
user_getter=None, getter_takes_name=False):
"""Similar to get_getter, but for setting property
values. If user_getter is specified, that it may be used to
get the old value of the property before setting it (this
is the case in some derived classes' implementation). if
getter_takes_name is True and user_getter is not None, than
the property name is passed to the given getter to retrieve
the property value."""
if user_setter:
if setter_takes_name:
# wraps the property name
def _setter(self, val):
return user_setter(self, prop_name, val)
else: _setter = user_setter
return _setter
def _setter(self, val): # @DuplicatedSignature
setattr(self, PROP_NAME % {'prop_name' : prop_name}, val)
return
return _setter | Similar to get_getter, but for setting property
values. If user_getter is specified, that it may be used to
get the old value of the property before setting it (this
is the case in some derived classes' implementation). if
getter_takes_name is True and user_getter is not None, than
the property name is passed to the given getter to retrieve
the property value. | Below is the instruction that describes the task:
### Input:
Similar to get_getter, but for setting property
values. If user_getter is specified, that it may be used to
get the old value of the property before setting it (this
is the case in some derived classes' implementation). if
getter_takes_name is True and user_getter is not None, than
the property name is passed to the given getter to retrieve
the property value.
### Response:
def get_setter(cls, prop_name, # @NoSelf
               user_setter=None, setter_takes_name=False,
               user_getter=None, getter_takes_name=False):
    """Build and return a setter function for property *prop_name*.

    Counterpart of get_getter. When *user_setter* is given it performs the
    store; if *setter_takes_name* is true the property name is forwarded to
    it as well. The *user_getter*/*getter_takes_name* arguments mirror
    get_getter's signature: some derived implementations use the getter to
    read the old value of the property before setting the new one.
    """
    if user_setter:
        if not setter_takes_name:
            return user_setter
        # Close over the property name so the wrapper can forward it.
        def _setter(self, val):
            return user_setter(self, prop_name, val)
        return _setter
    # Default behaviour: store into the conventionally named attribute.
    def _setter(self, val):  # @DuplicatedSignature
        setattr(self, PROP_NAME % {'prop_name': prop_name}, val)
    return _setter
def setup_and_run_bwakit(job, uuid, url, rg_line, config, paired_url=None):
"""
Downloads and runs bwakit for BAM or FASTQ files
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str uuid: Unique sample identifier
:param str url: FASTQ or BAM file URL. BAM alignment URL must have .bam extension.
:param Namespace config: Input parameters and shared FileStoreIDs
Requires the following config attributes:
config.genome_fasta FilesStoreID for reference genome fasta file
config.genome_fai FilesStoreID for reference genome fasta index file
config.cores Number of cores for each job
config.trim If True, trim adapters using bwakit
config.amb FileStoreID for BWA index file prefix.amb
config.ann FileStoreID for BWA index file prefix.ann
config.bwt FileStoreID for BWA index file prefix.bwt
config.pac FileStoreID for BWA index file prefix.pac
config.sa FileStoreID for BWA index file prefix.sa
config.alt FileStoreID for alternate contigs file or None
:param str|None paired_url: URL to paired FASTQ
:param str|None rg_line: Read group line (i.e. @RG\tID:foo\tSM:bar)
:return: BAM FileStoreID
:rtype: str
"""
bwa_config = deepcopy(config)
bwa_config.uuid = uuid
bwa_config.rg_line = rg_line
# bwa_alignment uses a different naming convention
bwa_config.ref = config.genome_fasta
bwa_config.fai = config.genome_fai
# Determine if sample is a FASTQ or BAM file using the file extension
basename, ext = os.path.splitext(url)
ext = ext.lower()
if ext == '.gz':
_, ext = os.path.splitext(basename)
ext = ext.lower()
# The pipeline currently supports FASTQ and BAM files
require(ext in ['.fq', '.fastq', '.bam'],
'Please use .fq or .bam file extensions:\n%s' % url)
# Download fastq files
samples = []
input1 = job.addChildJobFn(download_url_job,
url,
name='file1',
s3_key_path=config.ssec,
disk=config.file_size)
samples.append(input1.rv())
# If the extension is for a BAM file, then configure bwakit to realign the BAM file.
if ext == '.bam':
bwa_config.bam = input1.rv()
else:
bwa_config.r1 = input1.rv()
# Download the paired FASTQ URL
if paired_url:
input2 = job.addChildJobFn(download_url_job,
paired_url,
name='file2',
s3_key_path=config.ssec,
disk=config.file_size)
samples.append(input2.rv())
bwa_config.r2 = input2.rv()
# The bwakit disk requirement depends on the size of the input files and the index
# Take the sum of the input files and scale it by a factor of 4
bwa_index_size = sum([getattr(config, index_file).size
for index_file in ['amb', 'ann', 'bwt', 'pac', 'sa', 'alt']
if getattr(config, index_file, None) is not None])
bwakit_disk = PromisedRequirement(lambda lst, index_size:
int(4 * sum(x.size for x in lst) + index_size),
samples,
bwa_index_size)
return job.addFollowOnJobFn(run_bwakit,
bwa_config,
sort=False, # BAM files are sorted later in the pipeline
trim=config.trim,
cores=config.cores,
disk=bwakit_disk).rv() | Downloads and runs bwakit for BAM or FASTQ files
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str uuid: Unique sample identifier
:param str url: FASTQ or BAM file URL. BAM alignment URL must have .bam extension.
:param Namespace config: Input parameters and shared FileStoreIDs
Requires the following config attributes:
config.genome_fasta FilesStoreID for reference genome fasta file
config.genome_fai FilesStoreID for reference genome fasta index file
config.cores Number of cores for each job
config.trim If True, trim adapters using bwakit
config.amb FileStoreID for BWA index file prefix.amb
config.ann FileStoreID for BWA index file prefix.ann
config.bwt FileStoreID for BWA index file prefix.bwt
config.pac FileStoreID for BWA index file prefix.pac
config.sa FileStoreID for BWA index file prefix.sa
config.alt FileStoreID for alternate contigs file or None
:param str|None paired_url: URL to paired FASTQ
:param str|None rg_line: Read group line (i.e. @RG\tID:foo\tSM:bar)
:return: BAM FileStoreID
:rtype: str | Below is the the instruction that describes the task:
### Input:
Downloads and runs bwakit for BAM or FASTQ files
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str uuid: Unique sample identifier
:param str url: FASTQ or BAM file URL. BAM alignment URL must have .bam extension.
:param Namespace config: Input parameters and shared FileStoreIDs
Requires the following config attributes:
config.genome_fasta FilesStoreID for reference genome fasta file
config.genome_fai FilesStoreID for reference genome fasta index file
config.cores Number of cores for each job
config.trim If True, trim adapters using bwakit
config.amb FileStoreID for BWA index file prefix.amb
config.ann FileStoreID for BWA index file prefix.ann
config.bwt FileStoreID for BWA index file prefix.bwt
config.pac FileStoreID for BWA index file prefix.pac
config.sa FileStoreID for BWA index file prefix.sa
config.alt FileStoreID for alternate contigs file or None
:param str|None paired_url: URL to paired FASTQ
:param str|None rg_line: Read group line (i.e. @RG\tID:foo\tSM:bar)
:return: BAM FileStoreID
:rtype: str
### Response:
def setup_and_run_bwakit(job, uuid, url, rg_line, config, paired_url=None):
    """
    Downloads and runs bwakit for BAM or FASTQ files

    :param JobFunctionWrappingJob job: passed automatically by Toil
    :param str uuid: Unique sample identifier
    :param str url: FASTQ or BAM file URL. BAM alignment URL must have .bam extension.
    :param str|None rg_line: Read group line (i.e. @RG\tID:foo\tSM:bar)
    :param Namespace config: Input parameters and shared FileStoreIDs
        Requires the following config attributes:
        config.genome_fasta         FileStoreID for reference genome fasta file
        config.genome_fai           FileStoreID for reference genome fasta index file
        config.cores                Number of cores for each job
        config.trim                 If True, trim adapters using bwakit
        config.amb                  FileStoreID for BWA index file prefix.amb
        config.ann                  FileStoreID for BWA index file prefix.ann
        config.bwt                  FileStoreID for BWA index file prefix.bwt
        config.pac                  FileStoreID for BWA index file prefix.pac
        config.sa                   FileStoreID for BWA index file prefix.sa
        config.alt                  FileStoreID for alternate contigs file or None
    :param str|None paired_url: URL to paired FASTQ
    :return: BAM FileStoreID
    :rtype: str
    """
    bwa_config = deepcopy(config)
    bwa_config.uuid = uuid
    bwa_config.rg_line = rg_line
    # bwa_alignment uses a different naming convention for the reference files
    bwa_config.ref = config.genome_fasta
    bwa_config.fai = config.genome_fai
    # Determine if sample is a FASTQ or BAM file using the file extension,
    # looking through a .gz suffix if present
    basename, ext = os.path.splitext(url)
    ext = ext.lower()
    if ext == '.gz':
        _, ext = os.path.splitext(basename)
        ext = ext.lower()
    # The pipeline currently supports FASTQ and BAM files.
    # Fix: the original error message omitted the accepted .fastq extension.
    require(ext in ['.fq', '.fastq', '.bam'],
            'Please use .fq, .fastq or .bam file extensions:\n%s' % url)
    # Download the primary input file
    samples = []
    input1 = job.addChildJobFn(download_url_job,
                               url,
                               name='file1',
                               s3_key_path=config.ssec,
                               disk=config.file_size)
    samples.append(input1.rv())
    # If the extension is for a BAM file, then configure bwakit to realign the BAM file.
    if ext == '.bam':
        bwa_config.bam = input1.rv()
    else:
        bwa_config.r1 = input1.rv()
        # Download the paired FASTQ URL
        if paired_url:
            input2 = job.addChildJobFn(download_url_job,
                                       paired_url,
                                       name='file2',
                                       s3_key_path=config.ssec,
                                       disk=config.file_size)
            samples.append(input2.rv())
            bwa_config.r2 = input2.rv()
    # The bwakit disk requirement depends on the size of the input files and the index
    # Take the sum of the input files and scale it by a factor of 4
    bwa_index_size = sum([getattr(config, index_file).size
                          for index_file in ['amb', 'ann', 'bwt', 'pac', 'sa', 'alt']
                          if getattr(config, index_file, None) is not None])
    bwakit_disk = PromisedRequirement(lambda lst, index_size:
                                      int(4 * sum(x.size for x in lst) + index_size),
                                      samples,
                                      bwa_index_size)
    return job.addFollowOnJobFn(run_bwakit,
                                bwa_config,
                                sort=False,  # BAM files are sorted later in the pipeline
                                trim=config.trim,
                                cores=config.cores,
                                disk=bwakit_disk).rv()
def _list_existing(filesystem, glob, paths):
"""
Get all the paths that do in fact exist. Returns a set of all existing paths.
Takes a luigi.target.FileSystem object, a str which represents a glob and
a list of strings representing paths.
"""
globs = _constrain_glob(glob, paths)
time_start = time.time()
listing = []
for g in sorted(globs):
logger.debug('Listing %s', g)
if filesystem.exists(g):
listing.extend(filesystem.listdir(g))
logger.debug('%d %s listings took %f s to return %d items',
len(globs), filesystem.__class__.__name__, time.time() - time_start, len(listing))
return set(listing) | Get all the paths that do in fact exist. Returns a set of all existing paths.
Takes a luigi.target.FileSystem object, a str which represents a glob and
a list of strings representing paths. | Below is the instruction that describes the task:
### Input:
Get all the paths that do in fact exist. Returns a set of all existing paths.
Takes a luigi.target.FileSystem object, a str which represents a glob and
a list of strings representing paths.
### Response:
def _list_existing(filesystem, glob, paths):
    """
    Return the set of paths that actually exist on *filesystem*.

    Takes a luigi.target.FileSystem object, a glob string and a list of
    path strings; the glob is first narrowed down to cover only the
    given paths.
    """
    globs = _constrain_glob(glob, paths)
    t_start = time.time()
    found = []
    for pattern in sorted(globs):
        logger.debug('Listing %s', pattern)
        if filesystem.exists(pattern):
            found.extend(filesystem.listdir(pattern))
    logger.debug('%d %s listings took %f s to return %d items',
                 len(globs), filesystem.__class__.__name__, time.time() - t_start, len(found))
    return set(found)
def get_as_list(self, tag_name):
"""
Return the value of a tag, making sure that it's a list. Absent
tags are returned as an empty-list; single tags are returned as a
one-element list.
The returned list is a copy, and modifications do not affect the
original object.
"""
val = self.get(tag_name, [])
if isinstance(val, list):
return val[:]
else:
return [val] | Return the value of a tag, making sure that it's a list. Absent
tags are returned as an empty-list; single tags are returned as a
one-element list.
The returned list is a copy, and modifications do not affect the
original object. | Below is the the instruction that describes the task:
### Input:
Return the value of a tag, making sure that it's a list. Absent
tags are returned as an empty-list; single tags are returned as a
one-element list.
The returned list is a copy, and modifications do not affect the
original object.
### Response:
def get_as_list(self, tag_name):
    """
    Fetch a tag's value coerced to a list.

    An absent tag yields an empty list and a scalar value is wrapped in a
    one-element list. The result is always a fresh list, so mutating it
    never affects the underlying object.
    """
    value = self.get(tag_name, [])
    if not isinstance(value, list):
        return [value]
    return list(value)
def circle(self, x, y, radius):
"""draw circle"""
self._add_instruction("arc", x, y, radius, 0, math.pi * 2) | draw circle | Below is the the instruction that describes the task:
### Input:
draw circle
### Response:
def circle(self, x, y, radius):
    """Record an instruction drawing a full circle centred on (x, y)."""
    full_turn = 2 * math.pi
    self._add_instruction("arc", x, y, radius, 0, full_turn)
def persist(self):
"""Trigger saving the current sensorgraph to persistent storage."""
self.persisted_nodes = self.graph.dump_nodes()
self.persisted_streamers = self.graph.dump_streamers()
self.persisted_exists = True
self.persisted_constants = self._sensor_log.dump_constants() | Trigger saving the current sensorgraph to persistent storage. | Below is the the instruction that describes the task:
### Input:
Trigger saving the current sensorgraph to persistent storage.
### Response:
def persist(self):
    """Snapshot the current sensorgraph state into persistent storage.

    Captures the graph's nodes and streamers plus the sensor log's
    constants, and records that a persisted copy now exists.
    """
    graph = self.graph
    self.persisted_nodes = graph.dump_nodes()
    self.persisted_streamers = graph.dump_streamers()
    self.persisted_exists = True
    self.persisted_constants = self._sensor_log.dump_constants()
def monitor_counters(mc, output, counters, detailed, f):
"""Monitor the counters on a specified machine, taking a snap-shot every
time the generator 'f' yields."""
# Print CSV header
output.write("time,{}{}\n".format("x,y," if detailed else "",
",".join(counters)))
system_info = mc.get_system_info()
# Make an initial sample of the counters
last_counter_values = sample_counters(mc, system_info)
start_time = time.time()
for _ in f():
# Snapshot the change in counter values
counter_values = sample_counters(mc, system_info)
delta = deltas(last_counter_values, counter_values)
last_counter_values = counter_values
now = time.time() - start_time
# Output the changes
if detailed:
for x, y in sorted(system_info):
output.write("{:0.1f},{},{},{}\n".format(
now, x, y,
",".join(str(getattr(delta[(x, y)], c))
for c in counters)))
else:
totals = [0 for _ in counters]
for xy in sorted(system_info):
for i, counter in enumerate(counters):
totals[i] += getattr(delta[xy], counter)
output.write("{:0.1f},{}\n".format(
now, ",".join(map(str, totals)))) | Monitor the counters on a specified machine, taking a snap-shot every
time the generator 'f' yields. | Below is the the instruction that describes the task:
### Input:
Monitor the counters on a specified machine, taking a snap-shot every
time the generator 'f' yields.
### Response:
def monitor_counters(mc, output, counters, detailed, f):
    """Write a CSV time-series of router-counter deltas for a machine.

    One row is emitted every time the generator *f* yields: per chip when
    *detailed* is true, otherwise summed over the whole machine.
    """
    # CSV header: time column, optional chip coordinates, then one column
    # per requested counter.
    coord_cols = "x,y," if detailed else ""
    output.write("time,{}{}\n".format(coord_cols, ",".join(counters)))
    system_info = mc.get_system_info()
    # Baseline sample against which the first delta is computed.
    last_sample = sample_counters(mc, system_info)
    start_time = time.time()
    for _ in f():
        # Snapshot the change in counter values since the previous sample.
        sample = sample_counters(mc, system_info)
        delta = deltas(last_sample, sample)
        last_sample = sample
        now = time.time() - start_time
        if detailed:
            # One row per chip, in (x, y) order.
            for x, y in sorted(system_info):
                values = ",".join(str(getattr(delta[(x, y)], c))
                                  for c in counters)
                output.write("{:0.1f},{},{},{}\n".format(now, x, y, values))
        else:
            # Sum each counter over every chip in the machine.
            totals = [0] * len(counters)
            for xy in sorted(system_info):
                for i, name in enumerate(counters):
                    totals[i] += getattr(delta[xy], name)
            output.write("{:0.1f},{}\n".format(
                now, ",".join(map(str, totals))))
def recount_view(request):
"""
Recount number_of_messages for all threads and number_of_responses for all requests.
Also set the change_date for every thread to the post_date of the latest message
associated with that thread.
"""
requests_changed = 0
for req in Request.objects.all():
recount = Response.objects.filter(request=req).count()
if req.number_of_responses != recount:
req.number_of_responses = recount
req.save()
requests_changed += 1
threads_changed = 0
for thread in Thread.objects.all():
recount = Message.objects.filter(thread=thread).count()
if thread.number_of_messages != recount:
thread.number_of_messages = recount
thread.save()
threads_changed += 1
dates_changed = 0
for thread in Thread.objects.all():
if thread.change_date != thread.message_set.latest('post_date').post_date:
thread.change_date = thread.message_set.latest('post_date').post_date
thread.save()
dates_changed += 1
messages.add_message(request, messages.SUCCESS, MESSAGES['RECOUNTED'].format(
requests_changed=requests_changed,
request_count=Request.objects.all().count(),
threads_changed=threads_changed,
thread_count=Thread.objects.all().count(),
dates_changed=dates_changed,
))
return HttpResponseRedirect(reverse('utilities')) | Recount number_of_messages for all threads and number_of_responses for all requests.
Also set the change_date for every thread to the post_date of the latest message
associated with that thread. | Below is the the instruction that describes the task:
### Input:
Recount number_of_messages for all threads and number_of_responses for all requests.
Also set the change_date for every thread to the post_date of the latest message
associated with that thread.
### Response:
def recount_view(request):
    """
    Recount number_of_messages for all threads and number_of_responses for
    all requests, and reset each thread's change_date to the post_date of
    its latest associated message.

    Only rows whose stored value is stale are saved; the counts of
    corrected rows are reported via the messages framework before
    redirecting back to the utilities page.
    """
    requests_changed = 0
    for req in Request.objects.all():
        recount = Response.objects.filter(request=req).count()
        if req.number_of_responses != recount:
            req.number_of_responses = recount
            req.save()
            requests_changed += 1
    threads_changed = 0
    for thread in Thread.objects.all():
        recount = Message.objects.filter(thread=thread).count()
        if thread.number_of_messages != recount:
            thread.number_of_messages = recount
            thread.save()
            threads_changed += 1
    dates_changed = 0
    for thread in Thread.objects.all():
        # Hoisted: the original fetched the latest message twice per thread
        # (once to compare, once to assign), doubling the DB queries.
        latest_post_date = thread.message_set.latest('post_date').post_date
        if thread.change_date != latest_post_date:
            thread.change_date = latest_post_date
            thread.save()
            dates_changed += 1
    messages.add_message(request, messages.SUCCESS, MESSAGES['RECOUNTED'].format(
        requests_changed=requests_changed,
        request_count=Request.objects.all().count(),
        threads_changed=threads_changed,
        thread_count=Thread.objects.all().count(),
        dates_changed=dates_changed,
    ))
    return HttpResponseRedirect(reverse('utilities'))
def angle(self, value):
"""gets/sets the angle"""
if self._angle != value and \
isinstance(value, (int, float, long)):
self._angle = value | gets/sets the angle | Below is the the instruction that describes the task:
### Input:
gets/sets the angle
### Response:
def angle(self, value):
    """gets/sets the angle

    Stores *value* in ``self._angle`` only when it is numeric and differs
    from the current angle; any other input is silently ignored.
    """
    # Fix: the original tested isinstance(value, (int, float, long)).
    # `long` does not exist on Python 3, so the check raised NameError for
    # any differing value; the Python 2 long type is dropped here.
    if self._angle != value and \
       isinstance(value, (int, float)):
        self._angle = value
def is_func_decorator(node: astroid.node_classes.NodeNG) -> bool:
"""return true if the name is used in function decorator"""
parent = node.parent
while parent is not None:
if isinstance(parent, astroid.Decorators):
return True
if parent.is_statement or isinstance(
parent,
(astroid.Lambda, scoped_nodes.ComprehensionScope, scoped_nodes.ListComp),
):
break
parent = parent.parent
return False | return true if the name is used in function decorator | Below is the the instruction that describes the task:
### Input:
return true if the name is used in function decorator
### Response:
def is_func_decorator(node: astroid.node_classes.NodeNG) -> bool:
    """Return True if *node*'s name is used inside a function decorator."""
    ancestor = node.parent
    while ancestor is not None:
        if isinstance(ancestor, astroid.Decorators):
            return True
        # Stop climbing at a statement or at a lambda/comprehension scope
        # boundary: nothing above that point can be the decorator itself.
        hit_boundary = ancestor.is_statement or isinstance(
            ancestor,
            (astroid.Lambda, scoped_nodes.ComprehensionScope, scoped_nodes.ListComp),
        )
        if hit_boundary:
            return False
        ancestor = ancestor.parent
    return False
def _zeros(ftype, *dims):
"""Return a new farray filled with zeros."""
shape = _dims2shape(*dims)
objs = [ftype.box(0) for _ in range(_volume(shape))]
return farray(objs, shape, ftype) | Return a new farray filled with zeros. | Below is the the instruction that describes the task:
### Input:
Return a new farray filled with zeros.
### Response:
def _zeros(ftype, *dims):
    """Return a new farray of the given dimensions with every cell a boxed zero."""
    shape = _dims2shape(*dims)
    zero_cells = [ftype.box(0) for _ in range(_volume(shape))]
    return farray(zero_cells, shape, ftype)
def argparse(argv, parser, arguments):
""" A command line argument parser.
Parses arguments coming from the argv Observable and outputs them as
Argument items in the output observable.
Parameters
-----------
argv : Observable
An Observable of strings.
parser : Observable
An Observable containing one Parser item.
arguments : Observable
An Observable containing ArgumentDef items.
Returns
-------
Observable
An Observable of Argument items.
"""
def add_arg(parser, arg_spec):
parser.add_argument(arg_spec.name, help=arg_spec.help)
return parser
parse_request = parser \
.map(lambda i: ArgumentParser(description=i.description)) \
.combine_latest(arguments, lambda parser, arg_def: add_arg(parser,arg_def)) \
.last() \
.combine_latest(argv.to_list(), lambda parser, args: (parser,args))
def subscribe(observer):
def on_next(value):
parser, args = value
try:
args = parser.parse_args(args)
for key,value in vars(args).items():
observer.on_next(Argument(key=key, value=value))
except NameError as exc:
observer.on_error("{}\n{}".format(exc, parser.format_help()))
return parse_request.subscribe(on_next, observer.on_error, observer.on_completed)
return AnonymousObservable(subscribe) | A command line argument parser.
Parses arguments coming from the argv Observable and outputs them as
Argument items in the output observable.
Parameters
-----------
argv : Observable
An Observable of strings.
parser : Observable
An Observable containing one Parser item.
arguments : Observable
An Observable containing ArgumentDef items.
Returns
-------
Observable
An Observable of Argument items. | Below is the the instruction that describes the task:
### Input:
A command line argument parser.
Parses arguments coming from the argv Observable and outputs them as
Argument items in the output observable.
Parameters
-----------
argv : Observable
An Observable of strings.
parser : Observable
An Observable containing one Parser item.
arguments : Observable
An Observable containing ArgumentDef items.
Returns
-------
Observable
An Observable of Argument items.
### Response:
def argparse(argv, parser, arguments):
    """ A command line argument parser.
    Parses arguments coming from the argv Observable and outputs them as
    Argument items in the output observable.
    Parameters
    -----------
    argv : Observable
        An Observable of strings (the raw command line tokens).
    parser : Observable
        An Observable containing one Parser item.
    arguments : Observable
        An Observable containing ArgumentDef items.
    Returns
    -------
    Observable
        An Observable of Argument items, one per parsed key/value pair.
    """
    def add_arg(parser, arg_spec):
        # Mutates the ArgumentParser in place; returned so it can thread
        # through combine_latest as the accumulated value.
        parser.add_argument(arg_spec.name, help=arg_spec.help)
        return parser
    # Build one fully configured parser: fold every ArgumentDef into the
    # ArgumentParser, take the final state with last(), then pair it with
    # the complete argv token list.
    parse_request = parser \
        .map(lambda i: ArgumentParser(description=i.description)) \
        .combine_latest(arguments, lambda parser, arg_def: add_arg(parser,arg_def)) \
        .last() \
        .combine_latest(argv.to_list(), lambda parser, args: (parser,args))
    def subscribe(observer):
        def on_next(value):
            parser, args = value
            try:
                args = parser.parse_args(args)
                # Emit one Argument item per parsed attribute.
                for key,value in vars(args).items():
                    observer.on_next(Argument(key=key, value=value))
            except NameError as exc:
                # NOTE(review): stdlib argparse raises SystemExit on bad
                # input; NameError here presumably comes from a customized
                # Parser error handler — confirm against the Parser class.
                observer.on_error("{}\n{}".format(exc, parser.format_help()))
        return parse_request.subscribe(on_next, observer.on_error, observer.on_completed)
    return AnonymousObservable(subscribe) |
def keep_only_positive_boxes(boxes):
"""
Given a set of BoxList containing the `labels` field,
return a set of BoxList for which `labels > 0`.
Arguments:
boxes (list of BoxList)
"""
assert isinstance(boxes, (list, tuple))
assert isinstance(boxes[0], BoxList)
assert boxes[0].has_field("labels")
positive_boxes = []
positive_inds = []
num_boxes = 0
for boxes_per_image in boxes:
labels = boxes_per_image.get_field("labels")
inds_mask = labels > 0
inds = inds_mask.nonzero().squeeze(1)
positive_boxes.append(boxes_per_image[inds])
positive_inds.append(inds_mask)
return positive_boxes, positive_inds | Given a set of BoxList containing the `labels` field,
return a set of BoxList for which `labels > 0`.
Arguments:
boxes (list of BoxList) | Below is the the instruction that describes the task:
### Input:
Given a set of BoxList containing the `labels` field,
return a set of BoxList for which `labels > 0`.
Arguments:
boxes (list of BoxList)
### Response:
def keep_only_positive_boxes(boxes):
    """
    Given a set of BoxList containing the `labels` field,
    return a set of BoxList for which `labels > 0`.

    Arguments:
        boxes (list of BoxList)

    Returns:
        tuple: (positive_boxes, positive_inds) — one filtered BoxList per
        image plus the matching boolean mask over the original boxes.
    """
    assert isinstance(boxes, (list, tuple))
    assert isinstance(boxes[0], BoxList)
    assert boxes[0].has_field("labels")
    positive_boxes = []
    positive_inds = []
    # (Removed the original's unused `num_boxes = 0` accumulator.)
    for boxes_per_image in boxes:
        labels = boxes_per_image.get_field("labels")
        inds_mask = labels > 0
        inds = inds_mask.nonzero().squeeze(1)
        positive_boxes.append(boxes_per_image[inds])
        positive_inds.append(inds_mask)
    return positive_boxes, positive_inds
def __Delete(self, path, request, headers):
"""Azure Cosmos 'DELETE' http request.
:params str url:
:params str path:
:params dict headers:
:return:
Tuple of (result, headers).
:rtype:
tuple of (dict, dict)
"""
return synchronized_request.SynchronizedRequest(self,
request,
self._global_endpoint_manager,
self.connection_policy,
self._requests_session,
'DELETE',
path,
request_data=None,
query_params=None,
headers=headers) | Azure Cosmos 'DELETE' http request.
:params str url:
:params str path:
:params dict headers:
:return:
Tuple of (result, headers).
:rtype:
tuple of (dict, dict) | Below is the the instruction that describes the task:
### Input:
Azure Cosmos 'DELETE' http request.
:params str url:
:params str path:
:params dict headers:
:return:
Tuple of (result, headers).
:rtype:
tuple of (dict, dict)
### Response:
def __Delete(self, path, request, headers):
    """Azure Cosmos 'DELETE' http request.
    :params str path:
        Resource path for the DELETE call.
    :params request:
        The request object forwarded to the synchronized request helper.
    :params dict headers:
        HTTP headers to send with the request.
    :return:
        Tuple of (result, headers).
    :rtype:
        tuple of (dict, dict)
    """
    # Delegate to the shared request machinery; DELETE carries no body and
    # no query parameters.
    return synchronized_request.SynchronizedRequest(self,
                                                    request,
                                                    self._global_endpoint_manager,
                                                    self.connection_policy,
                                                    self._requests_session,
                                                    'DELETE',
                                                    path,
                                                    request_data=None,
                                                    query_params=None,
                                                    headers=headers) |
def _set_table(self, schema, **kwargs):
"""
http://sqlite.org/lang_createtable.html
"""
query_str = []
query_str.append("CREATE TABLE {} (".format(self._normalize_table_name(schema)))
query_fields = []
for field_name, field in schema.fields.items():
query_fields.append(' {}'.format(self.get_field_SQL(field_name, field)))
query_str.append(",{}".format(os.linesep).join(query_fields))
query_str.append(')')
query_str = os.linesep.join(query_str)
ret = self._query(query_str, ignore_result=True, **kwargs) | http://sqlite.org/lang_createtable.html | Below is the the instruction that describes the task:
### Input:
http://sqlite.org/lang_createtable.html
### Response:
def _set_table(self, schema, **kwargs):
    """
    Create the table described by *schema*.

    http://sqlite.org/lang_createtable.html

    :param schema: schema object whose ``fields`` mapping defines the columns
    :param **kwargs: passed through to the underlying query call
    """
    lines = []
    lines.append("CREATE TABLE {} (".format(self._normalize_table_name(schema)))
    column_defs = []
    for field_name, field in schema.fields.items():
        column_defs.append(' {}'.format(self.get_field_SQL(field_name, field)))
    lines.append(",{}".format(os.linesep).join(column_defs))
    lines.append(')')
    sql = os.linesep.join(lines)
    # DDL statement: no rows come back, so the result is ignored.
    # (Removed the original's unused `ret =` assignment.)
    self._query(sql, ignore_result=True, **kwargs)
def squareAspect(rect):
"""
Crops either the width or height, as necessary, to make a rectangle into a square.
The input rectangle and return value are both a tuple of (x,y,w,h).
"""
# Determine which dimension needs to be cropped
x,y,w,h = rect
if w > h:
cropX = (w - h) // 2
return cropRect(rect, 0, 0, cropX, cropX)
elif w < h:
cropY = (h - w) // 2
return cropRect(rect, cropY, cropY, 0, 0)
# Already a square
return rect | Crops either the width or height, as necessary, to make a rectangle into a square.
The input rectangle and return value are both a tuple of (x,y,w,h). | Below is the the instruction that describes the task:
### Input:
Crops either the width or height, as necessary, to make a rectangle into a square.
The input rectangle and return value are both a tuple of (x,y,w,h).
### Response:
def squareAspect(rect):
    """
    Crop either the width or the height, as necessary, to turn a rectangle
    into a square.

    The input rectangle and the return value are both (x, y, w, h) tuples.
    """
    # Only the width and height decide which side to crop; the origin is
    # handled by cropRect. (The original bound x and y but never used them.)
    _, _, w, h = rect
    if w > h:
        # Too wide: shave half the excess off each horizontal side.
        cropX = (w - h) // 2
        return cropRect(rect, 0, 0, cropX, cropX)
    if w < h:
        # Too tall: shave half the excess off top and bottom.
        cropY = (h - w) // 2
        return cropRect(rect, cropY, cropY, 0, 0)
    # Already a square
    return rect
def delete_datapoint(self, datapoint):
"""Delete the provided datapoint from this stream
:raises devicecloud.DeviceCloudHttpException: in the case of an unexpected http error
"""
datapoint = validate_type(datapoint, DataPoint)
self._conn.delete("/ws/DataPoint/{stream_id}/{datapoint_id}".format(
stream_id=self.get_stream_id(),
datapoint_id=datapoint.get_id(),
)) | Delete the provided datapoint from this stream
:raises devicecloud.DeviceCloudHttpException: in the case of an unexpected http error | Below is the the instruction that describes the task:
### Input:
Delete the provided datapoint from this stream
:raises devicecloud.DeviceCloudHttpException: in the case of an unexpected http error
### Response:
def delete_datapoint(self, datapoint):
        """Delete the provided datapoint from this stream

        :param datapoint: the datapoint to remove; validated to be a
            DataPoint before the request is issued
        :raises devicecloud.DeviceCloudHttpException: in the case of an unexpected http error
        """
        # Reject anything that is not a DataPoint up front.
        datapoint = validate_type(datapoint, DataPoint)
        # Issue an HTTP DELETE against this stream's datapoint resource.
        self._conn.delete("/ws/DataPoint/{stream_id}/{datapoint_id}".format(
            stream_id=self.get_stream_id(),
            datapoint_id=datapoint.get_id(),
        ))
def setLog(self, fileName, writeName=False):
"""
Opens a log file with name fileName.
"""
self.log = 1
self.logFile = fileName
self._logPtr = open(fileName, "w")
if writeName:
self._namePtr = open(fileName + ".name", "w") | Opens a log file with name fileName. | Below is the the instruction that describes the task:
### Input:
Opens a log file with name fileName.
### Response:
def setLog(self, fileName, writeName=False):
        """
        Opens a log file with name fileName.

        :param fileName: path of the log file to open (truncated if it exists)
        :param writeName: if True, also open a parallel "<fileName>.name" file
        """
        # Flag logging as active and remember the target path.
        self.log = 1
        self.logFile = fileName
        # NOTE(review): handles are kept open for the logger's lifetime and
        # are not closed here -- confirm a matching close exists elsewhere.
        self._logPtr = open(fileName, "w")
        if writeName:
            self._namePtr = open(fileName + ".name", "w")
def chunk(seq: ActualIterable[T]) -> ActualIterable[ActualIterable[T]]:
"""
>>> from Redy.Collections import Traversal, Flow
>>> x = [1, 1, 2]
>>> assert Flow(x)[Traversal.chunk][list].unbox == [[1, 1], [2]]
>>> assert Flow([])[Traversal.chunk][list].unbox == []
"""
seq = iter(seq)
try:
head = next(seq)
except StopIteration:
return iter(seq)
current_status = head
group = [head]
for each in seq:
status = each
if status != current_status:
yield group
group = [each]
else:
group.append(each)
current_status = status
if group:
yield group | >>> from Redy.Collections import Traversal, Flow
>>> x = [1, 1, 2]
>>> assert Flow(x)[Traversal.chunk][list].unbox == [[1, 1], [2]]
>>> assert Flow([])[Traversal.chunk][list].unbox == [] | Below is the the instruction that describes the task:
### Input:
>>> from Redy.Collections import Traversal, Flow
>>> x = [1, 1, 2]
>>> assert Flow(x)[Traversal.chunk][list].unbox == [[1, 1], [2]]
>>> assert Flow([])[Traversal.chunk][list].unbox == []
### Response:
def chunk(seq: ActualIterable[T]) -> ActualIterable[ActualIterable[T]]:
    """
    Group consecutive equal elements of ``seq`` into lists.

    >>> from Redy.Collections import Traversal, Flow
    >>> x = [1, 1, 2]
    >>> assert Flow(x)[Traversal.chunk][list].unbox == [[1, 1], [2]]
    >>> assert Flow([])[Traversal.chunk][list].unbox == []
    """
    seq = iter(seq)
    try:
        head = next(seq)
    except StopIteration:
        # Empty input: yield nothing. A bare `return` is the correct way to
        # end a generator -- the original returned `iter(seq)`, a value that
        # generators silently discard.
        return
    current_status = head
    group = [head]
    for each in seq:
        if each != current_status:
            # Run of equal elements ended: emit it and start a new one.
            yield group
            group = [each]
        else:
            group.append(each)
        current_status = each
    # `group` always holds at least one element at this point.
    yield group
def _load(self, key):
'''
Load a single item if you have it
'''
# if the key doesn't have a '.' then it isn't valid for this mod dict
if not isinstance(key, six.string_types):
raise KeyError('The key must be a string.')
if '.' not in key:
raise KeyError('The key \'{0}\' should contain a \'.\''.format(key))
mod_name, _ = key.split('.', 1)
with self._lock:
# It is possible that the key is in the dictionary after
# acquiring the lock due to another thread loading it.
if mod_name in self.missing_modules or key in self._dict:
return True
# if the modulename isn't in the whitelist, don't bother
if self.whitelist and mod_name not in self.whitelist:
log.error(
'Failed to load function %s because its module (%s) is '
'not in the whitelist: %s', key, mod_name, self.whitelist
)
raise KeyError(key)
def _inner_load(mod_name):
for name in self._iter_files(mod_name):
if name in self.loaded_files:
continue
# if we got what we wanted, we are done
if self._load_module(name) and key in self._dict:
return True
return False
# try to load the module
ret = None
reloaded = False
# re-scan up to once, IOErrors or a failed load cause re-scans of the
# filesystem
while True:
try:
ret = _inner_load(mod_name)
if not reloaded and ret is not True:
self._refresh_file_mapping()
reloaded = True
continue
break
except IOError:
if not reloaded:
self._refresh_file_mapping()
reloaded = True
continue
return ret | Load a single item if you have it | Below is the the instruction that describes the task:
### Input:
Load a single item if you have it
### Response:
def _load(self, key):
        '''
        Load a single item if you have it

        ``key`` must be a string of the form ``"<module>.<function>"``.
        Returns True when the key is already loaded (or its module is known
        to be missing); otherwise attempts the load and returns the result
        from the inner loader. Raises KeyError for malformed keys or
        whitelist violations.
        '''
        # if the key doesn't have a '.' then it isn't valid for this mod dict
        if not isinstance(key, six.string_types):
            raise KeyError('The key must be a string.')
        if '.' not in key:
            raise KeyError('The key \'{0}\' should contain a \'.\''.format(key))
        mod_name, _ = key.split('.', 1)
        with self._lock:
            # It is possible that the key is in the dictionary after
            # acquiring the lock due to another thread loading it.
            if mod_name in self.missing_modules or key in self._dict:
                return True
        # if the modulename isn't in the whitelist, don't bother
        if self.whitelist and mod_name not in self.whitelist:
            log.error(
                'Failed to load function %s because its module (%s) is '
                'not in the whitelist: %s', key, mod_name, self.whitelist
            )
            raise KeyError(key)
        def _inner_load(mod_name):
            # Try each candidate file for the module until one provides `key`.
            for name in self._iter_files(mod_name):
                if name in self.loaded_files:
                    continue
                # if we got what we wanted, we are done
                if self._load_module(name) and key in self._dict:
                    return True
            return False
        # try to load the module
        ret = None
        reloaded = False
        # re-scan up to once, IOErrors or a failed load cause re-scans of the
        # filesystem
        while True:
            try:
                ret = _inner_load(mod_name)
                if not reloaded and ret is not True:
                    self._refresh_file_mapping()
                    reloaded = True
                    continue
                break
            except IOError:
                # NOTE(review): if an IOError recurs after the one allowed
                # re-scan, this loop retries forever -- confirm intended.
                if not reloaded:
                    self._refresh_file_mapping()
                    reloaded = True
                    continue
        return ret
def main():
"""
NAME
sundec.py
DESCRIPTION
calculates calculates declination from sun compass measurements
INPUT FORMAT
GMT_offset, lat,long,year,month,day,hours,minutes,shadow_angle
where GMT_offset is the hours to subtract from local time for GMT.
SYNTAX
sundec.py [-i][-f FILE] [< filename ]
OPTIONS
-i for interactive data entry
-f FILE to set file name on command line
otherwise put data in input format in space delimited file
OUTPUT:
declination
"""
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-f' in sys.argv:
ind=sys.argv.index('-f')
file=sys.argv[ind+1]
f=open(file,'r')
data=f.readlines() # read in data from standard input
for line in data: # step through line by line
dec=spitout(line)
sys.exit()
if '-i' in sys.argv:
while 1: # repeat this block until program killed
sundata={} # dictionary with sundata in it
print ("Time difference between Greenwich Mean Time (hrs to subtract from local time to get GMT): ")
try:
sundata["delta_u"]=input("<cntl-D> to quit ")
except:
print("\n Good-bye\n")
sys.exit()
date=""
date=date+input("Year: <cntl-D to quit> ")
date=date+":"+input("Month: ")
date=date+":"+input("Day: ")
date=date+":"+input("hour: ")
date=date+":"+input("minute: ")
sundata["date"]=date
sundata["lat"]=input("Latitude of sampling site (negative in southern hemisphere): ")
sundata["lon"]=input("Longitude of sampling site (negative for western hemisphere): ")
sundata["shadow_angle"]=input("Shadow angle: ")
print('%7.1f'%(pmag.dosundec(sundata))) # call sundec function from pmag module and print
else:
data=sys.stdin.readlines() # read in data from standard input
for line in data: # step through line by line
dec=spitout(line) | NAME
sundec.py
DESCRIPTION
calculates calculates declination from sun compass measurements
INPUT FORMAT
GMT_offset, lat,long,year,month,day,hours,minutes,shadow_angle
where GMT_offset is the hours to subtract from local time for GMT.
SYNTAX
sundec.py [-i][-f FILE] [< filename ]
OPTIONS
-i for interactive data entry
-f FILE to set file name on command line
otherwise put data in input format in space delimited file
OUTPUT:
declination | Below is the the instruction that describes the task:
### Input:
NAME
sundec.py
DESCRIPTION
calculates calculates declination from sun compass measurements
INPUT FORMAT
GMT_offset, lat,long,year,month,day,hours,minutes,shadow_angle
where GMT_offset is the hours to subtract from local time for GMT.
SYNTAX
sundec.py [-i][-f FILE] [< filename ]
OPTIONS
-i for interactive data entry
-f FILE to set file name on command line
otherwise put data in input format in space delimited file
OUTPUT:
declination
### Response:
def main():
    """
    NAME
        sundec.py
    DESCRIPTION
        calculates declination from sun compass measurements
    INPUT FORMAT
        GMT_offset, lat,long,year,month,day,hours,minutes,shadow_angle
        where GMT_offset is the hours to subtract from local time for GMT.
    SYNTAX
        sundec.py [-i][-f FILE] [< filename ]
    OPTIONS
        -i for interactive data entry
        -f FILE to set file name on command line
        otherwise put data in input format in space delimited file
    OUTPUT:
        declination
    """
    if '-h' in sys.argv:
        print(main.__doc__)
        sys.exit()
    if '-f' in sys.argv:
        ind=sys.argv.index('-f')
        file=sys.argv[ind+1]
        f=open(file,'r')
        data=f.readlines() # read in data from the named file
        for line in data: # step through line by line
            dec=spitout(line)
        sys.exit()
    if '-i' in sys.argv:
        while 1: # repeat this block until program killed
            sundata={} # dictionary with sundata in it
            print ("Time difference between Greenwich Mean Time (hrs to subtract from local time to get GMT): ")
            try:
                sundata["delta_u"]=input("<cntl-D> to quit ")
            except:
                # bare except: treats EOF (Ctrl-D) as "quit"; it also masks
                # other errors here.
                print("\n Good-bye\n")
                sys.exit()
            date=""
            date=date+input("Year: <cntl-D to quit> ")
            date=date+":"+input("Month: ")
            date=date+":"+input("Day: ")
            date=date+":"+input("hour: ")
            date=date+":"+input("minute: ")
            sundata["date"]=date
            sundata["lat"]=input("Latitude of sampling site (negative in southern hemisphere): ")
            sundata["lon"]=input("Longitude of sampling site (negative for western hemisphere): ")
            sundata["shadow_angle"]=input("Shadow angle: ")
            print('%7.1f'%(pmag.dosundec(sundata))) # call sundec function from pmag module and print
    else:
        data=sys.stdin.readlines() # read in data from standard input
        for line in data: # step through line by line
            dec=spitout(line)
def byte_list_to_u16le_list(byteData):
"""! @brief Convert a byte array into a halfword array"""
data = []
for i in range(0, len(byteData), 2):
data.append(byteData[i] | (byteData[i + 1] << 8))
return data | ! @brief Convert a byte array into a halfword array | Below is the the instruction that describes the task:
### Input:
! @brief Convert a byte array into a halfword array
### Response:
def byte_list_to_u16le_list(byteData):
    """! @brief Convert a byte array into a halfword array"""
    # Pair consecutive bytes little-endian: byteData[i] is the low byte.
    return [byteData[i] | (byteData[i + 1] << 8)
            for i in range(0, len(byteData), 2)]
def add_model(self, model):
"""
Add a model to the document
"""
if model.identity not in self._models.keys():
self._models[model.identity] = model
else:
raise ValueError("{} has already been defined".format(model.identity)) | Add a model to the document | Below is the the instruction that describes the task:
### Input:
Add a model to the document
### Response:
def add_model(self, model):
        """
        Add a model to the document

        Raises ValueError if a model with the same identity is already
        registered.
        """
        if model.identity in self._models:
            raise ValueError("{} has already been defined".format(model.identity))
        self._models[model.identity] = model
def _do_history(self, cmd, args):
"""\
Display history.
history Display history.
history clear Clear history.
history clearall Clear history for all shells.
"""
if args and args[0] == 'clear':
readline.clear_history()
readline.write_history_file(self.history_fname)
elif args and args[0] == 'clearall':
readline.clear_history()
shutil.rmtree(self._temp_dir, ignore_errors = True)
os.makedirs(os.path.join(self._temp_dir, 'history'))
else:
readline.write_history_file(self.history_fname)
with open(self.history_fname, 'r', encoding = 'utf8') as f:
self.stdout.write(f.read()) | \
Display history.
history Display history.
history clear Clear history.
history clearall Clear history for all shells. | Below is the the instruction that describes the task:
### Input:
\
Display history.
history Display history.
history clear Clear history.
history clearall Clear history for all shells.
### Response:
def _do_history(self, cmd, args):
        """\
        Display history.
        history Display history.
        history clear Clear history.
        history clearall Clear history for all shells.
        """
        if args and args[0] == 'clear':
            # Drop this shell's in-memory history and persist the empty state.
            readline.clear_history()
            readline.write_history_file(self.history_fname)
        elif args and args[0] == 'clearall':
            # Delete the entire temp dir, then recreate its history/ subdir.
            readline.clear_history()
            shutil.rmtree(self._temp_dir, ignore_errors = True)
            os.makedirs(os.path.join(self._temp_dir, 'history'))
        else:
            # Flush the in-memory history to disk, then echo the file.
            readline.write_history_file(self.history_fname)
            with open(self.history_fname, 'r', encoding = 'utf8') as f:
                self.stdout.write(f.read())
def export_to_json(
table, filename_or_fobj=None, encoding="utf-8", indent=None, *args, **kwargs
):
"""Export a `rows.Table` to a JSON file or file-like object.
If a file-like object is provided it MUST be open in binary mode (like in
`open('myfile.json', mode='wb')`).
"""
# TODO: will work only if table.fields is OrderedDict
fields = table.fields
prepared_table = prepare_to_export(table, *args, **kwargs)
field_names = next(prepared_table)
data = [
{
field_name: _convert(value, fields[field_name], *args, **kwargs)
for field_name, value in zip(field_names, row)
}
for row in prepared_table
]
result = json.dumps(data, indent=indent)
if type(result) is six.text_type: # Python 3
result = result.encode(encoding)
if indent is not None:
# clean up empty spaces at the end of lines
result = b"\n".join(line.rstrip() for line in result.splitlines())
return export_data(filename_or_fobj, result, mode="wb") | Export a `rows.Table` to a JSON file or file-like object.
If a file-like object is provided it MUST be open in binary mode (like in
`open('myfile.json', mode='wb')`). | Below is the the instruction that describes the task:
### Input:
Export a `rows.Table` to a JSON file or file-like object.
If a file-like object is provided it MUST be open in binary mode (like in
`open('myfile.json', mode='wb')`).
### Response:
def export_to_json(
    table, filename_or_fobj=None, encoding="utf-8", indent=None, *args, **kwargs
):
    """Export a `rows.Table` to a JSON file or file-like object.
    If a file-like object is provided it MUST be open in binary mode (like in
    `open('myfile.json', mode='wb')`).
    """
    # TODO: will work only if table.fields is OrderedDict
    fields = table.fields
    prepared_table = prepare_to_export(table, *args, **kwargs)
    # The first item yielded by the generator is the header (field names).
    field_names = next(prepared_table)
    data = [
        {
            field_name: _convert(value, fields[field_name], *args, **kwargs)
            for field_name, value in zip(field_names, row)
        }
        for row in prepared_table
    ]
    result = json.dumps(data, indent=indent)
    if type(result) is six.text_type: # Python 3
        # json.dumps returned text; the binary writer below expects bytes.
        result = result.encode(encoding)
    if indent is not None:
        # clean up empty spaces at the end of lines
        result = b"\n".join(line.rstrip() for line in result.splitlines())
    return export_data(filename_or_fobj, result, mode="wb")
def delete(self):
""" Easy delete for db models """
try:
if self.exists() is False:
return None
self.db.session.delete(self)
self.db.session.commit()
except (Exception, BaseException) as error:
# fail silently
return None | Easy delete for db models | Below is the the instruction that describes the task:
### Input:
Easy delete for db models
### Response:
def delete(self):
        """ Easy delete for db models

        Returns None in every case; errors are swallowed on purpose so a
        failed delete never propagates to the caller.
        """
        try:
            if self.exists() is False:
                return None
            self.db.session.delete(self)
            self.db.session.commit()
        except (Exception, BaseException) as error:
            # fail silently
            # NOTE(review): BaseException already covers Exception, and this
            # also swallows KeyboardInterrupt/SystemExit -- confirm intended.
            return None
def _view_changed(self, event=None):
"""Linked view transform has changed; update ticks.
"""
tr = self.node_transform(self._linked_view.scene)
p1, p2 = tr.map(self._axis_ends())
if self.orientation in ('left', 'right'):
self.axis.domain = (p1[1], p2[1])
else:
self.axis.domain = (p1[0], p2[0]) | Linked view transform has changed; update ticks. | Below is the the instruction that describes the task:
### Input:
Linked view transform has changed; update ticks.
### Response:
def _view_changed(self, event=None):
        """Linked view transform has changed; update ticks.

        :param event: change notification payload (unused here).
        """
        # Map the axis endpoints into the linked view's scene coordinates.
        tr = self.node_transform(self._linked_view.scene)
        p1, p2 = tr.map(self._axis_ends())
        # Vertical axes take their domain from y; horizontal axes from x.
        if self.orientation in ('left', 'right'):
            self.axis.domain = (p1[1], p2[1])
        else:
            self.axis.domain = (p1[0], p2[0])
def get_default_config(self):
"""
Return the default config for the handler
"""
config = super(GraphiteHandler, self).get_default_config()
config.update({
'host': 'localhost',
'port': 2003,
'proto': 'tcp',
'timeout': 15,
'batch': 1,
'max_backlog_multiplier': 5,
'trim_backlog_multiplier': 4,
'keepalive': 0,
'keepaliveinterval': 10,
'flow_info': 0,
'scope_id': 0,
'reconnect_interval': 0,
})
return config | Return the default config for the handler | Below is the the instruction that describes the task:
### Input:
Return the default config for the handler
### Response:
def get_default_config(self):
        """
        Return the default config for the handler

        Extends the parent handler's defaults with Graphite connection
        settings (host/port/protocol/timeout), batching, backlog limits,
        TCP keepalive tuning and IPv6 fields (flow_info/scope_id).
        """
        config = super(GraphiteHandler, self).get_default_config()
        config.update({
            'host': 'localhost',
            'port': 2003,
            'proto': 'tcp',
            'timeout': 15,
            'batch': 1,
            'max_backlog_multiplier': 5,
            'trim_backlog_multiplier': 4,
            'keepalive': 0,
            'keepaliveinterval': 10,
            'flow_info': 0,
            'scope_id': 0,
            'reconnect_interval': 0,
        })
        return config
def reset_all(self):
"""Reset status for all servers in the list"""
self._LOG.debug("Marking all CMs as Good.")
for key in self.list:
self.mark_good(key) | Reset status for all servers in the list | Below is the the instruction that describes the task:
### Input:
Reset status for all servers in the list
### Response:
def reset_all(self):
        """Reset status for all servers in the list"""
        self._LOG.debug("Marking all CMs as Good.")
        # Give every server a clean record again.
        for server in self.list:
            self.mark_good(server)
def imag(self, newimag):
"""Setter for the imaginary part.
This method is invoked by ``x.imag = other``.
Parameters
----------
newimag : array-like or scalar
Values to be assigned to the imaginary part of this element.
"""
try:
iter(newimag)
except TypeError:
# `newimag` is not iterable, assume it can be assigned to
# all indexed parts
for part in self.parts:
part.imag = newimag
return
if self.space.is_power_space:
try:
# Set same value in all parts
for part in self.parts:
part.imag = newimag
except (ValueError, TypeError):
# Iterate over all parts and set them separately
for part, new_im in zip(self.parts, newimag):
part.imag = new_im
pass
elif len(newimag) == len(self):
for part, new_im in zip(self.parts, newimag):
part.imag = new_im
else:
raise ValueError(
'dimensions of the new imaginary part does not match the '
'space, got element {} to set real part of {}}'
''.format(newimag, self)) | Setter for the imaginary part.
This method is invoked by ``x.imag = other``.
Parameters
----------
newimag : array-like or scalar
Values to be assigned to the imaginary part of this element. | Below is the the instruction that describes the task:
### Input:
Setter for the imaginary part.
This method is invoked by ``x.imag = other``.
Parameters
----------
newimag : array-like or scalar
Values to be assigned to the imaginary part of this element.
### Response:
def imag(self, newimag):
        """Setter for the imaginary part.

        This method is invoked by ``x.imag = other``.

        Parameters
        ----------
        newimag : array-like or scalar
            Values to be assigned to the imaginary part of this element.

        Raises
        ------
        ValueError
            If ``newimag`` is a sequence whose length matches neither the
            power-space parts nor ``len(self)``.
        """
        try:
            iter(newimag)
        except TypeError:
            # `newimag` is not iterable, assume it can be assigned to
            # all indexed parts
            for part in self.parts:
                part.imag = newimag
            return

        if self.space.is_power_space:
            try:
                # Set same value in all parts
                for part in self.parts:
                    part.imag = newimag
            except (ValueError, TypeError):
                # Iterate over all parts and set them separately
                for part, new_im in zip(self.parts, newimag):
                    part.imag = new_im
        elif len(newimag) == len(self):
            for part, new_im in zip(self.parts, newimag):
                part.imag = new_im
        else:
            # Fixed: the original message said "real part" and contained an
            # unbalanced '}' that made str.format itself raise ValueError.
            raise ValueError(
                'dimensions of the new imaginary part does not match the '
                'space, got element {} to set imaginary part of {}'
                ''.format(newimag, self))
def write_molpro(basis):
'''Converts a basis set to Molpro format
'''
# Uncontract all, and make as generally-contracted as possible
basis = manip.uncontract_spdf(basis, 0, True)
basis = manip.make_general(basis, False)
basis = sort.sort_basis(basis, True)
s = ''
# Elements for which we have electron basis
electron_elements = [k for k, v in basis['elements'].items() if 'electron_shells' in v]
# Elements for which we have ECP
ecp_elements = [k for k, v in basis['elements'].items() if 'ecp_potentials' in v]
if len(electron_elements) > 0:
# basis set starts with a string
s += 'basis={\n'
# Electron Basis
for z in electron_elements:
data = basis['elements'][z]
sym = lut.element_sym_from_Z(z).upper()
s += '!\n'
s += '! {:20} {}\n'.format(lut.element_name_from_Z(z), misc.contraction_string(data))
for shell in data['electron_shells']:
exponents = shell['exponents']
coefficients = shell['coefficients']
am = shell['angular_momentum']
amchar = lut.amint_to_char(am).lower()
s += '{}, {} , {}\n'.format(amchar, sym, ', '.join(exponents))
for c in coefficients:
first, last = find_range(c)
s += 'c, {}.{}, {}\n'.format(first + 1, last + 1, ', '.join(c[first:last + 1]))
s += '}\n'
# Write out ECP
if len(ecp_elements) > 0:
s += '\n\n! Effective core Potentials\n'
for z in ecp_elements:
data = basis['elements'][z]
sym = lut.element_sym_from_Z(z).lower()
max_ecp_am = max([x['angular_momentum'][0] for x in data['ecp_potentials']])
# Sort lowest->highest, then put the highest at the beginning
ecp_list = sorted(data['ecp_potentials'], key=lambda x: x['angular_momentum'])
ecp_list.insert(0, ecp_list.pop())
s += 'ECP, {}, {}, {} ;\n'.format(sym, data['ecp_electrons'], max_ecp_am)
for pot in ecp_list:
rexponents = pot['r_exponents']
gexponents = pot['gaussian_exponents']
coefficients = pot['coefficients']
am = pot['angular_momentum']
amchar = lut.amint_to_char(am).lower()
s += '{};'.format(len(rexponents))
if am[0] == max_ecp_am:
s += ' ! ul potential\n'
else:
s += ' ! {}-ul potential\n'.format(amchar)
for p in range(len(rexponents)):
s += '{},{},{};\n'.format(rexponents[p], gexponents[p], coefficients[0][p])
return s | Converts a basis set to Molpro format | Below is the the instruction that describes the task:
### Input:
Converts a basis set to Molpro format
### Response:
def write_molpro(basis):
    '''Converts a basis set to Molpro format

    ``basis`` is a basis-set dict in the internal layout; returns the full
    Molpro-format text (electron shells first, then any ECP section).
    '''
    # Uncontract all, and make as generally-contracted as possible
    basis = manip.uncontract_spdf(basis, 0, True)
    basis = manip.make_general(basis, False)
    basis = sort.sort_basis(basis, True)
    s = ''
    # Elements for which we have electron basis
    electron_elements = [k for k, v in basis['elements'].items() if 'electron_shells' in v]
    # Elements for which we have ECP
    ecp_elements = [k for k, v in basis['elements'].items() if 'ecp_potentials' in v]
    if len(electron_elements) > 0:
        # basis set starts with a string
        s += 'basis={\n'
        # Electron Basis
        for z in electron_elements:
            data = basis['elements'][z]
            sym = lut.element_sym_from_Z(z).upper()
            s += '!\n'
            s += '! {:20} {}\n'.format(lut.element_name_from_Z(z), misc.contraction_string(data))
            for shell in data['electron_shells']:
                exponents = shell['exponents']
                coefficients = shell['coefficients']
                am = shell['angular_momentum']
                amchar = lut.amint_to_char(am).lower()
                s += '{}, {} , {}\n'.format(amchar, sym, ', '.join(exponents))
                # One 'c' line per general contraction, covering only the
                # nonzero coefficient range found by find_range (1-based).
                for c in coefficients:
                    first, last = find_range(c)
                    s += 'c, {}.{}, {}\n'.format(first + 1, last + 1, ', '.join(c[first:last + 1]))
        s += '}\n'
    # Write out ECP
    if len(ecp_elements) > 0:
        s += '\n\n! Effective core Potentials\n'
        for z in ecp_elements:
            data = basis['elements'][z]
            sym = lut.element_sym_from_Z(z).lower()
            max_ecp_am = max([x['angular_momentum'][0] for x in data['ecp_potentials']])
            # Sort lowest->highest, then put the highest at the beginning
            ecp_list = sorted(data['ecp_potentials'], key=lambda x: x['angular_momentum'])
            ecp_list.insert(0, ecp_list.pop())
            s += 'ECP, {}, {}, {} ;\n'.format(sym, data['ecp_electrons'], max_ecp_am)
            for pot in ecp_list:
                rexponents = pot['r_exponents']
                gexponents = pot['gaussian_exponents']
                coefficients = pot['coefficients']
                am = pot['angular_momentum']
                amchar = lut.amint_to_char(am).lower()
                s += '{};'.format(len(rexponents))
                # The highest-am potential is the local ('ul') channel.
                if am[0] == max_ecp_am:
                    s += ' ! ul potential\n'
                else:
                    s += ' ! {}-ul potential\n'.format(amchar)
                for p in range(len(rexponents)):
                    s += '{},{},{};\n'.format(rexponents[p], gexponents[p], coefficients[0][p])
    return s
def offer_simple(pool, answer, rationale, student_id, options):
"""
The simple selection algorithm.
This algorithm randomly select an answer from the pool to discard and add the new one when the pool reaches
the limit
"""
existing = pool.setdefault(answer, {})
if len(existing) >= get_max_size(pool, len(options), POOL_ITEM_LENGTH_SIMPLE):
student_id_to_remove = random.choice(existing.keys())
del existing[student_id_to_remove]
existing[student_id] = {}
pool[answer] = existing | The simple selection algorithm.
This algorithm randomly select an answer from the pool to discard and add the new one when the pool reaches
the limit | Below is the the instruction that describes the task:
### Input:
The simple selection algorithm.
This algorithm randomly select an answer from the pool to discard and add the new one when the pool reaches
the limit
### Response:
def offer_simple(pool, answer, rationale, student_id, options):
    """
    The simple selection algorithm.

    This algorithm randomly selects an answer from the pool to discard and
    adds the new one when the pool reaches the limit.

    :param pool: mapping of answer -> {student_id: {}} collected so far (mutated)
    :param answer: the answer option chosen by this student
    :param rationale: the student's rationale (unused by this strategy)
    :param student_id: id of the student submitting the answer
    :param options: the full list of answer options (used to size the limit)
    """
    existing = pool.setdefault(answer, {})
    if len(existing) >= get_max_size(pool, len(options), POOL_ITEM_LENGTH_SIMPLE):
        # Fixed for Python 3: random.choice() requires a sequence, and
        # dict.keys() is a non-indexable view there -- materialize a list.
        student_id_to_remove = random.choice(list(existing))
        del existing[student_id_to_remove]
    existing[student_id] = {}
    pool[answer] = existing
def list_solvers(args=None):
"""Entry point for listing available solvers."""
parser = argparse.ArgumentParser(
description='''List LP solver available in PSAMM. This will produce a
list of all of the available LP solvers in prioritized
order. Addtional requirements can be imposed with the
arguments (e.g. integer=yes to select only solvers that
support MILP problems). The list will also be influenced
by the PSAMM_SOLVER environment variable which can be
used to only allow specific solvers (e.g.
PSAMM_SOLVER=cplex).''')
parser.add_argument(
'requirement', nargs='*', type=str,
help='Additional requirements on the selected solvers')
parsed_args = parser.parse_args(args)
requirements = {}
for arg in parsed_args.requirement:
try:
key, value = parse_solver_setting(arg)
except ValueError as e:
parser.error(str(e))
else:
requirements[key] = value
solvers = list(filter_solvers(_solvers, requirements))
solver_names = set(solver['name'] for solver in solvers)
# Obtain solver priority from environment variable, if specified.
priority = {}
if 'PSAMM_SOLVER' in os.environ:
names = os.environ['PSAMM_SOLVER'].split(',')
for i, solver_name in enumerate(names):
priority[solver_name] = len(names) - i
solvers = [s for s in solvers if s['name'] in priority]
solver_names = set(priority)
else:
# Use built-in priorities
for solver in solvers:
priority[solver['name']] = solver['priority']
solvers = sorted(solvers, key=lambda s: priority.get(s['name'], 0),
reverse=True)
status = 0
if len(solvers) > 0:
print('Prioritized solvers:')
for solver in solvers:
print('Name: {}'.format(solver['name']))
print('Priority: {}'.format(solver['priority']))
print('MILP (integer) problem support: {}'.format(
solver['integer']))
print('QP (quadratic) problem support: {}'.format(
solver['quadratic']))
print('Rational solution: {}'.format(solver['rational']))
print('Class: {}'.format(solver['class']))
print()
else:
status = 1
print('No solvers fullfil the requirements!')
print()
filtered_solvers_count = len(_solvers) - len(solvers)
if filtered_solvers_count > 0 or len(_solver_import_errors) > 0:
print('Unavailable solvers:')
for solver in _solvers:
if solver['name'] not in solver_names:
print('{}: Does not fullfil the specified requirements'.format(
solver['name']))
for solver, error in iteritems(_solver_import_errors):
print('{}: Error loading solver: {}'.format(solver, error))
if status != 0:
parser.exit(status) | Entry point for listing available solvers. | Below is the the instruction that describes the task:
### Input:
Entry point for listing available solvers.
### Response:
def list_solvers(args=None):
    """Entry point for listing available solvers.

    Prints the prioritized list of LP solvers (optionally filtered by
    ``requirement`` key=value pairs and the PSAMM_SOLVER environment
    variable) followed by the solvers that were excluded and why.
    Exits nonzero when no solver satisfies the requirements.
    """
    parser = argparse.ArgumentParser(
        description='''List LP solver available in PSAMM. This will produce a
                       list of all of the available LP solvers in prioritized
                       order. Addtional requirements can be imposed with the
                       arguments (e.g. integer=yes to select only solvers that
                       support MILP problems). The list will also be influenced
                       by the PSAMM_SOLVER environment variable which can be
                       used to only allow specific solvers (e.g.
                       PSAMM_SOLVER=cplex).''')
    parser.add_argument(
        'requirement', nargs='*', type=str,
        help='Additional requirements on the selected solvers')
    parsed_args = parser.parse_args(args)
    requirements = {}
    for arg in parsed_args.requirement:
        try:
            key, value = parse_solver_setting(arg)
        except ValueError as e:
            parser.error(str(e))
        else:
            requirements[key] = value
    solvers = list(filter_solvers(_solvers, requirements))
    solver_names = set(solver['name'] for solver in solvers)
    # Obtain solver priority from environment variable, if specified.
    priority = {}
    if 'PSAMM_SOLVER' in os.environ:
        # Earlier names in the comma-separated list get higher priority.
        names = os.environ['PSAMM_SOLVER'].split(',')
        for i, solver_name in enumerate(names):
            priority[solver_name] = len(names) - i
        solvers = [s for s in solvers if s['name'] in priority]
        solver_names = set(priority)
    else:
        # Use built-in priorities
        for solver in solvers:
            priority[solver['name']] = solver['priority']
    solvers = sorted(solvers, key=lambda s: priority.get(s['name'], 0),
                     reverse=True)
    status = 0
    if len(solvers) > 0:
        print('Prioritized solvers:')
        for solver in solvers:
            print('Name: {}'.format(solver['name']))
            print('Priority: {}'.format(solver['priority']))
            print('MILP (integer) problem support: {}'.format(
                solver['integer']))
            print('QP (quadratic) problem support: {}'.format(
                solver['quadratic']))
            print('Rational solution: {}'.format(solver['rational']))
            print('Class: {}'.format(solver['class']))
            print()
    else:
        status = 1
        print('No solvers fullfil the requirements!')
        print()
    # Report solvers that were filtered out or failed to import.
    filtered_solvers_count = len(_solvers) - len(solvers)
    if filtered_solvers_count > 0 or len(_solver_import_errors) > 0:
        print('Unavailable solvers:')
        for solver in _solvers:
            if solver['name'] not in solver_names:
                print('{}: Does not fullfil the specified requirements'.format(
                    solver['name']))
        for solver, error in iteritems(_solver_import_errors):
            print('{}: Error loading solver: {}'.format(solver, error))
    if status != 0:
        parser.exit(status)
def tag_timexes(self):
"""Create ``timexes`` layer.
Depends on morphological analysis data in ``words`` layer
and tags it automatically, if it is not present."""
if not self.is_tagged(ANALYSIS):
self.tag_analysis()
if not self.is_tagged(TIMEXES):
if self.__timex_tagger is None:
self.__timex_tagger = load_default_timex_tagger()
self.__timex_tagger.tag_document(self, **self.__kwargs)
return self | Create ``timexes`` layer.
Depends on morphological analysis data in ``words`` layer
and tags it automatically, if it is not present. | Below is the the instruction that describes the task:
### Input:
Create ``timexes`` layer.
Depends on morphological analysis data in ``words`` layer
and tags it automatically, if it is not present.
### Response:
def tag_timexes(self):
        """Create ``timexes`` layer.
        Depends on morphological analysis data in ``words`` layer
        and tags it automatically, if it is not present.

        Returns this object so calls can be chained."""
        # Timex tagging requires morphological analyses; create on demand.
        if not self.is_tagged(ANALYSIS):
            self.tag_analysis()
        if not self.is_tagged(TIMEXES):
            # Lazily construct and cache the default tagger on first use.
            if self.__timex_tagger is None:
                self.__timex_tagger = load_default_timex_tagger()
            self.__timex_tagger.tag_document(self, **self.__kwargs)
        return self
def make_chunk_for(output_dir=LOCAL_DIR,
local_dir=LOCAL_DIR,
game_dir=None,
model_num=1,
positions=EXAMPLES_PER_GENERATION,
threads=8,
sampling_frac=0.02):
"""
Explicitly make a golden chunk for a given model `model_num`
(not necessarily the most recent one).
While we haven't yet got enough samples (EXAMPLES_PER_GENERATION)
Add samples from the games of previous model.
"""
game_dir = game_dir or fsdb.selfplay_dir()
ensure_dir_exists(output_dir)
models = [model for model in fsdb.get_models() if model[0] < model_num]
buf = ExampleBuffer(positions, sampling_frac=sampling_frac)
files = []
for _, model in sorted(models, reverse=True):
local_model_dir = os.path.join(local_dir, model)
if not tf.gfile.Exists(local_model_dir):
print("Rsyncing", model)
_rsync_dir(os.path.join(game_dir, model), local_model_dir)
files.extend(tf.gfile.Glob(os.path.join(local_model_dir, '*.zz')))
print("{}: {} games".format(model, len(files)))
if len(files) * 200 * sampling_frac > positions:
break
print("Filling from {} files".format(len(files)))
buf.parallel_fill(files, threads=threads)
print(buf)
output = os.path.join(output_dir, str(model_num) + '.tfrecord.zz')
print("Writing to", output)
buf.flush(output) | Explicitly make a golden chunk for a given model `model_num`
(not necessarily the most recent one).
While we haven't yet got enough samples (EXAMPLES_PER_GENERATION)
Add samples from the games of previous model. | Below is the the instruction that describes the task:
### Input:
Explicitly make a golden chunk for a given model `model_num`
(not necessarily the most recent one).
While we haven't yet got enough samples (EXAMPLES_PER_GENERATION)
Add samples from the games of previous model.
### Response:
def make_chunk_for(output_dir=LOCAL_DIR,
local_dir=LOCAL_DIR,
game_dir=None,
model_num=1,
positions=EXAMPLES_PER_GENERATION,
threads=8,
sampling_frac=0.02):
"""
Explicitly make a golden chunk for a given model `model_num`
(not necessarily the most recent one).
While we haven't yet got enough samples (EXAMPLES_PER_GENERATION)
Add samples from the games of previous model.
"""
game_dir = game_dir or fsdb.selfplay_dir()
ensure_dir_exists(output_dir)
models = [model for model in fsdb.get_models() if model[0] < model_num]
buf = ExampleBuffer(positions, sampling_frac=sampling_frac)
files = []
for _, model in sorted(models, reverse=True):
local_model_dir = os.path.join(local_dir, model)
if not tf.gfile.Exists(local_model_dir):
print("Rsyncing", model)
_rsync_dir(os.path.join(game_dir, model), local_model_dir)
files.extend(tf.gfile.Glob(os.path.join(local_model_dir, '*.zz')))
print("{}: {} games".format(model, len(files)))
if len(files) * 200 * sampling_frac > positions:
break
print("Filling from {} files".format(len(files)))
buf.parallel_fill(files, threads=threads)
print(buf)
output = os.path.join(output_dir, str(model_num) + '.tfrecord.zz')
print("Writing to", output)
buf.flush(output) |
def check_has_path(self, api):
'''An API class must have a `path` attribute.'''
if not hasattr(api, 'path'):
msg = 'The Api class "{}" lacks a `path` attribute.'
return [msg.format(api.__name__)] | An API class must have a `path` attribute. | Below is the the instruction that describes the task:
### Input:
An API class must have a `path` attribute.
### Response:
def check_has_path(self, api):
'''An API class must have a `path` attribute.'''
if not hasattr(api, 'path'):
msg = 'The Api class "{}" lacks a `path` attribute.'
return [msg.format(api.__name__)] |
def remove_from_gallery(self):
"""Remove this image from the gallery."""
url = self._imgur._base_url + "/3/gallery/{0}".format(self.id)
self._imgur._send_request(url, needs_auth=True, method='DELETE')
if isinstance(self, Image):
item = self._imgur.get_image(self.id)
else:
item = self._imgur.get_album(self.id)
_change_object(self, item)
return self | Remove this image from the gallery. | Below is the the instruction that describes the task:
### Input:
Remove this image from the gallery.
### Response:
def remove_from_gallery(self):
"""Remove this image from the gallery."""
url = self._imgur._base_url + "/3/gallery/{0}".format(self.id)
self._imgur._send_request(url, needs_auth=True, method='DELETE')
if isinstance(self, Image):
item = self._imgur.get_image(self.id)
else:
item = self._imgur.get_album(self.id)
_change_object(self, item)
return self |
def get_api_docs(routes):
"""
Generates GitHub Markdown formatted API documentation using
provided schemas in RequestHandler methods and their docstrings.
:type routes: [(url, RequestHandler), ...]
:param routes: List of routes (this is ideally all possible routes of the
app)
:rtype: str
:returns: generated GFM-formatted documentation
"""
routes = map(_get_tuple_from_route, routes)
documentation = []
for url, rh, methods in sorted(routes, key=lambda a: a[0]):
if issubclass(rh, APIHandler):
documentation.append(_get_route_doc(url, rh, methods))
documentation = (
"**This documentation is automatically generated.**\n\n" +
"**Output schemas only represent `data` and not the full output; " +
"see output examples and the JSend specification.**\n" +
"\n<br>\n<br>\n".join(documentation)
)
return documentation | Generates GitHub Markdown formatted API documentation using
provided schemas in RequestHandler methods and their docstrings.
:type routes: [(url, RequestHandler), ...]
:param routes: List of routes (this is ideally all possible routes of the
app)
:rtype: str
:returns: generated GFM-formatted documentation | Below is the the instruction that describes the task:
### Input:
Generates GitHub Markdown formatted API documentation using
provided schemas in RequestHandler methods and their docstrings.
:type routes: [(url, RequestHandler), ...]
:param routes: List of routes (this is ideally all possible routes of the
app)
:rtype: str
:returns: generated GFM-formatted documentation
### Response:
def get_api_docs(routes):
"""
Generates GitHub Markdown formatted API documentation using
provided schemas in RequestHandler methods and their docstrings.
:type routes: [(url, RequestHandler), ...]
:param routes: List of routes (this is ideally all possible routes of the
app)
:rtype: str
:returns: generated GFM-formatted documentation
"""
routes = map(_get_tuple_from_route, routes)
documentation = []
for url, rh, methods in sorted(routes, key=lambda a: a[0]):
if issubclass(rh, APIHandler):
documentation.append(_get_route_doc(url, rh, methods))
documentation = (
"**This documentation is automatically generated.**\n\n" +
"**Output schemas only represent `data` and not the full output; " +
"see output examples and the JSend specification.**\n" +
"\n<br>\n<br>\n".join(documentation)
)
return documentation |
def transform(self, X):
"""Imputes all missing values in X.
Note that this is stochastic, and that if random_state is not fixed,
repeated calls, or permuted input, will yield different results.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
The input data to complete.
Returns
-------
Xt : array-like, shape (n_samples, n_features)
The imputed input data.
"""
check_is_fitted(self, 'initial_imputer_')
X, Xt, mask_missing_values = self._initial_imputation(X)
if self.n_iter == 0:
return Xt
imputations_per_round = len(self.imputation_sequence_) // self.n_iter
i_rnd = 0
if self.verbose > 0:
print("[IterativeImputer] Completing matrix with shape %s"
% (X.shape,))
start_t = time()
for it, predictor_triplet in enumerate(self.imputation_sequence_):
Xt, _ = self._impute_one_feature(
Xt,
mask_missing_values,
predictor_triplet.feat_idx,
predictor_triplet.neighbor_feat_idx,
predictor=predictor_triplet.predictor,
fit_mode=False
)
if not (it + 1) % imputations_per_round:
if self.verbose > 1:
print('[IterativeImputer] Ending imputation round '
'%d/%d, elapsed time %0.2f'
% (i_rnd + 1, self.n_iter, time() - start_t))
i_rnd += 1
Xt[~mask_missing_values] = X[~mask_missing_values]
return Xt | Imputes all missing values in X.
Note that this is stochastic, and that if random_state is not fixed,
repeated calls, or permuted input, will yield different results.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
The input data to complete.
Returns
-------
Xt : array-like, shape (n_samples, n_features)
The imputed input data. | Below is the the instruction that describes the task:
### Input:
Imputes all missing values in X.
Note that this is stochastic, and that if random_state is not fixed,
repeated calls, or permuted input, will yield different results.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
The input data to complete.
Returns
-------
Xt : array-like, shape (n_samples, n_features)
The imputed input data.
### Response:
def transform(self, X):
"""Imputes all missing values in X.
Note that this is stochastic, and that if random_state is not fixed,
repeated calls, or permuted input, will yield different results.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
The input data to complete.
Returns
-------
Xt : array-like, shape (n_samples, n_features)
The imputed input data.
"""
check_is_fitted(self, 'initial_imputer_')
X, Xt, mask_missing_values = self._initial_imputation(X)
if self.n_iter == 0:
return Xt
imputations_per_round = len(self.imputation_sequence_) // self.n_iter
i_rnd = 0
if self.verbose > 0:
print("[IterativeImputer] Completing matrix with shape %s"
% (X.shape,))
start_t = time()
for it, predictor_triplet in enumerate(self.imputation_sequence_):
Xt, _ = self._impute_one_feature(
Xt,
mask_missing_values,
predictor_triplet.feat_idx,
predictor_triplet.neighbor_feat_idx,
predictor=predictor_triplet.predictor,
fit_mode=False
)
if not (it + 1) % imputations_per_round:
if self.verbose > 1:
print('[IterativeImputer] Ending imputation round '
'%d/%d, elapsed time %0.2f'
% (i_rnd + 1, self.n_iter, time() - start_t))
i_rnd += 1
Xt[~mask_missing_values] = X[~mask_missing_values]
return Xt |
def repl_member_add(self, params):
"""create new mongod instances and add it to the replica set.
Args:
params - mongod params
return True if operation success otherwise False
"""
repl_config = self.config
member_id = max([member['_id'] for member in repl_config['members']]) + 1
member_config = self.member_create(params, member_id)
repl_config['members'].append(member_config)
if not self.repl_update(repl_config):
self.member_del(member_id, reconfig=True)
raise ReplicaSetError("Could not add member to ReplicaSet.")
return member_id | create new mongod instances and add it to the replica set.
Args:
params - mongod params
return True if operation success otherwise False | Below is the the instruction that describes the task:
### Input:
create new mongod instances and add it to the replica set.
Args:
params - mongod params
return True if operation success otherwise False
### Response:
def repl_member_add(self, params):
"""create new mongod instances and add it to the replica set.
Args:
params - mongod params
return True if operation success otherwise False
"""
repl_config = self.config
member_id = max([member['_id'] for member in repl_config['members']]) + 1
member_config = self.member_create(params, member_id)
repl_config['members'].append(member_config)
if not self.repl_update(repl_config):
self.member_del(member_id, reconfig=True)
raise ReplicaSetError("Could not add member to ReplicaSet.")
return member_id |
def _get_oxm_field_int(self):
"""Return a valid integer value for oxm_field.
Used while packing.
Returns:
int: valid oxm_field value.
Raises:
ValueError: If :attribute:`oxm_field` is bigger than 7 bits or
should be :class:`OxmOfbMatchField` and the enum has no such
value.
"""
if self.oxm_class == OxmClass.OFPXMC_OPENFLOW_BASIC:
return OxmOfbMatchField(self.oxm_field).value
elif not isinstance(self.oxm_field, int) or self.oxm_field > 127:
raise ValueError('oxm_field above 127: "{self.oxm_field}".')
return self.oxm_field | Return a valid integer value for oxm_field.
Used while packing.
Returns:
int: valid oxm_field value.
Raises:
ValueError: If :attribute:`oxm_field` is bigger than 7 bits or
should be :class:`OxmOfbMatchField` and the enum has no such
value. | Below is the the instruction that describes the task:
### Input:
Return a valid integer value for oxm_field.
Used while packing.
Returns:
int: valid oxm_field value.
Raises:
ValueError: If :attribute:`oxm_field` is bigger than 7 bits or
should be :class:`OxmOfbMatchField` and the enum has no such
value.
### Response:
def _get_oxm_field_int(self):
"""Return a valid integer value for oxm_field.
Used while packing.
Returns:
int: valid oxm_field value.
Raises:
ValueError: If :attribute:`oxm_field` is bigger than 7 bits or
should be :class:`OxmOfbMatchField` and the enum has no such
value.
"""
if self.oxm_class == OxmClass.OFPXMC_OPENFLOW_BASIC:
return OxmOfbMatchField(self.oxm_field).value
elif not isinstance(self.oxm_field, int) or self.oxm_field > 127:
raise ValueError('oxm_field above 127: "{self.oxm_field}".')
return self.oxm_field |
def get_inputs_outputs(namespace, method, snapshot_id):
"""Get a description of the inputs and outputs for a method.
The method should exist in the methods repository.
Args:
namespace (str): Methods namespace
method (str): method name
snapshot_id (int): snapshot_id of the method
Swagger:
https://api.firecloud.org/#!/Method_Repository/getMethodIO
"""
body = {
"methodNamespace" : namespace,
"methodName" : method,
"methodVersion" : snapshot_id
}
return __post("inputsOutputs", json=body) | Get a description of the inputs and outputs for a method.
The method should exist in the methods repository.
Args:
namespace (str): Methods namespace
method (str): method name
snapshot_id (int): snapshot_id of the method
Swagger:
https://api.firecloud.org/#!/Method_Repository/getMethodIO | Below is the the instruction that describes the task:
### Input:
Get a description of the inputs and outputs for a method.
The method should exist in the methods repository.
Args:
namespace (str): Methods namespace
method (str): method name
snapshot_id (int): snapshot_id of the method
Swagger:
https://api.firecloud.org/#!/Method_Repository/getMethodIO
### Response:
def get_inputs_outputs(namespace, method, snapshot_id):
"""Get a description of the inputs and outputs for a method.
The method should exist in the methods repository.
Args:
namespace (str): Methods namespace
method (str): method name
snapshot_id (int): snapshot_id of the method
Swagger:
https://api.firecloud.org/#!/Method_Repository/getMethodIO
"""
body = {
"methodNamespace" : namespace,
"methodName" : method,
"methodVersion" : snapshot_id
}
return __post("inputsOutputs", json=body) |
def compare(node1, node2):
"""Compares two Werkzeug hg versions."""
if not os.path.isdir("a"):
print("error: comparison feature not initialized", file=sys.stderr)
sys.exit(4)
print("=" * 80)
print("WERKZEUG INTERNAL BENCHMARK -- COMPARE MODE".center(80))
print("-" * 80)
def _hg_update(repo, node):
def hg(*x):
return subprocess.call(
["hg"] + list(x), cwd=repo, stdout=null_out, stderr=null_out
)
hg("revert", "-a", "--no-backup")
client = subprocess.Popen(
["hg", "status", "--unknown", "-n", "-0"], stdout=subprocess.PIPE, cwd=repo
)
unknown = client.communicate()[0]
if unknown:
client = subprocess.Popen(
["xargs", "-0", "rm", "-f"],
cwd=repo,
stdout=null_out,
stdin=subprocess.PIPE,
)
client.communicate(unknown)
hg("pull", "../..")
hg("update", node)
if node == "tip":
diff = subprocess.Popen(
["hg", "diff"], cwd="..", stdout=subprocess.PIPE
).communicate()[0]
if diff:
client = subprocess.Popen(
["hg", "import", "--no-commit", "-"],
cwd=repo,
stdout=null_out,
stdin=subprocess.PIPE,
)
client.communicate(diff)
_hg_update("a", node1)
_hg_update("b", node2)
d1 = run("a", no_header=True)
d2 = run("b", no_header=True)
print("DIRECT COMPARISON".center(80))
print("-" * 80)
for key in sorted(d1):
delta = d1[key] - d2[key]
if abs(1 - d1[key] / d2[key]) < TOLERANCE or abs(delta) < MIN_RESOLUTION:
delta = "=="
else:
delta = "%+.4f (%+d%%)" % (delta, round(d2[key] / d1[key] * 100 - 100))
print("%36s %.4f %.4f %s" % (format_func(key), d1[key], d2[key], delta))
print("-" * 80) | Compares two Werkzeug hg versions. | Below is the the instruction that describes the task:
### Input:
Compares two Werkzeug hg versions.
### Response:
def compare(node1, node2):
"""Compares two Werkzeug hg versions."""
if not os.path.isdir("a"):
print("error: comparison feature not initialized", file=sys.stderr)
sys.exit(4)
print("=" * 80)
print("WERKZEUG INTERNAL BENCHMARK -- COMPARE MODE".center(80))
print("-" * 80)
def _hg_update(repo, node):
def hg(*x):
return subprocess.call(
["hg"] + list(x), cwd=repo, stdout=null_out, stderr=null_out
)
hg("revert", "-a", "--no-backup")
client = subprocess.Popen(
["hg", "status", "--unknown", "-n", "-0"], stdout=subprocess.PIPE, cwd=repo
)
unknown = client.communicate()[0]
if unknown:
client = subprocess.Popen(
["xargs", "-0", "rm", "-f"],
cwd=repo,
stdout=null_out,
stdin=subprocess.PIPE,
)
client.communicate(unknown)
hg("pull", "../..")
hg("update", node)
if node == "tip":
diff = subprocess.Popen(
["hg", "diff"], cwd="..", stdout=subprocess.PIPE
).communicate()[0]
if diff:
client = subprocess.Popen(
["hg", "import", "--no-commit", "-"],
cwd=repo,
stdout=null_out,
stdin=subprocess.PIPE,
)
client.communicate(diff)
_hg_update("a", node1)
_hg_update("b", node2)
d1 = run("a", no_header=True)
d2 = run("b", no_header=True)
print("DIRECT COMPARISON".center(80))
print("-" * 80)
for key in sorted(d1):
delta = d1[key] - d2[key]
if abs(1 - d1[key] / d2[key]) < TOLERANCE or abs(delta) < MIN_RESOLUTION:
delta = "=="
else:
delta = "%+.4f (%+d%%)" % (delta, round(d2[key] / d1[key] * 100 - 100))
print("%36s %.4f %.4f %s" % (format_func(key), d1[key], d2[key], delta))
print("-" * 80) |
def load_stock_quantity(self):
""" Loads quantities for all stocks """
info = StocksInfo(self.config)
for stock in self.model.stocks:
stock.quantity = info.load_stock_quantity(stock.symbol)
info.gc_book.close() | Loads quantities for all stocks | Below is the the instruction that describes the task:
### Input:
Loads quantities for all stocks
### Response:
def load_stock_quantity(self):
""" Loads quantities for all stocks """
info = StocksInfo(self.config)
for stock in self.model.stocks:
stock.quantity = info.load_stock_quantity(stock.symbol)
info.gc_book.close() |
def ncdegree(polynomial):
"""Returns the degree of a noncommutative polynomial.
:param polynomial: Polynomial of noncommutive variables.
:type polynomial: :class:`sympy.core.expr.Expr`.
:returns: int -- the degree of the polynomial.
"""
degree = 0
if is_number_type(polynomial):
return degree
polynomial = polynomial.expand()
for monomial in polynomial.as_coefficients_dict():
subdegree = 0
for variable in monomial.as_coeff_mul()[1]:
if isinstance(variable, Pow):
subdegree += variable.exp
elif not isinstance(variable, Number) and variable != I:
subdegree += 1
if subdegree > degree:
degree = subdegree
return degree | Returns the degree of a noncommutative polynomial.
:param polynomial: Polynomial of noncommutive variables.
:type polynomial: :class:`sympy.core.expr.Expr`.
:returns: int -- the degree of the polynomial. | Below is the the instruction that describes the task:
### Input:
Returns the degree of a noncommutative polynomial.
:param polynomial: Polynomial of noncommutive variables.
:type polynomial: :class:`sympy.core.expr.Expr`.
:returns: int -- the degree of the polynomial.
### Response:
def ncdegree(polynomial):
"""Returns the degree of a noncommutative polynomial.
:param polynomial: Polynomial of noncommutive variables.
:type polynomial: :class:`sympy.core.expr.Expr`.
:returns: int -- the degree of the polynomial.
"""
degree = 0
if is_number_type(polynomial):
return degree
polynomial = polynomial.expand()
for monomial in polynomial.as_coefficients_dict():
subdegree = 0
for variable in monomial.as_coeff_mul()[1]:
if isinstance(variable, Pow):
subdegree += variable.exp
elif not isinstance(variable, Number) and variable != I:
subdegree += 1
if subdegree > degree:
degree = subdegree
return degree |
def main(device_type):
"""Parse command-line arguments."""
epilog = ('See https://github.com/romanz/trezor-agent/blob/master/'
'doc/README-GPG.md for usage examples.')
parser = argparse.ArgumentParser(epilog=epilog)
agent_package = device_type.package_name()
resources_map = {r.key: r for r in pkg_resources.require(agent_package)}
resources = [resources_map[agent_package], resources_map['libagent']]
versions = '\n'.join('{}={}'.format(r.key, r.version) for r in resources)
parser.add_argument('--version', help='print the version info',
action='version', version=versions)
subparsers = parser.add_subparsers(title='Action', dest='action')
subparsers.required = True
p = subparsers.add_parser('init',
help='initialize hardware-based GnuPG identity')
p.add_argument('user_id')
p.add_argument('-e', '--ecdsa-curve', default='nist256p1')
p.add_argument('-t', '--time', type=int, default=int(time.time()))
p.add_argument('-v', '--verbose', default=0, action='count')
p.add_argument('-s', '--subkey', default=False, action='store_true')
p.add_argument('--homedir', type=str, default=os.environ.get('GNUPGHOME'),
help='Customize GnuPG home directory for the new identity.')
p.add_argument('--pin-entry-binary', type=str, default='pinentry',
help='Path to PIN entry UI helper.')
p.add_argument('--passphrase-entry-binary', type=str, default='pinentry',
help='Path to passphrase entry UI helper.')
p.add_argument('--cache-expiry-seconds', type=float, default=float('inf'),
help='Expire passphrase from cache after this duration.')
p.set_defaults(func=run_init)
p = subparsers.add_parser('unlock', help='unlock the hardware device')
p.add_argument('-v', '--verbose', default=0, action='count')
p.set_defaults(func=run_unlock)
args = parser.parse_args()
device_type.ui = device.ui.UI(device_type=device_type, config=vars(args))
device_type.ui.cached_passphrase_ack = util.ExpiringCache(
seconds=float(args.cache_expiry_seconds))
return args.func(device_type=device_type, args=args) | Parse command-line arguments. | Below is the the instruction that describes the task:
### Input:
Parse command-line arguments.
### Response:
def main(device_type):
"""Parse command-line arguments."""
epilog = ('See https://github.com/romanz/trezor-agent/blob/master/'
'doc/README-GPG.md for usage examples.')
parser = argparse.ArgumentParser(epilog=epilog)
agent_package = device_type.package_name()
resources_map = {r.key: r for r in pkg_resources.require(agent_package)}
resources = [resources_map[agent_package], resources_map['libagent']]
versions = '\n'.join('{}={}'.format(r.key, r.version) for r in resources)
parser.add_argument('--version', help='print the version info',
action='version', version=versions)
subparsers = parser.add_subparsers(title='Action', dest='action')
subparsers.required = True
p = subparsers.add_parser('init',
help='initialize hardware-based GnuPG identity')
p.add_argument('user_id')
p.add_argument('-e', '--ecdsa-curve', default='nist256p1')
p.add_argument('-t', '--time', type=int, default=int(time.time()))
p.add_argument('-v', '--verbose', default=0, action='count')
p.add_argument('-s', '--subkey', default=False, action='store_true')
p.add_argument('--homedir', type=str, default=os.environ.get('GNUPGHOME'),
help='Customize GnuPG home directory for the new identity.')
p.add_argument('--pin-entry-binary', type=str, default='pinentry',
help='Path to PIN entry UI helper.')
p.add_argument('--passphrase-entry-binary', type=str, default='pinentry',
help='Path to passphrase entry UI helper.')
p.add_argument('--cache-expiry-seconds', type=float, default=float('inf'),
help='Expire passphrase from cache after this duration.')
p.set_defaults(func=run_init)
p = subparsers.add_parser('unlock', help='unlock the hardware device')
p.add_argument('-v', '--verbose', default=0, action='count')
p.set_defaults(func=run_unlock)
args = parser.parse_args()
device_type.ui = device.ui.UI(device_type=device_type, config=vars(args))
device_type.ui.cached_passphrase_ack = util.ExpiringCache(
seconds=float(args.cache_expiry_seconds))
return args.func(device_type=device_type, args=args) |
def export_image(input, output, timeout=20, palette='white', resolution=150, layers=None, command=None, mirror=False, showgui=False):
'''
Exporting eagle .sch or .brd file into image file.
GUI is not displayed if ``pyvirtualdisplay`` is installed.
If export is blocked somehow (e.g. popup window is displayed) then after timeout operation is canceled with exception.
Problem can be investigated by setting 'showgui' flag.
Exporting generates an image file with a format corresponding
to the given filename extension.
The following image formats are available:
.bmp Windows Bitmap Files
.png Portable Network Graphics Files
.pbm Portable Bitmap Files
.pgm Portable Grayscale Bitmap Files
.ppm Portable Pixelmap Files
.tif TIFF Files
.xbm X Bitmap Files
.xpm X Pixmap Files
:param input: eagle .sch or .brd file name
:param output: image file name, existing file will be removed first!
:param palette: background color [None,black,white,colored]
:param resolution: image resolution in dpi (50..2400)
:param timeout: operation is canceled after this timeout (sec)
:param showgui: eagle GUI is displayed
:param layers: list, layers to be displayed ['top','pads']
:param command: string, direct eagle command
:param mirror: Bool
:rtype: None
'''
input = norm_path(input)
output = norm_path(output)
if palette:
palette = palette.lower()
if palette == 'none':
palette = None
cmds = []
if palette is not None:
cmds += ['SET PALETTE {palette}'.format(palette=palette)]
if layers is not None:
cmds += ['DISPLAY NONE ' + ' '.join(layers)]
if command is not None:
cmds += [command]
if mirror:
f = tempfile.NamedTemporaryFile(suffix='.png', prefix='eagexp_')
fout = f.name
else:
fout = output
commands = export_command(output=fout, output_type='image',
commands=cmds, resolution=resolution)
command_eagle(
input=input, timeout=timeout, commands=commands, showgui=showgui)
if mirror:
im = Image.open(fout)
# save dpi info
info = im.info
im = ImageOps.mirror(im)
im.save(output, **info) | Exporting eagle .sch or .brd file into image file.
GUI is not displayed if ``pyvirtualdisplay`` is installed.
If export is blocked somehow (e.g. popup window is displayed) then after timeout operation is canceled with exception.
Problem can be investigated by setting 'showgui' flag.
Exporting generates an image file with a format corresponding
to the given filename extension.
The following image formats are available:
.bmp Windows Bitmap Files
.png Portable Network Graphics Files
.pbm Portable Bitmap Files
.pgm Portable Grayscale Bitmap Files
.ppm Portable Pixelmap Files
.tif TIFF Files
.xbm X Bitmap Files
.xpm X Pixmap Files
:param input: eagle .sch or .brd file name
:param output: image file name, existing file will be removed first!
:param palette: background color [None,black,white,colored]
:param resolution: image resolution in dpi (50..2400)
:param timeout: operation is canceled after this timeout (sec)
:param showgui: eagle GUI is displayed
:param layers: list, layers to be displayed ['top','pads']
:param command: string, direct eagle command
:param mirror: Bool
:rtype: None | Below is the the instruction that describes the task:
### Input:
Exporting eagle .sch or .brd file into image file.
GUI is not displayed if ``pyvirtualdisplay`` is installed.
If export is blocked somehow (e.g. popup window is displayed) then after timeout operation is canceled with exception.
Problem can be investigated by setting 'showgui' flag.
Exporting generates an image file with a format corresponding
to the given filename extension.
The following image formats are available:
.bmp Windows Bitmap Files
.png Portable Network Graphics Files
.pbm Portable Bitmap Files
.pgm Portable Grayscale Bitmap Files
.ppm Portable Pixelmap Files
.tif TIFF Files
.xbm X Bitmap Files
.xpm X Pixmap Files
:param input: eagle .sch or .brd file name
:param output: image file name, existing file will be removed first!
:param palette: background color [None,black,white,colored]
:param resolution: image resolution in dpi (50..2400)
:param timeout: operation is canceled after this timeout (sec)
:param showgui: eagle GUI is displayed
:param layers: list, layers to be displayed ['top','pads']
:param command: string, direct eagle command
:param mirror: Bool
:rtype: None
### Response:
def export_image(input, output, timeout=20, palette='white', resolution=150, layers=None, command=None, mirror=False, showgui=False):
'''
Exporting eagle .sch or .brd file into image file.
GUI is not displayed if ``pyvirtualdisplay`` is installed.
If export is blocked somehow (e.g. popup window is displayed) then after timeout operation is canceled with exception.
Problem can be investigated by setting 'showgui' flag.
Exporting generates an image file with a format corresponding
to the given filename extension.
The following image formats are available:
.bmp Windows Bitmap Files
.png Portable Network Graphics Files
.pbm Portable Bitmap Files
.pgm Portable Grayscale Bitmap Files
.ppm Portable Pixelmap Files
.tif TIFF Files
.xbm X Bitmap Files
.xpm X Pixmap Files
:param input: eagle .sch or .brd file name
:param output: image file name, existing file will be removed first!
:param palette: background color [None,black,white,colored]
:param resolution: image resolution in dpi (50..2400)
:param timeout: operation is canceled after this timeout (sec)
:param showgui: eagle GUI is displayed
:param layers: list, layers to be displayed ['top','pads']
:param command: string, direct eagle command
:param mirror: Bool
:rtype: None
'''
input = norm_path(input)
output = norm_path(output)
if palette:
palette = palette.lower()
if palette == 'none':
palette = None
cmds = []
if palette is not None:
cmds += ['SET PALETTE {palette}'.format(palette=palette)]
if layers is not None:
cmds += ['DISPLAY NONE ' + ' '.join(layers)]
if command is not None:
cmds += [command]
if mirror:
f = tempfile.NamedTemporaryFile(suffix='.png', prefix='eagexp_')
fout = f.name
else:
fout = output
commands = export_command(output=fout, output_type='image',
commands=cmds, resolution=resolution)
command_eagle(
input=input, timeout=timeout, commands=commands, showgui=showgui)
if mirror:
im = Image.open(fout)
# save dpi info
info = im.info
im = ImageOps.mirror(im)
im.save(output, **info) |
def call_orig(self, *args, **kwargs):
'''
Calls the original function.
'''
return self._orig(self._obj, *args, **kwargs) | Calls the original function. | Below is the the instruction that describes the task:
### Input:
Calls the original function.
### Response:
def call_orig(self, *args, **kwargs):
'''
Calls the original function.
'''
return self._orig(self._obj, *args, **kwargs) |
def QA_indicator_RSI(DataFrame, N1=12, N2=26, N3=9):
'相对强弱指标RSI1:SMA(MAX(CLOSE-LC,0),N1,1)/SMA(ABS(CLOSE-LC),N1,1)*100;'
CLOSE = DataFrame['close']
LC = REF(CLOSE, 1)
RSI1 = SMA(MAX(CLOSE - LC, 0), N1) / SMA(ABS(CLOSE - LC), N1) * 100
RSI2 = SMA(MAX(CLOSE - LC, 0), N2) / SMA(ABS(CLOSE - LC), N2) * 100
RSI3 = SMA(MAX(CLOSE - LC, 0), N3) / SMA(ABS(CLOSE - LC), N3) * 100
DICT = {'RSI1': RSI1, 'RSI2': RSI2, 'RSI3': RSI3}
return pd.DataFrame(DICT) | 相对强弱指标RSI1:SMA(MAX(CLOSE-LC,0),N1,1)/SMA(ABS(CLOSE-LC),N1,1)*100; | Below is the the instruction that describes the task:
### Input:
相对强弱指标RSI1:SMA(MAX(CLOSE-LC,0),N1,1)/SMA(ABS(CLOSE-LC),N1,1)*100;
### Response:
def QA_indicator_RSI(DataFrame, N1=12, N2=26, N3=9):
'相对强弱指标RSI1:SMA(MAX(CLOSE-LC,0),N1,1)/SMA(ABS(CLOSE-LC),N1,1)*100;'
CLOSE = DataFrame['close']
LC = REF(CLOSE, 1)
RSI1 = SMA(MAX(CLOSE - LC, 0), N1) / SMA(ABS(CLOSE - LC), N1) * 100
RSI2 = SMA(MAX(CLOSE - LC, 0), N2) / SMA(ABS(CLOSE - LC), N2) * 100
RSI3 = SMA(MAX(CLOSE - LC, 0), N3) / SMA(ABS(CLOSE - LC), N3) * 100
DICT = {'RSI1': RSI1, 'RSI2': RSI2, 'RSI3': RSI3}
return pd.DataFrame(DICT) |
def whichEncoding(self):
"""How should I be encoded?
@returns: one of ENCODE_URL, ENCODE_HTML_FORM, or ENCODE_KVFORM.
@change: 2.1.0 added the ENCODE_HTML_FORM response.
"""
if self.request.mode in BROWSER_REQUEST_MODES:
if self.fields.getOpenIDNamespace() == OPENID2_NS and \
len(self.encodeToURL()) > OPENID1_URL_LIMIT:
return ENCODE_HTML_FORM
else:
return ENCODE_URL
else:
return ENCODE_KVFORM | How should I be encoded?
@returns: one of ENCODE_URL, ENCODE_HTML_FORM, or ENCODE_KVFORM.
@change: 2.1.0 added the ENCODE_HTML_FORM response. | Below is the the instruction that describes the task:
### Input:
How should I be encoded?
@returns: one of ENCODE_URL, ENCODE_HTML_FORM, or ENCODE_KVFORM.
@change: 2.1.0 added the ENCODE_HTML_FORM response.
### Response:
def whichEncoding(self):
"""How should I be encoded?
@returns: one of ENCODE_URL, ENCODE_HTML_FORM, or ENCODE_KVFORM.
@change: 2.1.0 added the ENCODE_HTML_FORM response.
"""
if self.request.mode in BROWSER_REQUEST_MODES:
if self.fields.getOpenIDNamespace() == OPENID2_NS and \
len(self.encodeToURL()) > OPENID1_URL_LIMIT:
return ENCODE_HTML_FORM
else:
return ENCODE_URL
else:
return ENCODE_KVFORM |
def interpolate_nans_1d(x, y, kind='linear'):
"""Interpolate NaN values in y.
Interpolate NaN values in the y dimension. Works with unsorted x values.
Parameters
----------
x : array-like
1-dimensional array of numeric x-values
y : array-like
1-dimensional array of numeric y-values
kind : string
specifies the kind of interpolation x coordinate - 'linear' or 'log', optional.
Defaults to 'linear'.
Returns
-------
An array of the y coordinate data with NaN values interpolated.
"""
x_sort_args = np.argsort(x)
x = x[x_sort_args]
y = y[x_sort_args]
nans = np.isnan(y)
if kind == 'linear':
y[nans] = np.interp(x[nans], x[~nans], y[~nans])
elif kind == 'log':
y[nans] = np.interp(np.log(x[nans]), np.log(x[~nans]), y[~nans])
else:
raise ValueError('Unknown option for kind: {0}'.format(str(kind)))
return y[x_sort_args] | Interpolate NaN values in y.
Interpolate NaN values in the y dimension. Works with unsorted x values.
Parameters
----------
x : array-like
1-dimensional array of numeric x-values
y : array-like
1-dimensional array of numeric y-values
kind : string
specifies the kind of interpolation x coordinate - 'linear' or 'log', optional.
Defaults to 'linear'.
Returns
-------
An array of the y coordinate data with NaN values interpolated. | Below is the the instruction that describes the task:
### Input:
Interpolate NaN values in y.
Interpolate NaN values in the y dimension. Works with unsorted x values.
Parameters
----------
x : array-like
1-dimensional array of numeric x-values
y : array-like
1-dimensional array of numeric y-values
kind : string
specifies the kind of interpolation x coordinate - 'linear' or 'log', optional.
Defaults to 'linear'.
Returns
-------
An array of the y coordinate data with NaN values interpolated.
### Response:
def interpolate_nans_1d(x, y, kind='linear'):
"""Interpolate NaN values in y.
Interpolate NaN values in the y dimension. Works with unsorted x values.
Parameters
----------
x : array-like
1-dimensional array of numeric x-values
y : array-like
1-dimensional array of numeric y-values
kind : string
specifies the kind of interpolation x coordinate - 'linear' or 'log', optional.
Defaults to 'linear'.
Returns
-------
An array of the y coordinate data with NaN values interpolated.
"""
x_sort_args = np.argsort(x)
x = x[x_sort_args]
y = y[x_sort_args]
nans = np.isnan(y)
if kind == 'linear':
y[nans] = np.interp(x[nans], x[~nans], y[~nans])
elif kind == 'log':
y[nans] = np.interp(np.log(x[nans]), np.log(x[~nans]), y[~nans])
else:
raise ValueError('Unknown option for kind: {0}'.format(str(kind)))
return y[x_sort_args] |
def setMinimumPixmapSize(self, size):
"""
Sets the minimum pixmap size that will be displayed to the user
for the dock widget.
:param size | <int>
"""
self._minimumPixmapSize = size
position = self.position()
self._position = None
self.setPosition(position) | Sets the minimum pixmap size that will be displayed to the user
for the dock widget.
:param size | <int> | Below is the the instruction that describes the task:
### Input:
Sets the minimum pixmap size that will be displayed to the user
for the dock widget.
:param size | <int>
### Response:
def setMinimumPixmapSize(self, size):
"""
Sets the minimum pixmap size that will be displayed to the user
for the dock widget.
:param size | <int>
"""
self._minimumPixmapSize = size
position = self.position()
self._position = None
self.setPosition(position) |
def plugins_context(classes, category=None):
""" context manager to load plugin class(es) then unload on exit
Parameters
----------
classes: list
list of classes
category : None or str
if str, apply for single plugin category
Examples
--------
>>> from pprint import pprint
>>> class DecoderPlugin(object):
... plugin_name = 'example'
... plugin_descript = 'a decoder for dicts containing _example_ key'
... dict_signature = ('_example_',)
...
>>> with plugins_context([DecoderPlugin]):
... pprint(view_plugins())
{'decoders': {'example': 'a decoder for dicts containing _example_ key'},
'encoders': {},
'parsers': {}}
>>> pprint(view_plugins())
{'decoders': {}, 'encoders': {}, 'parsers': {}}
"""
original = {cat: list(_all_plugins[cat].keys()) for cat in _all_plugins}
load_plugin_classes(classes, category, overwrite=True)
# if errors:
# for cat in _all_plugins:
# for name, kls in list(_all_plugins[cat].items()):
# if name not in original[cat]:
# _all_plugins[cat].pop(name)
# raise RuntimeError(
# "errors occurred while loading plugins: {}".format(errors))
yield
for cat in _all_plugins:
for name, kls in list(_all_plugins[cat].items()):
if name not in original[cat]:
_all_plugins[cat].pop(name) | context manager to load plugin class(es) then unload on exit
Parameters
----------
classes: list
list of classes
category : None or str
if str, apply for single plugin category
Examples
--------
>>> from pprint import pprint
>>> class DecoderPlugin(object):
... plugin_name = 'example'
... plugin_descript = 'a decoder for dicts containing _example_ key'
... dict_signature = ('_example_',)
...
>>> with plugins_context([DecoderPlugin]):
... pprint(view_plugins())
{'decoders': {'example': 'a decoder for dicts containing _example_ key'},
'encoders': {},
'parsers': {}}
>>> pprint(view_plugins())
{'decoders': {}, 'encoders': {}, 'parsers': {}} | Below is the the instruction that describes the task:
### Input:
context manager to load plugin class(es) then unload on exit
Parameters
----------
classes: list
list of classes
category : None or str
if str, apply for single plugin category
Examples
--------
>>> from pprint import pprint
>>> class DecoderPlugin(object):
... plugin_name = 'example'
... plugin_descript = 'a decoder for dicts containing _example_ key'
... dict_signature = ('_example_',)
...
>>> with plugins_context([DecoderPlugin]):
... pprint(view_plugins())
{'decoders': {'example': 'a decoder for dicts containing _example_ key'},
'encoders': {},
'parsers': {}}
>>> pprint(view_plugins())
{'decoders': {}, 'encoders': {}, 'parsers': {}}
### Response:
def plugins_context(classes, category=None):
""" context manager to load plugin class(es) then unload on exit
Parameters
----------
classes: list
list of classes
category : None or str
if str, apply for single plugin category
Examples
--------
>>> from pprint import pprint
>>> class DecoderPlugin(object):
... plugin_name = 'example'
... plugin_descript = 'a decoder for dicts containing _example_ key'
... dict_signature = ('_example_',)
...
>>> with plugins_context([DecoderPlugin]):
... pprint(view_plugins())
{'decoders': {'example': 'a decoder for dicts containing _example_ key'},
'encoders': {},
'parsers': {}}
>>> pprint(view_plugins())
{'decoders': {}, 'encoders': {}, 'parsers': {}}
"""
original = {cat: list(_all_plugins[cat].keys()) for cat in _all_plugins}
load_plugin_classes(classes, category, overwrite=True)
# if errors:
# for cat in _all_plugins:
# for name, kls in list(_all_plugins[cat].items()):
# if name not in original[cat]:
# _all_plugins[cat].pop(name)
# raise RuntimeError(
# "errors occurred while loading plugins: {}".format(errors))
yield
for cat in _all_plugins:
for name, kls in list(_all_plugins[cat].items()):
if name not in original[cat]:
_all_plugins[cat].pop(name) |
def reload_napps(self, napps=None):
"""Reload a specific NApp or all Napps.
Args:
napp (list): NApp list to be reload.
Raises:
requests.HTTPError: When there's a server error.
"""
if napps is None:
napps = []
api = self._config.get('kytos', 'api')
endpoint = os.path.join(api, 'api', 'kytos', 'core', 'reload',
'all')
response = self.make_request(endpoint)
for napp in napps:
api = self._config.get('kytos', 'api')
endpoint = os.path.join(api, 'api', 'kytos', 'core', 'reload',
napp[0], napp[1])
response = self.make_request(endpoint)
if response.status_code != 200:
raise KytosException('Error reloading the napp: Module not founded'
' or could not be imported')
return response.content | Reload a specific NApp or all Napps.
Args:
napp (list): NApp list to be reload.
Raises:
requests.HTTPError: When there's a server error. | Below is the the instruction that describes the task:
### Input:
Reload a specific NApp or all Napps.
Args:
napp (list): NApp list to be reload.
Raises:
requests.HTTPError: When there's a server error.
### Response:
def reload_napps(self, napps=None):
"""Reload a specific NApp or all Napps.
Args:
napp (list): NApp list to be reload.
Raises:
requests.HTTPError: When there's a server error.
"""
if napps is None:
napps = []
api = self._config.get('kytos', 'api')
endpoint = os.path.join(api, 'api', 'kytos', 'core', 'reload',
'all')
response = self.make_request(endpoint)
for napp in napps:
api = self._config.get('kytos', 'api')
endpoint = os.path.join(api, 'api', 'kytos', 'core', 'reload',
napp[0], napp[1])
response = self.make_request(endpoint)
if response.status_code != 200:
raise KytosException('Error reloading the napp: Module not founded'
' or could not be imported')
return response.content |
def top_corner_label(self, label, position=None, rotation=0, offset=0.2,
**kwargs):
"""
Sets the label on the bottom axis.
Parameters
----------
label: String
The axis label
position: 3-Tuple of floats, None
The position of the text label
rotation: float, 0
The angle of rotation of the label
offset: float,
Used to compute the distance of the label from the axis
kwargs:
Any kwargs to pass through to matplotlib.
"""
if not position:
position = (-offset / 2, 1 + offset, 0)
self._corner_labels["top"] = (label, position, rotation, kwargs) | Sets the label on the bottom axis.
Parameters
----------
label: String
The axis label
position: 3-Tuple of floats, None
The position of the text label
rotation: float, 0
The angle of rotation of the label
offset: float,
Used to compute the distance of the label from the axis
kwargs:
Any kwargs to pass through to matplotlib. | Below is the the instruction that describes the task:
### Input:
Sets the label on the bottom axis.
Parameters
----------
label: String
The axis label
position: 3-Tuple of floats, None
The position of the text label
rotation: float, 0
The angle of rotation of the label
offset: float,
Used to compute the distance of the label from the axis
kwargs:
Any kwargs to pass through to matplotlib.
### Response:
def top_corner_label(self, label, position=None, rotation=0, offset=0.2,
**kwargs):
"""
Sets the label on the bottom axis.
Parameters
----------
label: String
The axis label
position: 3-Tuple of floats, None
The position of the text label
rotation: float, 0
The angle of rotation of the label
offset: float,
Used to compute the distance of the label from the axis
kwargs:
Any kwargs to pass through to matplotlib.
"""
if not position:
position = (-offset / 2, 1 + offset, 0)
self._corner_labels["top"] = (label, position, rotation, kwargs) |
def record_set_absent(name, zone_name, resource_group, connection_auth=None):
'''
.. versionadded:: Fluorine
Ensure a record set does not exist in the DNS zone.
:param name:
Name of the record set.
:param zone_name:
Name of the DNS zone.
:param resource_group:
The resource group assigned to the DNS zone.
:param connection_auth:
A dict with subscription and authentication parameters to be used in connecting to the
Azure Resource Manager API.
'''
ret = {
'name': name,
'result': False,
'comment': '',
'changes': {}
}
if not isinstance(connection_auth, dict):
ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
return ret
rec_set = __salt__['azurearm_dns.record_set_get'](
name,
zone_name,
resource_group,
azurearm_log_level='info',
**connection_auth
)
if 'error' in rec_set:
ret['result'] = True
ret['comment'] = 'Record set {0} was not found in zone {1}.'.format(name, zone_name)
return ret
elif __opts__['test']:
ret['comment'] = 'Record set {0} would be deleted.'.format(name)
ret['result'] = None
ret['changes'] = {
'old': rec_set,
'new': {},
}
return ret
deleted = __salt__['azurearm_dns.record_set_delete'](name, zone_name, resource_group, **connection_auth)
if deleted:
ret['result'] = True
ret['comment'] = 'Record set {0} has been deleted.'.format(name)
ret['changes'] = {
'old': rec_set,
'new': {}
}
return ret
ret['comment'] = 'Failed to delete record set {0}!'.format(name)
return ret | .. versionadded:: Fluorine
Ensure a record set does not exist in the DNS zone.
:param name:
Name of the record set.
:param zone_name:
Name of the DNS zone.
:param resource_group:
The resource group assigned to the DNS zone.
:param connection_auth:
A dict with subscription and authentication parameters to be used in connecting to the
Azure Resource Manager API. | Below is the the instruction that describes the task:
### Input:
.. versionadded:: Fluorine
Ensure a record set does not exist in the DNS zone.
:param name:
Name of the record set.
:param zone_name:
Name of the DNS zone.
:param resource_group:
The resource group assigned to the DNS zone.
:param connection_auth:
A dict with subscription and authentication parameters to be used in connecting to the
Azure Resource Manager API.
### Response:
def record_set_absent(name, zone_name, resource_group, connection_auth=None):
'''
.. versionadded:: Fluorine
Ensure a record set does not exist in the DNS zone.
:param name:
Name of the record set.
:param zone_name:
Name of the DNS zone.
:param resource_group:
The resource group assigned to the DNS zone.
:param connection_auth:
A dict with subscription and authentication parameters to be used in connecting to the
Azure Resource Manager API.
'''
ret = {
'name': name,
'result': False,
'comment': '',
'changes': {}
}
if not isinstance(connection_auth, dict):
ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
return ret
rec_set = __salt__['azurearm_dns.record_set_get'](
name,
zone_name,
resource_group,
azurearm_log_level='info',
**connection_auth
)
if 'error' in rec_set:
ret['result'] = True
ret['comment'] = 'Record set {0} was not found in zone {1}.'.format(name, zone_name)
return ret
elif __opts__['test']:
ret['comment'] = 'Record set {0} would be deleted.'.format(name)
ret['result'] = None
ret['changes'] = {
'old': rec_set,
'new': {},
}
return ret
deleted = __salt__['azurearm_dns.record_set_delete'](name, zone_name, resource_group, **connection_auth)
if deleted:
ret['result'] = True
ret['comment'] = 'Record set {0} has been deleted.'.format(name)
ret['changes'] = {
'old': rec_set,
'new': {}
}
return ret
ret['comment'] = 'Failed to delete record set {0}!'.format(name)
return ret |
def format(self, record):
"""Print out any 'extra' data provided in logs."""
if hasattr(record, 'data'):
return "%s. DEBUG DATA=%s" % (
logging.Formatter.format(self, record),
record.__dict__['data'])
return logging.Formatter.format(self, record) | Print out any 'extra' data provided in logs. | Below is the the instruction that describes the task:
### Input:
Print out any 'extra' data provided in logs.
### Response:
def format(self, record):
"""Print out any 'extra' data provided in logs."""
if hasattr(record, 'data'):
return "%s. DEBUG DATA=%s" % (
logging.Formatter.format(self, record),
record.__dict__['data'])
return logging.Formatter.format(self, record) |
def set_col_min_width(self, x: int, min_width: int):
"""Sets a minimum width for blocks in the column with coordinate x."""
if x < 0:
raise IndexError('x < 0')
self._min_widths[x] = min_width | Sets a minimum width for blocks in the column with coordinate x. | Below is the the instruction that describes the task:
### Input:
Sets a minimum width for blocks in the column with coordinate x.
### Response:
def set_col_min_width(self, x: int, min_width: int):
"""Sets a minimum width for blocks in the column with coordinate x."""
if x < 0:
raise IndexError('x < 0')
self._min_widths[x] = min_width |
def init_cli(self, app):
"""
Initialize the Flask CLI support in case it was enabled for the
app.
Works with both Flask>=1.0's CLI support as well as the backport
in the Flask-CLI package for Flask<1.0.
"""
# in case click isn't installed after all
if click is None:
raise RuntimeError('Cannot import click. Is it installed?')
# only add commands if we have a click context available
from .cli import add_commands
add_commands(app.cli, self) | Initialize the Flask CLI support in case it was enabled for the
app.
Works with both Flask>=1.0's CLI support as well as the backport
in the Flask-CLI package for Flask<1.0. | Below is the the instruction that describes the task:
### Input:
Initialize the Flask CLI support in case it was enabled for the
app.
Works with both Flask>=1.0's CLI support as well as the backport
in the Flask-CLI package for Flask<1.0.
### Response:
def init_cli(self, app):
"""
Initialize the Flask CLI support in case it was enabled for the
app.
Works with both Flask>=1.0's CLI support as well as the backport
in the Flask-CLI package for Flask<1.0.
"""
# in case click isn't installed after all
if click is None:
raise RuntimeError('Cannot import click. Is it installed?')
# only add commands if we have a click context available
from .cli import add_commands
add_commands(app.cli, self) |
def commandline_to_list(self, cmdline_str, trigger_string):
'''
cmdline_str is the string of the command line
trigger_string is the trigger string, to be removed
'''
cmdline = salt.utils.args.shlex_split(cmdline_str[len(trigger_string):])
# Remove slack url parsing
# Translate target=<http://host.domain.net|host.domain.net>
# to target=host.domain.net
cmdlist = []
for cmditem in cmdline:
pattern = r'(?P<begin>.*)(<.*\|)(?P<url>.*)(>)(?P<remainder>.*)'
mtch = re.match(pattern, cmditem)
if mtch:
origtext = mtch.group('begin') + mtch.group('url') + mtch.group('remainder')
cmdlist.append(origtext)
else:
cmdlist.append(cmditem)
return cmdlist | cmdline_str is the string of the command line
trigger_string is the trigger string, to be removed | Below is the the instruction that describes the task:
### Input:
cmdline_str is the string of the command line
trigger_string is the trigger string, to be removed
### Response:
def commandline_to_list(self, cmdline_str, trigger_string):
'''
cmdline_str is the string of the command line
trigger_string is the trigger string, to be removed
'''
cmdline = salt.utils.args.shlex_split(cmdline_str[len(trigger_string):])
# Remove slack url parsing
# Translate target=<http://host.domain.net|host.domain.net>
# to target=host.domain.net
cmdlist = []
for cmditem in cmdline:
pattern = r'(?P<begin>.*)(<.*\|)(?P<url>.*)(>)(?P<remainder>.*)'
mtch = re.match(pattern, cmditem)
if mtch:
origtext = mtch.group('begin') + mtch.group('url') + mtch.group('remainder')
cmdlist.append(origtext)
else:
cmdlist.append(cmditem)
return cmdlist |
def loadStructuredPoints(filename):
"""Load a ``vtkStructuredPoints`` object from file and return an ``Actor(vtkActor)`` object.
.. hint:: |readStructuredPoints| |readStructuredPoints.py|_
"""
reader = vtk.vtkStructuredPointsReader()
reader.SetFileName(filename)
reader.Update()
gf = vtk.vtkImageDataGeometryFilter()
gf.SetInputConnection(reader.GetOutputPort())
gf.Update()
return Actor(gf.GetOutput()) | Load a ``vtkStructuredPoints`` object from file and return an ``Actor(vtkActor)`` object.
.. hint:: |readStructuredPoints| |readStructuredPoints.py|_ | Below is the the instruction that describes the task:
### Input:
Load a ``vtkStructuredPoints`` object from file and return an ``Actor(vtkActor)`` object.
.. hint:: |readStructuredPoints| |readStructuredPoints.py|_
### Response:
def loadStructuredPoints(filename):
"""Load a ``vtkStructuredPoints`` object from file and return an ``Actor(vtkActor)`` object.
.. hint:: |readStructuredPoints| |readStructuredPoints.py|_
"""
reader = vtk.vtkStructuredPointsReader()
reader.SetFileName(filename)
reader.Update()
gf = vtk.vtkImageDataGeometryFilter()
gf.SetInputConnection(reader.GetOutputPort())
gf.Update()
return Actor(gf.GetOutput()) |
def write(self, buf):
'''
Write buffer data into logger/stdout
'''
for line in buf.rstrip().splitlines():
self.orig_stdout.write(line.rstrip() + '\n')
self.orig_stdout.flush()
try:
self.logger.log(self.log_level, line.rstrip())
except Exception as e:
pass | Write buffer data into logger/stdout | Below is the the instruction that describes the task:
### Input:
Write buffer data into logger/stdout
### Response:
def write(self, buf):
'''
Write buffer data into logger/stdout
'''
for line in buf.rstrip().splitlines():
self.orig_stdout.write(line.rstrip() + '\n')
self.orig_stdout.flush()
try:
self.logger.log(self.log_level, line.rstrip())
except Exception as e:
pass |
def dumps_etree(pid, record, **kwargs):
"""Dump MARC21 compatible record.
:param pid: The :class:`invenio_pidstore.models.PersistentIdentifier`
instance.
:param record: The :class:`invenio_records.api.Record` instance.
:returns: A LXML Element instance.
"""
from dojson.contrib.to_marc21 import to_marc21
from dojson.contrib.to_marc21.utils import dumps_etree
return dumps_etree(to_marc21.do(record['_source']), **kwargs) | Dump MARC21 compatible record.
:param pid: The :class:`invenio_pidstore.models.PersistentIdentifier`
instance.
:param record: The :class:`invenio_records.api.Record` instance.
:returns: A LXML Element instance. | Below is the the instruction that describes the task:
### Input:
Dump MARC21 compatible record.
:param pid: The :class:`invenio_pidstore.models.PersistentIdentifier`
instance.
:param record: The :class:`invenio_records.api.Record` instance.
:returns: A LXML Element instance.
### Response:
def dumps_etree(pid, record, **kwargs):
"""Dump MARC21 compatible record.
:param pid: The :class:`invenio_pidstore.models.PersistentIdentifier`
instance.
:param record: The :class:`invenio_records.api.Record` instance.
:returns: A LXML Element instance.
"""
from dojson.contrib.to_marc21 import to_marc21
from dojson.contrib.to_marc21.utils import dumps_etree
return dumps_etree(to_marc21.do(record['_source']), **kwargs) |
def iter_parsed_values(self, field: Field) -> Iterable[Tuple[str, Any]]:
"""
Walk the dictionary of parsers and emit all non-null values.
"""
for key, func in self.parsers.items():
value = func(field)
if not value:
continue
yield key, value | Walk the dictionary of parsers and emit all non-null values. | Below is the the instruction that describes the task:
### Input:
Walk the dictionary of parsers and emit all non-null values.
### Response:
def iter_parsed_values(self, field: Field) -> Iterable[Tuple[str, Any]]:
"""
Walk the dictionary of parsers and emit all non-null values.
"""
for key, func in self.parsers.items():
value = func(field)
if not value:
continue
yield key, value |
def _convert_json_response_to_entities(response, property_resolver, require_encryption,
key_encryption_key, key_resolver):
''' Converts the response to tables class.
'''
if response is None or response.body is None:
return None
entities = _list()
entities.next_marker = _get_continuation_from_response_headers(response)
root = loads(response.body.decode('utf-8'))
if 'value' in root:
for entity in root['value']:
entity = _decrypt_and_deserialize_entity(entity, property_resolver, require_encryption,
key_encryption_key, key_resolver)
entities.append(entity)
else:
entities.append(_convert_json_to_entity(entity,
property_resolver))
return entities | Converts the response to tables class. | Below is the the instruction that describes the task:
### Input:
Converts the response to tables class.
### Response:
def _convert_json_response_to_entities(response, property_resolver, require_encryption,
key_encryption_key, key_resolver):
''' Converts the response to tables class.
'''
if response is None or response.body is None:
return None
entities = _list()
entities.next_marker = _get_continuation_from_response_headers(response)
root = loads(response.body.decode('utf-8'))
if 'value' in root:
for entity in root['value']:
entity = _decrypt_and_deserialize_entity(entity, property_resolver, require_encryption,
key_encryption_key, key_resolver)
entities.append(entity)
else:
entities.append(_convert_json_to_entity(entity,
property_resolver))
return entities |
def _add_child(self, collection, set, child):
"""Adds 'child' to 'collection', first checking 'set' to see if it's
already present."""
added = None
for c in child:
if c not in set:
set.add(c)
collection.append(c)
added = 1
if added:
self._children_reset() | Adds 'child' to 'collection', first checking 'set' to see if it's
already present. | Below is the the instruction that describes the task:
### Input:
Adds 'child' to 'collection', first checking 'set' to see if it's
already present.
### Response:
def _add_child(self, collection, set, child):
"""Adds 'child' to 'collection', first checking 'set' to see if it's
already present."""
added = None
for c in child:
if c not in set:
set.add(c)
collection.append(c)
added = 1
if added:
self._children_reset() |
def from_string(cls, serialized):
"""Deprecated. Use :meth:`locator.CourseLocator.from_string`."""
warnings.warn(
"SlashSeparatedCourseKey is deprecated! Please use locator.CourseLocator",
DeprecationWarning,
stacklevel=2
)
return CourseLocator.from_string(serialized) | Deprecated. Use :meth:`locator.CourseLocator.from_string`. | Below is the the instruction that describes the task:
### Input:
Deprecated. Use :meth:`locator.CourseLocator.from_string`.
### Response:
def from_string(cls, serialized):
"""Deprecated. Use :meth:`locator.CourseLocator.from_string`."""
warnings.warn(
"SlashSeparatedCourseKey is deprecated! Please use locator.CourseLocator",
DeprecationWarning,
stacklevel=2
)
return CourseLocator.from_string(serialized) |
def get_reference_section_beginning(fulltext):
"""Get start of reference section."""
sect_start = {
'start_line': None,
'end_line': None,
'title_string': None,
'marker_pattern': None,
'marker': None,
'how_found_start': None,
}
# Find start of refs section:
sect_start = find_reference_section(fulltext)
if sect_start is not None:
sect_start['how_found_start'] = 1
else:
# No references found - try with no title option
sect_start = find_reference_section_no_title_via_brackets(fulltext)
if sect_start is not None:
sect_start['how_found_start'] = 2
# Try weaker set of patterns if needed
if sect_start is None:
# No references found - try with no title option (with weaker
# patterns..)
sect_start = find_reference_section_no_title_via_dots(fulltext)
if sect_start is not None:
sect_start['how_found_start'] = 3
if sect_start is None:
# No references found - try with no title option (with even
# weaker patterns..)
sect_start = find_reference_section_no_title_via_numbers(
fulltext)
if sect_start is not None:
sect_start['how_found_start'] = 4
if sect_start:
current_app.logger.debug('* title %r' % sect_start['title_string'])
current_app.logger.debug('* marker %r' % sect_start['marker'])
current_app.logger.debug('* title_marker_same_line %s'
% sect_start['title_marker_same_line'])
else:
current_app.logger.debug('* could not find references section')
return sect_start | Get start of reference section. | Below is the the instruction that describes the task:
### Input:
Get start of reference section.
### Response:
def get_reference_section_beginning(fulltext):
"""Get start of reference section."""
sect_start = {
'start_line': None,
'end_line': None,
'title_string': None,
'marker_pattern': None,
'marker': None,
'how_found_start': None,
}
# Find start of refs section:
sect_start = find_reference_section(fulltext)
if sect_start is not None:
sect_start['how_found_start'] = 1
else:
# No references found - try with no title option
sect_start = find_reference_section_no_title_via_brackets(fulltext)
if sect_start is not None:
sect_start['how_found_start'] = 2
# Try weaker set of patterns if needed
if sect_start is None:
# No references found - try with no title option (with weaker
# patterns..)
sect_start = find_reference_section_no_title_via_dots(fulltext)
if sect_start is not None:
sect_start['how_found_start'] = 3
if sect_start is None:
# No references found - try with no title option (with even
# weaker patterns..)
sect_start = find_reference_section_no_title_via_numbers(
fulltext)
if sect_start is not None:
sect_start['how_found_start'] = 4
if sect_start:
current_app.logger.debug('* title %r' % sect_start['title_string'])
current_app.logger.debug('* marker %r' % sect_start['marker'])
current_app.logger.debug('* title_marker_same_line %s'
% sect_start['title_marker_same_line'])
else:
current_app.logger.debug('* could not find references section')
return sect_start |
def show_time_as_short_string(self, seconds):
"""
converts seconds to a string in terms of
seconds -> years to show complexity of algorithm
"""
if seconds < 60:
return str(seconds) + ' seconds'
elif seconds < 3600:
return str(round(seconds/60, 1)) + ' minutes'
elif seconds < 3600*24:
return str(round(seconds/(60*24), 1)) + ' hours'
elif seconds < 3600*24*365:
return str(round(seconds/(3600*24), 1)) + ' days'
else:
print('WARNING - this will take ' + str(seconds/(60*24*365)) + ' YEARS to run' )
return str(round(seconds/(60*24*365), 1)) + ' years' | converts seconds to a string in terms of
seconds -> years to show complexity of algorithm | Below is the the instruction that describes the task:
### Input:
converts seconds to a string in terms of
seconds -> years to show complexity of algorithm
### Response:
def show_time_as_short_string(self, seconds):
"""
converts seconds to a string in terms of
seconds -> years to show complexity of algorithm
"""
if seconds < 60:
return str(seconds) + ' seconds'
elif seconds < 3600:
return str(round(seconds/60, 1)) + ' minutes'
elif seconds < 3600*24:
return str(round(seconds/(60*24), 1)) + ' hours'
elif seconds < 3600*24*365:
return str(round(seconds/(3600*24), 1)) + ' days'
else:
print('WARNING - this will take ' + str(seconds/(60*24*365)) + ' YEARS to run' )
return str(round(seconds/(60*24*365), 1)) + ' years' |
def get_max_seq_len(self) -> Optional[int]:
"""
:return: The maximum length supported by the encoder if such a restriction exists.
"""
max_seq_len = min((encoder.get_max_seq_len()
for encoder in self.encoders if encoder.get_max_seq_len() is not None), default=None)
return max_seq_len | :return: The maximum length supported by the encoder if such a restriction exists. | Below is the the instruction that describes the task:
### Input:
:return: The maximum length supported by the encoder if such a restriction exists.
### Response:
def get_max_seq_len(self) -> Optional[int]:
"""
:return: The maximum length supported by the encoder if such a restriction exists.
"""
max_seq_len = min((encoder.get_max_seq_len()
for encoder in self.encoders if encoder.get_max_seq_len() is not None), default=None)
return max_seq_len |
def _css_select(soup, css_selector):
""" Returns the content of the element pointed by the CSS selector,
or an empty string if not found """
selection = soup.select(css_selector)
if len(selection) > 0:
if hasattr(selection[0], 'text'):
retour = selection[0].text.strip()
else:
retour = ""
else:
retour = ""
return retour | Returns the content of the element pointed by the CSS selector,
or an empty string if not found | Below is the the instruction that describes the task:
### Input:
Returns the content of the element pointed by the CSS selector,
or an empty string if not found
### Response:
def _css_select(soup, css_selector):
""" Returns the content of the element pointed by the CSS selector,
or an empty string if not found """
selection = soup.select(css_selector)
if len(selection) > 0:
if hasattr(selection[0], 'text'):
retour = selection[0].text.strip()
else:
retour = ""
else:
retour = ""
return retour |
def from_polar(r, theta, phi):
    """Convert ``(r, theta, phi)`` to Cartesian coordinates ``[x y z]``.
    ``r`` - vector length
    ``theta`` - angle above (+) or below (-) the xy-plane
    ``phi`` - angle around the z-axis
    The meaning and order of the three polar parameters is designed to
    match both ISO 31-11 and the traditional order used by physicists.
    Mathematicians usually define ``theta`` and ``phi`` the other way
    around, and may need to use caution when calling this function.
    See: https://en.wikipedia.org/wiki/Spherical_coordinate_system
    """
    # Projection of the vector onto the xy-plane, then split by phi.
    planar = r * cos(theta)
    z = r * sin(theta)
    return array((planar * cos(phi), planar * sin(phi), z))
``r`` - vector length
``theta`` - angle above (+) or below (-) the xy-plane
``phi`` - angle around the z-axis
The meaning and order of the three polar parameters is designed to
match both ISO 31-11 and the traditional order used by physicists.
Mathematicians usually define ``theta`` and ``phi`` the other way
around, and may need to use caution when calling this function.
See: https://en.wikipedia.org/wiki/Spherical_coordinate_system | Below is the the instruction that describes the task:
### Input:
Convert ``(r, theta, phi)`` to Cartesian coordinates ``[x y z]``.
``r`` - vector length
``theta`` - angle above (+) or below (-) the xy-plane
``phi`` - angle around the z-axis
The meaning and order of the three polar parameters is designed to
match both ISO 31-11 and the traditional order used by physicists.
Mathematicians usually define ``theta`` and ``phi`` the other way
around, and may need to use caution when calling this function.
See: https://en.wikipedia.org/wiki/Spherical_coordinate_system
### Response:
def from_polar(r, theta, phi):
"""Convert ``(r, theta, phi)`` to Cartesian coordinates ``[x y z]``.
``r`` - vector length
``theta`` - angle above (+) or below (-) the xy-plane
``phi`` - angle around the z-axis
The meaning and order of the three polar parameters is designed to
match both ISO 31-11 and the traditional order used by physicists.
Mathematicians usually define ``theta`` and ``phi`` the other way
around, and may need to use caution when calling this function.
See: https://en.wikipedia.org/wiki/Spherical_coordinate_system
"""
rxy = r * cos(theta)
return array((rxy * cos(phi), rxy * sin(phi), r * sin(theta))) |
def register_gym_env(class_entry_point, version="v0", kwargs=None):
    """Register *class_entry_point* with Gym; return (registered name, env).

    :param class_entry_point: entry point string of the form 'module:Class'
    :param version: version suffix required by gym's naming scheme
    :param kwargs: optional kwargs forwarded to gym's registry
    """
    parts = class_entry_point.split(":")
    assert len(parts) == 2
    # We have to add the version to conform to gym's API.
    env_name = "T2TEnv-{}-{}".format(parts[1], version)
    gym.envs.register(id=env_name, entry_point=class_entry_point, kwargs=kwargs)
    tf.logging.info("Entry Point [%s] registered with id [%s]", class_entry_point,
                    env_name)
    return env_name, gym.make(env_name)
### Input:
Registers the class in Gym and returns the registered name and the env.
### Response:
def register_gym_env(class_entry_point, version="v0", kwargs=None):
"""Registers the class in Gym and returns the registered name and the env."""
split_on_colon = class_entry_point.split(":")
assert len(split_on_colon) == 2
class_name = split_on_colon[1]
# We have to add the version to conform to gym's API.
env_name = "T2TEnv-{}-{}".format(class_name, version)
gym.envs.register(id=env_name, entry_point=class_entry_point, kwargs=kwargs)
tf.logging.info("Entry Point [%s] registered with id [%s]", class_entry_point,
env_name)
return env_name, gym.make(env_name) |
def to_frame(self):
    """Serialize this object into its on-wire frame format as a bytearray."""
    self.sanitize()
    # Header: control byte, 16-bit source and destination addresses,
    # then a flags/length byte.
    frame = [
        (1 << 7) + (1 << 4) + (self.repeat << 5) + (self.priority << 2),
        self.src_addr >> 8,
        self.src_addr % 0x100,
        self.dst_addr >> 8,
        self.dst_addr % 0x100,
        (self.multicast << 7) + (self.routing << 4) + self.length,
    ]
    # Payload: length - 1 data bytes.
    frame.extend(self.data[i] for i in range(self.length - 1))
    # Trailing checksum over the 5 + length preceding bytes, modulo 256.
    frame.append(sum(frame[:5 + self.length]) % 0x100)
    return bytearray(frame)
### Input:
Convert the object to its frame format.
### Response:
def to_frame(self):
"""Convert the object to its frame format."""
self.sanitize()
res = []
res.append((1 << 7) + (1 << 4) + (self.repeat << 5) +
(self.priority << 2))
res.append(self.src_addr >> 8)
res.append(self.src_addr % 0x100)
res.append(self.dst_addr >> 8)
res.append(self.dst_addr % 0x100)
res.append((self.multicast << 7) + (self.routing << 4) + self.length)
for i in range(0, self.length - 1):
res.append(self.data[i])
checksum = 0
for i in range(0, 5 + self.length):
checksum += res[i]
res.append(checksum % 0x100)
return bytearray(res) |
def get_output(self, assets_by_taxo, haz, rlzi=None):
    """
    :param assets_by_taxo: a dictionary taxonomy index -> assets on a site
    :param haz: an array or a dictionary of hazard on that site
    :param rlzi: if given, a realization index
    :returns: an ArrayWrapper whose dictionary holds the event IDs under
        'eids', optionally the realization index under 'rlzi', and one
        loss array per loss type
    """
    if isinstance(haz, numpy.ndarray):
        # NB: in GMF-based calculations the order in which
        # the gmfs are stored is random since it depends on
        # which hazard task ends first; here we reorder
        # the gmfs by event ID; this is convenient in
        # general and mandatory for the case of
        # VulnerabilityFunctionWithPMF, otherwise the
        # sample method would receive the means in random
        # order and produce random results even if the
        # seed is set correctly; very tricky indeed! (MS)
        haz.sort(order='eid')
        eids = haz['eid']
        data = haz['gmv'] # shape (E, M)
    elif not haz: # no hazard for this site
        eids = numpy.arange(1)
        data = []
    else: # classical
        eids = []
        data = haz # shape M
    dic = dict(eids=eids)
    if rlzi is not None:
        dic['rlzi'] = rlzi
    # Build one loss array per loss type: compute losses taxonomy by
    # taxonomy, concatenate, then restore the original asset ordering.
    for l, lt in enumerate(self.loss_types):
        ls = []
        for taxonomy, assets_ in assets_by_taxo.items():
            if len(assets_by_taxo.eps):
                # epsilons are present (the 'no CoVs' case is handled below)
                epsilons = assets_by_taxo.eps[taxonomy][:, eids]
            else: # no CoVs
                epsilons = ()
            rm = self[taxonomy]
            if len(data) == 0:
                dat = [0]
            elif len(eids): # gmfs
                dat = data[:, rm.imti[lt]]
            else: # hcurves
                dat = data[rm.imti[lt]]
            ls.append(rm(lt, assets_, dat, eids, epsilons))
        arr = numpy.concatenate(ls)
        # reorder the concatenated losses back to the original asset order
        dic[lt] = arr[assets_by_taxo.idxs] if len(arr) else arr
    return hdf5.ArrayWrapper((), dic)
:param haz: an array or a dictionary of hazard on that site
:param rlzi: if given, a realization index | Below is the the instruction that describes the task:
### Input:
:param assets_by_taxo: a dictionary taxonomy index -> assets on a site
:param haz: an array or a dictionary of hazard on that site
:param rlzi: if given, a realization index
### Response:
def get_output(self, assets_by_taxo, haz, rlzi=None):
"""
:param assets_by_taxo: a dictionary taxonomy index -> assets on a site
:param haz: an array or a dictionary of hazard on that site
:param rlzi: if given, a realization index
"""
if isinstance(haz, numpy.ndarray):
# NB: in GMF-based calculations the order in which
# the gmfs are stored is random since it depends on
# which hazard task ends first; here we reorder
# the gmfs by event ID; this is convenient in
# general and mandatory for the case of
# VulnerabilityFunctionWithPMF, otherwise the
# sample method would receive the means in random
# order and produce random results even if the
# seed is set correctly; very tricky indeed! (MS)
haz.sort(order='eid')
eids = haz['eid']
data = haz['gmv'] # shape (E, M)
elif not haz: # no hazard for this site
eids = numpy.arange(1)
data = []
else: # classical
eids = []
data = haz # shape M
dic = dict(eids=eids)
if rlzi is not None:
dic['rlzi'] = rlzi
for l, lt in enumerate(self.loss_types):
ls = []
for taxonomy, assets_ in assets_by_taxo.items():
if len(assets_by_taxo.eps):
epsilons = assets_by_taxo.eps[taxonomy][:, eids]
else: # no CoVs
epsilons = ()
rm = self[taxonomy]
if len(data) == 0:
dat = [0]
elif len(eids): # gmfs
dat = data[:, rm.imti[lt]]
else: # hcurves
dat = data[rm.imti[lt]]
ls.append(rm(lt, assets_, dat, eids, epsilons))
arr = numpy.concatenate(ls)
dic[lt] = arr[assets_by_taxo.idxs] if len(arr) else arr
return hdf5.ArrayWrapper((), dic) |
def bind(self, instance, auto=False):
    """
    Bind deps to instance
    :param instance: the object whose methods get their declared
        dependencies injected as keyword arguments
    :param auto: follow update of DI and refresh binds once we will get something new
    :return: the same instance, for chaining
    """
    # Collect (name, function) pairs for every function defined in any
    # class of the instance's MRO (inherited methods included).
    methods = [
        (m, cls.__dict__[m])
        for cls in inspect.getmro(type(instance))
        for m in cls.__dict__ if inspect.isfunction(cls.__dict__[m])
    ]
    try:
        # Resolve the declared dependencies of each method, then invoke
        # only the methods that actually have dependencies to inject.
        deps_of_endpoints = [(method_ptr, self.entrypoint_deps(method_ptr))
                             for (method_name, method_ptr) in methods]
        for (method_ptr, method_deps) in deps_of_endpoints:
            if len(method_deps) > 0:
                method_ptr(instance, **method_deps)
    except KeyError:
        # NOTE(review): a KeyError silently aborts the remaining binds —
        # presumably raised for an unknown dependency; confirm intended.
        pass
    if auto and instance not in self.current_scope.get_auto_bind_list():
        # Register the instance so future DI updates re-run the binding.
        self.current_scope.auto_bind(instance)
    return instance
:param instance:
:param auto: follow update of DI and refresh binds once we will get something new
:return: | Below is the the instruction that describes the task:
### Input:
Bind deps to instance
:param instance:
:param auto: follow update of DI and refresh binds once we will get something new
:return:
### Response:
def bind(self, instance, auto=False):
"""
Bind deps to instance
:param instance:
:param auto: follow update of DI and refresh binds once we will get something new
:return:
"""
methods = [
(m, cls.__dict__[m])
for cls in inspect.getmro(type(instance))
for m in cls.__dict__ if inspect.isfunction(cls.__dict__[m])
]
try:
deps_of_endpoints = [(method_ptr, self.entrypoint_deps(method_ptr))
for (method_name, method_ptr) in methods]
for (method_ptr, method_deps) in deps_of_endpoints:
if len(method_deps) > 0:
method_ptr(instance, **method_deps)
except KeyError:
pass
if auto and instance not in self.current_scope.get_auto_bind_list():
self.current_scope.auto_bind(instance)
return instance |
def _compile(self, **kwargs):
    '''Compile the Theano functions for evaluating and updating our model.
    '''
    util.log('compiling evaluation function')
    # Evaluation only applies the model's bookkeeping updates.
    self.f_eval = theano.function(
        self._inputs, self._monitor_exprs, updates=self._updates,
        name='evaluation')
    label = self.__class__.__name__
    util.log('compiling {} optimizer'.format(click.style(label, fg='red')))
    # The step function additionally applies the optimizer's updates.
    all_updates = list(self._updates) + list(self.get_updates(**kwargs))
    self.f_step = theano.function(
        self._inputs, self._monitor_exprs, updates=all_updates, name=label)
### Input:
Compile the Theano functions for evaluating and updating our model.
### Response:
def _compile(self, **kwargs):
'''Compile the Theano functions for evaluating and updating our model.
'''
util.log('compiling evaluation function')
self.f_eval = theano.function(self._inputs,
self._monitor_exprs,
updates=self._updates,
name='evaluation')
label = self.__class__.__name__
util.log('compiling {} optimizer'.format(click.style(label, fg='red')))
updates = list(self._updates) + list(self.get_updates(**kwargs))
self.f_step = theano.function(self._inputs,
self._monitor_exprs,
updates=updates,
name=label) |
def add_atype(self):
    """Add the currently selected atype and store it in ``self.atypes``.

    :returns: None
    :rtype: None
    :raises: None
    """
    index = self.atype_tablev.currentIndex()
    selected = index.internalPointer()
    if not selected:
        return
    atype = selected.internal_data()
    # Link the atype to the current project and track it locally.
    atype.projects.add(self._project)
    self.atypes.append(atype)
    selected.set_parent(None)
:returns: None
:rtype: None
:raises: None | Below is the the instruction that describes the task:
### Input:
Add a atype and store it in the self.atypes
:returns: None
:rtype: None
:raises: None
### Response:
def add_atype(self, ):
"""Add a atype and store it in the self.atypes
:returns: None
:rtype: None
:raises: None
"""
i = self.atype_tablev.currentIndex()
item = i.internalPointer()
if item:
atype = item.internal_data()
atype.projects.add(self._project)
self.atypes.append(atype)
item.set_parent(None) |
def delete_managed_disk(call=None, kwargs=None):  # pylint: disable=unused-argument
    '''
    Delete a managed disk from a resource group.
    '''
    compute_conn = get_conn(client_type='compute')
    try:
        compute_conn.disks.delete(kwargs['resource_group'], kwargs['blob'])
    except Exception as exc:
        # Log and report failure instead of propagating the SDK error.
        log.error('Error deleting managed disk %s - %s', kwargs.get('blob'), six.text_type(exc))
        return False
    else:
        return True
### Input:
Delete a managed disk from a resource group.
### Response:
def delete_managed_disk(call=None, kwargs=None): # pylint: disable=unused-argument
'''
Delete a managed disk from a resource group.
'''
compconn = get_conn(client_type='compute')
try:
compconn.disks.delete(kwargs['resource_group'], kwargs['blob'])
except Exception as exc:
log.error('Error deleting managed disk %s - %s', kwargs.get('blob'), six.text_type(exc))
return False
return True |
def runctx(self, cmd, globals, locals):
    """Similar to profile.Profile.runctx ."""
    # Execute the statement while this profiler/context object is active.
    ctx = self()
    with ctx:
        exec(cmd, globals, locals)
    return self
### Input:
Similar to profile.Profile.runctx .
### Response:
def runctx(self, cmd, globals, locals):
"""Similar to profile.Profile.runctx ."""
with self():
exec(cmd, globals, locals)
return self |
def ovsdb_server_method(self, **kwargs):
    """Auto Generated Code
    """
    # Build the <config>/<ovsdb-server> XML payload with the given
    # name and method, then hand it to the callback.
    config = ET.Element("config")
    server = ET.SubElement(config, "ovsdb-server", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    ET.SubElement(server, "name").text = kwargs.pop('name')
    ET.SubElement(server, "method").text = kwargs.pop('method')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
### Input:
Auto Generated Code
### Response:
def ovsdb_server_method(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
ovsdb_server = ET.SubElement(config, "ovsdb-server", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
name_key = ET.SubElement(ovsdb_server, "name")
name_key.text = kwargs.pop('name')
method = ET.SubElement(ovsdb_server, "method")
method.text = kwargs.pop('method')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def forward_tcp(self, host, port):
    """Open a connection to host:port via an ssh tunnel.
    Args:
        host (str): The host to connect to.
        port (int): The port to connect to.
    Returns:
        A socket-like object that is connected to the provided host:port.
    """
    destination = (host, port)
    source = self.transport.getpeername()
    return self.transport.open_channel('direct-tcpip', destination, source)
Args:
host (str): The host to connect to.
port (int): The port to connect to.
Returns:
A socket-like object that is connected to the provided host:port. | Below is the the instruction that describes the task:
### Input:
Open a connection to host:port via an ssh tunnel.
Args:
host (str): The host to connect to.
port (int): The port to connect to.
Returns:
A socket-like object that is connected to the provided host:port.
### Response:
def forward_tcp(self, host, port):
"""Open a connection to host:port via an ssh tunnel.
Args:
host (str): The host to connect to.
port (int): The port to connect to.
Returns:
A socket-like object that is connected to the provided host:port.
"""
return self.transport.open_channel(
'direct-tcpip',
(host, port),
self.transport.getpeername()
) |
def mutate(self, info_in):
    """Replicate an info + mutation.
    To mutate an info, that info must have a method called
    ``_mutated_contents``.
    """
    # A failed node cannot produce mutations.
    if self.failed:
        raise ValueError("{} cannot mutate as it has failed.".format(self))
    from transformations import Mutation
    # Create the mutated copy and record the mutation transformation.
    info_out = type(info_in)(origin=self, contents=info_in._mutated_contents())
    Mutation(info_in=info_in, info_out=info_out)
To mutate an info, that info must have a method called
``_mutated_contents``. | Below is the the instruction that describes the task:
### Input:
Replicate an info + mutation.
To mutate an info, that info must have a method called
``_mutated_contents``.
### Response:
def mutate(self, info_in):
"""Replicate an info + mutation.
To mutate an info, that info must have a method called
``_mutated_contents``.
"""
# check self is not failed
if self.failed:
raise ValueError("{} cannot mutate as it has failed.".format(self))
from transformations import Mutation
info_out = type(info_in)(origin=self,
contents=info_in._mutated_contents())
Mutation(info_in=info_in, info_out=info_out) |
def get_proxy_for_requests(self, url):
    """
    Get proxy configuration for a given URL, in a form ready to use with the Requests library.
    :param str url: The URL for which to obtain proxy configuration.
    :returns: Proxy configuration in a form recognized by Requests, for use with the ``proxies`` parameter.
    :rtype: dict
    :raises ProxyConfigExhaustedError: If no proxy is configured or available,
        and 'DIRECT' is not configured as a fallback.
    """
    proxy = self.get_proxy(url)
    if proxy:
        return proxy_parameter_for_requests(proxy)
    raise ProxyConfigExhaustedError(url)
:param str url: The URL for which to obtain proxy configuration.
:returns: Proxy configuration in a form recognized by Requests, for use with the ``proxies`` parameter.
:rtype: dict
:raises ProxyConfigExhaustedError: If no proxy is configured or available,
and 'DIRECT' is not configured as a fallback. | Below is the the instruction that describes the task:
### Input:
Get proxy configuration for a given URL, in a form ready to use with the Requests library.
:param str url: The URL for which to obtain proxy configuration.
:returns: Proxy configuration in a form recognized by Requests, for use with the ``proxies`` parameter.
:rtype: dict
:raises ProxyConfigExhaustedError: If no proxy is configured or available,
and 'DIRECT' is not configured as a fallback.
### Response:
def get_proxy_for_requests(self, url):
"""
Get proxy configuration for a given URL, in a form ready to use with the Requests library.
:param str url: The URL for which to obtain proxy configuration.
:returns: Proxy configuration in a form recognized by Requests, for use with the ``proxies`` parameter.
:rtype: dict
:raises ProxyConfigExhaustedError: If no proxy is configured or available,
and 'DIRECT' is not configured as a fallback.
"""
proxy = self.get_proxy(url)
if not proxy:
raise ProxyConfigExhaustedError(url)
return proxy_parameter_for_requests(proxy) |
def _read_varint(self):
    """Read exactly a varint out of the underlying file."""
    # Read a maximal varint-sized chunk, decode, and push back the
    # bytes the decoder did not consume.
    raw = self._read(8)
    value, consumed = _DecodeVarint(raw, 0)
    self._unread(raw[consumed:])
    return value
### Input:
Read exactly a varint out of the underlying file.
### Response:
def _read_varint(self):
"""Read exactly a varint out of the underlying file."""
buf = self._read(8)
(n, l) = _DecodeVarint(buf, 0)
self._unread(buf[l:])
return n |
def eeg_add_channel(raw, channel, sync_index_eeg=0, sync_index_channel=0, channel_type=None, channel_name=None):
    """
    Add a channel to a mne's Raw m/eeg file. It will basically synchronize the channel to the eeg data following a particular index and add it.
    Parameters
    ----------
    raw : mne.io.Raw
        Raw EEG data.
    channel : list or numpy.array
        The channel to be added.
    sync_index_eeg : int or list
        An index, in the raw data, by which to align the two inputs.
    sync_index_channel : int or list
        An index, in the channel to add, by which to align the two inputs.
    channel_type : str
        Channel type. Currently supported fields are 'ecg', 'bio', 'stim', 'eog', 'misc', 'seeg', 'ecog', 'mag', 'eeg', 'ref_meg', 'grad', 'emg', 'hbr' or 'hbo'.
    channel_name : str
        Name for the added channel; defaults to the Series name or 'Added_Channel'.
    Returns
    ----------
    raw : mne.io.Raw
        Raw data in FIF format.
    Example
    ----------
    >>> import neurokit as nk
    >>> event_index_in_eeg = 42
    >>> event_index_in_ecg = 666
    >>> raw = nk.eeg_add_channel(raw, ecg, sync_index_raw=event_index_in_eeg, sync_index_channel=event_index_in_ecg, channel_type="ecg")
    Notes
    ----------
    *Authors*
    - `Dominique Makowski <https://dominiquemakowski.github.io/>`_
    *Dependencies*
    - mne
    *See Also*
    - mne: http://martinos.org/mne/dev/index.html
    """
    # Derive a channel name from the pandas Series name when none is given.
    if channel_name is None:
        if isinstance(channel, pd.core.series.Series) and channel.name is not None:
            channel_name = channel.name
        else:
            channel_name = "Added_Channel"
    # Compute the offset between the two sync indices and shift the channel
    # so both events line up, padding the freed positions with NaNs.
    diff = sync_index_channel - sync_index_eeg
    if diff > 0:
        # Channel event occurs later: drop leading samples, pad the end.
        channel = list(channel)[diff:] + [np.nan] * diff
    elif diff < 0:
        # BUG FIX: the original code prepended ``[np.nan]*diff`` with a
        # negative diff, which is the empty list, so no padding ever
        # happened and the channel was left misaligned; pad with
        # abs(diff) NaNs instead. (The subsequent self-slice was a no-op
        # and has been removed.)
        channel = [np.nan] * abs(diff) + list(channel)
    # Adjust to the raw size: pad with NaNs or crop.
    if len(channel) < len(raw):
        channel = list(channel) + [np.nan] * (len(raw) - len(channel))
    else:
        channel = list(channel)[0:len(raw)]  # Crop to fit the raw data
    info = mne.create_info([channel_name], raw.info["sfreq"], ch_types=channel_type)
    channel = mne.io.RawArray([channel], info)
    raw.add_channels([channel], force_update_info=True)
    return raw
Parameters
----------
raw : mne.io.Raw
Raw EEG data.
channel : list or numpy.array
The channel to be added.
sync_index_eeg : int or list
An index, in the raw data, by which to align the two inputs.
sync_index_channel : int or list
An index, in the channel to add, by which to align the two inputs.
channel_type : str
Channel type. Currently supported fields are 'ecg', 'bio', 'stim', 'eog', 'misc', 'seeg', 'ecog', 'mag', 'eeg', 'ref_meg', 'grad', 'emg', 'hbr' or 'hbo'.
Returns
----------
raw : mne.io.Raw
Raw data in FIF format.
Example
----------
>>> import neurokit as nk
>>> event_index_in_eeg = 42
>>> event_index_in_ecg = 666
>>> raw = nk.eeg_add_channel(raw, ecg, sync_index_raw=event_index_in_eeg, sync_index_channel=event_index_in_ecg, channel_type="ecg")
Notes
----------
*Authors*
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
*Dependencies*
- mne
*See Also*
- mne: http://martinos.org/mne/dev/index.html | Below is the the instruction that describes the task:
### Input:
Add a channel to a mne's Raw m/eeg file. It will basically synchronize the channel to the eeg data following a particular index and add it.
Parameters
----------
raw : mne.io.Raw
Raw EEG data.
channel : list or numpy.array
The channel to be added.
sync_index_eeg : int or list
An index, in the raw data, by which to align the two inputs.
sync_index_channel : int or list
An index, in the channel to add, by which to align the two inputs.
channel_type : str
Channel type. Currently supported fields are 'ecg', 'bio', 'stim', 'eog', 'misc', 'seeg', 'ecog', 'mag', 'eeg', 'ref_meg', 'grad', 'emg', 'hbr' or 'hbo'.
Returns
----------
raw : mne.io.Raw
Raw data in FIF format.
Example
----------
>>> import neurokit as nk
>>> event_index_in_eeg = 42
>>> event_index_in_ecg = 666
>>> raw = nk.eeg_add_channel(raw, ecg, sync_index_raw=event_index_in_eeg, sync_index_channel=event_index_in_ecg, channel_type="ecg")
Notes
----------
*Authors*
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
*Dependencies*
- mne
*See Also*
- mne: http://martinos.org/mne/dev/index.html
### Response:
def eeg_add_channel(raw, channel, sync_index_eeg=0, sync_index_channel=0, channel_type=None, channel_name=None):
"""
Add a channel to a mne's Raw m/eeg file. It will basically synchronize the channel to the eeg data following a particular index and add it.
Parameters
----------
raw : mne.io.Raw
Raw EEG data.
channel : list or numpy.array
The channel to be added.
sync_index_eeg : int or list
An index, in the raw data, by which to align the two inputs.
sync_index_channel : int or list
An index, in the channel to add, by which to align the two inputs.
channel_type : str
Channel type. Currently supported fields are 'ecg', 'bio', 'stim', 'eog', 'misc', 'seeg', 'ecog', 'mag', 'eeg', 'ref_meg', 'grad', 'emg', 'hbr' or 'hbo'.
Returns
----------
raw : mne.io.Raw
Raw data in FIF format.
Example
----------
>>> import neurokit as nk
>>> event_index_in_eeg = 42
>>> event_index_in_ecg = 666
>>> raw = nk.eeg_add_channel(raw, ecg, sync_index_raw=event_index_in_eeg, sync_index_channel=event_index_in_ecg, channel_type="ecg")
Notes
----------
*Authors*
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
*Dependencies*
- mne
*See Also*
- mne: http://martinos.org/mne/dev/index.html
"""
if channel_name is None:
if isinstance(channel, pd.core.series.Series):
if channel.name is not None:
channel_name = channel.name
else:
channel_name = "Added_Channel"
else:
channel_name = "Added_Channel"
# Compute the distance between the two signals
diff = sync_index_channel - sync_index_eeg
if diff > 0:
channel = list(channel)[diff:len(channel)]
channel = channel + [np.nan]*diff
if diff < 0:
channel = [np.nan]*diff + list(channel)
channel = list(channel)[0:len(channel)]
# Adjust to raw size
if len(channel) < len(raw):
channel = list(channel) + [np.nan]*(len(raw)-len(channel))
else:
channel = list(channel)[0:len(raw)] # Crop to fit the raw data
info = mne.create_info([channel_name], raw.info["sfreq"], ch_types=channel_type)
channel = mne.io.RawArray([channel], info)
raw.add_channels([channel], force_update_info=True)
return(raw) |
def sparseHealpixFiles(title, infiles, field='MAGLIM', **kwargs):
    """
    Read a partial healpix map from *infiles* and plot it in a Mollweide
    projection.
    Inputs: field
    """
    skymap_data = ugali.utils.skymap.read_partial_map(infiles, field)
    ax = hp.mollview(map=skymap_data, title=title, **kwargs)
    return ax, skymap_data
### Input:
Inputs: field
### Response:
def sparseHealpixFiles(title, infiles, field='MAGLIM',**kwargs):
"""
Inputs: field
"""
#map = ugali.utils.skymap.readSparseHealpixMaps(infiles,field)
map = ugali.utils.skymap.read_partial_map(infiles,field)
ax = hp.mollview(map=map, title=title, **kwargs)
return ax, map |
def edit(self, **kwargs):
    """ Change this Job's name.
    This will affect the historical data available for this
    Job, e.g. past run logs will no longer be accessible.
    """
    logger.debug('Job {0} changing name to {1}'.format(self.name, kwargs.get('name')))
    if not self.state.allow_edit_job:
        raise DagobahError('job cannot be edited in its current state')
    new_name = kwargs.get('name')
    if isinstance(new_name, str):
        # Reject names already taken by sibling jobs.
        if not self.parent._name_is_available(new_name):
            raise DagobahError('new job name %s is not available' %
                               new_name)
        self.name = new_name
    self.parent.commit(cascade=True)
This will affect the historical data available for this
Job, e.g. past run logs will no longer be accessible. | Below is the the instruction that describes the task:
### Input:
Change this Job's name.
This will affect the historical data available for this
Job, e.g. past run logs will no longer be accessible.
### Response:
def edit(self, **kwargs):
""" Change this Job's name.
This will affect the historical data available for this
Job, e.g. past run logs will no longer be accessible.
"""
logger.debug('Job {0} changing name to {1}'.format(self.name, kwargs.get('name')))
if not self.state.allow_edit_job:
raise DagobahError('job cannot be edited in its current state')
if 'name' in kwargs and isinstance(kwargs['name'], str):
if not self.parent._name_is_available(kwargs['name']):
raise DagobahError('new job name %s is not available' %
kwargs['name'])
for key in ['name']:
if key in kwargs and isinstance(kwargs[key], str):
setattr(self, key, kwargs[key])
self.parent.commit(cascade=True) |
def expected_number_of_transactions_in_first_n_periods(self, n):
    r"""
    Return expected number of transactions in first n n_periods.
    Expected number of transactions occurring across first n transaction
    opportunities.
    Used by Fader and Hardie to assess in-sample fit.
    .. math:: Pr(X(n) = x| \alpha, \beta, \gamma, \delta)
    See (7) in Fader & Hardie 2010.
    Parameters
    ----------
    n: float
        number of transaction opportunities
    Returns
    -------
    DataFrame:
        Predicted values, indexed by x
    """
    params = self._unload_params("alpha", "beta", "gamma", "delta")
    alpha, beta, gamma, delta = params
    # Total observed weight for each distinct frequency x in the data.
    x_counts = self.data.groupby("frequency")["weights"].sum()
    x = np.asarray(x_counts.index)
    # First summand of (7) in Fader & Hardie 2010, computed in log space
    # via betaln for numerical stability.
    p1 = binom(n, x) * exp(
        betaln(alpha + x, beta + n - x) - betaln(alpha, beta) + betaln(gamma, delta + n) - betaln(gamma, delta)
    )
    # Transaction-opportunity values over which the second summand sums.
    I = np.arange(x.min(), n)
    @np.vectorize
    def p2(j, x):
        # Second summand of (7): accumulate over the remaining
        # opportunity values starting at position j.
        i = I[int(j) :]
        return np.sum(
            binom(i, x)
            * exp(
                betaln(alpha + x, beta + i - x)
                - betaln(alpha, beta)
                + betaln(gamma + 1, delta + i)
                - betaln(gamma, delta)
            )
        )
    p1 += np.fromfunction(p2, (x.shape[0],), x=x)
    idx = pd.Index(x, name="frequency")
    # Scale the probabilities by the total weight to obtain expected counts.
    return DataFrame(p1 * x_counts.sum(), index=idx, columns=["model"])
Return expected number of transactions in first n n_periods.
Expected number of transactions occurring across first n transaction
opportunities.
Used by Fader and Hardie to assess in-sample fit.
.. math:: Pr(X(n) = x| \alpha, \beta, \gamma, \delta)
See (7) in Fader & Hardie 2010.
Parameters
----------
n: float
number of transaction opportunities
Returns
-------
DataFrame:
Predicted values, indexed by x | Below is the the instruction that describes the task:
### Input:
r"""
Return expected number of transactions in first n n_periods.
Expected number of transactions occurring across first n transaction
opportunities.
Used by Fader and Hardie to assess in-sample fit.
.. math:: Pr(X(n) = x| \alpha, \beta, \gamma, \delta)
See (7) in Fader & Hardie 2010.
Parameters
----------
n: float
number of transaction opportunities
Returns
-------
DataFrame:
Predicted values, indexed by x
### Response:
def expected_number_of_transactions_in_first_n_periods(self, n):
r"""
Return expected number of transactions in first n n_periods.
Expected number of transactions occurring across first n transaction
opportunities.
Used by Fader and Hardie to assess in-sample fit.
.. math:: Pr(X(n) = x| \alpha, \beta, \gamma, \delta)
See (7) in Fader & Hardie 2010.
Parameters
----------
n: float
number of transaction opportunities
Returns
-------
DataFrame:
Predicted values, indexed by x
"""
params = self._unload_params("alpha", "beta", "gamma", "delta")
alpha, beta, gamma, delta = params
x_counts = self.data.groupby("frequency")["weights"].sum()
x = np.asarray(x_counts.index)
p1 = binom(n, x) * exp(
betaln(alpha + x, beta + n - x) - betaln(alpha, beta) + betaln(gamma, delta + n) - betaln(gamma, delta)
)
I = np.arange(x.min(), n)
@np.vectorize
def p2(j, x):
i = I[int(j) :]
return np.sum(
binom(i, x)
* exp(
betaln(alpha + x, beta + i - x)
- betaln(alpha, beta)
+ betaln(gamma + 1, delta + i)
- betaln(gamma, delta)
)
)
p1 += np.fromfunction(p2, (x.shape[0],), x=x)
idx = pd.Index(x, name="frequency")
return DataFrame(p1 * x_counts.sum(), index=idx, columns=["model"]) |
def iter(self, offs):
    '''
    Iterate over items in a sequence from a given offset.
    Args:
        offs (int): The offset to begin iterating from.
    Yields:
        (indx, valu): The index and valu of the item.
    '''
    # Encode the offset as a big-endian 64-bit key and scan forward.
    first = s_common.int64en(offs)
    for lkey, lval in self.slab.scanByRange(first, db=self.db):
        yield s_common.int64un(lkey), s_msgpack.un(lval)
Args:
offs (int): The offset to begin iterating from.
Yields:
(indx, valu): The index and valu of the item. | Below is the the instruction that describes the task:
### Input:
Iterate over items in a sequence from a given offset.
Args:
offs (int): The offset to begin iterating from.
Yields:
(indx, valu): The index and valu of the item.
### Response:
def iter(self, offs):
    '''
    Yield (indx, valu) pairs from the sequence, starting at a given offset.

    Args:
        offs (int): The offset to begin iterating from.

    Yields:
        (indx, valu): The index and value of each item at or after offs.
    '''
    # Encode the offset as a 64-bit key and range-scan the slab from there.
    firstkey = s_common.int64en(offs)
    for rawindx, rawvalu in self.slab.scanByRange(firstkey, db=self.db):
        yield s_common.int64un(rawindx), s_msgpack.un(rawvalu)
def ctcp_reply(self, target, ctcp_verb, argument=None):
"""Send a CTCP reply to the given target."""
# we don't support complex ctcp encapsulation because we're somewhat sane
atoms = [ctcp_verb]
if argument is not None:
atoms.append(argument)
X_DELIM = '\x01'
self.notice(target, X_DELIM + ' '.join(atoms) + X_DELIM, formatted=False) | Send a CTCP reply to the given target. | Below is the instruction that describes the task:
### Input:
Send a CTCP reply to the given target.
### Response:
def ctcp_reply(self, target, ctcp_verb, argument=None):
    """Send a CTCP reply (verb plus optional argument) to the given target."""
    # A CTCP reply is the verb, optionally followed by one argument, wrapped
    # in \x01 delimiters; we deliberately avoid complex CTCP encapsulation.
    X_DELIM = '\x01'
    body = ctcp_verb if argument is None else ctcp_verb + ' ' + argument
    self.notice(target, X_DELIM + body + X_DELIM, formatted=False)
def save(self, filehandle, destination=None, metadata=None,
validate=True, catch_all_errors=False, *args, **kwargs):
"""Saves the filehandle to the provided destination or the attached
default destination. Allows passing arbitrary positional and keyword
arguments to the saving mechanism
:param filehandle: werkzeug.FileStorage instance
:param dest: String path, callable or writable destination to pass the
filehandle off to. Transfer handles transforming a string or
writable object into a callable automatically.
:param metadata: Optional mapping of metadata to pass to validators,
preprocessors, and postprocessors.
:param validate boolean: Toggle validation, defaults to True
:param catch_all_errors boolean: Toggles if validation should collect
all UploadErrors and raise a collected error message or bail out on
the first one.
"""
destination = destination or self._destination
if destination is None:
raise RuntimeError("Destination for filehandle must be provided.")
elif destination is not self._destination:
destination = _make_destination_callable(destination)
if metadata is None:
metadata = {}
if validate:
self._validate(filehandle, metadata)
filehandle = self._preprocess(filehandle, metadata)
destination(filehandle, metadata)
filehandle = self._postprocess(filehandle, metadata)
return filehandle | Saves the filehandle to the provided destination or the attached
default destination. Allows passing arbitrary positional and keyword
arguments to the saving mechanism
:param filehandle: werkzeug.FileStorage instance
:param dest: String path, callable or writable destination to pass the
filehandle off to. Transfer handles transforming a string or
writable object into a callable automatically.
:param metadata: Optional mapping of metadata to pass to validators,
preprocessors, and postprocessors.
:param validate boolean: Toggle validation, defaults to True
:param catch_all_errors boolean: Toggles if validation should collect
all UploadErrors and raise a collected error message or bail out on
the first one. | Below is the instruction that describes the task:
### Input:
Saves the filehandle to the provided destination or the attached
default destination. Allows passing arbitrary positional and keyword
arguments to the saving mechanism
:param filehandle: werkzeug.FileStorage instance
:param dest: String path, callable or writable destination to pass the
filehandle off to. Transfer handles transforming a string or
writable object into a callable automatically.
:param metadata: Optional mapping of metadata to pass to validators,
preprocessors, and postprocessors.
:param validate boolean: Toggle validation, defaults to True
:param catch_all_errors boolean: Toggles if validation should collect
all UploadErrors and raise a collected error message or bail out on
the first one.
### Response:
def save(self, filehandle, destination=None, metadata=None,
         validate=True, catch_all_errors=False, *args, **kwargs):
    """Saves the filehandle to the provided destination or the attached
    default destination. Allows passing arbitrary positional and keyword
    arguments to the saving mechanism

    :param filehandle: werkzeug.FileStorage instance
    :param dest: String path, callable or writable destination to pass the
        filehandle off to. Transfer handles transforming a string or
        writable object into a callable automatically.
    :param metadata: Optional mapping of metadata to pass to validators,
        preprocessors, and postprocessors.
    :param validate boolean: Toggle validation, defaults to True
    :param catch_all_errors boolean: Toggles if validation should collect
        all UploadErrors and raise a collected error message or bail out on
        the first one.
    """
    # Fall back to the destination configured on this instance.
    destination = destination or self._destination
    if destination is None:
        raise RuntimeError("Destination for filehandle must be provided.")
    elif destination is not self._destination:
        # Ad-hoc destinations may be strings or writables; normalize them
        # into a callable. The preconfigured destination is already callable.
        destination = _make_destination_callable(destination)
    if metadata is None:
        metadata = {}
    # NOTE(review): catch_all_errors is documented above but never forwarded
    # to self._validate, and *args/**kwargs are accepted but unused here —
    # confirm against the validator API whether that is intentional.
    if validate:
        self._validate(filehandle, metadata)
    # Pipeline: preprocess -> write to destination -> postprocess.
    filehandle = self._preprocess(filehandle, metadata)
    destination(filehandle, metadata)
    filehandle = self._postprocess(filehandle, metadata)
    return filehandle
def remove_contributor(self, project_id, email, language):
"""
Removes a contributor
"""
self._run(
url_path="contributors/remove",
id=project_id,
email=email,
language=language
)
return True | Removes a contributor | Below is the instruction that describes the task:
### Input:
Removes a contributor
### Response:
def remove_contributor(self, project_id, email, language):
    """
    Remove a contributor (identified by email and language) from a project.

    Returns True; failures propagate as exceptions from self._run().
    """
    payload = dict(
        url_path="contributors/remove",
        id=project_id,
        email=email,
        language=language,
    )
    self._run(**payload)
    return True
def build_dir():
'''
Build the directory used for templates.
'''
tag_arr = ['add', 'edit', 'view', 'list', 'infolist']
path_arr = [os.path.join(CRUD_PATH, x) for x in tag_arr]
for wpath in path_arr:
if os.path.exists(wpath):
continue
os.makedirs(wpath) | Build the directory used for templates. | Below is the instruction that describes the task:
### Input:
Build the directory used for templates.
### Response:
def build_dir():
    '''
    Build the directory tree used for CRUD templates.

    Creates one sub-directory per CRUD tag under CRUD_PATH; directories
    that already exist are left untouched.
    '''
    for tag in ('add', 'edit', 'view', 'list', 'infolist'):
        # exist_ok=True avoids the check-then-create race of the previous
        # os.path.exists() + os.makedirs() pattern.
        os.makedirs(os.path.join(CRUD_PATH, tag), exist_ok=True)
def canClose(self):
"""
Checks to see if the view widget can close by checking all of its \
sub-views to make sure they're ok to close.
:return <bool>
"""
for view in self.findChildren(XView):
if not view.canClose():
return False
return True | Checks to see if the view widget can close by checking all of its \
sub-views to make sure they're ok to close.
:return <bool> | Below is the instruction that describes the task:
### Input:
Checks to see if the view widget can close by checking all of its \
sub-views to make sure they're ok to close.
:return <bool>
### Response:
def canClose(self):
    """
    Return whether this view widget may close, by asking every one of its
    sub-views whether it is ok to close.

    :return     <bool>
    """
    # Closeable only when every child XView agrees; all() short-circuits on
    # the first sub-view that refuses, exactly like an early-return loop.
    return all(child.canClose() for child in self.findChildren(XView))
def put_keys(self, press_keys=None, hold_keys=None, press_delay=50):
"""Put scancodes that represent keys defined in the sequences provided.
Arguments:
press_keys: Press a sequence of keys
hold_keys: While pressing the sequence of keys, hold down the keys
defined in hold_keys.
press_delay: Number of milliseconds to delay between each press
Note: Both press_keys and hold_keys are iterable objects that yield
self.SCANCODE.keys() keys.
"""
if press_keys is None:
press_keys = []
if hold_keys is None:
hold_keys = []
release_codes = set()
put_codes = set()
try:
# hold the keys
for k in hold_keys:
put, release = self.SCANCODES[k]
# Avoid putting codes over and over
put = set(put) - put_codes
self.put_scancodes(list(put))
put_codes.update(put)
release_codes.update(release)
# press the keys
for k in press_keys:
put, release = self.SCANCODES[k]
# Avoid putting held codes
_put = set(put) - put_codes
if not _put:
continue
release = set(release) - release_codes
# Avoid releasing held codes
if not release:
continue
self.put_scancodes(list(put) + list(release))
time.sleep(press_delay / 1000.0)
finally:
# release the held keys
for code in release_codes:
self.put_scancode(code) | Put scancodes that represent keys defined in the sequences provided.
Arguments:
press_keys: Press a sequence of keys
hold_keys: While pressing the sequence of keys, hold down the keys
defined in hold_keys.
press_delay: Number of milliseconds to delay between each press
Note: Both press_keys and hold_keys are iterable objects that yield
self.SCANCODE.keys() keys. | Below is the instruction that describes the task:
### Input:
Put scancodes that represent keys defined in the sequences provided.
Arguments:
press_keys: Press a sequence of keys
hold_keys: While pressing the sequence of keys, hold down the keys
defined in hold_keys.
press_delay: Number of milliseconds to delay between each press
Note: Both press_keys and hold_keys are iterable objects that yield
self.SCANCODE.keys() keys.
### Response:
def put_keys(self, press_keys=None, hold_keys=None, press_delay=50):
    """Put scancodes that represent keys defined in the sequences provided.

    Arguments:
        press_keys: Press a sequence of keys
        hold_keys: While pressing the sequence of keys, hold down the keys
                   defined in hold_keys.
        press_delay: Number of milliseconds to delay between each press

    Note: Both press_keys and hold_keys are iterable objects that yield
          self.SCANCODES keys.
    """
    # Default to empty sequences so the loops below are simply skipped.
    if press_keys is None:
        press_keys = []
    if hold_keys is None:
        hold_keys = []
    release_codes = set()   # codes emitted on exit to release held keys
    put_codes = set()       # codes already sent, to avoid re-sending
    try:
        # hold the keys
        for k in hold_keys:
            put, release = self.SCANCODES[k]
            # Avoid putting codes over and over
            put = set(put) - put_codes
            self.put_scancodes(list(put))
            put_codes.update(put)
            release_codes.update(release)
        # press the keys
        for k in press_keys:
            put, release = self.SCANCODES[k]
            # Avoid putting held codes
            _put = set(put) - put_codes
            if not _put:
                continue
            # NOTE(review): _put is only an emptiness guard; the full,
            # unfiltered `put` list is what gets sent below — confirm that
            # re-sending already-held codes here is intentional.
            release = set(release) - release_codes
            # Avoid releasing held codes
            if not release:
                continue
            # NOTE(review): when every release code is already held, the
            # press itself is skipped by the continue above — verify that
            # is the desired behavior.
            self.put_scancodes(list(put) + list(release))
            time.sleep(press_delay / 1000.0)
    finally:
        # release the held keys (runs even if a put above raised)
        for code in release_codes:
            self.put_scancode(code)
def dfs_tree(graph, start=0):
"""DFS, build DFS tree in unweighted graph
:param graph: directed graph in listlist or listdict format
:param int start: source vertex
:returns: precedence table
:complexity: `O(|V|+|E|)`
"""
to_visit = [start]
prec = [None] * len(graph)
while to_visit: # an empty queue equals False
node = to_visit.pop()
for neighbor in graph[node]:
if prec[neighbor] is None:
prec[neighbor] = node
to_visit.append(neighbor)
return prec | DFS, build DFS tree in unweighted graph
:param graph: directed graph in listlist or listdict format
:param int start: source vertex
:returns: precedence table
:complexity: `O(|V|+|E|)` | Below is the instruction that describes the task:
### Input:
DFS, build DFS tree in unweighted graph
:param graph: directed graph in listlist or listdict format
:param int start: source vertex
:returns: precedence table
:complexity: `O(|V|+|E|)`
### Response:
def dfs_tree(graph, start=0):
    """DFS, build DFS tree in unweighted graph

    :param graph: directed graph in listlist or listdict format
    :param int start: source vertex
    :returns: precedence table; prec[start] is None
    :complexity: `O(|V|+|E|)`
    """
    prec = [None] * len(graph)
    # Bug fix: the original used `prec[neighbor] is None` as the visited
    # test, which conflates "unvisited" with "root" — any edge pointing back
    # at `start` (e.g. every undirected graph) would wrongly assign it a
    # predecessor. Track visitation explicitly instead.
    seen = [False] * len(graph)
    seen[start] = True
    to_visit = [start]  # used as a stack => depth-first order
    while to_visit:
        node = to_visit.pop()
        for neighbor in graph[node]:
            if not seen[neighbor]:
                seen[neighbor] = True
                prec[neighbor] = node
                to_visit.append(neighbor)
    return prec
def handle_api_exceptions(self, method, *url_parts, **kwargs):
"""Call REST API and handle exceptions
Params:
method: 'HEAD', 'GET', 'POST', 'PATCH' or 'DELETE'
url_parts: like in rest_api_url() method
api_ver: like in rest_api_url() method
kwargs: other parameters passed to requests.request,
but the only notable parameter is: (... json=data)
that works like (...
headers = {'Content-Type': 'application/json'},
data=json.dumps(data))
"""
# The outer part - about error handler
assert method in ('HEAD', 'GET', 'POST', 'PATCH', 'DELETE')
cursor_context = kwargs.pop('cursor_context', None)
errorhandler = cursor_context.errorhandler if cursor_context else self.errorhandler
catched_exceptions = (SalesforceError, requests.exceptions.RequestException) if errorhandler else ()
try:
return self.handle_api_exceptions_inter(method, *url_parts, **kwargs)
except catched_exceptions:
# nothing is catched usually and error handler not used
exc_class, exc_value, _ = sys.exc_info()
errorhandler(self, cursor_context, exc_class, exc_value)
raise | Call REST API and handle exceptions
Params:
method: 'HEAD', 'GET', 'POST', 'PATCH' or 'DELETE'
url_parts: like in rest_api_url() method
api_ver: like in rest_api_url() method
kwargs: other parameters passed to requests.request,
but the only notable parameter is: (... json=data)
that works like (...
headers = {'Content-Type': 'application/json'},
data=json.dumps(data)) | Below is the instruction that describes the task:
### Input:
Call REST API and handle exceptions
Params:
method: 'HEAD', 'GET', 'POST', 'PATCH' or 'DELETE'
url_parts: like in rest_api_url() method
api_ver: like in rest_api_url() method
kwargs: other parameters passed to requests.request,
but the only notable parameter is: (... json=data)
that works like (...
headers = {'Content-Type': 'application/json'},
data=json.dumps(data))
### Response:
def handle_api_exceptions(self, method, *url_parts, **kwargs):
    """Invoke the REST API, routing failures through the error handler.

    Params:
        method: one of 'HEAD', 'GET', 'POST', 'PATCH', 'DELETE'
        url_parts: like in rest_api_url() method
        api_ver: like in rest_api_url() method
        kwargs: other parameters passed to requests.request; the notable
            one is json=data, which works like passing
            headers={'Content-Type': 'application/json'} together with
            data=json.dumps(data)
    """
    assert method in ('HEAD', 'GET', 'POST', 'PATCH', 'DELETE')
    # The cursor's error handler (if a cursor context was supplied) takes
    # precedence over the connection-level one.
    cursor_context = kwargs.pop('cursor_context', None)
    errorhandler = cursor_context.errorhandler if cursor_context else self.errorhandler
    if not errorhandler:
        # No handler installed: let every exception propagate untouched.
        return self.handle_api_exceptions_inter(method, *url_parts, **kwargs)
    try:
        return self.handle_api_exceptions_inter(method, *url_parts, **kwargs)
    except (SalesforceError, requests.exceptions.RequestException):
        # Give the handler a chance to log/translate, then re-raise as-is.
        exc_class, exc_value, _ = sys.exc_info()
        errorhandler(self, cursor_context, exc_class, exc_value)
        raise
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'message_type') and self.message_type is not None:
_dict['message_type'] = self.message_type
if hasattr(self, 'text') and self.text is not None:
_dict['text'] = self.text
if hasattr(self, 'options') and self.options is not None:
_dict['options'] = self.options._to_dict()
if hasattr(self, 'intents') and self.intents is not None:
_dict['intents'] = [x._to_dict() for x in self.intents]
if hasattr(self, 'entities') and self.entities is not None:
_dict['entities'] = [x._to_dict() for x in self.entities]
if hasattr(self, 'suggestion_id') and self.suggestion_id is not None:
_dict['suggestion_id'] = self.suggestion_id
return _dict | Return a json dictionary representing this model. | Below is the instruction that describes the task:
### Input:
Return a json dictionary representing this model.
### Response:
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'message_type') and self.message_type is not None:
_dict['message_type'] = self.message_type
if hasattr(self, 'text') and self.text is not None:
_dict['text'] = self.text
if hasattr(self, 'options') and self.options is not None:
_dict['options'] = self.options._to_dict()
if hasattr(self, 'intents') and self.intents is not None:
_dict['intents'] = [x._to_dict() for x in self.intents]
if hasattr(self, 'entities') and self.entities is not None:
_dict['entities'] = [x._to_dict() for x in self.entities]
if hasattr(self, 'suggestion_id') and self.suggestion_id is not None:
_dict['suggestion_id'] = self.suggestion_id
return _dict |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.