code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def _get_gc2_coordinates_for_rupture(self, edge_sets):
"""
Calculates the GC2 coordinates for the nodes of the upper edge of the
fault
"""
# Establish GC2 length - for use with Ry0
rup_gc2t, rup_gc2u = self.get_generalised_coordinates(
edge_sets[:, 0], edge_sets[:, 1])
# GC2 length should be the largest positive GC2 value of the edges
self.gc_length = numpy.max(rup_gc2u) | Calculates the GC2 coordinates for the nodes of the upper edge of the
fault | Below is the instruction that describes the task:
### Input:
Calculates the GC2 coordinates for the nodes of the upper edge of the
fault
### Response:
def _get_gc2_coordinates_for_rupture(self, edge_sets):
"""
Calculates the GC2 coordinates for the nodes of the upper edge of the
fault
"""
# Establish GC2 length - for use with Ry0
rup_gc2t, rup_gc2u = self.get_generalised_coordinates(
edge_sets[:, 0], edge_sets[:, 1])
# GC2 length should be the largest positive GC2 value of the edges
self.gc_length = numpy.max(rup_gc2u) |
def include(self, issue):
""" Return true if the issue in question should be included """
only_if_assigned = self.config.get('only_if_assigned', None)
if only_if_assigned:
owner = self.get_owner(issue)
include_owners = [only_if_assigned]
if self.config.get('also_unassigned', None, asbool):
include_owners.append(None)
return owner in include_owners
only_if_author = self.config.get('only_if_author', None)
if only_if_author:
return self.get_author(issue) == only_if_author
return True | Return true if the issue in question should be included | Below is the instruction that describes the task:
### Input:
Return true if the issue in question should be included
### Response:
def include(self, issue):
""" Return true if the issue in question should be included """
only_if_assigned = self.config.get('only_if_assigned', None)
if only_if_assigned:
owner = self.get_owner(issue)
include_owners = [only_if_assigned]
if self.config.get('also_unassigned', None, asbool):
include_owners.append(None)
return owner in include_owners
only_if_author = self.config.get('only_if_author', None)
if only_if_author:
return self.get_author(issue) == only_if_author
return True |
def _filterDictToStr(self, filterDict):
""" Converts friend filters to a string representation for transport. """
values = []
for key, vals in filterDict.items():
if key not in ('contentRating', 'label'):
raise BadRequest('Unknown filter key: %s', key)
values.append('%s=%s' % (key, '%2C'.join(vals)))
return '|'.join(values) | Converts friend filters to a string representation for transport. | Below is the instruction that describes the task:
### Input:
Converts friend filters to a string representation for transport.
### Response:
def _filterDictToStr(self, filterDict):
""" Converts friend filters to a string representation for transport. """
values = []
for key, vals in filterDict.items():
if key not in ('contentRating', 'label'):
raise BadRequest('Unknown filter key: %s', key)
values.append('%s=%s' % (key, '%2C'.join(vals)))
return '|'.join(values) |
def _readtoken(self, name, pos, length):
"""Reads a token from the bitstring and returns the result."""
if length is not None and int(length) > self.length - pos:
raise ReadError("Reading off the end of the data. "
"Tried to read {0} bits when only {1} available.".format(int(length), self.length - pos))
try:
val = name_to_read[name](self, length, pos)
return val, pos + length
except KeyError:
if name == 'pad':
return None, pos + length
raise ValueError("Can't parse token {0}:{1}".format(name, length))
except TypeError:
# This is for the 'ue', 'se' and 'bool' tokens. They will also return the new pos.
return name_to_read[name](self, pos) | Reads a token from the bitstring and returns the result. | Below is the instruction that describes the task:
### Input:
Reads a token from the bitstring and returns the result.
### Response:
def _readtoken(self, name, pos, length):
"""Reads a token from the bitstring and returns the result."""
if length is not None and int(length) > self.length - pos:
raise ReadError("Reading off the end of the data. "
"Tried to read {0} bits when only {1} available.".format(int(length), self.length - pos))
try:
val = name_to_read[name](self, length, pos)
return val, pos + length
except KeyError:
if name == 'pad':
return None, pos + length
raise ValueError("Can't parse token {0}:{1}".format(name, length))
except TypeError:
# This is for the 'ue', 'se' and 'bool' tokens. They will also return the new pos.
return name_to_read[name](self, pos) |
def estimate_shift(signal, genome=None, windowsize=5000, thresh=None,
nwindows=1000, maxlag=500, array_kwargs=None,
verbose=False):
"""
Experimental: cross-correlation to estimate the shift width of ChIP-seq
data
This can be interpreted as the binding site footprint.
For ChIP-seq, the plus and minus strand reads tend to be shifted in the 5'
direction away from each other. Various ChIP-seq peak-callers estimate
this distance; this function provides a quick, tunable way to do so using
cross-correlation. The resulting shift can then be incorporated into
subsequent calls to `array` by adding the shift_width kwarg.
:param signal: genomic_signal object
:param genome: String assembly for constructing windows
:param nwindows: Number of windows to compute cross-correlation on
:param windowsize: Size of each window to compute cross-correlation on.
:param thresh: Threshold read coverage to run cross-correlation on. This
is likely to be a function of the fragment size provided in
`array_kwargs` `windowsize`. If `thresh` is small, then the cross
correlation can be noisy.
:param maxlag: Max shift to look for
:param array_kwargs: Kwargs passed directly to genomic_signal.array, with
the default of `bins=windowsize` for single-bp resolution, and
`read_strand` will be overwritten.
:param verbose: Be verbose.
Returns lags and a `maxlag*2+1` x `nwindows` matrix of cross-correlations.
You can then plot the average cross-correlation function with::
plt.plot(lags, shift.mean(axis=0))
and get the distance to shift with::
d = lags[np.argmax(shift.mean(axis=0))]
and then plot that with::
plt.axvline(d, color='k', linestyle='--')
The number of windows with at least `thresh` coverage is::
shift.shape[0]
"""
if thresh is None:
thresh = 0
if genome is None:
genome = signal.genome()
if array_kwargs is None:
array_kwargs = {}
array_kwargs.pop('read_strand', None)
if 'bins' not in array_kwargs:
array_kwargs['bins'] = windowsize
def add_strand(f, strand):
fields = f.fields[:]
while len(fields) < 5:
fields.append('.')
fields.append(strand)
return pybedtools.create_interval_from_list(fields)
windows = pybedtools.BedTool()\
.window_maker(genome=genome, w=windowsize)
random_subset = pybedtools.BedTool(windows[:nwindows])\
.shuffle(genome=genome).saveas()
if verbose:
sys.stderr.write("Getting plus-strand signal for %s regions...\n"
% nwindows)
sys.stderr.flush()
plus = signal.array(
features=random_subset,
read_strand="+",
**array_kwargs).astype(float)
if verbose:
sys.stderr.write("Getting minus-strand signal for %s regions...\n"
% nwindows)
sys.stderr.flush()
minus = signal.array(
features=random_subset,
read_strand="-",
**array_kwargs).astype(float)
# only do cross-correlation if you have enough reads to do so
enough = ((plus.sum(axis=1) / windowsize) > thresh) \
& ((minus.sum(axis=1) / windowsize) > thresh)
if verbose:
sys.stderr.write(
"Running cross-correlation on %s regions that passed "
"threshold\n" % sum(enough))
results = np.zeros((sum(enough), 2 * maxlag + 1))
for i, xy in enumerate(izip(plus[enough], minus[enough])):
x, y = xy
results[i] = xcorr(x, y, maxlag)
lags = np.arange(-maxlag, maxlag + 1)
return lags, results | Experimental: cross-correlation to estimate the shift width of ChIP-seq
data
This can be interpreted as the binding site footprint.
For ChIP-seq, the plus and minus strand reads tend to be shifted in the 5'
direction away from each other. Various ChIP-seq peak-callers estimate
this distance; this function provides a quick, tunable way to do so using
cross-correlation. The resulting shift can then be incorporated into
subsequent calls to `array` by adding the shift_width kwarg.
:param signal: genomic_signal object
:param genome: String assembly for constructing windows
:param nwindows: Number of windows to compute cross-correlation on
:param windowsize: Size of each window to compute cross-correlation on.
:param thresh: Threshold read coverage to run cross-correlation on. This
is likely to be a function of the fragment size provided in
`array_kwargs` `windowsize`. If `thresh` is small, then the cross
correlation can be noisy.
:param maxlag: Max shift to look for
:param array_kwargs: Kwargs passed directly to genomic_signal.array, with
the default of `bins=windowsize` for single-bp resolution, and
`read_strand` will be overwritten.
:param verbose: Be verbose.
Returns lags and a `maxlag*2+1` x `nwindows` matrix of cross-correlations.
You can then plot the average cross-correlation function with::
plt.plot(lags, shift.mean(axis=0))
and get the distance to shift with::
d = lags[np.argmax(shift.mean(axis=0))]
and then plot that with::
plt.axvline(d, color='k', linestyle='--')
The number of windows with at least `thresh` coverage is::
shift.shape[0] | Below is the instruction that describes the task:
### Input:
Experimental: cross-correlation to estimate the shift width of ChIP-seq
data
This can be interpreted as the binding site footprint.
For ChIP-seq, the plus and minus strand reads tend to be shifted in the 5'
direction away from each other. Various ChIP-seq peak-callers estimate
this distance; this function provides a quick, tunable way to do so using
cross-correlation. The resulting shift can then be incorporated into
subsequent calls to `array` by adding the shift_width kwarg.
:param signal: genomic_signal object
:param genome: String assembly for constructing windows
:param nwindows: Number of windows to compute cross-correlation on
:param windowsize: Size of each window to compute cross-correlation on.
:param thresh: Threshold read coverage to run cross-correlation on. This
is likely to be a function of the fragment size provided in
`array_kwargs` `windowsize`. If `thresh` is small, then the cross
correlation can be noisy.
:param maxlag: Max shift to look for
:param array_kwargs: Kwargs passed directly to genomic_signal.array, with
the default of `bins=windowsize` for single-bp resolution, and
`read_strand` will be overwritten.
:param verbose: Be verbose.
Returns lags and a `maxlag*2+1` x `nwindows` matrix of cross-correlations.
You can then plot the average cross-correlation function with::
plt.plot(lags, shift.mean(axis=0))
and get the distance to shift with::
d = lags[np.argmax(shift.mean(axis=0))]
and then plot that with::
plt.axvline(d, color='k', linestyle='--')
The number of windows with at least `thresh` coverage is::
shift.shape[0]
### Response:
def estimate_shift(signal, genome=None, windowsize=5000, thresh=None,
nwindows=1000, maxlag=500, array_kwargs=None,
verbose=False):
"""
Experimental: cross-correlation to estimate the shift width of ChIP-seq
data
This can be interpreted as the binding site footprint.
For ChIP-seq, the plus and minus strand reads tend to be shifted in the 5'
direction away from each other. Various ChIP-seq peak-callers estimate
this distance; this function provides a quick, tunable way to do so using
cross-correlation. The resulting shift can then be incorporated into
subsequent calls to `array` by adding the shift_width kwarg.
:param signal: genomic_signal object
:param genome: String assembly for constructing windows
:param nwindows: Number of windows to compute cross-correlation on
:param windowsize: Size of each window to compute cross-correlation on.
:param thresh: Threshold read coverage to run cross-correlation on. This
is likely to be a function of the fragment size provided in
`array_kwargs` `windowsize`. If `thresh` is small, then the cross
correlation can be noisy.
:param maxlag: Max shift to look for
:param array_kwargs: Kwargs passed directly to genomic_signal.array, with
the default of `bins=windowsize` for single-bp resolution, and
`read_strand` will be overwritten.
:param verbose: Be verbose.
Returns lags and a `maxlag*2+1` x `nwindows` matrix of cross-correlations.
You can then plot the average cross-correlation function with::
plt.plot(lags, shift.mean(axis=0))
and get the distance to shift with::
d = lags[np.argmax(shift.mean(axis=0))]
and then plot that with::
plt.axvline(d, color='k', linestyle='--')
The number of windows with at least `thresh` coverage is::
shift.shape[0]
"""
if thresh is None:
thresh = 0
if genome is None:
genome = signal.genome()
if array_kwargs is None:
array_kwargs = {}
array_kwargs.pop('read_strand', None)
if 'bins' not in array_kwargs:
array_kwargs['bins'] = windowsize
def add_strand(f, strand):
fields = f.fields[:]
while len(fields) < 5:
fields.append('.')
fields.append(strand)
return pybedtools.create_interval_from_list(fields)
windows = pybedtools.BedTool()\
.window_maker(genome=genome, w=windowsize)
random_subset = pybedtools.BedTool(windows[:nwindows])\
.shuffle(genome=genome).saveas()
if verbose:
sys.stderr.write("Getting plus-strand signal for %s regions...\n"
% nwindows)
sys.stderr.flush()
plus = signal.array(
features=random_subset,
read_strand="+",
**array_kwargs).astype(float)
if verbose:
sys.stderr.write("Getting minus-strand signal for %s regions...\n"
% nwindows)
sys.stderr.flush()
minus = signal.array(
features=random_subset,
read_strand="-",
**array_kwargs).astype(float)
# only do cross-correlation if you have enough reads to do so
enough = ((plus.sum(axis=1) / windowsize) > thresh) \
& ((minus.sum(axis=1) / windowsize) > thresh)
if verbose:
sys.stderr.write(
"Running cross-correlation on %s regions that passed "
"threshold\n" % sum(enough))
results = np.zeros((sum(enough), 2 * maxlag + 1))
for i, xy in enumerate(izip(plus[enough], minus[enough])):
x, y = xy
results[i] = xcorr(x, y, maxlag)
lags = np.arange(-maxlag, maxlag + 1)
return lags, results |
def e():
"""This is a hypothetical reference radiator. All wavelengths in CIE
illuminant E are weighted equally with a relative spectral power of 100.0.
"""
lmbda = 1.0e-9 * numpy.arange(300, 831)
data = numpy.full(lmbda.shape, 100.0)
return lmbda, data | This is a hypothetical reference radiator. All wavelengths in CIE
illuminant E are weighted equally with a relative spectral power of 100.0. | Below is the instruction that describes the task:
### Input:
This is a hypothetical reference radiator. All wavelengths in CIE
illuminant E are weighted equally with a relative spectral power of 100.0.
### Response:
def e():
"""This is a hypothetical reference radiator. All wavelengths in CIE
illuminant E are weighted equally with a relative spectral power of 100.0.
"""
lmbda = 1.0e-9 * numpy.arange(300, 831)
data = numpy.full(lmbda.shape, 100.0)
return lmbda, data |
def action_import(location, verbose=True):
"""
Import files into the local repo
"""
location = str(location) # prevent errors from unicode being passed
# 1) extract file from location and save locally
ONTOSPY_LOCAL_MODELS = get_home_location()
fullpath = ""
try:
if location.startswith("www."): #support for lazy people
location = "http://%s" % str(location)
if location.startswith("http"):
# print("here")
headers = {'Accept': "application/rdf+xml"}
try:
# Py2
req = urllib2.request(location, headers=headers)
res = urllib2.urlopen(req)
except:
# Py3
req = urllib.request.Request(location, headers=headers)
res = urlopen(req)
final_location = res.geturl() # after 303 redirects
printDebug("Saving data from <%s>" % final_location, "green")
# filename = final_location.split("/")[-1] or final_location.split("/")[-2]
filename = location.replace("http://", "").replace("/", "_")
if not filename.lower().endswith(
('.rdf', '.owl', '.rdfs', '.ttl', '.n3')):
filename = filename + ".rdf"
fullpath = ONTOSPY_LOCAL_MODELS + "/" + filename # 2016-04-08
# fullpath = ONTOSPY_LOCAL_MODELS + filename
# print("==DEBUG", final_location, "**", filename,"**", fullpath)
file_ = open(fullpath, 'wb')
file_.write(res.read())
file_.close()
else:
if os.path.isfile(location):
filename = location.split("/")[-1] or location.split("/")[-2]
fullpath = ONTOSPY_LOCAL_MODELS + "/" + filename
shutil.copy(location, fullpath)
else:
raise ValueError('The location specified is not a file.')
# print("Saved local copy")
except:
printDebug(
"Error retrieving file. Please make sure <%s> is a valid location."
% location, "important")
if os.path.exists(fullpath):
os.remove(fullpath)
return None
try:
g = Ontospy(fullpath, verbose=verbose)
# printDebug("----------")
except:
g = None
if os.path.exists(fullpath):
os.remove(fullpath)
printDebug(
"Error parsing file. Please make sure %s contains valid RDF." %
location, "important")
if g:
printDebug("Caching...", "red")
do_pickle_ontology(filename, g)
printDebug("----------\n...completed!", "important")
# finally...
return g | Import files into the local repo | Below is the instruction that describes the task:
### Input:
Import files into the local repo
### Response:
def action_import(location, verbose=True):
"""
Import files into the local repo
"""
location = str(location) # prevent errors from unicode being passed
# 1) extract file from location and save locally
ONTOSPY_LOCAL_MODELS = get_home_location()
fullpath = ""
try:
if location.startswith("www."): #support for lazy people
location = "http://%s" % str(location)
if location.startswith("http"):
# print("here")
headers = {'Accept': "application/rdf+xml"}
try:
# Py2
req = urllib2.request(location, headers=headers)
res = urllib2.urlopen(req)
except:
# Py3
req = urllib.request.Request(location, headers=headers)
res = urlopen(req)
final_location = res.geturl() # after 303 redirects
printDebug("Saving data from <%s>" % final_location, "green")
# filename = final_location.split("/")[-1] or final_location.split("/")[-2]
filename = location.replace("http://", "").replace("/", "_")
if not filename.lower().endswith(
('.rdf', '.owl', '.rdfs', '.ttl', '.n3')):
filename = filename + ".rdf"
fullpath = ONTOSPY_LOCAL_MODELS + "/" + filename # 2016-04-08
# fullpath = ONTOSPY_LOCAL_MODELS + filename
# print("==DEBUG", final_location, "**", filename,"**", fullpath)
file_ = open(fullpath, 'wb')
file_.write(res.read())
file_.close()
else:
if os.path.isfile(location):
filename = location.split("/")[-1] or location.split("/")[-2]
fullpath = ONTOSPY_LOCAL_MODELS + "/" + filename
shutil.copy(location, fullpath)
else:
raise ValueError('The location specified is not a file.')
# print("Saved local copy")
except:
printDebug(
"Error retrieving file. Please make sure <%s> is a valid location."
% location, "important")
if os.path.exists(fullpath):
os.remove(fullpath)
return None
try:
g = Ontospy(fullpath, verbose=verbose)
# printDebug("----------")
except:
g = None
if os.path.exists(fullpath):
os.remove(fullpath)
printDebug(
"Error parsing file. Please make sure %s contains valid RDF." %
location, "important")
if g:
printDebug("Caching...", "red")
do_pickle_ontology(filename, g)
printDebug("----------\n...completed!", "important")
# finally...
return g |
def stem(self, word):
"""
Stem an Hungarian word and return the stemmed form.
:param word: The word that is stemmed.
:type word: str or unicode
:return: The stemmed form.
:rtype: unicode
"""
word = word.lower()
r1 = self.__r1_hungarian(word, self.__vowels, self.__digraphs)
# STEP 1: Remove instrumental case
if r1.endswith(self.__step1_suffixes):
for double_cons in self.__double_consonants:
if word[-2-len(double_cons):-2] == double_cons:
word = "".join((word[:-4], word[-3]))
if r1[-2-len(double_cons):-2] == double_cons:
r1 = "".join((r1[:-4], r1[-3]))
break
# STEP 2: Remove frequent cases
for suffix in self.__step2_suffixes:
if word.endswith(suffix):
if r1.endswith(suffix):
word = word[:-len(suffix)]
r1 = r1[:-len(suffix)]
if r1.endswith("\xE1"):
word = "".join((word[:-1], "a"))
r1 = "".join((r1[:-1], "a"))
elif r1.endswith("\xE9"):
word = "".join((word[:-1], "e"))
r1 = "".join((r1[:-1], "e"))
break
# STEP 3: Remove special cases
for suffix in self.__step3_suffixes:
if r1.endswith(suffix):
if suffix == "\xE9n":
word = "".join((word[:-2], "e"))
r1 = "".join((r1[:-2], "e"))
else:
word = "".join((word[:-len(suffix)], "a"))
r1 = "".join((r1[:-len(suffix)], "a"))
break
# STEP 4: Remove other cases
for suffix in self.__step4_suffixes:
if r1.endswith(suffix):
if suffix == "\xE1stul":
word = "".join((word[:-5], "a"))
r1 = "".join((r1[:-5], "a"))
elif suffix == "\xE9st\xFCl":
word = "".join((word[:-5], "e"))
r1 = "".join((r1[:-5], "e"))
else:
word = word[:-len(suffix)]
r1 = r1[:-len(suffix)]
break
# STEP 5: Remove factive case
for suffix in self.__step5_suffixes:
if r1.endswith(suffix):
for double_cons in self.__double_consonants:
if word[-1-len(double_cons):-1] == double_cons:
word = "".join((word[:-3], word[-2]))
if r1[-1-len(double_cons):-1] == double_cons:
r1 = "".join((r1[:-3], r1[-2]))
break
# STEP 6: Remove owned
for suffix in self.__step6_suffixes:
if r1.endswith(suffix):
if suffix in ("\xE1k\xE9", "\xE1\xE9i"):
word = "".join((word[:-3], "a"))
r1 = "".join((r1[:-3], "a"))
elif suffix in ("\xE9k\xE9", "\xE9\xE9i",
"\xE9\xE9"):
word = "".join((word[:-len(suffix)], "e"))
r1 = "".join((r1[:-len(suffix)], "e"))
else:
word = word[:-len(suffix)]
r1 = r1[:-len(suffix)]
break
# STEP 7: Remove singular owner suffixes
for suffix in self.__step7_suffixes:
if word.endswith(suffix):
if r1.endswith(suffix):
if suffix in ("\xE1nk", "\xE1juk", "\xE1m",
"\xE1d", "\xE1"):
word = "".join((word[:-len(suffix)], "a"))
r1 = "".join((r1[:-len(suffix)], "a"))
elif suffix in ("\xE9nk", "\xE9j\xFCk",
"\xE9m", "\xE9d", "\xE9"):
word = "".join((word[:-len(suffix)], "e"))
r1 = "".join((r1[:-len(suffix)], "e"))
else:
word = word[:-len(suffix)]
r1 = r1[:-len(suffix)]
break
# STEP 8: Remove plural owner suffixes
for suffix in self.__step8_suffixes:
if word.endswith(suffix):
if r1.endswith(suffix):
if suffix in ("\xE1im", "\xE1id", "\xE1i",
"\xE1ink", "\xE1itok", "\xE1ik"):
word = "".join((word[:-len(suffix)], "a"))
r1 = "".join((r1[:-len(suffix)], "a"))
elif suffix in ("\xE9im", "\xE9id", "\xE9i",
"\xE9ink", "\xE9itek", "\xE9ik"):
word = "".join((word[:-len(suffix)], "e"))
r1 = "".join((r1[:-len(suffix)], "e"))
else:
word = word[:-len(suffix)]
r1 = r1[:-len(suffix)]
break
# STEP 9: Remove plural suffixes
for suffix in self.__step9_suffixes:
if word.endswith(suffix):
if r1.endswith(suffix):
if suffix == "\xE1k":
word = "".join((word[:-2], "a"))
elif suffix == "\xE9k":
word = "".join((word[:-2], "e"))
else:
word = word[:-len(suffix)]
break
return word | Stem an Hungarian word and return the stemmed form.
:param word: The word that is stemmed.
:type word: str or unicode
:return: The stemmed form.
:rtype: unicode | Below is the instruction that describes the task:
### Input:
Stem an Hungarian word and return the stemmed form.
:param word: The word that is stemmed.
:type word: str or unicode
:return: The stemmed form.
:rtype: unicode
### Response:
def stem(self, word):
"""
Stem an Hungarian word and return the stemmed form.
:param word: The word that is stemmed.
:type word: str or unicode
:return: The stemmed form.
:rtype: unicode
"""
word = word.lower()
r1 = self.__r1_hungarian(word, self.__vowels, self.__digraphs)
# STEP 1: Remove instrumental case
if r1.endswith(self.__step1_suffixes):
for double_cons in self.__double_consonants:
if word[-2-len(double_cons):-2] == double_cons:
word = "".join((word[:-4], word[-3]))
if r1[-2-len(double_cons):-2] == double_cons:
r1 = "".join((r1[:-4], r1[-3]))
break
# STEP 2: Remove frequent cases
for suffix in self.__step2_suffixes:
if word.endswith(suffix):
if r1.endswith(suffix):
word = word[:-len(suffix)]
r1 = r1[:-len(suffix)]
if r1.endswith("\xE1"):
word = "".join((word[:-1], "a"))
r1 = "".join((r1[:-1], "a"))
elif r1.endswith("\xE9"):
word = "".join((word[:-1], "e"))
r1 = "".join((r1[:-1], "e"))
break
# STEP 3: Remove special cases
for suffix in self.__step3_suffixes:
if r1.endswith(suffix):
if suffix == "\xE9n":
word = "".join((word[:-2], "e"))
r1 = "".join((r1[:-2], "e"))
else:
word = "".join((word[:-len(suffix)], "a"))
r1 = "".join((r1[:-len(suffix)], "a"))
break
# STEP 4: Remove other cases
for suffix in self.__step4_suffixes:
if r1.endswith(suffix):
if suffix == "\xE1stul":
word = "".join((word[:-5], "a"))
r1 = "".join((r1[:-5], "a"))
elif suffix == "\xE9st\xFCl":
word = "".join((word[:-5], "e"))
r1 = "".join((r1[:-5], "e"))
else:
word = word[:-len(suffix)]
r1 = r1[:-len(suffix)]
break
# STEP 5: Remove factive case
for suffix in self.__step5_suffixes:
if r1.endswith(suffix):
for double_cons in self.__double_consonants:
if word[-1-len(double_cons):-1] == double_cons:
word = "".join((word[:-3], word[-2]))
if r1[-1-len(double_cons):-1] == double_cons:
r1 = "".join((r1[:-3], r1[-2]))
break
# STEP 6: Remove owned
for suffix in self.__step6_suffixes:
if r1.endswith(suffix):
if suffix in ("\xE1k\xE9", "\xE1\xE9i"):
word = "".join((word[:-3], "a"))
r1 = "".join((r1[:-3], "a"))
elif suffix in ("\xE9k\xE9", "\xE9\xE9i",
"\xE9\xE9"):
word = "".join((word[:-len(suffix)], "e"))
r1 = "".join((r1[:-len(suffix)], "e"))
else:
word = word[:-len(suffix)]
r1 = r1[:-len(suffix)]
break
# STEP 7: Remove singular owner suffixes
for suffix in self.__step7_suffixes:
if word.endswith(suffix):
if r1.endswith(suffix):
if suffix in ("\xE1nk", "\xE1juk", "\xE1m",
"\xE1d", "\xE1"):
word = "".join((word[:-len(suffix)], "a"))
r1 = "".join((r1[:-len(suffix)], "a"))
elif suffix in ("\xE9nk", "\xE9j\xFCk",
"\xE9m", "\xE9d", "\xE9"):
word = "".join((word[:-len(suffix)], "e"))
r1 = "".join((r1[:-len(suffix)], "e"))
else:
word = word[:-len(suffix)]
r1 = r1[:-len(suffix)]
break
# STEP 8: Remove plural owner suffixes
for suffix in self.__step8_suffixes:
if word.endswith(suffix):
if r1.endswith(suffix):
if suffix in ("\xE1im", "\xE1id", "\xE1i",
"\xE1ink", "\xE1itok", "\xE1ik"):
word = "".join((word[:-len(suffix)], "a"))
r1 = "".join((r1[:-len(suffix)], "a"))
elif suffix in ("\xE9im", "\xE9id", "\xE9i",
"\xE9ink", "\xE9itek", "\xE9ik"):
word = "".join((word[:-len(suffix)], "e"))
r1 = "".join((r1[:-len(suffix)], "e"))
else:
word = word[:-len(suffix)]
r1 = r1[:-len(suffix)]
break
# STEP 9: Remove plural suffixes
for suffix in self.__step9_suffixes:
if word.endswith(suffix):
if r1.endswith(suffix):
if suffix == "\xE1k":
word = "".join((word[:-2], "a"))
elif suffix == "\xE9k":
word = "".join((word[:-2], "e"))
else:
word = word[:-len(suffix)]
break
return word |
def configure(
self,
accountID,
**kwargs
):
"""
Set the client-configurable portions of an Account.
Args:
accountID:
Account Identifier
alias:
Client-defined alias (name) for the Account
marginRate:
The string representation of a decimal number.
Returns:
v20.response.Response containing the results from submitting the
request
"""
request = Request(
'PATCH',
'/v3/accounts/{accountID}/configuration'
)
request.set_path_param(
'accountID',
accountID
)
body = EntityDict()
if 'alias' in kwargs:
body.set('alias', kwargs['alias'])
if 'marginRate' in kwargs:
body.set('marginRate', kwargs['marginRate'])
request.set_body_dict(body.dict)
response = self.ctx.request(request)
if response.content_type is None:
return response
if not response.content_type.startswith("application/json"):
return response
jbody = json.loads(response.raw_body)
parsed_body = {}
#
# Parse responses as defined by the API specification
#
if str(response.status) == "200":
if jbody.get('clientConfigureTransaction') is not None:
parsed_body['clientConfigureTransaction'] = \
self.ctx.transaction.ClientConfigureTransaction.from_dict(
jbody['clientConfigureTransaction'],
self.ctx
)
if jbody.get('lastTransactionID') is not None:
parsed_body['lastTransactionID'] = \
jbody.get('lastTransactionID')
elif str(response.status) == "400":
if jbody.get('clientConfigureRejectTransaction') is not None:
parsed_body['clientConfigureRejectTransaction'] = \
self.ctx.transaction.ClientConfigureRejectTransaction.from_dict(
jbody['clientConfigureRejectTransaction'],
self.ctx
)
if jbody.get('lastTransactionID') is not None:
parsed_body['lastTransactionID'] = \
jbody.get('lastTransactionID')
if jbody.get('errorCode') is not None:
parsed_body['errorCode'] = \
jbody.get('errorCode')
if jbody.get('errorMessage') is not None:
parsed_body['errorMessage'] = \
jbody.get('errorMessage')
elif str(response.status) == "403":
if jbody.get('clientConfigureRejectTransaction') is not None:
parsed_body['clientConfigureRejectTransaction'] = \
self.ctx.transaction.ClientConfigureRejectTransaction.from_dict(
jbody['clientConfigureRejectTransaction'],
self.ctx
)
if jbody.get('lastTransactionID') is not None:
parsed_body['lastTransactionID'] = \
jbody.get('lastTransactionID')
if jbody.get('errorCode') is not None:
parsed_body['errorCode'] = \
jbody.get('errorCode')
if jbody.get('errorMessage') is not None:
parsed_body['errorMessage'] = \
jbody.get('errorMessage')
elif str(response.status) == "401":
if jbody.get('errorCode') is not None:
parsed_body['errorCode'] = \
jbody.get('errorCode')
if jbody.get('errorMessage') is not None:
parsed_body['errorMessage'] = \
jbody.get('errorMessage')
elif str(response.status) == "404":
if jbody.get('errorCode') is not None:
parsed_body['errorCode'] = \
jbody.get('errorCode')
if jbody.get('errorMessage') is not None:
parsed_body['errorMessage'] = \
jbody.get('errorMessage')
elif str(response.status) == "405":
if jbody.get('errorCode') is not None:
parsed_body['errorCode'] = \
jbody.get('errorCode')
if jbody.get('errorMessage') is not None:
parsed_body['errorMessage'] = \
jbody.get('errorMessage')
#
# Unexpected response status
#
else:
parsed_body = jbody
response.body = parsed_body
return response | Set the client-configurable portions of an Account.
Args:
accountID:
Account Identifier
alias:
Client-defined alias (name) for the Account
marginRate:
The string representation of a decimal number.
Returns:
v20.response.Response containing the results from submitting the
request | Below is the instruction that describes the task:
### Input:
Set the client-configurable portions of an Account.
Args:
accountID:
Account Identifier
alias:
Client-defined alias (name) for the Account
marginRate:
The string representation of a decimal number.
Returns:
v20.response.Response containing the results from submitting the
request
### Response:
def configure(
self,
accountID,
**kwargs
):
"""
Set the client-configurable portions of an Account.
Args:
accountID:
Account Identifier
alias:
Client-defined alias (name) for the Account
marginRate:
The string representation of a decimal number.
Returns:
v20.response.Response containing the results from submitting the
request
"""
request = Request(
'PATCH',
'/v3/accounts/{accountID}/configuration'
)
request.set_path_param(
'accountID',
accountID
)
body = EntityDict()
if 'alias' in kwargs:
body.set('alias', kwargs['alias'])
if 'marginRate' in kwargs:
body.set('marginRate', kwargs['marginRate'])
request.set_body_dict(body.dict)
response = self.ctx.request(request)
if response.content_type is None:
return response
if not response.content_type.startswith("application/json"):
return response
jbody = json.loads(response.raw_body)
parsed_body = {}
#
# Parse responses as defined by the API specification
#
if str(response.status) == "200":
if jbody.get('clientConfigureTransaction') is not None:
parsed_body['clientConfigureTransaction'] = \
self.ctx.transaction.ClientConfigureTransaction.from_dict(
jbody['clientConfigureTransaction'],
self.ctx
)
if jbody.get('lastTransactionID') is not None:
parsed_body['lastTransactionID'] = \
jbody.get('lastTransactionID')
elif str(response.status) == "400":
if jbody.get('clientConfigureRejectTransaction') is not None:
parsed_body['clientConfigureRejectTransaction'] = \
self.ctx.transaction.ClientConfigureRejectTransaction.from_dict(
jbody['clientConfigureRejectTransaction'],
self.ctx
)
if jbody.get('lastTransactionID') is not None:
parsed_body['lastTransactionID'] = \
jbody.get('lastTransactionID')
if jbody.get('errorCode') is not None:
parsed_body['errorCode'] = \
jbody.get('errorCode')
if jbody.get('errorMessage') is not None:
parsed_body['errorMessage'] = \
jbody.get('errorMessage')
elif str(response.status) == "403":
if jbody.get('clientConfigureRejectTransaction') is not None:
parsed_body['clientConfigureRejectTransaction'] = \
self.ctx.transaction.ClientConfigureRejectTransaction.from_dict(
jbody['clientConfigureRejectTransaction'],
self.ctx
)
if jbody.get('lastTransactionID') is not None:
parsed_body['lastTransactionID'] = \
jbody.get('lastTransactionID')
if jbody.get('errorCode') is not None:
parsed_body['errorCode'] = \
jbody.get('errorCode')
if jbody.get('errorMessage') is not None:
parsed_body['errorMessage'] = \
jbody.get('errorMessage')
elif str(response.status) == "401":
if jbody.get('errorCode') is not None:
parsed_body['errorCode'] = \
jbody.get('errorCode')
if jbody.get('errorMessage') is not None:
parsed_body['errorMessage'] = \
jbody.get('errorMessage')
elif str(response.status) == "404":
if jbody.get('errorCode') is not None:
parsed_body['errorCode'] = \
jbody.get('errorCode')
if jbody.get('errorMessage') is not None:
parsed_body['errorMessage'] = \
jbody.get('errorMessage')
elif str(response.status) == "405":
if jbody.get('errorCode') is not None:
parsed_body['errorCode'] = \
jbody.get('errorCode')
if jbody.get('errorMessage') is not None:
parsed_body['errorMessage'] = \
jbody.get('errorMessage')
#
# Unexpected response status
#
else:
parsed_body = jbody
response.body = parsed_body
return response |
def get_unchanged(src_list, npred_dict_new,
                  npred_dict_old,
                  npred_threshold=1e4,
                  frac_threshold=0.9):
    """Compare two dictionaries of npred values and list the sources
    whose predicted counts have not changed beyond the set thresholds.

    Parameters
    ----------
    src_list : list
        List of sources to examine
    npred_dict_new : dict
        Maps source name to npred for the current weights file
    npred_dict_old : dict or None
        Maps source name to npred for the previous weights file;
        ``None`` is treated as "no previous npred" (i.e. 0.)
    npred_threshold : float
        Minimum value of npred above which to consider sources changed
    frac_threshold : float
        Value of npred_old / npred_new above which to consider sources
        unchanged

    Returns
    -------
    l : list
        Names of 'unchanged' sources
    """
    unchanged = []
    for name in src_list:
        new_val = npred_dict_new[name]
        # Sources with a small npred are always treated as unchanged.
        if new_val < npred_threshold:
            unchanged.append(name)
            continue
        old_val = 0. if npred_dict_old is None else npred_dict_old[name]
        # A high old/new ratio means the prediction barely moved.
        if old_val / new_val > frac_threshold:
            unchanged.append(name)
    return unchanged  # | Compare two dictionarys of npreds, and get the list of sources
that have changed less than set thresholds
Parameters
----------
src_list : list
List of sources to examine
npred_dict_new : dict
Dictionary mapping source name to npred for the current weights file
npred_dict_old : dict
Dictionary mapping source name to npred for the previous weights file
npred_threshold : float
Minimum value of npred above which to consider sources changed
frac_threshold : float
Value of npred_old / npred_new above which to consider sources unchanged
Returns
-------
l : list
Names of 'unchanged' sources | Below is the the instruction that describes the task:
### Input:
Compare two dictionaries of npreds, and get the list of sources
that have changed less than set thresholds
Parameters
----------
src_list : list
List of sources to examine
npred_dict_new : dict
Dictionary mapping source name to npred for the current weights file
npred_dict_old : dict
Dictionary mapping source name to npred for the previous weights file
npred_threshold : float
Minimum value of npred above which to consider sources changed
frac_threshold : float
Value of npred_old / npred_new above which to consider sources unchanged
Returns
-------
l : list
Names of 'unchanged' sources
### Response:
def get_unchanged(src_list, npred_dict_new,
npred_dict_old,
npred_threshold=1e4,
frac_threshold=0.9):
"""Compare two dictionarys of npreds, and get the list of sources
than have changed less that set thresholds
Parameters
----------
src_list : list
List of sources to examine
npred_dict_new : dict
Dictionary mapping source name to npred for the current weights file
npred_dict_old : dict
Dictionary mapping source name to npred for the previous weights file
npred_threshold : float
Minimum value of npred above which to consider sources changed
frac_threshold : float
Value of npred_old / npred_new above which to consider sources unchanged
Returns
-------
l : list
Names of 'unchanged' sources
"""
o = []
for s in src_list:
npred_new = npred_dict_new[s]
if npred_new < npred_threshold:
o += [s]
continue
if npred_dict_old is None:
npred_old = 0.
else:
npred_old = npred_dict_old[s]
frac = npred_old / npred_new
if frac > frac_threshold:
o += [s]
return o |
def kill_pane(self, pane):
    """
    Kill the given pane, and remove it from the arrangement.
    """
    assert isinstance(pane, Pane)

    # First terminate the pane's process, if it is still running...
    process = pane.process
    if not process.is_terminated:
        process.kill()

    # ...then drop the pane from the layout.
    self.arrangement.remove_pane(pane)  # | Kill the given pane, and remove it from the arrangement.
### Input:
Kill the given pane, and remove it from the arrangement.
### Response:
def kill_pane(self, pane):
"""
Kill the given pane, and remove it from the arrangement.
"""
assert isinstance(pane, Pane)
# Send kill signal.
if not pane.process.is_terminated:
pane.process.kill()
# Remove from layout.
self.arrangement.remove_pane(pane) |
def close_all():
    """Close all open/active plotters.

    Returns
    -------
    bool
        ``True`` once every registered plotter has been closed and the
        registry has been emptied.
    """
    # Only the plotter objects are needed here; the registry keys are
    # irrelevant, so iterate over values() instead of items().
    for plotter in _ALL_PLOTTERS.values():
        plotter.close()
    _ALL_PLOTTERS.clear()
    return True  # | Close all open/active plotters
### Input:
Close all open/active plotters
### Response:
def close_all():
"""Close all open/active plotters"""
for key, p in _ALL_PLOTTERS.items():
p.close()
_ALL_PLOTTERS.clear()
return True |
def json_dict_copy(json_object, property_list, defaultValue=None):
    """Copy selected properties from ``json_object`` into a new dict.

    Each entry of ``property_list`` names a target property and may list
    alternate source keys that are tried first, e.g.::

        property_list = [
            { "name":"name", "alternateName": ["name","title"]},
            { "name":"birthDate", "alternateName": ["dob","dateOfBirth"] },
            { "name":"description" }
        ]

    For every property the first alternate key with a non-None value wins;
    otherwise the property's own name is looked up (copied even when its
    value is None), and finally ``defaultValue`` is used when given.

    :param json_object: source dict to copy values from
    :param property_list: list of property descriptors (see above)
    :param defaultValue: fallback used when no source key is present
    :return: new dict containing the resolved properties
    """
    ret = {}
    for prop in property_list:
        p_name = prop["name"]
        # Try the alternate names first; the first non-None value wins.
        for alias in prop.get("alternateName", []):
            value = json_object.get(alias)
            if value is not None:
                ret[p_name] = value
                break
        if p_name not in ret:
            if p_name in json_object:
                ret[p_name] = json_object[p_name]
            elif defaultValue is not None:
                ret[p_name] = defaultValue
    return ret  # | property_list = [
{ "name":"name", "alternateName": ["name","title"]},
{ "name":"birthDate", "alternateName": ["dob","dateOfBirth"] },
{ "name":"description" }
] | Below is the the instruction that describes the task:
### Input:
property_list = [
{ "name":"name", "alternateName": ["name","title"]},
{ "name":"birthDate", "alternateName": ["dob","dateOfBirth"] },
{ "name":"description" }
]
### Response:
def json_dict_copy(json_object, property_list, defaultValue=None):
"""
property_list = [
{ "name":"name", "alternateName": ["name","title"]},
{ "name":"birthDate", "alternateName": ["dob","dateOfBirth"] },
{ "name":"description" }
]
"""
ret = {}
for prop in property_list:
p_name = prop["name"]
for alias in prop.get("alternateName", []):
if json_object.get(alias) is not None:
ret[p_name] = json_object.get(alias)
break
if not p_name in ret:
if p_name in json_object:
ret[p_name] = json_object[p_name]
elif defaultValue is not None:
ret[p_name] = defaultValue
return ret |
def _merge_state_from_predecessors(self, node):
"""
Get abstract states for all predecessors of the node, merge them, and return the merged state.
:param node: The node in graph.
:return: A merged state, or None if no predecessor is available.
"""
preds = self._graph_visitor.predecessors(node)
states = [ self._state_map[n] for n in preds if n in self._state_map ]
if not states:
return None
return reduce(lambda s0, s1: self._merge_states(node, s0, s1), states[1:], states[0]) | Get abstract states for all predecessors of the node, merge them, and return the merged state.
:param node: The node in graph.
:return: A merged state, or None if no predecessor is available. | Below is the the instruction that describes the task:
### Input:
Get abstract states for all predecessors of the node, merge them, and return the merged state.
:param node: The node in graph.
:return: A merged state, or None if no predecessor is available.
### Response:
def _merge_state_from_predecessors(self, node):
"""
Get abstract states for all predecessors of the node, merge them, and return the merged state.
:param node: The node in graph.
:return: A merged state, or None if no predecessor is available.
"""
preds = self._graph_visitor.predecessors(node)
states = [ self._state_map[n] for n in preds if n in self._state_map ]
if not states:
return None
return reduce(lambda s0, s1: self._merge_states(node, s0, s1), states[1:], states[0]) |
def get_monomers(self, ligands=True):
    """Retrieves all the `Monomers` from the AMPAL object.

    Parameters
    ----------
    ligands : bool, optional
        If true, will include ligand `Monomers`.
    """
    # Append the ligand monomers only when requested and present.
    include_ligands = bool(ligands and self.ligands)
    pool = (self._monomers + self.ligands._monomers
            if include_ligands else self._monomers)
    return iter(pool)  # | Retrieves all the `Monomers` from the AMPAL object.
Parameters
----------
ligands : bool, optional
If true, will include ligand `Monomers`. | Below is the the instruction that describes the task:
### Input:
Retrieves all the `Monomers` from the AMPAL object.
Parameters
----------
ligands : bool, optional
If true, will include ligand `Monomers`.
### Response:
def get_monomers(self, ligands=True):
"""Retrieves all the `Monomers` from the AMPAL object.
Parameters
----------
ligands : bool, optional
If true, will include ligand `Monomers`.
"""
if ligands and self.ligands:
monomers = self._monomers + self.ligands._monomers
else:
monomers = self._monomers
return iter(monomers) |
def get_proxies(self, proxy_url=None):
    """GetProxies.
    [Preview API]

    Query the configured proxies, optionally filtered by URL.

    :param str proxy_url: optional proxy URL used to filter the results
    :rtype: [Proxy]
    """
    query_params = {}
    if proxy_url is not None:
        query_params['proxyUrl'] = self._serialize.query(
            'proxy_url', proxy_url, 'str')
    response = self._send(
        http_method='GET',
        location_id='ec1f4311-f2b4-4c15-b2b8-8990b80d2908',
        version='5.0-preview.2',
        query_parameters=query_params)
    return self._deserialize('[Proxy]', self._unwrap_collection(response))  # | GetProxies.
[Preview API]
:param str proxy_url:
:rtype: [Proxy] | Below is the the instruction that describes the task:
### Input:
GetProxies.
[Preview API]
:param str proxy_url:
:rtype: [Proxy]
### Response:
def get_proxies(self, proxy_url=None):
"""GetProxies.
[Preview API]
:param str proxy_url:
:rtype: [Proxy]
"""
query_parameters = {}
if proxy_url is not None:
query_parameters['proxyUrl'] = self._serialize.query('proxy_url', proxy_url, 'str')
response = self._send(http_method='GET',
location_id='ec1f4311-f2b4-4c15-b2b8-8990b80d2908',
version='5.0-preview.2',
query_parameters=query_parameters)
return self._deserialize('[Proxy]', self._unwrap_collection(response)) |
def extent_string_to_array(extent_text):
    """Convert an extent string to an array.

    .. versionadded: 2.2.0

    :param extent_text: String representing an extent e.g.
        109.829170982, -8.13333290561, 111.005344795, -7.49226294379
    :type extent_text: str

    :returns: A list of floats, or None
    :rtype: list, None
    """
    parts = extent_text.replace(' ', '').split(',')
    count = len(parts)
    if count != 4:
        message = (
            'Extent need exactly 4 value but got %s instead' % count)
        LOGGER.error(message)
        return None
    # Convert every coordinate to float; reject the whole string when
    # any piece is not numeric.
    try:
        coordinates = [float(part) for part in parts]
    except ValueError as e:
        LOGGER.error(str(e))
        return None
    return coordinates  # | Convert an extent string to an array.
.. versionadded: 2.2.0
:param extent_text: String representing an extent e.g.
109.829170982, -8.13333290561, 111.005344795, -7.49226294379
:type extent_text: str
:returns: A list of floats, or None
:rtype: list, None | Below is the the instruction that describes the task:
### Input:
Convert an extent string to an array.
.. versionadded: 2.2.0
:param extent_text: String representing an extent e.g.
109.829170982, -8.13333290561, 111.005344795, -7.49226294379
:type extent_text: str
:returns: A list of floats, or None
:rtype: list, None
### Response:
def extent_string_to_array(extent_text):
"""Convert an extent string to an array.
.. versionadded: 2.2.0
:param extent_text: String representing an extent e.g.
109.829170982, -8.13333290561, 111.005344795, -7.49226294379
:type extent_text: str
:returns: A list of floats, or None
:rtype: list, None
"""
coordinates = extent_text.replace(' ', '').split(',')
count = len(coordinates)
if count != 4:
message = (
'Extent need exactly 4 value but got %s instead' % count)
LOGGER.error(message)
return None
# parse the value to float type
try:
coordinates = [float(i) for i in coordinates]
except ValueError as e:
message = str(e)
LOGGER.error(message)
return None
return coordinates |
def parents(self, alias, bank_id):
    """
    URL for getting or setting parent relationships for the specified bank

    :param alias:
    :param bank_id:
    :return:
    """
    # Layout: <root><safe-alias>/parent/ids/<bank-id>
    return '{0}{1}/parent/ids/{2}'.format(
        self._root, self._safe_alias(alias), bank_id)  # | URL for getting or setting parent relationships for the specified bank
:param alias:
:param bank_id:
:return: | Below is the the instruction that describes the task:
### Input:
URL for getting or setting parent relationships for the specified bank
:param alias:
:param bank_id:
:return:
### Response:
def parents(self, alias, bank_id):
"""
URL for getting or setting parent relationships for the specified bank
:param alias:
:param bank_id:
:return:
"""
return self._root + self._safe_alias(alias) + '/parent/ids/' + bank_id |
def Parse(self, stat, file_object, knowledge_base):
    """Parse the History file.

    Iterates over the records produced by IEParser and yields one
    BrowserHistoryItem per parsed entry.

    Args:
      stat: Stat entry for the file (unused here).
      file_object: File-like object with the history data; its ``urn``
        attribute is recorded as each item's source.
      knowledge_base: Client knowledge base (unused here).

    Yields:
      rdf_webhistory.BrowserHistoryItem objects.
    """
    # Both extra arguments are part of the parser interface but unused.
    _, _ = stat, knowledge_base
    # TODO(user): Convert this to use the far more intelligent plaso parser.
    ie = IEParser(file_object)
    for dat in ie.Parse():
        yield rdf_webhistory.BrowserHistoryItem(
            url=dat["url"],
            # netloc is the host portion of the URL.
            domain=urlparse.urlparse(dat["url"]).netloc,
            access_time=dat.get("mtime"),  # may be None for some records
            program_name="Internet Explorer",
            source_urn=file_object.urn)  # | Parse the History file.
### Input:
Parse the History file.
### Response:
def Parse(self, stat, file_object, knowledge_base):
"""Parse the History file."""
_, _ = stat, knowledge_base
# TODO(user): Convert this to use the far more intelligent plaso parser.
ie = IEParser(file_object)
for dat in ie.Parse():
yield rdf_webhistory.BrowserHistoryItem(
url=dat["url"],
domain=urlparse.urlparse(dat["url"]).netloc,
access_time=dat.get("mtime"),
program_name="Internet Explorer",
source_urn=file_object.urn) |
def com_google_fonts_check_family_panose_familytype(ttFonts):
    """Fonts have consistent PANOSE family type?

    Compares the OS/2 panose.bFamilyType value of every font in the
    family against the first font's value and yields FAIL when any font
    disagrees, PASS otherwise.
    """
    failed = False
    familytype = None

    for ttfont in ttFonts:
        current = ttfont['OS/2'].panose.bFamilyType
        if familytype is None:
            # Remember the first font's value as the reference.
            familytype = current
        if familytype != current:
            failed = True

    if failed:
        # Fixed typo in the user-facing message: "accross" -> "across".
        yield FAIL, ("PANOSE family type is not"
                     " the same across this family."
                     " In order to fix this,"
                     " please make sure that the panose.bFamilyType value"
                     " is the same in the OS/2 table of all of this family"
                     " font files.")
    else:
        yield PASS, "Fonts have consistent PANOSE family type."  # | Fonts have consistent PANOSE family type?
### Input:
Fonts have consistent PANOSE family type?
### Response:
def com_google_fonts_check_family_panose_familytype(ttFonts):
"""Fonts have consistent PANOSE family type?"""
failed = False
familytype = None
for ttfont in ttFonts:
if familytype is None:
familytype = ttfont['OS/2'].panose.bFamilyType
if familytype != ttfont['OS/2'].panose.bFamilyType:
failed = True
if failed:
yield FAIL, ("PANOSE family type is not"
" the same accross this family."
" In order to fix this,"
" please make sure that the panose.bFamilyType value"
" is the same in the OS/2 table of all of this family"
" font files.")
else:
yield PASS, "Fonts have consistent PANOSE family type." |
def header(self, title, level, key, width=80):
    """Return a reStructuredText section header.

    Example output (level 2, key ``header_2``)::

        .. _header_2:

        Header 2
        -------------------------------------------------------------------

    :param title: header title text
    :param level: section level; 1 ("="), 2 ("-") or 3 ("~")
    :param key: optional cross-reference label; when falsy no label line
        is emitted
    :param width: width of the underline in characters
    :return: the header as a single newline-joined string
    :raises Exception: when ``level`` is not 1, 2 or 3
    """
    linestyle_code = {1: "=", 2: "-", 3: "~"}
    if level not in linestyle_code:
        raise Exception("'level' argument has to be 1, 2 or 3")
    underline = linestyle_code[level] * width
    if key:
        # Emit an RST cross-reference target (".. _key:"), a blank line,
        # then the title and its underline. Previously the target lacked
        # the trailing colon required by reStructuredText.
        return "\n".join([".. _%s:" % key, "", title, underline])
    # Bug fix: the underline now honours `level` here too, instead of
    # always using "=".
    return "\n".join([title, underline])  # | Example::
.. _header_2:
Header 2
-------------------------------------------------------------------
**中文文档** | Below is the the instruction that describes the task:
### Input:
Example::
.. _header_2:
Header 2
-------------------------------------------------------------------
**中文文档**
### Response:
def header(self, title, level, key, width=80):
"""Example::
.. _header_2:
Header 2
-------------------------------------------------------------------
**中文文档**
"""
linestyle_code = {1: "=", 2: "-", 3: "~"}
if level not in linestyle_code:
raise Exception("'level' argument has to be 1, 2 or 3")
else:
linestyle = linestyle_code[level]
if key:
return "\n".join([".. _%s" % key, "", title, linestyle * width])
else:
return "\n".join([title, "=" * width]) |
def import_data_to_restful_server(args, content):
    '''call restful server to import data to the experiment'''
    nni_config = Config(get_config_filename(args))
    rest_port = nni_config.get_config('restServerPort')
    running, _ = check_rest_server_quick(rest_port)
    # Guard clause: bail out early when the REST server is down.
    if not running:
        print_error('Restful server is not running...')
        return None
    response = rest_post(import_data_url(rest_port), content, REST_TIME_OUT)
    if response and check_response(response):
        return response
    return None  # | call restful server to import data to the experiment
### Input:
call restful server to import data to the experiment
### Response:
def import_data_to_restful_server(args, content):
'''call restful server to import data to the experiment'''
nni_config = Config(get_config_filename(args))
rest_port = nni_config.get_config('restServerPort')
running, _ = check_rest_server_quick(rest_port)
if running:
response = rest_post(import_data_url(rest_port), content, REST_TIME_OUT)
if response and check_response(response):
return response
else:
print_error('Restful server is not running...')
return None |
def set_texture(script, textName="TEMP3D.png", textDim=1024):
    """Set texture

    Append a MeshLab "Set Texture" filter to the given filter script.

    Args:
        script: filter script (or script object) passed through to
            ``util.write_filter``.
        textName (str): texture file name; per the filter tooltip, if the
            file exists it is associated to the mesh, otherwise a dummy
            texture is created.
        textDim (int): dummy-texture size in pixels, used only when
            ``textName`` does not exist.

    Returns:
        None
    """
    # NOTE(review): the joined fragments below contain no whitespace
    # between XML attributes (e.g. name="textName"value="...") -- confirm
    # that the consumer of this filter script tolerates that.
    filter_xml = ''.join([
        ' <filter name="Set Texture">\n',
        ' <Param name="textName"',
        'value="%s"' % textName,
        'description="Texture file"',
        'type="RichString"',
        'tooltip="If the file exists it will be associated to the mesh else a dummy one will be created"',
        '/>\n',
        ' <Param name="textDim"',
        'value="%d"' % textDim,
        'description="Texture Dimension (px)"',
        'type="RichInt"',
        'tooltip="If the named texture doesn\'t exists the dummy one will be squared with this size"',
        '/>\n',
        ' </filter>\n'])
    util.write_filter(script, filter_xml)
    return None  # | Set texture
### Input:
Set texture
### Response:
def set_texture(script, textName="TEMP3D.png", textDim=1024):
"""Set texture
"""
filter_xml = ''.join([
' <filter name="Set Texture">\n',
' <Param name="textName"',
'value="%s"' % textName,
'description="Texture file"',
'type="RichString"',
'tooltip="If the file exists it will be associated to the mesh else a dummy one will be created"',
'/>\n',
' <Param name="textDim"',
'value="%d"' % textDim,
'description="Texture Dimension (px)"',
'type="RichInt"',
'tooltip="If the named texture doesn\'t exists the dummy one will be squared with this size"',
'/>\n',
' </filter>\n'])
util.write_filter(script, filter_xml)
return None |
def _rpm_py_has_popt_devel_dep(self):
    """Check if the RPM Python binding has a dependency on popt-devel.

    Scans the rpmlib.h include header in the source tree for an
    ``#include`` line referencing ``popt.h``.

    :return: True when such an include line is found, False otherwise.
    """
    popt_include = re.compile(r'^#include .*popt.h.*$')
    with open('../include/rpm/rpmlib.h') as f_in:
        # any() stops at the first matching include line.
        return any(popt_include.match(line) for line in f_in)  # | Check if the RPM Python binding has a depndency to popt-devel.
Search include header files in the source code to check it. | Below is the the instruction that describes the task:
### Input:
Check if the RPM Python binding has a dependency on popt-devel.
Search include header files in the source code to check it.
### Response:
def _rpm_py_has_popt_devel_dep(self):
"""Check if the RPM Python binding has a depndency to popt-devel.
Search include header files in the source code to check it.
"""
found = False
with open('../include/rpm/rpmlib.h') as f_in:
for line in f_in:
if re.match(r'^#include .*popt.h.*$', line):
found = True
break
return found |
def _compute_site_amplification(cls, sites, coeffs):
"""
Compute equation (8):
``G = p*log(VS30) + q``
"""
return coeffs['p']*np.log10(sites.vs30) + coeffs['q'] | Compute equation (8):
``G = p*log(VS30) + q`` | Below is the the instruction that describes the task:
### Input:
Compute equation (8):
``G = p*log(VS30) + q``
### Response:
def _compute_site_amplification(cls, sites, coeffs):
"""
Compute equation (8):
``G = p*log(VS30) + q``
"""
return coeffs['p']*np.log10(sites.vs30) + coeffs['q'] |
def change_password(self):
    """
    View function for a user to change their password.

    Supports html and json requests.

    On a valid submission the new password is persisted, a success
    message is flashed, and the client is redirected (HTML) or given a
    fresh auth token (JSON). JSON validation errors are returned as an
    error payload; otherwise the form is (re-)rendered.
    """
    form = self._get_form('SECURITY_CHANGE_PASSWORD_FORM')
    if form.validate_on_submit():
        self.security_service.change_password(
            current_user._get_current_object(),
            form.new_password.data)
        # Register the DB commit to run after this request completes.
        self.after_this_request(self._commit)
        self.flash(_('flask_unchained.bundles.security:flash.password_change'),
                   category='success')
        if request.is_json:
            # JSON clients get a fresh auth token instead of a redirect.
            return self.jsonify({'token': current_user.get_auth_token()})
        return self.redirect('SECURITY_POST_CHANGE_REDIRECT_ENDPOINT',
                             'SECURITY_POST_LOGIN_REDIRECT_ENDPOINT')
    elif form.errors and request.is_json:
        return self.errors(form.errors)
    # GET request, or an HTML submission with validation errors.
    return self.render('change_password',
                       change_password_form=form,
                       **self.security.run_ctx_processor('change_password'))  # | View function for a user to change their password.
Supports html and json requests. | Below is the the instruction that describes the task:
### Input:
View function for a user to change their password.
Supports html and json requests.
### Response:
def change_password(self):
"""
View function for a user to change their password.
Supports html and json requests.
"""
form = self._get_form('SECURITY_CHANGE_PASSWORD_FORM')
if form.validate_on_submit():
self.security_service.change_password(
current_user._get_current_object(),
form.new_password.data)
self.after_this_request(self._commit)
self.flash(_('flask_unchained.bundles.security:flash.password_change'),
category='success')
if request.is_json:
return self.jsonify({'token': current_user.get_auth_token()})
return self.redirect('SECURITY_POST_CHANGE_REDIRECT_ENDPOINT',
'SECURITY_POST_LOGIN_REDIRECT_ENDPOINT')
elif form.errors and request.is_json:
return self.errors(form.errors)
return self.render('change_password',
change_password_form=form,
**self.security.run_ctx_processor('change_password')) |
def execute_ping(host_list, remote_user, remote_pass,
                 sudo=False, sudo_user=None, sudo_pass=None):
    '''
    Execute the ansible "ping" module on some hosts and print the
    result, the failed hosts, and the "dark" hosts.

    :param host_list: hosts to run against
    :param remote_user: remote ssh user
    :param remote_pass: remote ssh password
    :param sudo: whether to escalate privileges with sudo
    :param sudo_user: user to sudo to
    :param sudo_pass: password for sudo
    '''
    # NOTE: Python 2 code (uses print statements).
    runner = spam.ansirunner.AnsibleRunner()
    result, failed_hosts = runner.ansible_perform_operation(
        host_list=host_list,
        remote_user=remote_user,
        remote_pass=remote_pass,
        sudo=sudo,
        sudo_pass=sudo_pass,
        sudo_user=sudo_user,
        module="ping")
    print result, failed_hosts
    # "dark" hosts -- presumably hosts that could not be contacted at
    # all; confirm against ansirunner's documentation.
    dark_hosts = runner.ansible_get_dark_hosts(result)
    print "dark hosts: ", dark_hosts  # | Execute ls on some hosts
### Input:
Execute ls on some hosts
### Response:
def execute_ping(host_list, remote_user, remote_pass,
sudo=False, sudo_user=None, sudo_pass=None):
'''
Execute ls on some hosts
'''
runner = spam.ansirunner.AnsibleRunner()
result, failed_hosts = runner.ansible_perform_operation(
host_list=host_list,
remote_user=remote_user,
remote_pass=remote_pass,
sudo=sudo,
sudo_pass=sudo_pass,
sudo_user=sudo_user,
module="ping")
print result, failed_hosts
dark_hosts = runner.ansible_get_dark_hosts(result)
print "dark hosts: ", dark_hosts |
def search_datasets(dataset_id=None,
                    dataset_name=None,
                    collection_name=None,
                    data_type=None,
                    unit_id=None,
                    scenario_id=None,
                    metadata_key=None,
                    metadata_val=None,
                    attr_id = None,
                    type_id = None,
                    unconnected = None,
                    inc_metadata='N',
                    inc_val = 'N',
                    page_start = 0,
                    page_size = 2000,
                    **kwargs):
    """
    Get multiple datasets, based on several
    filters. If all filters are set to None, all
    datasets in the DB (that the user is allowed to see)
    will be returned.

    Name filters (dataset_name, collection_name, metadata_key/val) are
    case-insensitive substring matches; the others are exact matches.
    ``unconnected`` ('Y'/'N') selects datasets with no / at least one
    resource scenario. ``inc_metadata`` and ``inc_val`` ('Y'/'N') control
    whether metadata and the raw value are included in each result.
    Results are paged from ``page_start`` with at most ``page_size`` rows.
    ``kwargs`` must carry ``user_id`` for the visibility check.
    """
    log.info("Searching datasets: \ndatset_id: %s,\n"
             "datset_name: %s,\n"
             "collection_name: %s,\n"
             "data_type: %s,\n"
             "unit_id: %s,\n"
             "scenario_id: %s,\n"
             "metadata_key: %s,\n"
             "metadata_val: %s,\n"
             "attr_id: %s,\n"
             "type_id: %s,\n"
             "unconnected: %s,\n"
             "inc_metadata: %s,\n"
             "inc_val: %s,\n"
             "page_start: %s,\n"
             "page_size: %s" % (dataset_id,
                                dataset_name,
                                collection_name,
                                data_type,
                                unit_id,
                                scenario_id,
                                metadata_key,
                                metadata_val,
                                attr_id,
                                type_id,
                                unconnected,
                                inc_metadata,
                                inc_val,
                                page_start,
                                page_size))
    if page_size is None:
        page_size = config.get('SEARCH', 'page_size', 2000)
    user_id = int(kwargs.get('user_id'))
    # Base select; 'metadata' is a null placeholder filled in later.
    dataset_qry = db.DBSession.query(Dataset.id,
                                     Dataset.type,
                                     Dataset.unit_id,
                                     Dataset.name,
                                     Dataset.hidden,
                                     Dataset.cr_date,
                                     Dataset.created_by,
                                     DatasetOwner.user_id,
                                     null().label('metadata'),
                                     Dataset.value
                                     )
    #Dataset ID is unique, so there's no point using the other filters.
    #Only use other filters if the datset ID is not specified.
    if dataset_id is not None:
        dataset_qry = dataset_qry.filter(
            Dataset.id==dataset_id)
    else:
        if dataset_name is not None:
            dataset_qry = dataset_qry.filter(
                func.lower(Dataset.name).like("%%%s%%"%dataset_name.lower())
            )
        if collection_name is not None:
            dc = aliased(DatasetCollection)
            dci = aliased(DatasetCollectionItem)
            # Join collection by name, then its items back to the dataset.
            dataset_qry = dataset_qry.join(dc,
                func.lower(dc.name).like("%%%s%%"%collection_name.lower())
                ).join(dci,and_(
                dci.collection_id == dc.id,
                dci.dataset_id == Dataset.id))
        if data_type is not None:
            dataset_qry = dataset_qry.filter(
                func.lower(Dataset.type) == data_type.lower())
        #null is a valid unit, so we need a way for the searcher
        #to specify that they want to search for datasets with a null unit
        #rather than ignoring the unit. We use 'null' to do this.
        if unit_id is not None:
            dataset_qry = dataset_qry.filter(
                Dataset.unit_id == unit_id)
        if scenario_id is not None:
            dataset_qry = dataset_qry.join(ResourceScenario,
                and_(ResourceScenario.dataset_id == Dataset.id,
                ResourceScenario.scenario_id == scenario_id))
        if attr_id is not None:
            dataset_qry = dataset_qry.join(
                ResourceScenario, ResourceScenario.dataset_id == Dataset.id).join(
                ResourceAttr, and_(ResourceAttr.id==ResourceScenario.resource_attr_id,
                ResourceAttr.attr_id==attr_id))
        if type_id is not None:
            dataset_qry = dataset_qry.join(
                ResourceScenario, ResourceScenario.dataset_id == Dataset.id).join(
                ResourceAttr, ResourceAttr.id==ResourceScenario.resource_attr_id).join(
                TypeAttr, and_(TypeAttr.attr_id==ResourceAttr.attr_id, TypeAttr.type_id==type_id))
        if unconnected == 'Y':
            # Anti-join: keep only datasets with no resource scenario.
            stmt = db.DBSession.query(distinct(ResourceScenario.dataset_id).label('dataset_id'),
                                      literal_column("0").label('col')).subquery()
            dataset_qry = dataset_qry.outerjoin(
                stmt, stmt.c.dataset_id == Dataset.id)
            dataset_qry = dataset_qry.filter(stmt.c.col == None)
        elif unconnected == 'N':
            #The dataset has to be connected to something
            stmt = db.DBSession.query(distinct(ResourceScenario.dataset_id).label('dataset_id'),
                                      literal_column("0").label('col')).subquery()
            dataset_qry = dataset_qry.join(
                stmt, stmt.c.dataset_id == Dataset.id)
        if metadata_key is not None and metadata_val is not None:
            dataset_qry = dataset_qry.join(Metadata,
                and_(Metadata.dataset_id == Dataset.id,
                func.lower(Metadata.key).like("%%%s%%"%metadata_key.lower()),
                func.lower(Metadata.value).like("%%%s%%"%metadata_val.lower())))
        elif metadata_key is not None and metadata_val is None:
            dataset_qry = dataset_qry.join(Metadata,
                and_(Metadata.dataset_id == Dataset.id,
                func.lower(Metadata.key).like("%%%s%%"%metadata_key.lower())))
        elif metadata_key is None and metadata_val is not None:
            dataset_qry = dataset_qry.join(Metadata,
                and_(Metadata.dataset_id == Dataset.id,
                func.lower(Metadata.value).like("%%%s%%"%metadata_val.lower())))
    #All datasets must be joined on dataset owner so only datasets that the
    #user can see are retrieved.
    dataset_qry = dataset_qry.outerjoin(DatasetOwner,
                                and_(DatasetOwner.dataset_id==Dataset.id,
                                DatasetOwner.user_id==user_id))
    # Visible when not hidden, or hidden but owned by the requesting user.
    dataset_qry = dataset_qry.filter(or_(Dataset.hidden=='N', and_(DatasetOwner.user_id is not None, Dataset.hidden=='Y')))
    log.info(str(dataset_qry))
    datasets = dataset_qry.all()
    log.info("Retrieved %s datasets", len(datasets))
    #page the datasets:
    if page_start + page_size > len(datasets):
        page_end = None
    else:
        page_end = page_start + page_size
    datasets = datasets[page_start:page_end]
    log.info("Datasets paged from result %s to %s", page_start, page_end)
    datasets_to_return = []
    for dataset_row in datasets:
        dataset_dict = dataset_row._asdict()
        if inc_val == 'N':
            dataset_dict['value'] = None
        else:
            #convert the value row into a string as it is returned as a binary
            if dataset_row.value is not None:
                dataset_dict['value'] = str(dataset_row.value)
        if inc_metadata=='Y':
            # NOTE(review): the select above exposes the id column as
            # 'id'; confirm 'dataset_row.dataset_id' resolves here.
            metadata = db.DBSession.query(Metadata).filter(Metadata.dataset_id==dataset_row.dataset_id).all()
            dataset_dict['metadata'] = metadata
        else:
            dataset_dict['metadata'] = []
        # Wrap each row dict in an ad-hoc namedtuple for the caller.
        dataset = namedtuple('Dataset', dataset_dict.keys())(**dataset_dict)
        datasets_to_return.append(dataset)
    return datasets_to_return  # | Get multiple datasets, based on several
filters. If all filters are set to None, all
datasets in the DB (that the user is allowe to see)
will be returned. | Below is the the instruction that describes the task:
### Input:
Get multiple datasets, based on several
filters. If all filters are set to None, all
datasets in the DB (that the user is allowe to see)
will be returned.
### Response:
def search_datasets(dataset_id=None,
dataset_name=None,
collection_name=None,
data_type=None,
unit_id=None,
scenario_id=None,
metadata_key=None,
metadata_val=None,
attr_id = None,
type_id = None,
unconnected = None,
inc_metadata='N',
inc_val = 'N',
page_start = 0,
page_size = 2000,
**kwargs):
"""
Get multiple datasets, based on several
filters. If all filters are set to None, all
datasets in the DB (that the user is allowe to see)
will be returned.
"""
log.info("Searching datasets: \ndatset_id: %s,\n"
"datset_name: %s,\n"
"collection_name: %s,\n"
"data_type: %s,\n"
"unit_id: %s,\n"
"scenario_id: %s,\n"
"metadata_key: %s,\n"
"metadata_val: %s,\n"
"attr_id: %s,\n"
"type_id: %s,\n"
"unconnected: %s,\n"
"inc_metadata: %s,\n"
"inc_val: %s,\n"
"page_start: %s,\n"
"page_size: %s" % (dataset_id,
dataset_name,
collection_name,
data_type,
unit_id,
scenario_id,
metadata_key,
metadata_val,
attr_id,
type_id,
unconnected,
inc_metadata,
inc_val,
page_start,
page_size))
if page_size is None:
page_size = config.get('SEARCH', 'page_size', 2000)
user_id = int(kwargs.get('user_id'))
dataset_qry = db.DBSession.query(Dataset.id,
Dataset.type,
Dataset.unit_id,
Dataset.name,
Dataset.hidden,
Dataset.cr_date,
Dataset.created_by,
DatasetOwner.user_id,
null().label('metadata'),
Dataset.value
)
#Dataset ID is unique, so there's no point using the other filters.
#Only use other filters if the datset ID is not specified.
if dataset_id is not None:
dataset_qry = dataset_qry.filter(
Dataset.id==dataset_id)
else:
if dataset_name is not None:
dataset_qry = dataset_qry.filter(
func.lower(Dataset.name).like("%%%s%%"%dataset_name.lower())
)
if collection_name is not None:
dc = aliased(DatasetCollection)
dci = aliased(DatasetCollectionItem)
dataset_qry = dataset_qry.join(dc,
func.lower(dc.name).like("%%%s%%"%collection_name.lower())
).join(dci,and_(
dci.collection_id == dc.id,
dci.dataset_id == Dataset.id))
if data_type is not None:
dataset_qry = dataset_qry.filter(
func.lower(Dataset.type) == data_type.lower())
#null is a valid unit, so we need a way for the searcher
#to specify that they want to search for datasets with a null unit
#rather than ignoring the unit. We use 'null' to do this.
if unit_id is not None:
dataset_qry = dataset_qry.filter(
Dataset.unit_id == unit_id)
if scenario_id is not None:
dataset_qry = dataset_qry.join(ResourceScenario,
and_(ResourceScenario.dataset_id == Dataset.id,
ResourceScenario.scenario_id == scenario_id))
if attr_id is not None:
dataset_qry = dataset_qry.join(
ResourceScenario, ResourceScenario.dataset_id == Dataset.id).join(
ResourceAttr, and_(ResourceAttr.id==ResourceScenario.resource_attr_id,
ResourceAttr.attr_id==attr_id))
if type_id is not None:
dataset_qry = dataset_qry.join(
ResourceScenario, ResourceScenario.dataset_id == Dataset.id).join(
ResourceAttr, ResourceAttr.id==ResourceScenario.resource_attr_id).join(
TypeAttr, and_(TypeAttr.attr_id==ResourceAttr.attr_id, TypeAttr.type_id==type_id))
if unconnected == 'Y':
stmt = db.DBSession.query(distinct(ResourceScenario.dataset_id).label('dataset_id'),
literal_column("0").label('col')).subquery()
dataset_qry = dataset_qry.outerjoin(
stmt, stmt.c.dataset_id == Dataset.id)
dataset_qry = dataset_qry.filter(stmt.c.col == None)
elif unconnected == 'N':
#The dataset has to be connected to something
stmt = db.DBSession.query(distinct(ResourceScenario.dataset_id).label('dataset_id'),
literal_column("0").label('col')).subquery()
dataset_qry = dataset_qry.join(
stmt, stmt.c.dataset_id == Dataset.id)
if metadata_key is not None and metadata_val is not None:
dataset_qry = dataset_qry.join(Metadata,
and_(Metadata.dataset_id == Dataset.id,
func.lower(Metadata.key).like("%%%s%%"%metadata_key.lower()),
func.lower(Metadata.value).like("%%%s%%"%metadata_val.lower())))
elif metadata_key is not None and metadata_val is None:
dataset_qry = dataset_qry.join(Metadata,
and_(Metadata.dataset_id == Dataset.id,
func.lower(Metadata.key).like("%%%s%%"%metadata_key.lower())))
elif metadata_key is None and metadata_val is not None:
dataset_qry = dataset_qry.join(Metadata,
and_(Metadata.dataset_id == Dataset.id,
func.lower(Metadata.value).like("%%%s%%"%metadata_val.lower())))
#All datasets must be joined on dataset owner so only datasets that the
#user can see are retrieved.
dataset_qry = dataset_qry.outerjoin(DatasetOwner,
and_(DatasetOwner.dataset_id==Dataset.id,
DatasetOwner.user_id==user_id))
dataset_qry = dataset_qry.filter(or_(Dataset.hidden=='N', and_(DatasetOwner.user_id is not None, Dataset.hidden=='Y')))
log.info(str(dataset_qry))
datasets = dataset_qry.all()
log.info("Retrieved %s datasets", len(datasets))
#page the datasets:
if page_start + page_size > len(datasets):
page_end = None
else:
page_end = page_start + page_size
datasets = datasets[page_start:page_end]
log.info("Datasets paged from result %s to %s", page_start, page_end)
datasets_to_return = []
for dataset_row in datasets:
dataset_dict = dataset_row._asdict()
if inc_val == 'N':
dataset_dict['value'] = None
else:
#convert the value row into a string as it is returned as a binary
if dataset_row.value is not None:
dataset_dict['value'] = str(dataset_row.value)
if inc_metadata=='Y':
metadata = db.DBSession.query(Metadata).filter(Metadata.dataset_id==dataset_row.dataset_id).all()
dataset_dict['metadata'] = metadata
else:
dataset_dict['metadata'] = []
dataset = namedtuple('Dataset', dataset_dict.keys())(**dataset_dict)
datasets_to_return.append(dataset)
return datasets_to_return |
def _FormatTag(self, event):
"""Formats the event tag.
Args:
event (EventObject): event.
Returns:
str: event tag field.
"""
tag = getattr(event, 'tag', None)
if not tag:
return '-'
return ' '.join(tag.labels) | Formats the event tag.
Args:
event (EventObject): event.
Returns:
str: event tag field. | Below is the instruction that describes the task:
### Input:
Formats the event tag.
Args:
event (EventObject): event.
Returns:
str: event tag field.
### Response:
def _FormatTag(self, event):
"""Formats the event tag.
Args:
event (EventObject): event.
Returns:
str: event tag field.
"""
tag = getattr(event, 'tag', None)
if not tag:
return '-'
return ' '.join(tag.labels) |
def insert_rows(self, table, rows, selected_fields=None, **kwargs):
"""Insert rows into a table via the streaming API.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/tabledata/insertAll
Args:
table (Union[ \
:class:`~google.cloud.bigquery.table.Table`, \
:class:`~google.cloud.bigquery.table.TableReference`, \
str, \
]):
The destination table for the row data, or a reference to it.
rows (Union[ \
Sequence[Tuple], \
Sequence[dict], \
]):
Row data to be inserted. If a list of tuples is given, each
tuple should contain data for each schema field on the
current table and in the same order as the schema fields. If
a list of dictionaries is given, the keys must include all
required fields in the schema. Keys which do not correspond
to a field in the schema are ignored.
selected_fields (Sequence[ \
:class:`~google.cloud.bigquery.schema.SchemaField`, \
]):
The fields to return. Required if ``table`` is a
:class:`~google.cloud.bigquery.table.TableReference`.
kwargs (dict):
Keyword arguments to
:meth:`~google.cloud.bigquery.client.Client.insert_rows_json`.
Returns:
Sequence[Mappings]:
One mapping per row with insert errors: the "index" key
identifies the row, and the "errors" key contains a list of
the mappings describing one or more problems with the row.
Raises:
ValueError: if table's schema is not set
"""
table = _table_arg_to_table(table, default_project=self.project)
if not isinstance(table, Table):
raise TypeError(_NEED_TABLE_ARGUMENT)
schema = table.schema
# selected_fields can override the table schema.
if selected_fields is not None:
schema = selected_fields
if len(schema) == 0:
raise ValueError(
(
"Could not determine schema for table '{}'. Call client.get_table() "
"or pass in a list of schema fields to the selected_fields argument."
).format(table)
)
json_rows = [_record_field_to_json(schema, row) for row in rows]
return self.insert_rows_json(table, json_rows, **kwargs) | Insert rows into a table via the streaming API.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/tabledata/insertAll
Args:
table (Union[ \
:class:`~google.cloud.bigquery.table.Table`, \
:class:`~google.cloud.bigquery.table.TableReference`, \
str, \
]):
The destination table for the row data, or a reference to it.
rows (Union[ \
Sequence[Tuple], \
Sequence[dict], \
]):
Row data to be inserted. If a list of tuples is given, each
tuple should contain data for each schema field on the
current table and in the same order as the schema fields. If
a list of dictionaries is given, the keys must include all
required fields in the schema. Keys which do not correspond
to a field in the schema are ignored.
selected_fields (Sequence[ \
:class:`~google.cloud.bigquery.schema.SchemaField`, \
]):
The fields to return. Required if ``table`` is a
:class:`~google.cloud.bigquery.table.TableReference`.
kwargs (dict):
Keyword arguments to
:meth:`~google.cloud.bigquery.client.Client.insert_rows_json`.
Returns:
Sequence[Mappings]:
One mapping per row with insert errors: the "index" key
identifies the row, and the "errors" key contains a list of
the mappings describing one or more problems with the row.
Raises:
ValueError: if table's schema is not set | Below is the instruction that describes the task:
### Input:
Insert rows into a table via the streaming API.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/tabledata/insertAll
Args:
table (Union[ \
:class:`~google.cloud.bigquery.table.Table`, \
:class:`~google.cloud.bigquery.table.TableReference`, \
str, \
]):
The destination table for the row data, or a reference to it.
rows (Union[ \
Sequence[Tuple], \
Sequence[dict], \
]):
Row data to be inserted. If a list of tuples is given, each
tuple should contain data for each schema field on the
current table and in the same order as the schema fields. If
a list of dictionaries is given, the keys must include all
required fields in the schema. Keys which do not correspond
to a field in the schema are ignored.
selected_fields (Sequence[ \
:class:`~google.cloud.bigquery.schema.SchemaField`, \
]):
The fields to return. Required if ``table`` is a
:class:`~google.cloud.bigquery.table.TableReference`.
kwargs (dict):
Keyword arguments to
:meth:`~google.cloud.bigquery.client.Client.insert_rows_json`.
Returns:
Sequence[Mappings]:
One mapping per row with insert errors: the "index" key
identifies the row, and the "errors" key contains a list of
the mappings describing one or more problems with the row.
Raises:
ValueError: if table's schema is not set
### Response:
def insert_rows(self, table, rows, selected_fields=None, **kwargs):
"""Insert rows into a table via the streaming API.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/tabledata/insertAll
Args:
table (Union[ \
:class:`~google.cloud.bigquery.table.Table`, \
:class:`~google.cloud.bigquery.table.TableReference`, \
str, \
]):
The destination table for the row data, or a reference to it.
rows (Union[ \
Sequence[Tuple], \
Sequence[dict], \
]):
Row data to be inserted. If a list of tuples is given, each
tuple should contain data for each schema field on the
current table and in the same order as the schema fields. If
a list of dictionaries is given, the keys must include all
required fields in the schema. Keys which do not correspond
to a field in the schema are ignored.
selected_fields (Sequence[ \
:class:`~google.cloud.bigquery.schema.SchemaField`, \
]):
The fields to return. Required if ``table`` is a
:class:`~google.cloud.bigquery.table.TableReference`.
kwargs (dict):
Keyword arguments to
:meth:`~google.cloud.bigquery.client.Client.insert_rows_json`.
Returns:
Sequence[Mappings]:
One mapping per row with insert errors: the "index" key
identifies the row, and the "errors" key contains a list of
the mappings describing one or more problems with the row.
Raises:
ValueError: if table's schema is not set
"""
table = _table_arg_to_table(table, default_project=self.project)
if not isinstance(table, Table):
raise TypeError(_NEED_TABLE_ARGUMENT)
schema = table.schema
# selected_fields can override the table schema.
if selected_fields is not None:
schema = selected_fields
if len(schema) == 0:
raise ValueError(
(
"Could not determine schema for table '{}'. Call client.get_table() "
"or pass in a list of schema fields to the selected_fields argument."
).format(table)
)
json_rows = [_record_field_to_json(schema, row) for row in rows]
return self.insert_rows_json(table, json_rows, **kwargs) |
def BatchNorm(x, params, axis=(0, 1, 2), epsilon=1e-5,
center=True, scale=True, **unused_kwargs):
"""Layer construction function for a batch normalization layer."""
mean = np.mean(x, axis, keepdims=True)
# Fast but less numerically-stable variance calculation than np.var.
m1 = np.mean(x**2, axis, keepdims=True)
var = m1 - mean**2
z = (x - mean) / np.sqrt(var + epsilon)
# Expand the parameters to have the right axes.
beta, gamma = params
# TODO(phawkins): np.expand_dims should accept an axis tuple.
# (https://github.com/numpy/numpy/issues/12290)
ed = tuple(None if i in axis else slice(None) for i in range(np.ndim(x)))
beta = beta[ed]
gamma = gamma[ed]
# Return the z rescaled by the parameters if requested.
if center and scale:
return gamma * z + beta
if center:
return z + beta
if scale:
return gamma * z
return z | Layer construction function for a batch normalization layer. | Below is the instruction that describes the task:
### Input:
Layer construction function for a batch normalization layer.
### Response:
def BatchNorm(x, params, axis=(0, 1, 2), epsilon=1e-5,
center=True, scale=True, **unused_kwargs):
"""Layer construction function for a batch normalization layer."""
mean = np.mean(x, axis, keepdims=True)
# Fast but less numerically-stable variance calculation than np.var.
m1 = np.mean(x**2, axis, keepdims=True)
var = m1 - mean**2
z = (x - mean) / np.sqrt(var + epsilon)
# Expand the parameters to have the right axes.
beta, gamma = params
# TODO(phawkins): np.expand_dims should accept an axis tuple.
# (https://github.com/numpy/numpy/issues/12290)
ed = tuple(None if i in axis else slice(None) for i in range(np.ndim(x)))
beta = beta[ed]
gamma = gamma[ed]
# Return the z rescaled by the parameters if requested.
if center and scale:
return gamma * z + beta
if center:
return z + beta
if scale:
return gamma * z
return z |
def primary_keys_full(cls):
"""Get primary key properties for a SQLAlchemy cls.
Taken from marshmallow_sqlalchemy
"""
mapper = cls.__mapper__
return [
mapper.get_property_by_column(column)
for column in mapper.primary_key
] | Get primary key properties for a SQLAlchemy cls.
Taken from marshmallow_sqlalchemy | Below is the instruction that describes the task:
### Input:
Get primary key properties for a SQLAlchemy cls.
Taken from marshmallow_sqlalchemy
### Response:
def primary_keys_full(cls):
"""Get primary key properties for a SQLAlchemy cls.
Taken from marshmallow_sqlalchemy
"""
mapper = cls.__mapper__
return [
mapper.get_property_by_column(column)
for column in mapper.primary_key
] |
def set_title(self, title):
"""Sets the title.
arg: title (string): the new title
raise: InvalidArgument - ``title`` is invalid
raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
raise: NullArgument - ``title`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.repository.AssetForm.set_title_template
self._my_map['title'] = self._get_display_text(title, self.get_title_metadata()) | Sets the title.
arg: title (string): the new title
raise: InvalidArgument - ``title`` is invalid
raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
raise: NullArgument - ``title`` is ``null``
*compliance: mandatory -- This method must be implemented.* | Below is the instruction that describes the task:
### Input:
Sets the title.
arg: title (string): the new title
raise: InvalidArgument - ``title`` is invalid
raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
raise: NullArgument - ``title`` is ``null``
*compliance: mandatory -- This method must be implemented.*
### Response:
def set_title(self, title):
"""Sets the title.
arg: title (string): the new title
raise: InvalidArgument - ``title`` is invalid
raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
raise: NullArgument - ``title`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.repository.AssetForm.set_title_template
self._my_map['title'] = self._get_display_text(title, self.get_title_metadata()) |
def checkParametersInputFile(filename):
"""Check the do_x3dna output file and return list of parameters present in the file.
"""
fin = open(filename, 'r')
line = fin.readline()
line2 = fin.readline()
fin.close()
temp = re.split('\s+', line)
temp2 = re.split('\s+', line2)
if temp[0] == '#Minor':
return groovesParameters
if temp[0] == '#Shift':
return baseStepParameters
if temp[0] == '#X-disp':
return helicalBaseStepParameters
if temp[0] == '#Shear':
return basePairParameters
if temp[0] == '#Position':
return helicalAxisParameters
if temp2[0] == '#P':
return helicalRadiusParameters
if temp2[0] == '#alpha':
return backboneDihedrals | Check the do_x3dna output file and return list of parameters present in the file. | Below is the instruction that describes the task:
### Input:
Check the do_x3dna output file and return list of parameters present in the file.
### Response:
def checkParametersInputFile(filename):
"""Check the do_x3dna output file and return list of parameters present in the file.
"""
fin = open(filename, 'r')
line = fin.readline()
line2 = fin.readline()
fin.close()
temp = re.split('\s+', line)
temp2 = re.split('\s+', line2)
if temp[0] == '#Minor':
return groovesParameters
if temp[0] == '#Shift':
return baseStepParameters
if temp[0] == '#X-disp':
return helicalBaseStepParameters
if temp[0] == '#Shear':
return basePairParameters
if temp[0] == '#Position':
return helicalAxisParameters
if temp2[0] == '#P':
return helicalRadiusParameters
if temp2[0] == '#alpha':
return backboneDihedrals |
def save(self):
"""
Saves changes made to the locally cached SecurityDocument object's data
structures to the remote database.
"""
resp = self.r_session.put(
self.document_url,
data=self.json(),
headers={'Content-Type': 'application/json'}
)
resp.raise_for_status() | Saves changes made to the locally cached SecurityDocument object's data
structures to the remote database. | Below is the instruction that describes the task:
### Input:
Saves changes made to the locally cached SecurityDocument object's data
structures to the remote database.
### Response:
def save(self):
"""
Saves changes made to the locally cached SecurityDocument object's data
structures to the remote database.
"""
resp = self.r_session.put(
self.document_url,
data=self.json(),
headers={'Content-Type': 'application/json'}
)
resp.raise_for_status() |
def Sigmoid(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
"""
Applies the sigmoid function to a vertex.
The sigmoid function is a special case of the Logistic function.
:param input_vertex: the vertex
"""
return Double(context.jvm_view().SigmoidVertex, label, cast_to_double_vertex(input_vertex)) | Applies the sigmoid function to a vertex.
The sigmoid function is a special case of the Logistic function.
:param input_vertex: the vertex | Below is the instruction that describes the task:
### Input:
Applies the sigmoid function to a vertex.
The sigmoid function is a special case of the Logistic function.
:param input_vertex: the vertex
### Response:
def Sigmoid(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
"""
Applies the sigmoid function to a vertex.
The sigmoid function is a special case of the Logistic function.
:param input_vertex: the vertex
"""
return Double(context.jvm_view().SigmoidVertex, label, cast_to_double_vertex(input_vertex)) |
def synchronized(sync_lock):
"""A decorator synchronizing multi-process access to a resource."""
def wrapper(f):
"""The decorator's core function.
:param f:
:return:
"""
@functools.wraps(f)
def inner_wrapper(*args, **kw):
"""
:param args:
:param kw:
:return:
"""
with sync_lock:
return f(*args, **kw)
return inner_wrapper
return wrapper | A decorator synchronizing multi-process access to a resource. | Below is the instruction that describes the task:
### Input:
A decorator synchronizing multi-process access to a resource.
### Response:
def synchronized(sync_lock):
"""A decorator synchronizing multi-process access to a resource."""
def wrapper(f):
"""The decorator's core function.
:param f:
:return:
"""
@functools.wraps(f)
def inner_wrapper(*args, **kw):
"""
:param args:
:param kw:
:return:
"""
with sync_lock:
return f(*args, **kw)
return inner_wrapper
return wrapper |
def choices(cls):
"""Returns a "choices" list of tuples. Each member of the list is one
of the possible items in the ``Enum``, with the first item in the pair
being the value and the second the name. This is compatible with
django's choices attribute in fields.
:returns:
List of tuples
"""
result = []
for name, member in cls.__members__.items():
result.append((member.value, name))
return result | Returns a "choices" list of tuples. Each member of the list is one
of the possible items in the ``Enum``, with the first item in the pair
being the value and the second the name. This is compatible with
django's choices attribute in fields.
:returns:
List of tuples | Below is the instruction that describes the task:
### Input:
Returns a "choices" list of tuples. Each member of the list is one
of the possible items in the ``Enum``, with the first item in the pair
being the value and the second the name. This is compatible with
django's choices attribute in fields.
:returns:
List of tuples
### Response:
def choices(cls):
"""Returns a "choices" list of tuples. Each member of the list is one
of the possible items in the ``Enum``, with the first item in the pair
being the value and the second the name. This is compatible with
django's choices attribute in fields.
:returns:
List of tuples
"""
result = []
for name, member in cls.__members__.items():
result.append((member.value, name))
return result |
def get_selected_uuidtab(self):
# TODO DBUS ONLY
"""Returns the uuid of the current selected terminal
"""
page_num = self.get_notebook().get_current_page()
terminals = self.get_notebook().get_terminals_for_page(page_num)
return str(terminals[0].get_uuid()) | Returns the uuid of the current selected terminal | Below is the instruction that describes the task:
### Input:
Returns the uuid of the current selected terminal
### Response:
def get_selected_uuidtab(self):
# TODO DBUS ONLY
"""Returns the uuid of the current selected terminal
"""
page_num = self.get_notebook().get_current_page()
terminals = self.get_notebook().get_terminals_for_page(page_num)
return str(terminals[0].get_uuid()) |
def read_write(
self,
index_group,
index_offset,
plc_read_datatype,
value,
plc_write_datatype,
return_ctypes=False,
):
# type: (int, int, Type, Any, Type, bool) -> Any
"""Read and write data synchronous from/to an ADS-device.
:param int index_group: PLC storage area, according to the INDEXGROUP
constants
:param int index_offset: PLC storage address
:param int plc_read_datatype: type of the data given to the PLC to
respond to, according to PLCTYPE constants
:param value: value to write to the storage address of the PLC
:param plc_write_datatype: type of the data given to the PLC,
according to PLCTYPE constants
:rtype: PLCTYPE
:param bool return_ctypes: return ctypes instead of python types if True
(default: False)
:return: value: **value**
"""
if self._port is not None:
return adsSyncReadWriteReqEx2(
self._port,
self._adr,
index_group,
index_offset,
plc_read_datatype,
value,
plc_write_datatype,
return_ctypes,
)
return None | Read and write data synchronous from/to an ADS-device.
:param int index_group: PLC storage area, according to the INDEXGROUP
constants
:param int index_offset: PLC storage address
:param int plc_read_datatype: type of the data given to the PLC to
respond to, according to PLCTYPE constants
:param value: value to write to the storage address of the PLC
:param plc_write_datatype: type of the data given to the PLC,
according to PLCTYPE constants
:rtype: PLCTYPE
:param bool return_ctypes: return ctypes instead of python types if True
(default: False)
:return: value: **value** | Below is the instruction that describes the task:
### Input:
Read and write data synchronous from/to an ADS-device.
:param int index_group: PLC storage area, according to the INDEXGROUP
constants
:param int index_offset: PLC storage address
:param int plc_read_datatype: type of the data given to the PLC to
respond to, according to PLCTYPE constants
:param value: value to write to the storage address of the PLC
:param plc_write_datatype: type of the data given to the PLC,
according to PLCTYPE constants
:rtype: PLCTYPE
:param bool return_ctypes: return ctypes instead of python types if True
(default: False)
:return: value: **value**
### Response:
def read_write(
self,
index_group,
index_offset,
plc_read_datatype,
value,
plc_write_datatype,
return_ctypes=False,
):
# type: (int, int, Type, Any, Type, bool) -> Any
"""Read and write data synchronous from/to an ADS-device.
:param int index_group: PLC storage area, according to the INDEXGROUP
constants
:param int index_offset: PLC storage address
:param int plc_read_datatype: type of the data given to the PLC to
respond to, according to PLCTYPE constants
:param value: value to write to the storage address of the PLC
:param plc_write_datatype: type of the data given to the PLC,
according to PLCTYPE constants
:rtype: PLCTYPE
:param bool return_ctypes: return ctypes instead of python types if True
(default: False)
:return: value: **value**
"""
if self._port is not None:
return adsSyncReadWriteReqEx2(
self._port,
self._adr,
index_group,
index_offset,
plc_read_datatype,
value,
plc_write_datatype,
return_ctypes,
)
return None |
def process_file(filename, source):
'''process one file'''
print("Processing %s" % filename)
mlog = mavutil.mavlink_connection(filename, notimestamps=args.notimestamps)
position_field_type = sniff_field_spelling(mlog, source)
# init fields and field_types lists
fields = [args.source + "." + s for s in position_field_type]
fields.append(mainstate_field)
field_types = []
msg_types = set()
re_caps = re.compile('[A-Z_][A-Z0-9_]+')
for f in fields:
caps = set(re.findall(re_caps, f))
msg_types = msg_types.union(caps)
field_types.append(caps)
add_data.new_linestring = True
add_data.mainstate_current = -1
add_data.current_kml_linestring = None
add_data.position_data = [None for n in position_field_type]
add_data.last_time = 0
while True:
msg = mlog.recv_match(args.condition)
if msg is None:
break
tdays = (msg._timestamp - time.timezone) / (24 * 60 * 60)
tdays += 719163 # pylab wants it since 0001-01-01
add_data(tdays, msg, msg_types, mlog.messages, fields, field_types, position_field_type) | process one file | Below is the instruction that describes the task:
### Input:
process one file
### Response:
def process_file(filename, source):
'''process one file'''
print("Processing %s" % filename)
mlog = mavutil.mavlink_connection(filename, notimestamps=args.notimestamps)
position_field_type = sniff_field_spelling(mlog, source)
# init fields and field_types lists
fields = [args.source + "." + s for s in position_field_type]
fields.append(mainstate_field)
field_types = []
msg_types = set()
re_caps = re.compile('[A-Z_][A-Z0-9_]+')
for f in fields:
caps = set(re.findall(re_caps, f))
msg_types = msg_types.union(caps)
field_types.append(caps)
add_data.new_linestring = True
add_data.mainstate_current = -1
add_data.current_kml_linestring = None
add_data.position_data = [None for n in position_field_type]
add_data.last_time = 0
while True:
msg = mlog.recv_match(args.condition)
if msg is None:
break
tdays = (msg._timestamp - time.timezone) / (24 * 60 * 60)
tdays += 719163 # pylab wants it since 0001-01-01
add_data(tdays, msg, msg_types, mlog.messages, fields, field_types, position_field_type) |
def _check_available_disk_space(self, project):
"""
Sends a warning notification if disk space is getting low.
:param project: project instance
"""
try:
used_disk_space = psutil.disk_usage(project.path).percent
except FileNotFoundError:
log.warning('Could not find "{}" when checking for used disk space'.format(project.path))
return
# send a warning if used disk space is >= 90%
if used_disk_space >= 90:
message = 'Only {}% or less of disk space detected in "{}" on "{}"'.format(used_disk_space,
project.path,
platform.node())
log.warning(message)
project.emit("log.warning", {"message": message}) | Sends a warning notification if disk space is getting low.
:param project: project instance | Below is the instruction that describes the task:
### Input:
Sends a warning notification if disk space is getting low.
:param project: project instance
### Response:
def _check_available_disk_space(self, project):
"""
Sends a warning notification if disk space is getting low.
:param project: project instance
"""
try:
used_disk_space = psutil.disk_usage(project.path).percent
except FileNotFoundError:
log.warning('Could not find "{}" when checking for used disk space'.format(project.path))
return
# send a warning if used disk space is >= 90%
if used_disk_space >= 90:
message = 'Only {}% or less of disk space detected in "{}" on "{}"'.format(used_disk_space,
project.path,
platform.node())
log.warning(message)
project.emit("log.warning", {"message": message}) |
def _set_get_vnetwork_vms(self, v, load=False):
"""
Setter method for get_vnetwork_vms, mapped from YANG variable /brocade_vswitch_rpc/get_vnetwork_vms (rpc)
If this variable is read-only (config: false) in the
source YANG file, then _set_get_vnetwork_vms is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_get_vnetwork_vms() directly.
YANG Description: Shows discovered VMs
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=get_vnetwork_vms.get_vnetwork_vms, is_leaf=True, yang_name="get-vnetwork-vms", rest_name="get-vnetwork-vms", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'vm-name'}}, namespace='urn:brocade.com:mgmt:brocade-vswitch', defining_module='brocade-vswitch', yang_type='rpc', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """get_vnetwork_vms must be of a type compatible with rpc""",
'defined-type': "rpc",
'generated-type': """YANGDynClass(base=get_vnetwork_vms.get_vnetwork_vms, is_leaf=True, yang_name="get-vnetwork-vms", rest_name="get-vnetwork-vms", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'vm-name'}}, namespace='urn:brocade.com:mgmt:brocade-vswitch', defining_module='brocade-vswitch', yang_type='rpc', is_config=True)""",
})
self.__get_vnetwork_vms = t
if hasattr(self, '_set'):
self._set() | Setter method for get_vnetwork_vms, mapped from YANG variable /brocade_vswitch_rpc/get_vnetwork_vms (rpc)
If this variable is read-only (config: false) in the
source YANG file, then _set_get_vnetwork_vms is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_get_vnetwork_vms() directly.
YANG Description: Shows discovered VMs | Below is the instruction that describes the task:
### Input:
Setter method for get_vnetwork_vms, mapped from YANG variable /brocade_vswitch_rpc/get_vnetwork_vms (rpc)
If this variable is read-only (config: false) in the
source YANG file, then _set_get_vnetwork_vms is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_get_vnetwork_vms() directly.
YANG Description: Shows discovered VMs
### Response:
def _set_get_vnetwork_vms(self, v, load=False):
"""
Setter method for get_vnetwork_vms, mapped from YANG variable /brocade_vswitch_rpc/get_vnetwork_vms (rpc)
If this variable is read-only (config: false) in the
source YANG file, then _set_get_vnetwork_vms is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_get_vnetwork_vms() directly.
YANG Description: Shows discovered VMs
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=get_vnetwork_vms.get_vnetwork_vms, is_leaf=True, yang_name="get-vnetwork-vms", rest_name="get-vnetwork-vms", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'vm-name'}}, namespace='urn:brocade.com:mgmt:brocade-vswitch', defining_module='brocade-vswitch', yang_type='rpc', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """get_vnetwork_vms must be of a type compatible with rpc""",
'defined-type': "rpc",
'generated-type': """YANGDynClass(base=get_vnetwork_vms.get_vnetwork_vms, is_leaf=True, yang_name="get-vnetwork-vms", rest_name="get-vnetwork-vms", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'vm-name'}}, namespace='urn:brocade.com:mgmt:brocade-vswitch', defining_module='brocade-vswitch', yang_type='rpc', is_config=True)""",
})
self.__get_vnetwork_vms = t
if hasattr(self, '_set'):
self._set() |
def tilt_model(params, shape):
"""lmfit tilt model"""
mx = params["mx"].value
my = params["my"].value
off = params["off"].value
bg = np.zeros(shape, dtype=float) + off
x = np.arange(bg.shape[0]) - bg.shape[0] // 2
y = np.arange(bg.shape[1]) - bg.shape[1] // 2
x = x.reshape(-1, 1)
y = y.reshape(1, -1)
bg += mx * x + my * y
return bg | lmfit tilt model | Below is the instruction that describes the task:
### Input:
lmfit tilt model
### Response:
def tilt_model(params, shape):
"""lmfit tilt model"""
mx = params["mx"].value
my = params["my"].value
off = params["off"].value
bg = np.zeros(shape, dtype=float) + off
x = np.arange(bg.shape[0]) - bg.shape[0] // 2
y = np.arange(bg.shape[1]) - bg.shape[1] // 2
x = x.reshape(-1, 1)
y = y.reshape(1, -1)
bg += mx * x + my * y
return bg |
def copy(self, new_id=None, attribute_overrides={}):
"""
Copies the DatabaseObject under the ID_KEY new_id.
@param new_id: the value for ID_KEY of the copy; if this is none,
creates the new object with a random ID_KEY
@param attribute_overrides: dictionary of attribute names -> values that you would like to override with.
"""
data = dict(self)
data.update(attribute_overrides)
if new_id is not None:
data[ID_KEY] = new_id
return self.create(data, path=self.PATH)
else:
del data[ID_KEY]
return self.create(data, random_id=True, path=self.PATH) | Copies the DatabaseObject under the ID_KEY new_id.
@param new_id: the value for ID_KEY of the copy; if this is none,
creates the new object with a random ID_KEY
@param attribute_overrides: dictionary of attribute names -> values that you would like to override with. | Below is the the instruction that describes the task:
### Input:
Copies the DatabaseObject under the ID_KEY new_id.
@param new_id: the value for ID_KEY of the copy; if this is none,
creates the new object with a random ID_KEY
@param attribute_overrides: dictionary of attribute names -> values that you would like to override with.
### Response:
def copy(self, new_id=None, attribute_overrides={}):
"""
Copies the DatabaseObject under the ID_KEY new_id.
@param new_id: the value for ID_KEY of the copy; if this is none,
creates the new object with a random ID_KEY
@param attribute_overrides: dictionary of attribute names -> values that you would like to override with.
"""
data = dict(self)
data.update(attribute_overrides)
if new_id is not None:
data[ID_KEY] = new_id
return self.create(data, path=self.PATH)
else:
del data[ID_KEY]
return self.create(data, random_id=True, path=self.PATH) |
def df(self):
'''## Experimental '''
if not hasattr(self, '_df'):
dfs = []
for symbol, bars in self.items():
df = bars.df.copy()
df.columns = pd.MultiIndex.from_product(
[[symbol, ], df.columns])
dfs.append(df)
if len(dfs) == 0:
self._df = pd.DataFrame()
else:
self._df = pd.concat(dfs, axis=1)
return self._df | ## Experimental | Below is the the instruction that describes the task:
### Input:
## Experimental
### Response:
def df(self):
'''## Experimental '''
if not hasattr(self, '_df'):
dfs = []
for symbol, bars in self.items():
df = bars.df.copy()
df.columns = pd.MultiIndex.from_product(
[[symbol, ], df.columns])
dfs.append(df)
if len(dfs) == 0:
self._df = pd.DataFrame()
else:
self._df = pd.concat(dfs, axis=1)
return self._df |
def update(self, eid, entity, note=None):
"""
update(int, dict, string):
Merges the supplied dict into the entity record with the supplied eid.
Places the supplied note as update message.
"""
# fetch the current entity
newentity = self.get(eid)
for field in ('createdAtDate', 'id', 'isActive', 'parentRevision', 'revisionNr', 'updatedAtDate', 'updatedByUserName','updatedFromIp'):
if field in newentity: del newentity[field]
# replace fields as specified in entity
for field in entity:
if field=='metadata':
for mfield in entity['metadata']:
newentity['metadata'][mfield] = entity['metadata'][mfield]
else:
newentity[field] = entity[field]
result = self.replace(eid,newentity,note=note)
return result | update(int, dict, string):
Merges the supplied dict into the entity record with the supplied eid.
Places the supplied note as update message. | Below is the the instruction that describes the task:
### Input:
update(int, dict, string):
Merges the supplied dict into the entity record with the supplied eid.
Places the supplied note as update message.
### Response:
def update(self, eid, entity, note=None):
"""
update(int, dict, string):
Merges the supplied dict into the entity record with the supplied eid.
Places the supplied note as update message.
"""
# fetch the current entity
newentity = self.get(eid)
for field in ('createdAtDate', 'id', 'isActive', 'parentRevision', 'revisionNr', 'updatedAtDate', 'updatedByUserName','updatedFromIp'):
if field in newentity: del newentity[field]
# replace fields as specified in entity
for field in entity:
if field=='metadata':
for mfield in entity['metadata']:
newentity['metadata'][mfield] = entity['metadata'][mfield]
else:
newentity[field] = entity[field]
result = self.replace(eid,newentity,note=note)
return result |
def get_category_tree(self, product, category_id, lcid=None, source=None, product_version=None, skus=None, sub_skus=None):
"""GetCategoryTree.
[Preview API]
:param str product:
:param str category_id:
:param int lcid:
:param str source:
:param str product_version:
:param str skus:
:param str sub_skus:
:rtype: :class:`<ProductCategory> <azure.devops.v5_1.gallery.models.ProductCategory>`
"""
route_values = {}
if product is not None:
route_values['product'] = self._serialize.url('product', product, 'str')
if category_id is not None:
route_values['categoryId'] = self._serialize.url('category_id', category_id, 'str')
query_parameters = {}
if lcid is not None:
query_parameters['lcid'] = self._serialize.query('lcid', lcid, 'int')
if source is not None:
query_parameters['source'] = self._serialize.query('source', source, 'str')
if product_version is not None:
query_parameters['productVersion'] = self._serialize.query('product_version', product_version, 'str')
if skus is not None:
query_parameters['skus'] = self._serialize.query('skus', skus, 'str')
if sub_skus is not None:
query_parameters['subSkus'] = self._serialize.query('sub_skus', sub_skus, 'str')
response = self._send(http_method='GET',
location_id='1102bb42-82b0-4955-8d8a-435d6b4cedd3',
version='5.1-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('ProductCategory', response) | GetCategoryTree.
[Preview API]
:param str product:
:param str category_id:
:param int lcid:
:param str source:
:param str product_version:
:param str skus:
:param str sub_skus:
:rtype: :class:`<ProductCategory> <azure.devops.v5_1.gallery.models.ProductCategory>` | Below is the the instruction that describes the task:
### Input:
GetCategoryTree.
[Preview API]
:param str product:
:param str category_id:
:param int lcid:
:param str source:
:param str product_version:
:param str skus:
:param str sub_skus:
:rtype: :class:`<ProductCategory> <azure.devops.v5_1.gallery.models.ProductCategory>`
### Response:
def get_category_tree(self, product, category_id, lcid=None, source=None, product_version=None, skus=None, sub_skus=None):
"""GetCategoryTree.
[Preview API]
:param str product:
:param str category_id:
:param int lcid:
:param str source:
:param str product_version:
:param str skus:
:param str sub_skus:
:rtype: :class:`<ProductCategory> <azure.devops.v5_1.gallery.models.ProductCategory>`
"""
route_values = {}
if product is not None:
route_values['product'] = self._serialize.url('product', product, 'str')
if category_id is not None:
route_values['categoryId'] = self._serialize.url('category_id', category_id, 'str')
query_parameters = {}
if lcid is not None:
query_parameters['lcid'] = self._serialize.query('lcid', lcid, 'int')
if source is not None:
query_parameters['source'] = self._serialize.query('source', source, 'str')
if product_version is not None:
query_parameters['productVersion'] = self._serialize.query('product_version', product_version, 'str')
if skus is not None:
query_parameters['skus'] = self._serialize.query('skus', skus, 'str')
if sub_skus is not None:
query_parameters['subSkus'] = self._serialize.query('sub_skus', sub_skus, 'str')
response = self._send(http_method='GET',
location_id='1102bb42-82b0-4955-8d8a-435d6b4cedd3',
version='5.1-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('ProductCategory', response) |
def step_interpolation(x, xp, fp, **kwargs):
"""Multi-dimensional step interpolation.
Returns the multi-dimensional step interpolant to a function with
given discrete data points (xp, fp), evaluated at x.
Note that *N and *M indicate zero or more dimensions.
Args:
x: An array of shape [*N], the x-coordinates of the interpolated values.
xp: An np.array of shape [D], the x-coordinates of the data points, must be
increasing.
fp: An np.array of shape [D, *M], the y-coordinates of the data points.
**kwargs: Unused.
Returns:
An array of shape [*N, *M], the interpolated values.
"""
del kwargs # Unused.
xp = np.expand_dims(xp, -1)
lower, upper = xp[:-1], xp[1:]
conditions = (x >= lower) & (x < upper)
# Underflow and overflow conditions and values. Values default to fp[0] and
# fp[-1] respectively.
conditions = np.concatenate([[x < xp[0]], conditions, [x >= xp[-1]]])
values = np.concatenate([[fp[0]], fp])
assert np.all(np.sum(conditions, 0) == 1), 'xp must be increasing.'
indices = np.argmax(conditions, 0)
return values[indices].astype(np.float32) | Multi-dimensional step interpolation.
Returns the multi-dimensional step interpolant to a function with
given discrete data points (xp, fp), evaluated at x.
Note that *N and *M indicate zero or more dimensions.
Args:
x: An array of shape [*N], the x-coordinates of the interpolated values.
xp: An np.array of shape [D], the x-coordinates of the data points, must be
increasing.
fp: An np.array of shape [D, *M], the y-coordinates of the data points.
**kwargs: Unused.
Returns:
An array of shape [*N, *M], the interpolated values. | Below is the the instruction that describes the task:
### Input:
Multi-dimensional step interpolation.
Returns the multi-dimensional step interpolant to a function with
given discrete data points (xp, fp), evaluated at x.
Note that *N and *M indicate zero or more dimensions.
Args:
x: An array of shape [*N], the x-coordinates of the interpolated values.
xp: An np.array of shape [D], the x-coordinates of the data points, must be
increasing.
fp: An np.array of shape [D, *M], the y-coordinates of the data points.
**kwargs: Unused.
Returns:
An array of shape [*N, *M], the interpolated values.
### Response:
def step_interpolation(x, xp, fp, **kwargs):
"""Multi-dimensional step interpolation.
Returns the multi-dimensional step interpolant to a function with
given discrete data points (xp, fp), evaluated at x.
Note that *N and *M indicate zero or more dimensions.
Args:
x: An array of shape [*N], the x-coordinates of the interpolated values.
xp: An np.array of shape [D], the x-coordinates of the data points, must be
increasing.
fp: An np.array of shape [D, *M], the y-coordinates of the data points.
**kwargs: Unused.
Returns:
An array of shape [*N, *M], the interpolated values.
"""
del kwargs # Unused.
xp = np.expand_dims(xp, -1)
lower, upper = xp[:-1], xp[1:]
conditions = (x >= lower) & (x < upper)
# Underflow and overflow conditions and values. Values default to fp[0] and
# fp[-1] respectively.
conditions = np.concatenate([[x < xp[0]], conditions, [x >= xp[-1]]])
values = np.concatenate([[fp[0]], fp])
assert np.all(np.sum(conditions, 0) == 1), 'xp must be increasing.'
indices = np.argmax(conditions, 0)
return values[indices].astype(np.float32) |
def add_child(self, child):
"""If the given object is an instance of Child add it to self and
register self as a parent.
"""
if not isinstance(child, ChildMixin):
raise TypeError(
'Requires instance of TreeElement. '
'Got {}'.format(type(child))
)
child.parent = self
self._children.append(child) | If the given object is an instance of Child add it to self and
register self as a parent. | Below is the the instruction that describes the task:
### Input:
If the given object is an instance of Child add it to self and
register self as a parent.
### Response:
def add_child(self, child):
"""If the given object is an instance of Child add it to self and
register self as a parent.
"""
if not isinstance(child, ChildMixin):
raise TypeError(
'Requires instance of TreeElement. '
'Got {}'.format(type(child))
)
child.parent = self
self._children.append(child) |
def get_video_image_storage():
"""
Return the configured django storage backend.
"""
if hasattr(settings, 'VIDEO_IMAGE_SETTINGS'):
return get_storage_class(
settings.VIDEO_IMAGE_SETTINGS.get('STORAGE_CLASS'),
)(**settings.VIDEO_IMAGE_SETTINGS.get('STORAGE_KWARGS', {}))
else:
# during edx-platform loading this method gets called but settings are not ready yet
# so in that case we will return default(FileSystemStorage) storage class instance
return get_storage_class()() | Return the configured django storage backend. | Below is the the instruction that describes the task:
### Input:
Return the configured django storage backend.
### Response:
def get_video_image_storage():
"""
Return the configured django storage backend.
"""
if hasattr(settings, 'VIDEO_IMAGE_SETTINGS'):
return get_storage_class(
settings.VIDEO_IMAGE_SETTINGS.get('STORAGE_CLASS'),
)(**settings.VIDEO_IMAGE_SETTINGS.get('STORAGE_KWARGS', {}))
else:
# during edx-platform loading this method gets called but settings are not ready yet
# so in that case we will return default(FileSystemStorage) storage class instance
return get_storage_class()() |
def next(self):
"""
Goes to the previous page for this wizard.
"""
curr_page = self.currentPage()
if not curr_page:
return
elif not curr_page.validatePage():
return
pageId = curr_page.nextId()
try:
next_page = self._pages[pageId]
except KeyError:
return
self._currentId = pageId
self._navigation.append(pageId)
y = curr_page.y()
next_page.move(self.width(), y)
# animate the last page in
anim_in = QtCore.QPropertyAnimation(self)
anim_in.setTargetObject(curr_page)
anim_in.setPropertyName('pos')
anim_in.setStartValue(curr_page.pos())
anim_in.setEndValue(QtCore.QPoint(-curr_page.width(), y))
anim_in.setDuration(self.animationSpeed())
anim_in.setEasingCurve(QtCore.QEasingCurve.Linear)
# animate the current page out
anim_out = QtCore.QPropertyAnimation(self)
anim_out.setTargetObject(next_page)
anim_out.setPropertyName('pos')
anim_out.setStartValue(next_page.pos())
anim_out.setEndValue(curr_page.pos())
anim_out.setDuration(self.animationSpeed())
anim_out.setEasingCurve(QtCore.QEasingCurve.Linear)
# create the anim group
anim_grp = QtCore.QParallelAnimationGroup(self)
anim_grp.addAnimation(anim_in)
anim_grp.addAnimation(anim_out)
anim_grp.finished.connect(curr_page.hide)
anim_grp.finished.connect(anim_grp.deleteLater)
next_page.show()
# update the button states
self._buttons[self.WizardButton.BackButton].setVisible(True)
self._buttons[self.WizardButton.NextButton].setVisible(self.canGoForward())
self._buttons[self.WizardButton.RetryButton].setVisible(self.canRetry())
self._buttons[self.WizardButton.CommitButton].setVisible(next_page.isCommitPage())
self._buttons[self.WizardButton.FinishButton].setVisible(next_page.isFinalPage())
self.adjustSize()
# initialize the new page
self.currentIdChanged.emit(pageId)
next_page.initializePage()
anim_grp.start() | Goes to the previous page for this wizard. | Below is the the instruction that describes the task:
### Input:
Goes to the previous page for this wizard.
### Response:
def next(self):
"""
Goes to the previous page for this wizard.
"""
curr_page = self.currentPage()
if not curr_page:
return
elif not curr_page.validatePage():
return
pageId = curr_page.nextId()
try:
next_page = self._pages[pageId]
except KeyError:
return
self._currentId = pageId
self._navigation.append(pageId)
y = curr_page.y()
next_page.move(self.width(), y)
# animate the last page in
anim_in = QtCore.QPropertyAnimation(self)
anim_in.setTargetObject(curr_page)
anim_in.setPropertyName('pos')
anim_in.setStartValue(curr_page.pos())
anim_in.setEndValue(QtCore.QPoint(-curr_page.width(), y))
anim_in.setDuration(self.animationSpeed())
anim_in.setEasingCurve(QtCore.QEasingCurve.Linear)
# animate the current page out
anim_out = QtCore.QPropertyAnimation(self)
anim_out.setTargetObject(next_page)
anim_out.setPropertyName('pos')
anim_out.setStartValue(next_page.pos())
anim_out.setEndValue(curr_page.pos())
anim_out.setDuration(self.animationSpeed())
anim_out.setEasingCurve(QtCore.QEasingCurve.Linear)
# create the anim group
anim_grp = QtCore.QParallelAnimationGroup(self)
anim_grp.addAnimation(anim_in)
anim_grp.addAnimation(anim_out)
anim_grp.finished.connect(curr_page.hide)
anim_grp.finished.connect(anim_grp.deleteLater)
next_page.show()
# update the button states
self._buttons[self.WizardButton.BackButton].setVisible(True)
self._buttons[self.WizardButton.NextButton].setVisible(self.canGoForward())
self._buttons[self.WizardButton.RetryButton].setVisible(self.canRetry())
self._buttons[self.WizardButton.CommitButton].setVisible(next_page.isCommitPage())
self._buttons[self.WizardButton.FinishButton].setVisible(next_page.isFinalPage())
self.adjustSize()
# initialize the new page
self.currentIdChanged.emit(pageId)
next_page.initializePage()
anim_grp.start() |
def sort_by_name(self):
"""Sort list elements by name."""
super(JSSObjectList, self).sort(key=lambda k: k.name) | Sort list elements by name. | Below is the the instruction that describes the task:
### Input:
Sort list elements by name.
### Response:
def sort_by_name(self):
"""Sort list elements by name."""
super(JSSObjectList, self).sort(key=lambda k: k.name) |
def assure_directory_exists(path, is_file=False):
"""Assure that the directory pointed to by path exists.
:param is_file: If True, path is assumed to be a file and handled correctly.
Otherwise it must be a directory
:return: True if the directory was created, False if it already existed"""
if is_file:
path = osp.dirname(path)
# END handle file
if not osp.isdir(path):
os.makedirs(path)
return True
return False | Assure that the directory pointed to by path exists.
:param is_file: If True, path is assumed to be a file and handled correctly.
Otherwise it must be a directory
:return: True if the directory was created, False if it already existed | Below is the the instruction that describes the task:
### Input:
Assure that the directory pointed to by path exists.
:param is_file: If True, path is assumed to be a file and handled correctly.
Otherwise it must be a directory
:return: True if the directory was created, False if it already existed
### Response:
def assure_directory_exists(path, is_file=False):
"""Assure that the directory pointed to by path exists.
:param is_file: If True, path is assumed to be a file and handled correctly.
Otherwise it must be a directory
:return: True if the directory was created, False if it already existed"""
if is_file:
path = osp.dirname(path)
# END handle file
if not osp.isdir(path):
os.makedirs(path)
return True
return False |
def user_path(self, team, user):
"""
Returns the path to directory with the user's package repositories.
"""
return os.path.join(self.team_path(team), user) | Returns the path to directory with the user's package repositories. | Below is the the instruction that describes the task:
### Input:
Returns the path to directory with the user's package repositories.
### Response:
def user_path(self, team, user):
"""
Returns the path to directory with the user's package repositories.
"""
return os.path.join(self.team_path(team), user) |
def ns(self):
"""Get the namespace of a node """
ret = libxml2mod.xmlNodeGetNs(self._o)
if ret is None:return None
__tmp = xmlNs(_obj=ret)
return __tmp | Get the namespace of a node | Below is the the instruction that describes the task:
### Input:
Get the namespace of a node
### Response:
def ns(self):
"""Get the namespace of a node """
ret = libxml2mod.xmlNodeGetNs(self._o)
if ret is None:return None
__tmp = xmlNs(_obj=ret)
return __tmp |
def get_cache_key(request, page, lang, site_id, title):
"""
Create the cache key for the current page and tag type
"""
from cms.cache import _get_cache_key
from cms.templatetags.cms_tags import _get_page_by_untyped_arg
from cms.models import Page
if not isinstance(page, Page):
page = _get_page_by_untyped_arg(page, request, site_id)
if not site_id:
try:
site_id = page.node.site_id
except AttributeError: # CMS_3_4
site_id = page.site_id
if not title:
return _get_cache_key('page_tags', page, '', site_id) + '_type:tags_list'
else:
return _get_cache_key('title_tags', page, lang, site_id) + '_type:tags_list' | Create the cache key for the current page and tag type | Below is the the instruction that describes the task:
### Input:
Create the cache key for the current page and tag type
### Response:
def get_cache_key(request, page, lang, site_id, title):
"""
Create the cache key for the current page and tag type
"""
from cms.cache import _get_cache_key
from cms.templatetags.cms_tags import _get_page_by_untyped_arg
from cms.models import Page
if not isinstance(page, Page):
page = _get_page_by_untyped_arg(page, request, site_id)
if not site_id:
try:
site_id = page.node.site_id
except AttributeError: # CMS_3_4
site_id = page.site_id
if not title:
return _get_cache_key('page_tags', page, '', site_id) + '_type:tags_list'
else:
return _get_cache_key('title_tags', page, lang, site_id) + '_type:tags_list' |
def heap_stats(self):
""" Return heap statistics """
self.lock.acquire()
res = self.ext.mr_heap_stats(self.ctx)
self.lock.release()
python_value = res.contents.to_python()
self.free(res)
return python_value | Return heap statistics | Below is the the instruction that describes the task:
### Input:
Return heap statistics
### Response:
def heap_stats(self):
""" Return heap statistics """
self.lock.acquire()
res = self.ext.mr_heap_stats(self.ctx)
self.lock.release()
python_value = res.contents.to_python()
self.free(res)
return python_value |
def short(cls, path):
"""
Example:
short("examined /Users/joe/foo") => "examined ~/foo"
Args:
path: Path to represent in its short form
Returns:
(str): Short form, using '~' if applicable
"""
if not path:
return path
path = str(path)
if cls.paths:
for p in cls.paths:
if p:
path = path.replace(p + "/", "")
path = path.replace(cls.home, "~")
return path | Example:
short("examined /Users/joe/foo") => "examined ~/foo"
Args:
path: Path to represent in its short form
Returns:
(str): Short form, using '~' if applicable | Below is the the instruction that describes the task:
### Input:
Example:
short("examined /Users/joe/foo") => "examined ~/foo"
Args:
path: Path to represent in its short form
Returns:
(str): Short form, using '~' if applicable
### Response:
def short(cls, path):
"""
Example:
short("examined /Users/joe/foo") => "examined ~/foo"
Args:
path: Path to represent in its short form
Returns:
(str): Short form, using '~' if applicable
"""
if not path:
return path
path = str(path)
if cls.paths:
for p in cls.paths:
if p:
path = path.replace(p + "/", "")
path = path.replace(cls.home, "~")
return path |
def walkfiles(startdir, regex=None, recurse=True):
"""Yields the absolute paths of files found within the given start
directory. Can optionally filter paths using a regex pattern."""
for r,_,fs in os.walk(startdir):
if not recurse and startdir != r:
return
for f in fs:
path = op.abspath(op.join(r,f))
if regex and not _is_match(regex, path):
continue
if op.isfile(path):
yield path | Yields the absolute paths of files found within the given start
directory. Can optionally filter paths using a regex pattern. | Below is the the instruction that describes the task:
### Input:
Yields the absolute paths of files found within the given start
directory. Can optionally filter paths using a regex pattern.
### Response:
def walkfiles(startdir, regex=None, recurse=True):
"""Yields the absolute paths of files found within the given start
directory. Can optionally filter paths using a regex pattern."""
for r,_,fs in os.walk(startdir):
if not recurse and startdir != r:
return
for f in fs:
path = op.abspath(op.join(r,f))
if regex and not _is_match(regex, path):
continue
if op.isfile(path):
yield path |
def process_tx_receipt(tx_hash, event, event_name):
"""
Wait until the tx receipt is processed.
:param tx_hash: hash of the transaction
:param event: AttributeDict with the event data.
:param event_name: name of the event to subscribe, str
:return:
"""
web3 = Web3Provider.get_web3()
try:
web3.eth.waitForTransactionReceipt(tx_hash, timeout=20)
except Timeout:
logger.info('Waiting for transaction receipt timed out. Cannot verify receipt and event.')
return
receipt = web3.eth.getTransactionReceipt(tx_hash)
event = event().processReceipt(receipt)
if event:
logger.info(f'Success: got {event_name} event after fulfilling condition.')
logger.debug(
f'Success: got {event_name} event after fulfilling condition. {receipt}, ::: {event}')
else:
logger.debug(f'Something is not right, cannot find the {event_name} event after calling the'
f' fulfillment condition. This is the transaction receipt {receipt}')
if receipt and receipt.status == 0:
logger.warning(
f'Transaction failed: tx_hash {tx_hash}, tx event {event_name}, receipt {receipt}') | Wait until the tx receipt is processed.
:param tx_hash: hash of the transaction
:param event: AttributeDict with the event data.
:param event_name: name of the event to subscribe, str
:return: | Below is the the instruction that describes the task:
### Input:
Wait until the tx receipt is processed.
:param tx_hash: hash of the transaction
:param event: AttributeDict with the event data.
:param event_name: name of the event to subscribe, str
:return:
### Response:
def process_tx_receipt(tx_hash, event, event_name):
"""
Wait until the tx receipt is processed.
:param tx_hash: hash of the transaction
:param event: AttributeDict with the event data.
:param event_name: name of the event to subscribe, str
:return:
"""
web3 = Web3Provider.get_web3()
try:
web3.eth.waitForTransactionReceipt(tx_hash, timeout=20)
except Timeout:
logger.info('Waiting for transaction receipt timed out. Cannot verify receipt and event.')
return
receipt = web3.eth.getTransactionReceipt(tx_hash)
event = event().processReceipt(receipt)
if event:
logger.info(f'Success: got {event_name} event after fulfilling condition.')
logger.debug(
f'Success: got {event_name} event after fulfilling condition. {receipt}, ::: {event}')
else:
logger.debug(f'Something is not right, cannot find the {event_name} event after calling the'
f' fulfillment condition. This is the transaction receipt {receipt}')
if receipt and receipt.status == 0:
logger.warning(
f'Transaction failed: tx_hash {tx_hash}, tx event {event_name}, receipt {receipt}') |
def _get_code_dir(self, code_path):
"""
Method to get a path to a directory where the Lambda function code is available. This directory will
be mounted directly inside the Docker container.
This method handles a few different cases for ``code_path``:
- ``code_path``is a existent zip/jar file: Unzip in a temp directory and return the temp directory
- ``code_path`` is a existent directory: Return this immediately
- ``code_path`` is a file/dir that does not exist: Return it as is. May be this method is not clever to
detect the existence of the path
:param string code_path: Path to the code. This could be pointing at a file or folder either on a local
disk or in some network file system
:return string: Directory containing Lambda function code. It can be mounted directly in container
"""
decompressed_dir = None
try:
if os.path.isfile(code_path) and code_path.endswith(self.SUPPORTED_ARCHIVE_EXTENSIONS):
decompressed_dir = _unzip_file(code_path)
yield decompressed_dir
else:
LOG.debug("Code %s is not a zip/jar file", code_path)
yield code_path
finally:
if decompressed_dir:
shutil.rmtree(decompressed_dir) | Method to get a path to a directory where the Lambda function code is available. This directory will
be mounted directly inside the Docker container.
This method handles a few different cases for ``code_path``:
- ``code_path``is a existent zip/jar file: Unzip in a temp directory and return the temp directory
- ``code_path`` is a existent directory: Return this immediately
- ``code_path`` is a file/dir that does not exist: Return it as is. May be this method is not clever to
detect the existence of the path
:param string code_path: Path to the code. This could be pointing at a file or folder either on a local
disk or in some network file system
:return string: Directory containing Lambda function code. It can be mounted directly in container | Below is the the instruction that describes the task:
### Input:
Method to get a path to a directory where the Lambda function code is available. This directory will
be mounted directly inside the Docker container.
This method handles a few different cases for ``code_path``:
- ``code_path``is a existent zip/jar file: Unzip in a temp directory and return the temp directory
- ``code_path`` is a existent directory: Return this immediately
- ``code_path`` is a file/dir that does not exist: Return it as is. May be this method is not clever to
detect the existence of the path
:param string code_path: Path to the code. This could be pointing at a file or folder either on a local
disk or in some network file system
:return string: Directory containing Lambda function code. It can be mounted directly in container
### Response:
def _get_code_dir(self, code_path):
"""
Method to get a path to a directory where the Lambda function code is available. This directory will
be mounted directly inside the Docker container.
This method handles a few different cases for ``code_path``:
- ``code_path``is a existent zip/jar file: Unzip in a temp directory and return the temp directory
- ``code_path`` is a existent directory: Return this immediately
- ``code_path`` is a file/dir that does not exist: Return it as is. May be this method is not clever to
detect the existence of the path
:param string code_path: Path to the code. This could be pointing at a file or folder either on a local
disk or in some network file system
:return string: Directory containing Lambda function code. It can be mounted directly in container
"""
decompressed_dir = None
try:
if os.path.isfile(code_path) and code_path.endswith(self.SUPPORTED_ARCHIVE_EXTENSIONS):
decompressed_dir = _unzip_file(code_path)
yield decompressed_dir
else:
LOG.debug("Code %s is not a zip/jar file", code_path)
yield code_path
finally:
if decompressed_dir:
shutil.rmtree(decompressed_dir) |
def PostMessage(handle: int, msg: int, wParam: int, lParam: int) -> bool:
"""
PostMessage from Win32.
Return bool, True if succeed otherwise False.
"""
return bool(ctypes.windll.user32.PostMessageW(ctypes.c_void_p(handle), msg, wParam, lParam)) | PostMessage from Win32.
Return bool, True if succeed otherwise False. | Below is the the instruction that describes the task:
### Input:
PostMessage from Win32.
Return bool, True if succeed otherwise False.
### Response:
def PostMessage(handle: int, msg: int, wParam: int, lParam: int) -> bool:
"""
PostMessage from Win32.
Return bool, True if succeed otherwise False.
"""
return bool(ctypes.windll.user32.PostMessageW(ctypes.c_void_p(handle), msg, wParam, lParam)) |
def updateRasterBounds(self):
"""Updates the y-coordinate slots where the raster points
are plotted, according to the current limits of the y-axis"""
yrange = self.viewRange()[1]
yrange_size = yrange[1] - yrange[0]
rmax = self.rasterTop*yrange_size + yrange[0]
rmin = self.rasterBottom*yrange_size + yrange[0]
self.rasterYslots = np.linspace(rmin, rmax, self.nreps)
self.rasterBoundsUpdated.emit((self.rasterBottom, self.rasterTop), self.getTitle()) | Updates the y-coordinate slots where the raster points
are plotted, according to the current limits of the y-axis | Below is the the instruction that describes the task:
### Input:
Updates the y-coordinate slots where the raster points
are plotted, according to the current limits of the y-axis
### Response:
def updateRasterBounds(self):
"""Updates the y-coordinate slots where the raster points
are plotted, according to the current limits of the y-axis"""
yrange = self.viewRange()[1]
yrange_size = yrange[1] - yrange[0]
rmax = self.rasterTop*yrange_size + yrange[0]
rmin = self.rasterBottom*yrange_size + yrange[0]
self.rasterYslots = np.linspace(rmin, rmax, self.nreps)
self.rasterBoundsUpdated.emit((self.rasterBottom, self.rasterTop), self.getTitle()) |
def _attach_record_as_json(mfg_event, record):
"""Attach a copy of the record as JSON so we have an un-mangled copy."""
attachment = mfg_event.attachment.add()
attachment.name = TEST_RECORD_ATTACHMENT_NAME
test_record_dict = htf_data.convert_to_base_types(record)
attachment.value_binary = _convert_object_to_json(test_record_dict)
attachment.type = test_runs_pb2.TEXT_UTF8 | Attach a copy of the record as JSON so we have an un-mangled copy. | Below is the the instruction that describes the task:
### Input:
Attach a copy of the record as JSON so we have an un-mangled copy.
### Response:
def _attach_record_as_json(mfg_event, record):
"""Attach a copy of the record as JSON so we have an un-mangled copy."""
attachment = mfg_event.attachment.add()
attachment.name = TEST_RECORD_ATTACHMENT_NAME
test_record_dict = htf_data.convert_to_base_types(record)
attachment.value_binary = _convert_object_to_json(test_record_dict)
attachment.type = test_runs_pb2.TEXT_UTF8 |
def hamming_distance(s1, s2, equality_function=operator.eq):
"""
Returns the hamming distance between two strings.
"""
if not len(s1) == len(s2):
raise ValueError("String lengths are not equal")
# Number of non-matching characters:
return sum(not equality_function(c1, c2) for c1, c2 in zip(s1, s2)) | Returns the hamming distance between two strings. | Below is the the instruction that describes the task:
### Input:
Returns the hamming distance between two strings.
### Response:
def hamming_distance(s1, s2, equality_function=operator.eq):
"""
Returns the hamming distance between two strings.
"""
if not len(s1) == len(s2):
raise ValueError("String lengths are not equal")
# Number of non-matching characters:
return sum(not equality_function(c1, c2) for c1, c2 in zip(s1, s2)) |
def phrase_replace(self, replace_dict):
"""
Replace phrases with single token, mapping defined in replace_dict
"""
def r(tokens):
text = ' ' + ' '.join(tokens)
for k, v in replace_dict.items():
text = text.replace(" " + k + " ", " " + v + " ")
return text.split()
self.stems = list(map(r, self.stems)) | Replace phrases with single token, mapping defined in replace_dict | Below is the the instruction that describes the task:
### Input:
Replace phrases with single token, mapping defined in replace_dict
### Response:
def phrase_replace(self, replace_dict):
"""
Replace phrases with single token, mapping defined in replace_dict
"""
def r(tokens):
text = ' ' + ' '.join(tokens)
for k, v in replace_dict.items():
text = text.replace(" " + k + " ", " " + v + " ")
return text.split()
self.stems = list(map(r, self.stems)) |
async def get_update_info(self, from_network=True) -> SoftwareUpdateInfo:
"""Get information about updates."""
if from_network:
from_network = "true"
else:
from_network = "false"
# from_network = ""
info = await self.services["system"]["getSWUpdateInfo"](network=from_network)
return SoftwareUpdateInfo.make(**info) | Get information about updates. | Below is the the instruction that describes the task:
### Input:
Get information about updates.
### Response:
async def get_update_info(self, from_network=True) -> SoftwareUpdateInfo:
"""Get information about updates."""
if from_network:
from_network = "true"
else:
from_network = "false"
# from_network = ""
info = await self.services["system"]["getSWUpdateInfo"](network=from_network)
return SoftwareUpdateInfo.make(**info) |
def p_arithmetic_expression(self, p):
'''
expression : expression PLUS expression
| expression MINUS expression
| expression TIMES expression
| expression DIV expression
| expression MOD expression
'''
p[0] = BinaryOperationNode(left=p[1],
operator=p[2],
right=p[3]) | expression : expression PLUS expression
| expression MINUS expression
| expression TIMES expression
| expression DIV expression
| expression MOD expression | Below is the the instruction that describes the task:
### Input:
expression : expression PLUS expression
| expression MINUS expression
| expression TIMES expression
| expression DIV expression
| expression MOD expression
### Response:
def p_arithmetic_expression(self, p):
'''
expression : expression PLUS expression
| expression MINUS expression
| expression TIMES expression
| expression DIV expression
| expression MOD expression
'''
p[0] = BinaryOperationNode(left=p[1],
operator=p[2],
right=p[3]) |
def update_wcs(image,extnum,new_wcs,wcsname="",reusename=False,verbose=False):
"""
Updates the WCS of the specified extension number with the new WCS
after archiving the original WCS.
The value of 'new_wcs' needs to be the full
HSTWCS object.
Parameters
----------
image : str
Filename of image with WCS that needs to be updated
extnum : int
Extension number for extension with WCS to be updated/replaced
new_wcs : object
Full HSTWCS object which will replace/update the existing WCS
wcsname : str
Label to give newly updated WCS
reusename : bool
User can choose whether to over-write WCS with same name or not.
[Default: False]
verbose : bool, int
Print extra messages during processing? [Default: False]
"""
# Start by insuring that the correct value of 'orientat' has been computed
new_wcs.setOrient()
fimg_open=False
if not isinstance(image, fits.HDUList):
fimg = fits.open(image, mode='update', memmap=False)
fimg_open = True
fimg_update = True
else:
fimg = image
if fimg.fileinfo(0)['filemode'] is 'update':
fimg_update = True
else:
fimg_update = False
# Determine final (unique) WCSNAME value, either based on the default or
# user-provided name
if util.is_blank(wcsname):
wcsname = 'TWEAK'
if not reusename:
wcsname = create_unique_wcsname(fimg, extnum, wcsname)
idchdr = True
if new_wcs.idcscale is None:
idchdr = False
# Open the file for updating the WCS
try:
logstr = 'Updating header for %s[%s]'%(fimg.filename(),str(extnum))
if verbose:
print(logstr)
else:
log.info(logstr)
hdr = fimg[extnum].header
if verbose:
log.info(' with WCS of')
new_wcs.printwcs()
print("WCSNAME : ",wcsname)
# Insure that if a copy of the WCS has not been created yet, it will be now
wcs_hdr = new_wcs.wcs2header(idc2hdr=idchdr, relax=True)
for key in wcs_hdr:
hdr[key] = wcs_hdr[key]
hdr['ORIENTAT'] = new_wcs.orientat
hdr['WCSNAME'] = wcsname
util.updateNEXTENDKw(fimg)
# Only if this image was opened in update mode should this
# newly updated WCS be archived, as it will never be written out
# to a file otherwise.
if fimg_update:
if not reusename:
# Save the newly updated WCS as an alternate WCS as well
wkey = wcsutil.altwcs.next_wcskey(fimg,ext=extnum)
else:
wkey = wcsutil.altwcs.getKeyFromName(hdr,wcsname)
# wcskey needs to be specified so that archiveWCS will create a
# duplicate WCS with the same WCSNAME as the Primary WCS
wcsutil.altwcs.archiveWCS(fimg,[extnum],wcsname=wcsname,
wcskey=wkey, reusekey=reusename)
finally:
if fimg_open:
# finish up by closing the file now
fimg.close() | Updates the WCS of the specified extension number with the new WCS
after archiving the original WCS.
The value of 'new_wcs' needs to be the full
HSTWCS object.
Parameters
----------
image : str
Filename of image with WCS that needs to be updated
extnum : int
Extension number for extension with WCS to be updated/replaced
new_wcs : object
Full HSTWCS object which will replace/update the existing WCS
wcsname : str
Label to give newly updated WCS
reusename : bool
User can choose whether to over-write WCS with same name or not.
[Default: False]
verbose : bool, int
Print extra messages during processing? [Default: False] | Below is the the instruction that describes the task:
### Input:
Updates the WCS of the specified extension number with the new WCS
after archiving the original WCS.
The value of 'new_wcs' needs to be the full
HSTWCS object.
Parameters
----------
image : str
Filename of image with WCS that needs to be updated
extnum : int
Extension number for extension with WCS to be updated/replaced
new_wcs : object
Full HSTWCS object which will replace/update the existing WCS
wcsname : str
Label to give newly updated WCS
reusename : bool
User can choose whether to over-write WCS with same name or not.
[Default: False]
verbose : bool, int
Print extra messages during processing? [Default: False]
### Response:
def update_wcs(image,extnum,new_wcs,wcsname="",reusename=False,verbose=False):
"""
Updates the WCS of the specified extension number with the new WCS
after archiving the original WCS.
The value of 'new_wcs' needs to be the full
HSTWCS object.
Parameters
----------
image : str
Filename of image with WCS that needs to be updated
extnum : int
Extension number for extension with WCS to be updated/replaced
new_wcs : object
Full HSTWCS object which will replace/update the existing WCS
wcsname : str
Label to give newly updated WCS
reusename : bool
User can choose whether to over-write WCS with same name or not.
[Default: False]
verbose : bool, int
Print extra messages during processing? [Default: False]
"""
# Start by insuring that the correct value of 'orientat' has been computed
new_wcs.setOrient()
fimg_open=False
if not isinstance(image, fits.HDUList):
fimg = fits.open(image, mode='update', memmap=False)
fimg_open = True
fimg_update = True
else:
fimg = image
if fimg.fileinfo(0)['filemode'] is 'update':
fimg_update = True
else:
fimg_update = False
# Determine final (unique) WCSNAME value, either based on the default or
# user-provided name
if util.is_blank(wcsname):
wcsname = 'TWEAK'
if not reusename:
wcsname = create_unique_wcsname(fimg, extnum, wcsname)
idchdr = True
if new_wcs.idcscale is None:
idchdr = False
# Open the file for updating the WCS
try:
logstr = 'Updating header for %s[%s]'%(fimg.filename(),str(extnum))
if verbose:
print(logstr)
else:
log.info(logstr)
hdr = fimg[extnum].header
if verbose:
log.info(' with WCS of')
new_wcs.printwcs()
print("WCSNAME : ",wcsname)
# Insure that if a copy of the WCS has not been created yet, it will be now
wcs_hdr = new_wcs.wcs2header(idc2hdr=idchdr, relax=True)
for key in wcs_hdr:
hdr[key] = wcs_hdr[key]
hdr['ORIENTAT'] = new_wcs.orientat
hdr['WCSNAME'] = wcsname
util.updateNEXTENDKw(fimg)
# Only if this image was opened in update mode should this
# newly updated WCS be archived, as it will never be written out
# to a file otherwise.
if fimg_update:
if not reusename:
# Save the newly updated WCS as an alternate WCS as well
wkey = wcsutil.altwcs.next_wcskey(fimg,ext=extnum)
else:
wkey = wcsutil.altwcs.getKeyFromName(hdr,wcsname)
# wcskey needs to be specified so that archiveWCS will create a
# duplicate WCS with the same WCSNAME as the Primary WCS
wcsutil.altwcs.archiveWCS(fimg,[extnum],wcsname=wcsname,
wcskey=wkey, reusekey=reusename)
finally:
if fimg_open:
# finish up by closing the file now
fimg.close() |
def section(rows, columns, items, label=None):
"""A section consisting of rows and columns"""
# TODO: Integrate label
sections = []
column_class = "section-column col-sm-%i" % (12 / columns)
for vertical in range(columns):
column_items = []
for horizontal in range(rows):
try:
item = items[horizontal][vertical]
column_items.append(item)
except IndexError:
hfoslog('Field in', label, 'omitted, due to missing row/column:', vertical, horizontal,
lvl=warn, emitter='FORMS', tb=True, frame=2)
column = {
'type': 'section',
'htmlClass': column_class,
'items': column_items
}
sections.append(column)
result = {
'type': 'section',
'htmlClass': 'row',
'items': sections
}
return result | A section consisting of rows and columns | Below is the the instruction that describes the task:
### Input:
A section consisting of rows and columns
### Response:
def section(rows, columns, items, label=None):
"""A section consisting of rows and columns"""
# TODO: Integrate label
sections = []
column_class = "section-column col-sm-%i" % (12 / columns)
for vertical in range(columns):
column_items = []
for horizontal in range(rows):
try:
item = items[horizontal][vertical]
column_items.append(item)
except IndexError:
hfoslog('Field in', label, 'omitted, due to missing row/column:', vertical, horizontal,
lvl=warn, emitter='FORMS', tb=True, frame=2)
column = {
'type': 'section',
'htmlClass': column_class,
'items': column_items
}
sections.append(column)
result = {
'type': 'section',
'htmlClass': 'row',
'items': sections
}
return result |
def compute_num_true_positives(ref_freqs, est_freqs, window=0.5, chroma=False):
"""Compute the number of true positives in an estimate given a reference.
A frequency is correct if it is within a quartertone of the
correct frequency.
Parameters
----------
ref_freqs : list of np.ndarray
reference frequencies (MIDI)
est_freqs : list of np.ndarray
estimated frequencies (MIDI)
window : float
Window size, in semitones
chroma : bool
If True, computes distances modulo n.
If True, ``ref_freqs`` and ``est_freqs`` should be wrapped modulo n.
Returns
-------
true_positives : np.ndarray
Array the same length as ref_freqs containing the number of true
positives.
"""
n_frames = len(ref_freqs)
true_positives = np.zeros((n_frames, ))
for i, (ref_frame, est_frame) in enumerate(zip(ref_freqs, est_freqs)):
if chroma:
# match chroma-wrapped frequency events
matching = util.match_events(
ref_frame, est_frame, window,
distance=util._outer_distance_mod_n)
else:
# match frequency events within tolerance window in semitones
matching = util.match_events(ref_frame, est_frame, window)
true_positives[i] = len(matching)
return true_positives | Compute the number of true positives in an estimate given a reference.
A frequency is correct if it is within a quartertone of the
correct frequency.
Parameters
----------
ref_freqs : list of np.ndarray
reference frequencies (MIDI)
est_freqs : list of np.ndarray
estimated frequencies (MIDI)
window : float
Window size, in semitones
chroma : bool
If True, computes distances modulo n.
If True, ``ref_freqs`` and ``est_freqs`` should be wrapped modulo n.
Returns
-------
true_positives : np.ndarray
Array the same length as ref_freqs containing the number of true
positives. | Below is the the instruction that describes the task:
### Input:
Compute the number of true positives in an estimate given a reference.
A frequency is correct if it is within a quartertone of the
correct frequency.
Parameters
----------
ref_freqs : list of np.ndarray
reference frequencies (MIDI)
est_freqs : list of np.ndarray
estimated frequencies (MIDI)
window : float
Window size, in semitones
chroma : bool
If True, computes distances modulo n.
If True, ``ref_freqs`` and ``est_freqs`` should be wrapped modulo n.
Returns
-------
true_positives : np.ndarray
Array the same length as ref_freqs containing the number of true
positives.
### Response:
def compute_num_true_positives(ref_freqs, est_freqs, window=0.5, chroma=False):
"""Compute the number of true positives in an estimate given a reference.
A frequency is correct if it is within a quartertone of the
correct frequency.
Parameters
----------
ref_freqs : list of np.ndarray
reference frequencies (MIDI)
est_freqs : list of np.ndarray
estimated frequencies (MIDI)
window : float
Window size, in semitones
chroma : bool
If True, computes distances modulo n.
If True, ``ref_freqs`` and ``est_freqs`` should be wrapped modulo n.
Returns
-------
true_positives : np.ndarray
Array the same length as ref_freqs containing the number of true
positives.
"""
n_frames = len(ref_freqs)
true_positives = np.zeros((n_frames, ))
for i, (ref_frame, est_frame) in enumerate(zip(ref_freqs, est_freqs)):
if chroma:
# match chroma-wrapped frequency events
matching = util.match_events(
ref_frame, est_frame, window,
distance=util._outer_distance_mod_n)
else:
# match frequency events within tolerance window in semitones
matching = util.match_events(ref_frame, est_frame, window)
true_positives[i] = len(matching)
return true_positives |
def to_latex(circuit, settings=None):
"""
Translates a given pyquil Program to a TikZ picture in a Latex document.
:param Program circuit: The circuit to be drawn, represented as a pyquil program.
:param dict settings: An optional dictionary with settings for drawing the circuit. See `get_default_settings`
in `latex_config` for more information about what settings should contain.
:return: LaTeX document string which can be compiled.
:rtype: string
"""
if settings is None:
settings = get_default_settings()
text = header(settings)
text += body(circuit, settings)
text += footer()
return text | Translates a given pyquil Program to a TikZ picture in a Latex document.
:param Program circuit: The circuit to be drawn, represented as a pyquil program.
:param dict settings: An optional dictionary with settings for drawing the circuit. See `get_default_settings`
in `latex_config` for more information about what settings should contain.
:return: LaTeX document string which can be compiled.
:rtype: string | Below is the the instruction that describes the task:
### Input:
Translates a given pyquil Program to a TikZ picture in a Latex document.
:param Program circuit: The circuit to be drawn, represented as a pyquil program.
:param dict settings: An optional dictionary with settings for drawing the circuit. See `get_default_settings`
in `latex_config` for more information about what settings should contain.
:return: LaTeX document string which can be compiled.
:rtype: string
### Response:
def to_latex(circuit, settings=None):
"""
Translates a given pyquil Program to a TikZ picture in a Latex document.
:param Program circuit: The circuit to be drawn, represented as a pyquil program.
:param dict settings: An optional dictionary with settings for drawing the circuit. See `get_default_settings`
in `latex_config` for more information about what settings should contain.
:return: LaTeX document string which can be compiled.
:rtype: string
"""
if settings is None:
settings = get_default_settings()
text = header(settings)
text += body(circuit, settings)
text += footer()
return text |
def bitop(self, operation, dest_key, *keys):
"""Perform a bitwise operation between multiple keys (containing
string values) and store the result in the destination key.
The values for operation can be one of:
- ``b'AND'``
- ``b'OR'``
- ``b'XOR'``
- ``b'NOT'``
- :data:`tredis.BITOP_AND` or ``b'&'``
- :data:`tredis.BITOP_OR` or ``b'|'``
- :data:`tredis.BITOP_XOR` or ``b'^'``
- :data:`tredis.BITOP_NOT` or ``b'~'``
``b'NOT'`` is special as it only takes an input key, because it
performs inversion of bits so it only makes sense as an unary operator.
The result of the operation is always stored at ``dest_key``.
**Handling of strings with different lengths**
When an operation is performed between strings having different
lengths, all the strings shorter than the longest string in the set are
treated as if they were zero-padded up to the length of the longest
string.
The same holds true for non-existent keys, that are considered as a
stream of zero bytes up to the length of the longest string.
.. versionadded:: 0.2.0
.. note:: **Time complexity**: ``O(N)``
:param bytes operation: The operation to perform
:param dest_key: The key to store the bitwise operation results to
:type dest_key: :class:`str`, :class:`bytes`
:param keys: One or more keys as keyword parameters for the bitwise op
:type keys: :class:`str`, :class:`bytes`
:return: The size of the string stored in the destination key, that is
equal to the size of the longest input string.
:rtype: int
:raises: :exc:`~tredis.exceptions.RedisError`, :exc:`ValueError`
"""
if (operation not in _BITOPTS.keys()
and operation not in _BITOPTS.values()):
raise ValueError('Invalid operation value: {}'.format(operation))
elif operation in [b'~', b'NOT'] and len(keys) > 1:
raise ValueError('NOT can only be used with 1 key')
if operation in _BITOPTS.keys():
operation = _BITOPTS[operation]
return self._execute([b'BITOP', operation, dest_key] + list(keys)) | Perform a bitwise operation between multiple keys (containing
string values) and store the result in the destination key.
The values for operation can be one of:
- ``b'AND'``
- ``b'OR'``
- ``b'XOR'``
- ``b'NOT'``
- :data:`tredis.BITOP_AND` or ``b'&'``
- :data:`tredis.BITOP_OR` or ``b'|'``
- :data:`tredis.BITOP_XOR` or ``b'^'``
- :data:`tredis.BITOP_NOT` or ``b'~'``
``b'NOT'`` is special as it only takes an input key, because it
performs inversion of bits so it only makes sense as an unary operator.
The result of the operation is always stored at ``dest_key``.
**Handling of strings with different lengths**
When an operation is performed between strings having different
lengths, all the strings shorter than the longest string in the set are
treated as if they were zero-padded up to the length of the longest
string.
The same holds true for non-existent keys, that are considered as a
stream of zero bytes up to the length of the longest string.
.. versionadded:: 0.2.0
.. note:: **Time complexity**: ``O(N)``
:param bytes operation: The operation to perform
:param dest_key: The key to store the bitwise operation results to
:type dest_key: :class:`str`, :class:`bytes`
:param keys: One or more keys as keyword parameters for the bitwise op
:type keys: :class:`str`, :class:`bytes`
:return: The size of the string stored in the destination key, that is
equal to the size of the longest input string.
:rtype: int
:raises: :exc:`~tredis.exceptions.RedisError`, :exc:`ValueError` | Below is the the instruction that describes the task:
### Input:
Perform a bitwise operation between multiple keys (containing
string values) and store the result in the destination key.
The values for operation can be one of:
- ``b'AND'``
- ``b'OR'``
- ``b'XOR'``
- ``b'NOT'``
- :data:`tredis.BITOP_AND` or ``b'&'``
- :data:`tredis.BITOP_OR` or ``b'|'``
- :data:`tredis.BITOP_XOR` or ``b'^'``
- :data:`tredis.BITOP_NOT` or ``b'~'``
``b'NOT'`` is special as it only takes an input key, because it
performs inversion of bits so it only makes sense as an unary operator.
The result of the operation is always stored at ``dest_key``.
**Handling of strings with different lengths**
When an operation is performed between strings having different
lengths, all the strings shorter than the longest string in the set are
treated as if they were zero-padded up to the length of the longest
string.
The same holds true for non-existent keys, that are considered as a
stream of zero bytes up to the length of the longest string.
.. versionadded:: 0.2.0
.. note:: **Time complexity**: ``O(N)``
:param bytes operation: The operation to perform
:param dest_key: The key to store the bitwise operation results to
:type dest_key: :class:`str`, :class:`bytes`
:param keys: One or more keys as keyword parameters for the bitwise op
:type keys: :class:`str`, :class:`bytes`
:return: The size of the string stored in the destination key, that is
equal to the size of the longest input string.
:rtype: int
:raises: :exc:`~tredis.exceptions.RedisError`, :exc:`ValueError`
### Response:
def bitop(self, operation, dest_key, *keys):
"""Perform a bitwise operation between multiple keys (containing
string values) and store the result in the destination key.
The values for operation can be one of:
- ``b'AND'``
- ``b'OR'``
- ``b'XOR'``
- ``b'NOT'``
- :data:`tredis.BITOP_AND` or ``b'&'``
- :data:`tredis.BITOP_OR` or ``b'|'``
- :data:`tredis.BITOP_XOR` or ``b'^'``
- :data:`tredis.BITOP_NOT` or ``b'~'``
``b'NOT'`` is special as it only takes an input key, because it
performs inversion of bits so it only makes sense as an unary operator.
The result of the operation is always stored at ``dest_key``.
**Handling of strings with different lengths**
When an operation is performed between strings having different
lengths, all the strings shorter than the longest string in the set are
treated as if they were zero-padded up to the length of the longest
string.
The same holds true for non-existent keys, that are considered as a
stream of zero bytes up to the length of the longest string.
.. versionadded:: 0.2.0
.. note:: **Time complexity**: ``O(N)``
:param bytes operation: The operation to perform
:param dest_key: The key to store the bitwise operation results to
:type dest_key: :class:`str`, :class:`bytes`
:param keys: One or more keys as keyword parameters for the bitwise op
:type keys: :class:`str`, :class:`bytes`
:return: The size of the string stored in the destination key, that is
equal to the size of the longest input string.
:rtype: int
:raises: :exc:`~tredis.exceptions.RedisError`, :exc:`ValueError`
"""
if (operation not in _BITOPTS.keys()
and operation not in _BITOPTS.values()):
raise ValueError('Invalid operation value: {}'.format(operation))
elif operation in [b'~', b'NOT'] and len(keys) > 1:
raise ValueError('NOT can only be used with 1 key')
if operation in _BITOPTS.keys():
operation = _BITOPTS[operation]
return self._execute([b'BITOP', operation, dest_key] + list(keys)) |
def _get_repo():
"""Identify the path to the repository origin."""
command = ['git', 'rev-parse', '--show-toplevel']
if six.PY2:
try:
return check_output(command).decode('utf-8').strip() # nosec
except CalledProcessError:
return ''
else:
return (run(command, stdout=PIPE, stderr=PIPE)
.stdout.decode('utf-8').strip()) | Identify the path to the repository origin. | Below is the the instruction that describes the task:
### Input:
Identify the path to the repository origin.
### Response:
def _get_repo():
"""Identify the path to the repository origin."""
command = ['git', 'rev-parse', '--show-toplevel']
if six.PY2:
try:
return check_output(command).decode('utf-8').strip() # nosec
except CalledProcessError:
return ''
else:
return (run(command, stdout=PIPE, stderr=PIPE)
.stdout.decode('utf-8').strip()) |
def set_wave_form(self, value):
''' setter '''
if isinstance(value, WaveFormInterface) is False:
raise TypeError()
self.__wave_form = value | setter | Below is the the instruction that describes the task:
### Input:
setter
### Response:
def set_wave_form(self, value):
''' setter '''
if isinstance(value, WaveFormInterface) is False:
raise TypeError()
self.__wave_form = value |
def _make_reversed_wildcards(self, old_length=-1):
"""Creates a full mapping from all wildcard translations to the corresponding wildcards"""
if len(self._reversed_wildcards) > 0:
# We already created reversed wildcards, so we don't need to do all of them
# again
start = old_length
else:
start = -1
for wildcards, func in self._wildcard_functions.items():
for irun in range(start, len(self)):
translated_name = func(irun)
if not translated_name in self._reversed_wildcards:
self._reversed_wildcards[translated_name] = ([], wildcards)
self._reversed_wildcards[translated_name][0].append(irun) | Creates a full mapping from all wildcard translations to the corresponding wildcards | Below is the the instruction that describes the task:
### Input:
Creates a full mapping from all wildcard translations to the corresponding wildcards
### Response:
def _make_reversed_wildcards(self, old_length=-1):
"""Creates a full mapping from all wildcard translations to the corresponding wildcards"""
if len(self._reversed_wildcards) > 0:
# We already created reversed wildcards, so we don't need to do all of them
# again
start = old_length
else:
start = -1
for wildcards, func in self._wildcard_functions.items():
for irun in range(start, len(self)):
translated_name = func(irun)
if not translated_name in self._reversed_wildcards:
self._reversed_wildcards[translated_name] = ([], wildcards)
self._reversed_wildcards[translated_name][0].append(irun) |
def data(self, index, role):
'''Return data for *index* according to *role*.'''
if not index.isValid():
return None
column = index.column()
item = index.internalPointer()
if role == self.ITEM_ROLE:
return item
elif role == Qt.DisplayRole:
if column == 0:
return item.name
elif column == 1:
if item.size:
return item.size
elif column == 2:
return item.type
elif column == 3:
if item.modified is not None:
return item.modified.strftime('%c')
elif role == Qt.DecorationRole:
if column == 0:
return self.iconFactory.icon(item)
elif role == Qt.TextAlignmentRole:
if column == 1:
return Qt.AlignRight
else:
return Qt.AlignLeft
return None | Return data for *index* according to *role*. | Below is the the instruction that describes the task:
### Input:
Return data for *index* according to *role*.
### Response:
def data(self, index, role):
'''Return data for *index* according to *role*.'''
if not index.isValid():
return None
column = index.column()
item = index.internalPointer()
if role == self.ITEM_ROLE:
return item
elif role == Qt.DisplayRole:
if column == 0:
return item.name
elif column == 1:
if item.size:
return item.size
elif column == 2:
return item.type
elif column == 3:
if item.modified is not None:
return item.modified.strftime('%c')
elif role == Qt.DecorationRole:
if column == 0:
return self.iconFactory.icon(item)
elif role == Qt.TextAlignmentRole:
if column == 1:
return Qt.AlignRight
else:
return Qt.AlignLeft
return None |
def apply_boundary_conditions_to_cm(external_indices, cm):
"""Remove connections to or from external nodes."""
cm = cm.copy()
cm[external_indices, :] = 0 # Zero-out row
cm[:, external_indices] = 0 # Zero-out columnt
return cm | Remove connections to or from external nodes. | Below is the the instruction that describes the task:
### Input:
Remove connections to or from external nodes.
### Response:
def apply_boundary_conditions_to_cm(external_indices, cm):
"""Remove connections to or from external nodes."""
cm = cm.copy()
cm[external_indices, :] = 0 # Zero-out row
cm[:, external_indices] = 0 # Zero-out columnt
return cm |
def run(self, block=True, binary=False, cwd=None):
"""Runs the given command, with or without pexpect functionality enabled."""
self.blocking = block
# Use subprocess.
if self.blocking:
popen_kwargs = self._default_popen_kwargs.copy()
popen_kwargs['universal_newlines'] = not binary
if cwd:
popen_kwargs['cwd'] = cwd
s = subprocess.Popen(self._popen_args, **popen_kwargs)
# Otherwise, use pexpect.
else:
pexpect_kwargs = self._default_pexpect_kwargs.copy()
if binary:
pexpect_kwargs['encoding'] = None
if cwd:
pexpect_kwargs['cwd'] = cwd
# Enable Python subprocesses to work with expect functionality.
pexpect_kwargs['env']['PYTHONUNBUFFERED'] = '1'
s = PopenSpawn(self._popen_args, **pexpect_kwargs)
self.subprocess = s
self.was_run = True | Runs the given command, with or without pexpect functionality enabled. | Below is the the instruction that describes the task:
### Input:
Runs the given command, with or without pexpect functionality enabled.
### Response:
def run(self, block=True, binary=False, cwd=None):
"""Runs the given command, with or without pexpect functionality enabled."""
self.blocking = block
# Use subprocess.
if self.blocking:
popen_kwargs = self._default_popen_kwargs.copy()
popen_kwargs['universal_newlines'] = not binary
if cwd:
popen_kwargs['cwd'] = cwd
s = subprocess.Popen(self._popen_args, **popen_kwargs)
# Otherwise, use pexpect.
else:
pexpect_kwargs = self._default_pexpect_kwargs.copy()
if binary:
pexpect_kwargs['encoding'] = None
if cwd:
pexpect_kwargs['cwd'] = cwd
# Enable Python subprocesses to work with expect functionality.
pexpect_kwargs['env']['PYTHONUNBUFFERED'] = '1'
s = PopenSpawn(self._popen_args, **pexpect_kwargs)
self.subprocess = s
self.was_run = True |
def close_connection(self, connection, force=False):
"""overriding the baseclass function, this routine will decline to
close a connection at the end of a transaction context. This allows
for reuse of connections."""
if force:
print('PostgresPooled - delegating connection closure')
try:
super(PostgresPooled, self).close_connection(connection,
force)
except self.operational_exceptions:
print('PostgresPooled - failed closing')
for name, conn in self.pool.iteritems():
if conn is connection:
break
del self.pool[name]
else:
print('PostgresPooled - refusing to close connection') | overriding the baseclass function, this routine will decline to
close a connection at the end of a transaction context. This allows
for reuse of connections. | Below is the the instruction that describes the task:
### Input:
overriding the baseclass function, this routine will decline to
close a connection at the end of a transaction context. This allows
for reuse of connections.
### Response:
def close_connection(self, connection, force=False):
"""overriding the baseclass function, this routine will decline to
close a connection at the end of a transaction context. This allows
for reuse of connections."""
if force:
print('PostgresPooled - delegating connection closure')
try:
super(PostgresPooled, self).close_connection(connection,
force)
except self.operational_exceptions:
print('PostgresPooled - failed closing')
for name, conn in self.pool.iteritems():
if conn is connection:
break
del self.pool[name]
else:
print('PostgresPooled - refusing to close connection') |
def _partial_fit(model_and_meta, X, y, fit_params):
"""
Call partial_fit on a classifiers with training data X and y
Arguments
---------
model_and_meta : Tuple[Estimator, dict]
X, y : np.ndarray, np.ndarray
Training data
fit_params : dict
Extra keyword arguments to pass to partial_fit
Returns
-------
Results
A namedtuple with four fields: info, models, history, best
* info : Dict[model_id, List[Dict]]
Keys are integers identifying each model. Values are a
List of Dict
* models : Dict[model_id, Future[Estimator]]
A dictionary with the same keys as `info`. The values
are futures to the fitted models.
* history : List[Dict]
The history of model fitting for each model. Each element
of the list is a dictionary with the following elements:
* model_id : int
A superset of the keys for `info` and `models`.
* params : Dict[str, Any]
Parameters this model was trained with.
* partial_fit_calls : int
The number of *consecutive* partial fit calls at this stage in
this models training history.
* partial_fit_time : float
Time (in seconds) spent on this partial fit
* score : float
Score on the test set for the model at this point in history
* score_time : float
Time (in seconds) spent on this scoring.
* best : Tuple[model_id, Future[Estimator]]]
The estimator with the highest validation score in the final
round.
"""
with log_errors():
start = time()
model, meta = model_and_meta
if len(X):
model = deepcopy(model)
model.partial_fit(X, y, **(fit_params or {}))
meta = dict(meta)
meta["partial_fit_calls"] += 1
meta["partial_fit_time"] = time() - start
return model, meta | Call partial_fit on a classifiers with training data X and y
Arguments
---------
model_and_meta : Tuple[Estimator, dict]
X, y : np.ndarray, np.ndarray
Training data
fit_params : dict
Extra keyword arguments to pass to partial_fit
Returns
-------
Results
A namedtuple with four fields: info, models, history, best
* info : Dict[model_id, List[Dict]]
Keys are integers identifying each model. Values are a
List of Dict
* models : Dict[model_id, Future[Estimator]]
A dictionary with the same keys as `info`. The values
are futures to the fitted models.
* history : List[Dict]
The history of model fitting for each model. Each element
of the list is a dictionary with the following elements:
* model_id : int
A superset of the keys for `info` and `models`.
* params : Dict[str, Any]
Parameters this model was trained with.
* partial_fit_calls : int
The number of *consecutive* partial fit calls at this stage in
this models training history.
* partial_fit_time : float
Time (in seconds) spent on this partial fit
* score : float
Score on the test set for the model at this point in history
* score_time : float
Time (in seconds) spent on this scoring.
* best : Tuple[model_id, Future[Estimator]]]
The estimator with the highest validation score in the final
round. | Below is the the instruction that describes the task:
### Input:
Call partial_fit on a classifiers with training data X and y
Arguments
---------
model_and_meta : Tuple[Estimator, dict]
X, y : np.ndarray, np.ndarray
Training data
fit_params : dict
Extra keyword arguments to pass to partial_fit
Returns
-------
Results
A namedtuple with four fields: info, models, history, best
* info : Dict[model_id, List[Dict]]
Keys are integers identifying each model. Values are a
List of Dict
* models : Dict[model_id, Future[Estimator]]
A dictionary with the same keys as `info`. The values
are futures to the fitted models.
* history : List[Dict]
The history of model fitting for each model. Each element
of the list is a dictionary with the following elements:
* model_id : int
A superset of the keys for `info` and `models`.
* params : Dict[str, Any]
Parameters this model was trained with.
* partial_fit_calls : int
The number of *consecutive* partial fit calls at this stage in
this models training history.
* partial_fit_time : float
Time (in seconds) spent on this partial fit
* score : float
Score on the test set for the model at this point in history
* score_time : float
Time (in seconds) spent on this scoring.
* best : Tuple[model_id, Future[Estimator]]]
The estimator with the highest validation score in the final
round.
### Response:
def _partial_fit(model_and_meta, X, y, fit_params):
"""
Call partial_fit on a classifiers with training data X and y
Arguments
---------
model_and_meta : Tuple[Estimator, dict]
X, y : np.ndarray, np.ndarray
Training data
fit_params : dict
Extra keyword arguments to pass to partial_fit
Returns
-------
Results
A namedtuple with four fields: info, models, history, best
* info : Dict[model_id, List[Dict]]
Keys are integers identifying each model. Values are a
List of Dict
* models : Dict[model_id, Future[Estimator]]
A dictionary with the same keys as `info`. The values
are futures to the fitted models.
* history : List[Dict]
The history of model fitting for each model. Each element
of the list is a dictionary with the following elements:
* model_id : int
A superset of the keys for `info` and `models`.
* params : Dict[str, Any]
Parameters this model was trained with.
* partial_fit_calls : int
The number of *consecutive* partial fit calls at this stage in
this models training history.
* partial_fit_time : float
Time (in seconds) spent on this partial fit
* score : float
Score on the test set for the model at this point in history
* score_time : float
Time (in seconds) spent on this scoring.
* best : Tuple[model_id, Future[Estimator]]]
The estimator with the highest validation score in the final
round.
"""
with log_errors():
start = time()
model, meta = model_and_meta
if len(X):
model = deepcopy(model)
model.partial_fit(X, y, **(fit_params or {}))
meta = dict(meta)
meta["partial_fit_calls"] += 1
meta["partial_fit_time"] = time() - start
return model, meta |
def filter(self, **kwargs):
"""
Add a filter to this C{readsAlignments}.
@param kwargs: Keyword arguments, as accepted by
C{ReadsAlignmentsFilter}.
@return: C{self}
"""
self._filters.append(ReadsAlignmentsFilter(**kwargs).filter)
return self | Add a filter to this C{readsAlignments}.
@param kwargs: Keyword arguments, as accepted by
C{ReadsAlignmentsFilter}.
@return: C{self} | Below is the the instruction that describes the task:
### Input:
Add a filter to this C{readsAlignments}.
@param kwargs: Keyword arguments, as accepted by
C{ReadsAlignmentsFilter}.
@return: C{self}
### Response:
def filter(self, **kwargs):
"""
Add a filter to this C{readsAlignments}.
@param kwargs: Keyword arguments, as accepted by
C{ReadsAlignmentsFilter}.
@return: C{self}
"""
self._filters.append(ReadsAlignmentsFilter(**kwargs).filter)
return self |
def _handle_tag_removeobject(self):
"""Handle the RemoveObject tag."""
obj = _make_object("RemoveObject")
obj.CharacterId = unpack_ui16(self._src)
obj.Depth = unpack_ui16(self._src)
return obj | Handle the RemoveObject tag. | Below is the the instruction that describes the task:
### Input:
Handle the RemoveObject tag.
### Response:
def _handle_tag_removeobject(self):
"""Handle the RemoveObject tag."""
obj = _make_object("RemoveObject")
obj.CharacterId = unpack_ui16(self._src)
obj.Depth = unpack_ui16(self._src)
return obj |
def add_cookie_header(self, request):
"""Add correct Cookie: header to request (urllib.request.Request object).
The Cookie2 header is also added unless policy.hide_cookie2 is true.
"""
_debug("add_cookie_header")
self._cookies_lock.acquire()
try:
self._policy._now = self._now = int(time.time())
cookies = self._cookies_for_request(request)
attrs = self._cookie_attrs(cookies)
if attrs:
if not request.has_header("Cookie"):
request.add_unredirected_header(
"Cookie", "; ".join(attrs))
# if necessary, advertise that we know RFC 2965
if (self._policy.rfc2965 and not self._policy.hide_cookie2 and
not request.has_header("Cookie2")):
for cookie in cookies:
if cookie.version != 1:
request.add_unredirected_header("Cookie2", '$Version="1"')
break
finally:
self._cookies_lock.release()
self.clear_expired_cookies() | Add correct Cookie: header to request (urllib.request.Request object).
The Cookie2 header is also added unless policy.hide_cookie2 is true. | Below is the the instruction that describes the task:
### Input:
Add correct Cookie: header to request (urllib.request.Request object).
The Cookie2 header is also added unless policy.hide_cookie2 is true.
### Response:
def add_cookie_header(self, request):
"""Add correct Cookie: header to request (urllib.request.Request object).
The Cookie2 header is also added unless policy.hide_cookie2 is true.
"""
_debug("add_cookie_header")
self._cookies_lock.acquire()
try:
self._policy._now = self._now = int(time.time())
cookies = self._cookies_for_request(request)
attrs = self._cookie_attrs(cookies)
if attrs:
if not request.has_header("Cookie"):
request.add_unredirected_header(
"Cookie", "; ".join(attrs))
# if necessary, advertise that we know RFC 2965
if (self._policy.rfc2965 and not self._policy.hide_cookie2 and
not request.has_header("Cookie2")):
for cookie in cookies:
if cookie.version != 1:
request.add_unredirected_header("Cookie2", '$Version="1"')
break
finally:
self._cookies_lock.release()
self.clear_expired_cookies() |
def _poseFromQuad(self, quad=None):
'''
estimate the pose of the object plane using quad
setting:
self.rvec -> rotation vector
self.tvec -> translation vector
'''
if quad is None:
quad = self.quad
if quad.ndim == 3:
quad = quad[0]
# http://answers.opencv.org/question/1073/what-format-does-cv2solvepnp-use-for-points-in/
# Find the rotation and translation vectors.
img_pn = np.ascontiguousarray(quad[:, :2],
dtype=np.float32).reshape((4, 1, 2))
obj_pn = self.obj_points - self.obj_points.mean(axis=0)
retval, rvec, tvec = cv2.solvePnP(
obj_pn,
img_pn,
self.opts['cameraMatrix'],
self.opts['distCoeffs'],
flags=cv2.SOLVEPNP_P3P # because exactly four points are given
)
if not retval:
print("Couln't estimate pose")
return tvec, rvec | estimate the pose of the object plane using quad
setting:
self.rvec -> rotation vector
self.tvec -> translation vector | Below is the the instruction that describes the task:
### Input:
estimate the pose of the object plane using quad
setting:
self.rvec -> rotation vector
self.tvec -> translation vector
### Response:
def _poseFromQuad(self, quad=None):
'''
estimate the pose of the object plane using quad
setting:
self.rvec -> rotation vector
self.tvec -> translation vector
'''
if quad is None:
quad = self.quad
if quad.ndim == 3:
quad = quad[0]
# http://answers.opencv.org/question/1073/what-format-does-cv2solvepnp-use-for-points-in/
# Find the rotation and translation vectors.
img_pn = np.ascontiguousarray(quad[:, :2],
dtype=np.float32).reshape((4, 1, 2))
obj_pn = self.obj_points - self.obj_points.mean(axis=0)
retval, rvec, tvec = cv2.solvePnP(
obj_pn,
img_pn,
self.opts['cameraMatrix'],
self.opts['distCoeffs'],
flags=cv2.SOLVEPNP_P3P # because exactly four points are given
)
if not retval:
print("Couln't estimate pose")
return tvec, rvec |
def time_v_terminal_Stokes(D, rhop, rho, mu, V0, tol=1e-14):
r'''Calculates the time required for a particle in Stoke's regime only to
reach terminal velocity (approximately). An infinitely long period is
required theoretically, but with floating points, it is possible to
calculate the time required to come within a specified `tol` of that
terminal velocity.
.. math::
t_{term} = -\frac{1}{18\mu}\ln \left(\frac{D^2g\rho - D^2 g \rho_p
+ 18\mu V_{term}}{D^2g\rho - D^2 g \rho_p + 18\mu V_0 } \right) D^2
\rho_p
Parameters
----------
D : float
Diameter of the sphere, [m]
rhop : float
Particle density, [kg/m^3]
rho : float
Density of the surrounding fluid, [kg/m^3]
mu : float
Viscosity of the surrounding fluid [Pa*s]
V0 : float
Initial velocity of the particle, [m/s]
tol : float, optional
How closely to approach the terminal velocity - the target velocity is
the terminal velocity multiplied by 1 (+/-) this, depending on if the
particle is accelerating or decelerating, [-]
Returns
-------
t : float
Time for the particle to reach the terminal velocity to within the
specified or an achievable tolerance, [s]
Notes
-----
The symbolic solution was obtained via Wolfram Alpha.
If a solution cannot be obtained due to floating point error at very high
tolerance, an exception is raised - but first, the tolerance is doubled,
up to fifty times in an attempt to obtain the highest possible precision
while sill giving an answer. If at any point the tolerance is larger than
1%, an exception is also raised.
Examples
--------
>>> time_v_terminal_Stokes(D=1e-7, rhop=2200., rho=1.2, mu=1.78E-5, V0=1)
3.188003113787153e-06
>>> time_v_terminal_Stokes(D=1e-2, rhop=2200., rho=1.2, mu=1.78E-5, V0=1,
... tol=1e-30)
24800.636391802
'''
term = D*D*g*rho - D*D*g*rhop
denominator = term + 18.*mu*V0
v_term_base = g*D*D*(rhop-rho)/(18.*mu)
for i in range(50):
try:
if v_term_base < V0:
v_term = v_term_base*(1.0 + tol)
else:
v_term = v_term_base*(1.0 - tol)
numerator = term + 18.*mu*v_term
return log(numerator/denominator)*D*D*rhop/mu*-1/18.
except ValueError:
tol = tol*2
if tol > 0.01:
raise Exception('Could not find a solution')
raise Exception('Could not find a solution') | r'''Calculates the time required for a particle in Stoke's regime only to
reach terminal velocity (approximately). An infinitely long period is
required theoretically, but with floating points, it is possible to
calculate the time required to come within a specified `tol` of that
terminal velocity.
.. math::
t_{term} = -\frac{1}{18\mu}\ln \left(\frac{D^2g\rho - D^2 g \rho_p
+ 18\mu V_{term}}{D^2g\rho - D^2 g \rho_p + 18\mu V_0 } \right) D^2
\rho_p
Parameters
----------
D : float
Diameter of the sphere, [m]
rhop : float
Particle density, [kg/m^3]
rho : float
Density of the surrounding fluid, [kg/m^3]
mu : float
Viscosity of the surrounding fluid [Pa*s]
V0 : float
Initial velocity of the particle, [m/s]
tol : float, optional
How closely to approach the terminal velocity - the target velocity is
the terminal velocity multiplied by 1 (+/-) this, depending on if the
particle is accelerating or decelerating, [-]
Returns
-------
t : float
Time for the particle to reach the terminal velocity to within the
specified or an achievable tolerance, [s]
Notes
-----
The symbolic solution was obtained via Wolfram Alpha.
If a solution cannot be obtained due to floating point error at very high
tolerance, an exception is raised - but first, the tolerance is doubled,
up to fifty times in an attempt to obtain the highest possible precision
while sill giving an answer. If at any point the tolerance is larger than
1%, an exception is also raised.
Examples
--------
>>> time_v_terminal_Stokes(D=1e-7, rhop=2200., rho=1.2, mu=1.78E-5, V0=1)
3.188003113787153e-06
>>> time_v_terminal_Stokes(D=1e-2, rhop=2200., rho=1.2, mu=1.78E-5, V0=1,
... tol=1e-30)
24800.636391802 | Below is the the instruction that describes the task:
### Input:
r'''Calculates the time required for a particle in Stoke's regime only to
reach terminal velocity (approximately). An infinitely long period is
required theoretically, but with floating points, it is possible to
calculate the time required to come within a specified `tol` of that
terminal velocity.
.. math::
t_{term} = -\frac{1}{18\mu}\ln \left(\frac{D^2g\rho - D^2 g \rho_p
+ 18\mu V_{term}}{D^2g\rho - D^2 g \rho_p + 18\mu V_0 } \right) D^2
\rho_p
Parameters
----------
D : float
Diameter of the sphere, [m]
rhop : float
Particle density, [kg/m^3]
rho : float
Density of the surrounding fluid, [kg/m^3]
mu : float
Viscosity of the surrounding fluid [Pa*s]
V0 : float
Initial velocity of the particle, [m/s]
tol : float, optional
How closely to approach the terminal velocity - the target velocity is
the terminal velocity multiplied by 1 (+/-) this, depending on if the
particle is accelerating or decelerating, [-]
Returns
-------
t : float
Time for the particle to reach the terminal velocity to within the
specified or an achievable tolerance, [s]
Notes
-----
The symbolic solution was obtained via Wolfram Alpha.
If a solution cannot be obtained due to floating point error at very high
tolerance, an exception is raised - but first, the tolerance is doubled,
up to fifty times in an attempt to obtain the highest possible precision
while sill giving an answer. If at any point the tolerance is larger than
1%, an exception is also raised.
Examples
--------
>>> time_v_terminal_Stokes(D=1e-7, rhop=2200., rho=1.2, mu=1.78E-5, V0=1)
3.188003113787153e-06
>>> time_v_terminal_Stokes(D=1e-2, rhop=2200., rho=1.2, mu=1.78E-5, V0=1,
... tol=1e-30)
24800.636391802
### Response:
def time_v_terminal_Stokes(D, rhop, rho, mu, V0, tol=1e-14):
r'''Calculates the time required for a particle in Stoke's regime only to
reach terminal velocity (approximately). An infinitely long period is
required theoretically, but with floating points, it is possible to
calculate the time required to come within a specified `tol` of that
terminal velocity.
.. math::
t_{term} = -\frac{1}{18\mu}\ln \left(\frac{D^2g\rho - D^2 g \rho_p
+ 18\mu V_{term}}{D^2g\rho - D^2 g \rho_p + 18\mu V_0 } \right) D^2
\rho_p
Parameters
----------
D : float
Diameter of the sphere, [m]
rhop : float
Particle density, [kg/m^3]
rho : float
Density of the surrounding fluid, [kg/m^3]
mu : float
Viscosity of the surrounding fluid [Pa*s]
V0 : float
Initial velocity of the particle, [m/s]
tol : float, optional
How closely to approach the terminal velocity - the target velocity is
the terminal velocity multiplied by 1 (+/-) this, depending on if the
particle is accelerating or decelerating, [-]
Returns
-------
t : float
Time for the particle to reach the terminal velocity to within the
specified or an achievable tolerance, [s]
Notes
-----
The symbolic solution was obtained via Wolfram Alpha.
If a solution cannot be obtained due to floating point error at very high
tolerance, an exception is raised - but first, the tolerance is doubled,
up to fifty times in an attempt to obtain the highest possible precision
while sill giving an answer. If at any point the tolerance is larger than
1%, an exception is also raised.
Examples
--------
>>> time_v_terminal_Stokes(D=1e-7, rhop=2200., rho=1.2, mu=1.78E-5, V0=1)
3.188003113787153e-06
>>> time_v_terminal_Stokes(D=1e-2, rhop=2200., rho=1.2, mu=1.78E-5, V0=1,
... tol=1e-30)
24800.636391802
'''
term = D*D*g*rho - D*D*g*rhop
denominator = term + 18.*mu*V0
v_term_base = g*D*D*(rhop-rho)/(18.*mu)
for i in range(50):
try:
if v_term_base < V0:
v_term = v_term_base*(1.0 + tol)
else:
v_term = v_term_base*(1.0 - tol)
numerator = term + 18.*mu*v_term
return log(numerator/denominator)*D*D*rhop/mu*-1/18.
except ValueError:
tol = tol*2
if tol > 0.01:
raise Exception('Could not find a solution')
raise Exception('Could not find a solution') |
def extract(self, item, list_article_candidate):
"""Compares the extracted top images.
:param item: The corresponding NewscrawlerItem
:param list_article_candidate: A list, the list of ArticleCandidate-Objects which have been extracted
:return: A string (url), the most likely top image
"""
list_topimage = []
for article_candidate in list_article_candidate:
if article_candidate.topimage is not None:
# Changes a relative path of an image to the absolute path of the given url.
article_candidate.topimage = self.image_absoulte_path(item['url'], article_candidate.topimage)
list_topimage.append((article_candidate.topimage, article_candidate.extractor))
# If there is no value in the list, return None.
if len(list_topimage) == 0:
return None
# If there are more options than one, return the result from newspaper.
list_newspaper = [x for x in list_topimage if x[1] == "newspaper"]
if len(list_newspaper) == 0:
# If there is no topimage extracted by newspaper, return the first result of list_topimage.
return list_topimage[0][0]
else:
return list_newspaper[0][0] | Compares the extracted top images.
:param item: The corresponding NewscrawlerItem
:param list_article_candidate: A list, the list of ArticleCandidate-Objects which have been extracted
:return: A string (url), the most likely top image | Below is the the instruction that describes the task:
### Input:
Compares the extracted top images.
:param item: The corresponding NewscrawlerItem
:param list_article_candidate: A list, the list of ArticleCandidate-Objects which have been extracted
:return: A string (url), the most likely top image
### Response:
def extract(self, item, list_article_candidate):
"""Compares the extracted top images.
:param item: The corresponding NewscrawlerItem
:param list_article_candidate: A list, the list of ArticleCandidate-Objects which have been extracted
:return: A string (url), the most likely top image
"""
list_topimage = []
for article_candidate in list_article_candidate:
if article_candidate.topimage is not None:
# Changes a relative path of an image to the absolute path of the given url.
article_candidate.topimage = self.image_absoulte_path(item['url'], article_candidate.topimage)
list_topimage.append((article_candidate.topimage, article_candidate.extractor))
# If there is no value in the list, return None.
if len(list_topimage) == 0:
return None
# If there are more options than one, return the result from newspaper.
list_newspaper = [x for x in list_topimage if x[1] == "newspaper"]
if len(list_newspaper) == 0:
# If there is no topimage extracted by newspaper, return the first result of list_topimage.
return list_topimage[0][0]
else:
return list_newspaper[0][0] |
def check_chain_dir_exists(self, warn_migration=False):
"""
Checks to make sure there is a directory called ``Chains`` at the root of DATA_DIR_PATH
and creates it if it doesn't exist yet
"""
chain_path = os.path.join(self.DATA_DIR_PATH, 'Chains')
if not os.path.exists(chain_path):
try:
os.makedirs(chain_path)
logger.info("Created 'Chains' directory at %s " % chain_path)
except Exception as e:
logger.error("Could not create 'Chains' directory at %s %s" % (chain_path, e))
warn_migration = False
# Add a warning for migration purposes if we created a chain dir
if warn_migration and ROOT_INSTALL_PATH != self.DATA_DIR_PATH:
if os.path.exists(os.path.join(ROOT_INSTALL_PATH, 'Chains')):
logger.warning("[MIGRATION] You are now using the blockchain data at %s, but it appears you have existing data at %s/Chains" % (
chain_path, ROOT_INSTALL_PATH))
logger.warning(
"[MIGRATION] If you would like to use your existing data, please move any data at %s/Chains to %s " % (ROOT_INSTALL_PATH, chain_path))
logger.warning("[MIGRATION] Or you can continue using your existing data by starting your script with the `--datadir=.` flag") | Checks to make sure there is a directory called ``Chains`` at the root of DATA_DIR_PATH
and creates it if it doesn't exist yet | Below is the the instruction that describes the task:
### Input:
Checks to make sure there is a directory called ``Chains`` at the root of DATA_DIR_PATH
and creates it if it doesn't exist yet
### Response:
def check_chain_dir_exists(self, warn_migration=False):
"""
Checks to make sure there is a directory called ``Chains`` at the root of DATA_DIR_PATH
and creates it if it doesn't exist yet
"""
chain_path = os.path.join(self.DATA_DIR_PATH, 'Chains')
if not os.path.exists(chain_path):
try:
os.makedirs(chain_path)
logger.info("Created 'Chains' directory at %s " % chain_path)
except Exception as e:
logger.error("Could not create 'Chains' directory at %s %s" % (chain_path, e))
warn_migration = False
# Add a warning for migration purposes if we created a chain dir
if warn_migration and ROOT_INSTALL_PATH != self.DATA_DIR_PATH:
if os.path.exists(os.path.join(ROOT_INSTALL_PATH, 'Chains')):
logger.warning("[MIGRATION] You are now using the blockchain data at %s, but it appears you have existing data at %s/Chains" % (
chain_path, ROOT_INSTALL_PATH))
logger.warning(
"[MIGRATION] If you would like to use your existing data, please move any data at %s/Chains to %s " % (ROOT_INSTALL_PATH, chain_path))
logger.warning("[MIGRATION] Or you can continue using your existing data by starting your script with the `--datadir=.` flag") |
def find_unresolved_and_unreferenced_symbols(self):
"""Find any unresolved symbols, and unreferenced symbols from this scope.
:returns: ({unresolved}, {unreferenced})
"""
unresolved = set()
unreferenced = self._definitions.copy()
self._collect_unresolved_and_unreferenced(set(), set(), unresolved, unreferenced,
frozenset(self._definitions), start=True)
return unresolved, unreferenced - Scope.ALL_BUILTINS | Find any unresolved symbols, and unreferenced symbols from this scope.
:returns: ({unresolved}, {unreferenced}) | Below is the the instruction that describes the task:
### Input:
Find any unresolved symbols, and unreferenced symbols from this scope.
:returns: ({unresolved}, {unreferenced})
### Response:
def find_unresolved_and_unreferenced_symbols(self):
"""Find any unresolved symbols, and unreferenced symbols from this scope.
:returns: ({unresolved}, {unreferenced})
"""
unresolved = set()
unreferenced = self._definitions.copy()
self._collect_unresolved_and_unreferenced(set(), set(), unresolved, unreferenced,
frozenset(self._definitions), start=True)
return unresolved, unreferenced - Scope.ALL_BUILTINS |
def _connectionEstablished(self, transport):
'''Store a reference to our transport and write an open frame.'''
self.transport = transport
self.transport.writeOpen()
self.heartbeater.schedule() | Store a reference to our transport and write an open frame. | Below is the the instruction that describes the task:
### Input:
Store a reference to our transport and write an open frame.
### Response:
def _connectionEstablished(self, transport):
'''Store a reference to our transport and write an open frame.'''
self.transport = transport
self.transport.writeOpen()
self.heartbeater.schedule() |
def contract_variables(self, u, v):
"""Enforce u, v being the same variable in a binary quadratic model.
The resulting variable is labeled 'u'. Values of interactions between `v` and
variables that `u` interacts with are added to the corresponding interactions
of `u`.
Args:
u (variable):
Variable in the binary quadratic model.
v (variable):
Variable in the binary quadratic model.
Examples:
This example creates a binary quadratic model representing the K4 complete graph
and contracts node (variable) 3 into node 2. The interactions between
3 and its neighbors 1 and 4 are added to the corresponding interactions
between 2 and those same neighbors.
>>> import dimod
...
>>> linear = {1: 1, 2: 2, 3: 3, 4: 4}
>>> quadratic = {(1, 2): 12, (1, 3): 13, (1, 4): 14,
... (2, 3): 23, (2, 4): 24,
... (3, 4): 34}
>>> bqm = dimod.BinaryQuadraticModel(linear, quadratic, 0.5, dimod.SPIN)
>>> bqm.contract_variables(2, 3)
>>> 3 in bqm.linear
False
>>> bqm.quadratic[(1, 2)]
25
"""
adj = self.adj
if u not in adj:
raise ValueError("{} is not a variable in the binary quadratic model".format(u))
if v not in adj:
raise ValueError("{} is not a variable in the binary quadratic model".format(v))
# if there is an interaction between u, v it becomes linear for u
if v in adj[u]:
if self.vartype is Vartype.BINARY:
self.add_variable(u, adj[u][v])
elif self.vartype is Vartype.SPIN:
self.add_offset(adj[u][v])
else:
raise RuntimeError("unexpected vartype")
self.remove_interaction(u, v)
# all of the interactions that v has become interactions for u
neighbors = list(adj[v])
for w in neighbors:
self.add_interaction(u, w, adj[v][w])
self.remove_interaction(v, w)
# finally remove v
self.remove_variable(v) | Enforce u, v being the same variable in a binary quadratic model.
The resulting variable is labeled 'u'. Values of interactions between `v` and
variables that `u` interacts with are added to the corresponding interactions
of `u`.
Args:
u (variable):
Variable in the binary quadratic model.
v (variable):
Variable in the binary quadratic model.
Examples:
This example creates a binary quadratic model representing the K4 complete graph
and contracts node (variable) 3 into node 2. The interactions between
3 and its neighbors 1 and 4 are added to the corresponding interactions
between 2 and those same neighbors.
>>> import dimod
...
>>> linear = {1: 1, 2: 2, 3: 3, 4: 4}
>>> quadratic = {(1, 2): 12, (1, 3): 13, (1, 4): 14,
... (2, 3): 23, (2, 4): 24,
... (3, 4): 34}
>>> bqm = dimod.BinaryQuadraticModel(linear, quadratic, 0.5, dimod.SPIN)
>>> bqm.contract_variables(2, 3)
>>> 3 in bqm.linear
False
>>> bqm.quadratic[(1, 2)]
25 | Below is the the instruction that describes the task:
### Input:
Enforce u, v being the same variable in a binary quadratic model.
The resulting variable is labeled 'u'. Values of interactions between `v` and
variables that `u` interacts with are added to the corresponding interactions
of `u`.
Args:
u (variable):
Variable in the binary quadratic model.
v (variable):
Variable in the binary quadratic model.
Examples:
This example creates a binary quadratic model representing the K4 complete graph
and contracts node (variable) 3 into node 2. The interactions between
3 and its neighbors 1 and 4 are added to the corresponding interactions
between 2 and those same neighbors.
>>> import dimod
...
>>> linear = {1: 1, 2: 2, 3: 3, 4: 4}
>>> quadratic = {(1, 2): 12, (1, 3): 13, (1, 4): 14,
... (2, 3): 23, (2, 4): 24,
... (3, 4): 34}
>>> bqm = dimod.BinaryQuadraticModel(linear, quadratic, 0.5, dimod.SPIN)
>>> bqm.contract_variables(2, 3)
>>> 3 in bqm.linear
False
>>> bqm.quadratic[(1, 2)]
25
### Response:
def contract_variables(self, u, v):
"""Enforce u, v being the same variable in a binary quadratic model.
The resulting variable is labeled 'u'. Values of interactions between `v` and
variables that `u` interacts with are added to the corresponding interactions
of `u`.
Args:
u (variable):
Variable in the binary quadratic model.
v (variable):
Variable in the binary quadratic model.
Examples:
This example creates a binary quadratic model representing the K4 complete graph
and contracts node (variable) 3 into node 2. The interactions between
3 and its neighbors 1 and 4 are added to the corresponding interactions
between 2 and those same neighbors.
>>> import dimod
...
>>> linear = {1: 1, 2: 2, 3: 3, 4: 4}
>>> quadratic = {(1, 2): 12, (1, 3): 13, (1, 4): 14,
... (2, 3): 23, (2, 4): 24,
... (3, 4): 34}
>>> bqm = dimod.BinaryQuadraticModel(linear, quadratic, 0.5, dimod.SPIN)
>>> bqm.contract_variables(2, 3)
>>> 3 in bqm.linear
False
>>> bqm.quadratic[(1, 2)]
25
"""
adj = self.adj
if u not in adj:
raise ValueError("{} is not a variable in the binary quadratic model".format(u))
if v not in adj:
raise ValueError("{} is not a variable in the binary quadratic model".format(v))
# if there is an interaction between u, v it becomes linear for u
if v in adj[u]:
if self.vartype is Vartype.BINARY:
self.add_variable(u, adj[u][v])
elif self.vartype is Vartype.SPIN:
self.add_offset(adj[u][v])
else:
raise RuntimeError("unexpected vartype")
self.remove_interaction(u, v)
# all of the interactions that v has become interactions for u
neighbors = list(adj[v])
for w in neighbors:
self.add_interaction(u, w, adj[v][w])
self.remove_interaction(v, w)
# finally remove v
self.remove_variable(v) |
def interpn(*args, **kw):
"""Interpolation on N-D.
ai = interpn(x, y, z, ..., a, xi, yi, zi, ...)
where the arrays x, y, z, ... define a rectangular grid
and a.shape == (len(x), len(y), len(z), ...) are the values
interpolate at xi, yi, zi, ...
"""
method = kw.pop('method', 'cubic')
if kw:
raise ValueError("Unknown arguments: " % kw.keys())
nd = (len(args)-1)//2
if len(args) != 2*nd+1:
raise ValueError("Wrong number of arguments")
q = args[:nd]
qi = args[nd+1:]
a = args[nd]
for j in range(nd):
#print q[j].shape, a.shape
a = interp1d(q[j], a, axis=j, kind=method)(qi[j])
return a | Interpolation on N-D.
ai = interpn(x, y, z, ..., a, xi, yi, zi, ...)
where the arrays x, y, z, ... define a rectangular grid
and a.shape == (len(x), len(y), len(z), ...) are the values
interpolate at xi, yi, zi, ... | Below is the the instruction that describes the task:
### Input:
Interpolation on N-D.
ai = interpn(x, y, z, ..., a, xi, yi, zi, ...)
where the arrays x, y, z, ... define a rectangular grid
and a.shape == (len(x), len(y), len(z), ...) are the values
interpolate at xi, yi, zi, ...
### Response:
def interpn(*args, **kw):
    """Interpolate a regularly gridded N-D array at given coordinates.

    Usage::

        ai = interpn(x, y, z, ..., a, xi, yi, zi, ...)

    where the 1-D arrays ``x, y, z, ...`` define a rectangular grid,
    ``a`` holds the sampled values with
    ``a.shape == (len(x), len(y), len(z), ...)``, and
    ``xi, yi, zi, ...`` are the coordinates to interpolate at.

    Keyword Args:
        method: interpolation kind forwarded to
            ``scipy.interpolate.interp1d`` (default ``'cubic'``).

    Returns:
        Array of interpolated values.

    Raises:
        ValueError: if an unknown keyword argument is supplied, or the
            number of positional arguments is not ``2 * ndim + 1``.
    """
    method = kw.pop('method', 'cubic')
    if kw:
        # BUG FIX: the original used "Unknown arguments: " % kw.keys()
        # with no %s placeholder, which raised TypeError instead of the
        # intended ValueError.
        raise ValueError("Unknown arguments: %s" % list(kw))
    nd = (len(args) - 1) // 2
    if len(args) != 2 * nd + 1:
        raise ValueError("Wrong number of arguments")
    q = args[:nd]        # grid coordinate arrays, one per dimension
    qi = args[nd + 1:]   # query coordinates, one per dimension
    a = args[nd]         # gridded data values
    # Interpolate one axis at a time; each pass collapses axis j of `a`
    # from the grid points q[j] onto the query points qi[j].
    for j in range(nd):
        a = interp1d(q[j], a, axis=j, kind=method)(qi[j])
    return a
def has_all_changes_covered(self):
    """
    Return ``True`` when every changed line in every file is covered,
    ``False`` as soon as an uncovered change is found.
    """
    for path in self.files():
        for hunk in self.file_source_hunks(path):
            # A line whose reason is None was not touched by the change
            # set; a touched line with status False is uncovered.
            if any(line.reason is not None and line.status is False
                   for line in hunk):
                return False
    return True
### Input:
Return `True` if all changes have been covered, `False` otherwise.
### Response:
def has_all_changes_covered(self):
    """
    Return ``True`` when every changed line in every file is covered,
    ``False`` as soon as an uncovered change is found.
    """
    for path in self.files():
        for hunk in self.file_source_hunks(path):
            # A line whose reason is None was not touched by the change
            # set; a touched line with status False is uncovered.
            if any(line.reason is not None and line.status is False
                   for line in hunk):
                return False
    return True
def _spawn(func, *args, **kwargs):
"""
Calls `func(*args, **kwargs)` in a daemon thread, and returns the (started)
Thread object.
"""
thr = Thread(target=func, args=args, kwargs=kwargs)
thr.daemon = True
thr.start()
return thr | Calls `func(*args, **kwargs)` in a daemon thread, and returns the (started)
Thread object. | Below is the the instruction that describes the task:
### Input:
Calls `func(*args, **kwargs)` in a daemon thread, and returns the (started)
Thread object.
### Response:
def _spawn(func, *args, **kwargs):
"""
Calls `func(*args, **kwargs)` in a daemon thread, and returns the (started)
Thread object.
"""
thr = Thread(target=func, args=args, kwargs=kwargs)
thr.daemon = True
thr.start()
return thr |
def Match(self, registry_key):
    """Determines if a Windows Registry key matches the filter.
    Args:
      registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
    Returns:
      bool: True if the keys match.
    """
    key_path = registry_key.path.upper()
    prefix = self._key_path_prefix
    suffix = self._key_path_suffix
    if (prefix and suffix and key_path.startswith(prefix) and
        key_path.endswith(suffix)):
      middle = key_path[len(prefix):-len(suffix)]
      if middle.startswith('ControlSet'.upper()):
        # The segment between prefix and suffix names a control set,
        # e.g. "ControlSet001"; it matches only when the tail is numeric.
        try:
          int(middle[10:], 10)
        except ValueError:
          return False
        # TODO: check if the control set number is in bounds.
        return True
    return key_path in (self._key_path_upper, self._wow64_key_path_upper)
Args:
registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
Returns:
bool: True if the keys match. | Below is the the instruction that describes the task:
### Input:
Determines if a Windows Registry key matches the filter.
Args:
registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
Returns:
bool: True if the keys match.
### Response:
def Match(self, registry_key):
    """Determines if a Windows Registry key matches the filter.
    Args:
      registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
    Returns:
      bool: True if the keys match.
    """
    key_path = registry_key.path.upper()
    prefix = self._key_path_prefix
    suffix = self._key_path_suffix
    if (prefix and suffix and key_path.startswith(prefix) and
        key_path.endswith(suffix)):
      middle = key_path[len(prefix):-len(suffix)]
      if middle.startswith('ControlSet'.upper()):
        # The segment between prefix and suffix names a control set,
        # e.g. "ControlSet001"; it matches only when the tail is numeric.
        try:
          int(middle[10:], 10)
        except ValueError:
          return False
        # TODO: check if the control set number is in bounds.
        return True
    return key_path in (self._key_path_upper, self._wow64_key_path_upper)
def classmethod(self, encoding):
    """Function decorator that registers a Python function as an
    Objective-C class method with the given type *encoding*.

    The decorated function receives the Python ``ObjCClass`` wrapper as
    its first argument in place of the raw class pointer.
    """
    # Add encodings for hidden self and cmd arguments.
    encoding = ensure_bytes(encoding)
    typecodes = parse_type_encoding(encoding)
    # Insert the implicit (class, selector) pair at position 1 — i.e.
    # right after the return-type code in a standard ObjC type encoding.
    typecodes.insert(1, b'@:')
    encoding = b''.join(typecodes)
    def decorator(f):
        def objc_class_method(objc_cls, objc_cmd, *args):
            # Wrap the raw class pointer and stash the selector so the
            # Python implementation can see which message was sent.
            py_cls = ObjCClass(objc_cls)
            py_cls.objc_cmd = objc_cmd
            args = convert_method_arguments(encoding, args)
            result = f(py_cls, *args)
            # Unwrap Python-side wrappers back to raw pointer values
            # before handing the result to the Objective-C runtime.
            if isinstance(result, ObjCClass):
                result = result.ptr.value
            elif isinstance(result, ObjCInstance):
                result = result.ptr.value
            return result
        # ObjC selector parts are separated by ':'; Python names encode
        # them with '_' (e.g. doFoo_withBar_ -> doFoo:withBar:).
        name = f.__name__.replace('_', ':')
        self.add_class_method(objc_class_method, name, encoding)
        return objc_class_method
    return decorator
### Input:
Function decorator for class methods.
### Response:
def classmethod(self, encoding):
    """Function decorator that registers a Python function as an
    Objective-C class method with the given type *encoding*.

    The decorated function receives the Python ``ObjCClass`` wrapper as
    its first argument in place of the raw class pointer.
    """
    # Add encodings for hidden self and cmd arguments.
    encoding = ensure_bytes(encoding)
    typecodes = parse_type_encoding(encoding)
    # Insert the implicit (class, selector) pair at position 1 — i.e.
    # right after the return-type code in a standard ObjC type encoding.
    typecodes.insert(1, b'@:')
    encoding = b''.join(typecodes)
    def decorator(f):
        def objc_class_method(objc_cls, objc_cmd, *args):
            # Wrap the raw class pointer and stash the selector so the
            # Python implementation can see which message was sent.
            py_cls = ObjCClass(objc_cls)
            py_cls.objc_cmd = objc_cmd
            args = convert_method_arguments(encoding, args)
            result = f(py_cls, *args)
            # Unwrap Python-side wrappers back to raw pointer values
            # before handing the result to the Objective-C runtime.
            if isinstance(result, ObjCClass):
                result = result.ptr.value
            elif isinstance(result, ObjCInstance):
                result = result.ptr.value
            return result
        # ObjC selector parts are separated by ':'; Python names encode
        # them with '_' (e.g. doFoo_withBar_ -> doFoo:withBar:).
        name = f.__name__.replace('_', ':')
        self.add_class_method(objc_class_method, name, encoding)
        return objc_class_method
    return decorator
def reset(self):
    """Restore the pristine state: drop any pagination start key, zero
    the count/scanned/exhausted tracking fields, and empty the buffer."""
    self.request.pop("ExclusiveStartKey", None)
    self._exhausted = False
    self._scanned = 0
    self._count = 0
    self.buffer.clear()
### Input:
Reset to the initial state, clearing the buffer and zeroing count and scanned.
### Response:
def reset(self):
    """Restore the pristine state: drop any pagination start key, zero
    the count/scanned/exhausted tracking fields, and empty the buffer."""
    self.request.pop("ExclusiveStartKey", None)
    self._exhausted = False
    self._scanned = 0
    self._count = 0
    self.buffer.clear()
def sils_cut(T,f,c,d,h, conshdlr):
    """sils_cut -- solve the single item lot sizing problem with cutting planes
    - start with a relaxed model (setup variables made continuous)
    - use a constraint handler to eliminate fractional setup variables
      with cutting planes
    Parameters:
        - T: number of periods
        - f[t]: set-up costs (on period t)
        - c[t]: variable costs
        - d[t]: demand values
        - h[t]: holding costs
        - conshdlr: SCIP constraint handler that separates the cuts
    Returns the final model solved, with all necessary cuts added.
    """
    Ts = range(1,T+1)
    model = sils(T,f,c,d,h)
    y,x,I = model.data
    # relax integer variables: make the binary setup variables y[t]
    # continuous so the handler can cut off fractional setups.
    for t in Ts:
        model.chgVarType(y[t], "C")
    model.addVar(vtype="B", name="fake") # for making the problem MIP
    # compute D[i,j] = sum_{t=i}^j d[t]  (cumulative demand table used
    # by the constraint handler when separating cuts)
    D = {}
    for t in Ts:
        s = 0
        for j in range(t,T+1):
            s += d[j]
            D[t,j] = s
    #include the lot sizing constraint handler
    # needscons=False: the handler is called on all solutions, not only
    # on constraints it owns; negative frequencies/priorities defer it
    # relative to SCIP's built-in plugins.
    model.includeConshdlr(conshdlr, "SILS", "Constraint handler for single item lot sizing",
          sepapriority = 0, enfopriority = -1, chckpriority = -1, sepafreq = -1, propfreq = -1,
          eagerfreq = -1, maxprerounds = 0, delaysepa = False, delayprop = False, needscons = False,
          presoltiming = SCIP_PRESOLTIMING.FAST, proptiming = SCIP_PROPTIMING.BEFORELP)
    # Hand the cumulative-demand table to the handler and keep the
    # variable dictionaries attached to the model for the caller.
    conshdlr.data = D,Ts
    model.data = y,x,I
    return model
- start with a relaxed model
- used lazy constraints to elimitate fractional setup variables with cutting planes
Parameters:
- T: number of periods
- P: set of products
- f[t]: set-up costs (on period t)
- c[t]: variable costs
- d[t]: demand values
- h[t]: holding costs
Returns the final model solved, with all necessary cuts added. | Below is the the instruction that describes the task:
### Input:
solve_sils -- solve the lot sizing problem with cutting planes
- start with a relaxed model
- used lazy constraints to elimitate fractional setup variables with cutting planes
Parameters:
- T: number of periods
- P: set of products
- f[t]: set-up costs (on period t)
- c[t]: variable costs
- d[t]: demand values
- h[t]: holding costs
Returns the final model solved, with all necessary cuts added.
### Response:
def sils_cut(T,f,c,d,h, conshdlr):
    """sils_cut -- solve the single item lot sizing problem with cutting planes
    - start with a relaxed model (setup variables made continuous)
    - use a constraint handler to eliminate fractional setup variables
      with cutting planes
    Parameters:
        - T: number of periods
        - f[t]: set-up costs (on period t)
        - c[t]: variable costs
        - d[t]: demand values
        - h[t]: holding costs
        - conshdlr: SCIP constraint handler that separates the cuts
    Returns the final model solved, with all necessary cuts added.
    """
    Ts = range(1,T+1)
    model = sils(T,f,c,d,h)
    y,x,I = model.data
    # relax integer variables: make the binary setup variables y[t]
    # continuous so the handler can cut off fractional setups.
    for t in Ts:
        model.chgVarType(y[t], "C")
    model.addVar(vtype="B", name="fake") # for making the problem MIP
    # compute D[i,j] = sum_{t=i}^j d[t]  (cumulative demand table used
    # by the constraint handler when separating cuts)
    D = {}
    for t in Ts:
        s = 0
        for j in range(t,T+1):
            s += d[j]
            D[t,j] = s
    #include the lot sizing constraint handler
    # needscons=False: the handler is called on all solutions, not only
    # on constraints it owns; negative frequencies/priorities defer it
    # relative to SCIP's built-in plugins.
    model.includeConshdlr(conshdlr, "SILS", "Constraint handler for single item lot sizing",
          sepapriority = 0, enfopriority = -1, chckpriority = -1, sepafreq = -1, propfreq = -1,
          eagerfreq = -1, maxprerounds = 0, delaysepa = False, delayprop = False, needscons = False,
          presoltiming = SCIP_PRESOLTIMING.FAST, proptiming = SCIP_PROPTIMING.BEFORELP)
    # Hand the cumulative-demand table to the handler and keep the
    # variable dictionaries attached to the model for the caller.
    conshdlr.data = D,Ts
    model.data = y,x,I
    return model
def _edit(self, pk):
    """
    Edit function logic, override to implement different logic
    returns Edit widget and related list or None

    :param pk: primary key of the record to edit
    """
    is_valid_form = True
    # Pagination / ordering / filter state comes from the request args.
    pages = get_page_args()
    page_sizes = get_page_size_args()
    orders = get_order_args()
    get_filter_args(self._filters)
    exclude_cols = self._filters.get_relation_cols()
    item = self.datamodel.get(pk, self._base_filters)
    if not item:
        abort(404)
    # convert pk to correct type, if pk is non string type.
    pk = self.datamodel.get_pk_value(item)
    if request.method == "POST":
        form = self.edit_form.refresh(request.form)
        # fill the form with the suppressed cols, generated from exclude_cols
        self._fill_form_exclude_cols(exclude_cols, form)
        # trick to pass unique validation
        form._id = pk
        if form.validate():
            self.process_form(form, False)
            form.populate_obj(item)
            try:
                self.pre_update(item)
            except Exception as e:
                flash(str(e), "danger")
            else:
                if self.datamodel.edit(item):
                    self.post_update(item)
                    flash(*self.datamodel.message)
            finally:
                # NOTE(review): `return` inside `finally` always returns
                # None on the successful-POST path and would also swallow
                # any exception raised in the `else` branch (e.g. by
                # datamodel.edit) — confirm this is intentional.
                return None
        else:
            is_valid_form = False
    else:
        # Only force form refresh for select cascade events
        form = self.edit_form.refresh(obj=item)
    # Perform additional actions to pre-fill the edit form.
    self.prefill_form(form, pk)
    # GET (or invalid POST): render the edit widget plus the widgets of
    # any related views, preserving pagination/ordering state.
    widgets = self._get_edit_widget(form=form, exclude_cols=exclude_cols)
    widgets = self._get_related_views_widgets(
        item,
        filters={},
        orders=orders,
        pages=pages,
        page_sizes=page_sizes,
        widgets=widgets,
    )
    if is_valid_form:
        self.update_redirect()
    return widgets
returns Edit widget and related list or None | Below is the the instruction that describes the task:
### Input:
Edit function logic, override to implement different logic
returns Edit widget and related list or None
### Response:
def _edit(self, pk):
    """
    Edit function logic, override to implement different logic
    returns Edit widget and related list or None

    :param pk: primary key of the record to edit
    """
    is_valid_form = True
    # Pagination / ordering / filter state comes from the request args.
    pages = get_page_args()
    page_sizes = get_page_size_args()
    orders = get_order_args()
    get_filter_args(self._filters)
    exclude_cols = self._filters.get_relation_cols()
    item = self.datamodel.get(pk, self._base_filters)
    if not item:
        abort(404)
    # convert pk to correct type, if pk is non string type.
    pk = self.datamodel.get_pk_value(item)
    if request.method == "POST":
        form = self.edit_form.refresh(request.form)
        # fill the form with the suppressed cols, generated from exclude_cols
        self._fill_form_exclude_cols(exclude_cols, form)
        # trick to pass unique validation
        form._id = pk
        if form.validate():
            self.process_form(form, False)
            form.populate_obj(item)
            try:
                self.pre_update(item)
            except Exception as e:
                flash(str(e), "danger")
            else:
                if self.datamodel.edit(item):
                    self.post_update(item)
                    flash(*self.datamodel.message)
            finally:
                # NOTE(review): `return` inside `finally` always returns
                # None on the successful-POST path and would also swallow
                # any exception raised in the `else` branch (e.g. by
                # datamodel.edit) — confirm this is intentional.
                return None
        else:
            is_valid_form = False
    else:
        # Only force form refresh for select cascade events
        form = self.edit_form.refresh(obj=item)
    # Perform additional actions to pre-fill the edit form.
    self.prefill_form(form, pk)
    # GET (or invalid POST): render the edit widget plus the widgets of
    # any related views, preserving pagination/ordering state.
    widgets = self._get_edit_widget(form=form, exclude_cols=exclude_cols)
    widgets = self._get_related_views_widgets(
        item,
        filters={},
        orders=orders,
        pages=pages,
        page_sizes=page_sizes,
        widgets=widgets,
    )
    if is_valid_form:
        self.update_redirect()
    return widgets
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.