Unnamed: 0
int64 0
389k
| code
stringlengths 26
79.6k
| docstring
stringlengths 1
46.9k
|
|---|---|---|
372,100
|
def main(argv=None):
    """Entry point for the ``simpl`` command-line tool."""
    # Configure root logging before any sub-command runs.
    logging.basicConfig(level=logging.INFO)
    # The server sub-command forwards the same argv it was parsed from.
    run_server = functools.partial(server.main, argv=argv)
    parser_for_server = server.attach_parser(default_subparser())
    parser_for_server.set_defaults(_func=run_server)
    parsed = default_parser().parse_args(argv)
    parsed._func()
|
Entry point for the `simpl` command.
|
372,101
|
def _patch_prebuild(cls):
    """Wrap ``cls.run`` so the ``prebuild`` command executes first.

    Patches a setuptools command class in place: its ``run`` method is
    replaced by a wrapper that triggers ``prebuild`` and then defers to
    the original implementation.
    """
    original_run = cls.run

    def run_with_prebuild(self):
        # Run the prebuild step, then the command's original behaviour.
        self.run_command("prebuild")
        original_run(self)

    cls.run = run_with_prebuild
|
Patch a setuptools command to depend on `prebuild`
|
372,102
|
def _dcm_to_q(self, dcm):
    """Convert a 3x3 direction cosine matrix to a quaternion.

    Uses Shepperd-style branching: the trace-positive path when the
    scalar part dominates, otherwise the largest diagonal element is
    used to avoid numerical cancellation.

    Reference:
        - Shoemake, Quaternions,
          http://www.cs.ucr.edu/~vbz/resources/quatut.pdf

    :param dcm: 3x3 direction cosine matrix (numpy array)
    :return: length-4 numpy quaternion array, scalar component in q[0]
    """
    assert(dcm.shape == (3, 3))
    q = np.zeros(4)
    tr = np.trace(dcm)
    if tr > 0:
        # Trace-positive branch: recover the scalar part first.
        s = np.sqrt(tr + 1.0)
        q[0] = s * 0.5
        s = 0.5 / s
        q[1] = (dcm[2][1] - dcm[1][2]) * s
        q[2] = (dcm[0][2] - dcm[2][0]) * s
        q[3] = (dcm[1][0] - dcm[0][1]) * s
    else:
        # Pick the largest diagonal element; i/j/k cycle the axes.
        dcm_i = np.argmax(np.diag(dcm))
        dcm_j = (dcm_i + 1) % 3
        dcm_k = (dcm_i + 2) % 3
        s = np.sqrt((dcm[dcm_i][dcm_i] - dcm[dcm_j][dcm_j] -
                     dcm[dcm_k][dcm_k]) + 1.0)
        q[dcm_i + 1] = s * 0.5
        s = 0.5 / s
        q[dcm_j + 1] = (dcm[dcm_i][dcm_j] + dcm[dcm_j][dcm_i]) * s
        q[dcm_k + 1] = (dcm[dcm_k][dcm_i] + dcm[dcm_i][dcm_k]) * s
        q[0] = (dcm[dcm_k][dcm_j] - dcm[dcm_j][dcm_k]) * s
    return q
|
Create q from dcm
Reference:
- Shoemake, Quaternions,
http://www.cs.ucr.edu/~vbz/resources/quatut.pdf
:param dcm: 3x3 dcm array
returns: quaternion array
|
372,103
|
def error(message, *args, **kwargs):
    """Write a printf-style message to stderr and flush.

    NOTE(review): string literals were stripped from this snippet during
    extraction (the kwargs key and the default line ending — presumably
    an ``end``-style keyword defaulting to a newline). Not runnable as-is.
    """
    if in kwargs:
        end = kwargs[]
    else:
        end =
    # Only %-format when positional args were supplied.
    if len(args) == 0:
        sys.stderr.write(message)
    else:
        sys.stderr.write(message % args)
    sys.stderr.write(end)
    sys.stderr.flush()
|
write a message to stderr
|
372,104
|
def read(self, istream, kmip_version=enums.KMIPVersion.KMIP_1_0):
    """Read the encoding of the LongInteger from the input stream.

    Args:
        istream (stream): buffer containing the encoded bytes of a
            LongInteger (usually a BytearrayStream). Required.
        kmip_version (KMIPVersion): KMIP version used to decode. Optional,
            defaults to KMIP 1.0.

    Raises:
        InvalidPrimitiveLength: if the encoded length is invalid.

    NOTE(review): the struct format string was stripped during extraction
    (likely a signed 64-bit big-endian spec). Also, ``is not`` compares
    identity where ``!=`` was presumably intended — confirm upstream.
    """
    super(LongInteger, self).read(istream, kmip_version=kmip_version)
    if self.length is not LongInteger.LENGTH:
        raise exceptions.InvalidPrimitiveLength(
            "invalid long integer length read; "
            "expected: {0}, observed: {1}".format(
                LongInteger.LENGTH, self.length))
    self.value = unpack(, istream.read(self.length))[0]
    self.validate()
|
Read the encoding of the LongInteger from the input stream.
Args:
istream (stream): A buffer containing the encoded bytes of a
LongInteger. Usually a BytearrayStream object. Required.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be decoded. Optional,
defaults to KMIP 1.0.
Raises:
InvalidPrimitiveLength: if the long integer encoding read in has
an invalid encoded length.
|
372,105
|
def do_repl(self):
    """Interactive REPL for rTorrent XMLRPC commands.

    NOTE(review): many string literals were stripped during extraction
    (prompt suffix, completion word lists, history file name, command
    aliases, split separators); the code is not runnable as-is.
    """
    from prompt_toolkit import prompt
    from prompt_toolkit.history import FileHistory
    from prompt_toolkit.auto_suggest import AutoSuggestFromHistory
    from prompt_toolkit.contrib.completers import WordCompleter
    self.options.quiet = False
    proxy = self.open()
    ps1 = proxy.session.name() + u
    words = [, , ]
    words += [x + for x in proxy.system.listMethods()]
    history_file = os.path.join(config.config_dir, )
    while True:
        try:
            try:
                cmd = prompt(ps1, completer=WordCompleter(words),
                             auto_suggest=AutoSuggestFromHistory(),
                             history=FileHistory(history_file))
            except KeyboardInterrupt:
                cmd =
            if not cmd:
                print("Enter or for usage information, to exit.")
            if cmd in {, }:
                self.repl_usage()
                continue
            elif cmd in {, }:
                print(repr(proxy).split(None, 1)[1])
                continue
            elif cmd in {}:
                raise EOFError()
            try:
                method, raw_args = cmd.split(, 1)
            except ValueError:
                print("ERROR: not found")
                continue
            raw_args = raw_args.split()
            args = self.cooked(raw_args)
            self.execute(proxy, method, args)
        except EOFError:
            # Exit the loop on EOF / explicit quit command.
            print(.format(proxy))
            break
|
REPL for rTorrent XMLRPC commands.
|
372,106
|
def add_virtual_columns_proper_motion_gal2eq(self, long_in="ra", lat_in="dec", pm_long="pm_l", pm_lat="pm_b", pm_long_out="pm_ra", pm_lat_out="pm_dec",
                                             name_prefix="__proper_motion_gal2eq",
                                             right_ascension_galactic_pole=192.85,
                                             declination_galactic_pole=27.12,
                                             propagate_uncertainties=False,
                                             radians=False):
    """Transform/rotate proper motions from galactic to equatorial coordinates.

    Inverse of :py:`add_virtual_columns_proper_motion_eq2gal`.

    NOTE(review): the kwargs key literals below were stripped during
    extraction — presumably popping ``'self'`` and setting an
    ``'inverse'`` flag before delegating. Not runnable as-is.
    """
    # Forward every argument to the eq2gal transform with inversion enabled.
    kwargs = dict(**locals())
    kwargs.pop()
    kwargs[] = True
    self.add_virtual_columns_proper_motion_eq2gal(**kwargs)
|
Transform/rotate proper motions from galactic to equatorial coordinates.
Inverse of :py:`add_virtual_columns_proper_motion_eq2gal`
|
372,107
|
def run_cli(
        executable,
        mets_url=None,
        resolver=None,
        workspace=None,
        page_id=None,
        log_level=None,
        input_file_grp=None,
        output_file_grp=None,
        parameter=None,
        working_dir=None,
):
    """Create a workspace for mets_url and run an MP CLI through it.

    Returns the subprocess return code.

    NOTE(review): the CLI flag literals (working-dir, mets, log-level,
    page-id, input/output file group, parameter) were stripped during
    extraction; the code is not runnable as-is.
    """
    workspace = _get_workspace(workspace, resolver, mets_url, working_dir)
    args = [executable, , workspace.directory]
    args += [, mets_url]
    if log_level:
        args += [, log_level]
    if page_id:
        args += [, page_id]
    if input_file_grp:
        args += [, input_file_grp]
    if output_file_grp:
        args += [, output_file_grp]
    if parameter:
        args += [, parameter]
    log.debug("Running subprocess ", .join(args))
    return subprocess.call(args)
|
Create a workspace for mets_url and run MP CLI through it
|
372,108
|
def transformByDistance(wV, subModel, alphabetSize=4):
    """Transform a weight vector by a substitution matrix.

    Computes the vector-matrix product ``nc[l] = sum_i wV[i] * subModel[i][l]``.

    Fix: uses ``range`` instead of the Python-2-only ``xrange`` so the
    function works on Python 3 as well.

    :param wV: weight vector of length ``alphabetSize``
    :param subModel: ``alphabetSize x alphabetSize`` substitution matrix
        (rows index the source symbol, columns the target symbol)
    :param alphabetSize: number of symbols (default 4, e.g. DNA)
    :return: transformed weight vector as a list of floats
    """
    nc = [0.0] * alphabetSize
    for i in range(alphabetSize):
        weight = wV[i]
        row = subModel[i]
        # Accumulate this source symbol's weight into every target symbol.
        for l in range(alphabetSize):
            nc[l] += weight * row[l]
    return nc
|
transform wV by given substitution matrix
|
372,109
|
def array_to_image(
    arr, mask=None, img_format="png", color_map=None, **creation_options
):
    """Encode a numpy array as an image byte buffer via rasterio/GDAL.

    :param arr: image array to encode; 2D arrays are promoted to one band
    :param mask: optional mask array written as an extra band (skipped for JPEG)
    :param img_format: GDAL driver name, case-insensitive (default "png")
    :param color_map: (256, 3)-style array colormap, or a dict mapping
        discrete pixel values to RGB triplets
    :param creation_options: extra GDAL driver creation options
    :return: encoded image as bytes
    """
    img_format = img_format.lower()
    # Promote 2D input to the (band, row, col) layout GDAL expects.
    if len(arr.shape) < 3:
        arr = np.expand_dims(arr, axis=0)
    if isinstance(color_map, dict):
        arr = _apply_discrete_colormap(arr, color_map)
    elif color_map is not None:
        arr = np.transpose(color_map[arr][0], [2, 0, 1]).astype(np.uint8)
    # WebP requires at least 3 bands; replicate a single band.
    if img_format == "webp" and arr.shape[0] == 1:
        arr = np.repeat(arr, 3, axis=0)
    # JPEG cannot carry a mask band.
    include_mask = mask is not None and img_format != "jpeg"
    nbands = arr.shape[0] + 1 if include_mask else arr.shape[0]
    output_profile = {
        "driver": img_format,
        "dtype": arr.dtype,
        "count": nbands,
        "height": arr.shape[1],
        "width": arr.shape[2],
    }
    output_profile.update(creation_options)
    with MemoryFile() as memfile:
        with memfile.open(**output_profile) as dst:
            dst.write(arr, indexes=list(range(1, arr.shape[0] + 1)))
            if include_mask:
                # The mask occupies the final band.
                dst.write(mask.astype(arr.dtype), indexes=nbands)
        return memfile.read()
|
Translate numpy ndarray to image buffer using GDAL.
Usage
-----
tile, mask = rio_tiler.utils.tile_read(......)
with open('test.jpg', 'wb') as f:
f.write(array_to_image(tile, mask, img_format="jpeg"))
Attributes
----------
arr : numpy ndarray
Image array to encode.
mask: numpy ndarray, optional
Mask array
img_format: str, optional
Image format to return (default: 'png').
List of supported format by GDAL: https://www.gdal.org/formats_list.html
color_map: numpy.ndarray or dict, optional
color_map can be either a (256, 3) array or RGB triplet
(e.g. [[255, 255, 255],...]) mapping each 1D pixel value rescaled
from 0 to 255
OR
it can be a dictionary of discrete values
(e.g. { 1.3: [255, 255, 255], 2.5: [255, 0, 0]}) mapping any pixel value to a triplet
creation_options: dict, optional
Image driver creation options to pass to GDAL
Returns
-------
bytes
|
372,110
|
def _Import(self, t):
    """Emit source text for an import node, e.g. ``import xyz.foo as bar``.

    :param t: import node whose ``names`` yields (name, asname) pairs
    """
    self._fill("import ")
    for i, (name,asname) in enumerate(t.names):
        # Comma-separate every name after the first.
        if i != 0:
            self._write(", ")
        self._write(name)
        # Only emit the alias clause when an alias exists.
        if asname is not None:
            self._write(" as "+asname)
|
Handle "import xyz.foo".
|
372,111
|
def stop_apppool(name):
    """Stop an IIS application pool.

    .. versionadded:: 2017.7.0

    Args:
        name (str): The name of the App Pool to stop.

    Returns:
        bool: True if successful, otherwise False

    CLI Example:

    .. code-block:: bash

        salt '*' win_iis.stop_apppool name='MyTestPool'

    NOTE(review): string literals were stripped during extraction — the
    PowerShell command name, the quoted pool-name format string and the
    retcode key. The stray ``*MyTestPool`` line is docstring residue from
    the extraction. Not runnable as-is.
    """
    *MyTestPool
    ps_cmd = [, r"".format(name)]
    cmd_ret = _srvmgr(ps_cmd)
    return cmd_ret[] == 0
|
Stop an IIS application pool.
.. versionadded:: 2017.7.0
Args:
name (str): The name of the App Pool to stop.
Returns:
bool: True if successful, otherwise False
CLI Example:
.. code-block:: bash
salt '*' win_iis.stop_apppool name='MyTestPool'
|
372,112
|
def register(cls, associations, backend, style_aliases={}):
    """Register associations between elements and plotting classes for a backend.

    NOTE(review): several dict-key / option-group literals were stripped
    during extraction (the ``opt_groups`` keys, the excluded param names
    and the ``param.logging_level`` argument). Also note the mutable
    default argument ``style_aliases={}`` — shared across calls.
    """
    if backend not in cls.registry:
        cls.registry[backend] = {}
    cls.registry[backend].update(associations)
    groups = Options._option_groups
    if backend not in cls._options:
        cls._options[backend] = OptionTree([], groups=groups)
    if backend not in cls._custom_options:
        cls._custom_options[backend] = {}
    for view_class, plot in cls.registry[backend].items():
        # Expand style aliases into the full set of allowed style options.
        expanded_opts = [opt for key in plot.style_opts
                         for opt in style_aliases.get(key, [])]
        style_opts = sorted(set(opt for opt in (expanded_opts + plot.style_opts)
                                if opt not in plot._disabled_opts))
        plot_opts = [k for k in plot.params().keys() if k not in []]
        with param.logging_level():
            plot.style_opts = style_opts
        plot_opts = Keywords(plot_opts, target=view_class.__name__)
        style_opts = Keywords(style_opts, target=view_class.__name__)
        opt_groups = {: Options(allowed_keywords=plot_opts),
                      : Options(allowed_keywords=Options._output_allowed_kws),
                      : Options(allowed_keywords=style_opts),
                      : Options(framewise=False, axiswise=False,
                                allowed_keywords=[,
                                                  ])}
        name = view_class.__name__
        cls._options[backend][name] = opt_groups
|
Register the supplied dictionary of associations between
elements and plotting classes to the specified backend.
|
372,113
|
def spev(t_int, C, deg, x, cov_C=None, M_spline=False, I_spline=False, n=0):
    """Evaluate a B-, M- or I-spline with the given knots, degree and coefficients.

    ``deg`` boundary knots are appended at both sides of the domain, and
    the zeroth-order basis functions are modified for continuity at the
    right-hand boundary.

    :param t_int: internal knot locations, shape (M,); must be increasing
        (a ValueError is raised otherwise)
    :param C: coefficients, shape (M + deg - 1,)
    :param deg: nonnegative polynomial degree
    :param x: evaluation points, shape (N,)
    :param cov_C: optional coefficient covariance; a 1d array is treated
        as the variance
    :param M_spline: evaluate M-splines (integrate to unity) instead of
        B-splines (sum to unity)
    :param I_spline: evaluate I-splines (integrals of M-splines);
        overrides ``M_spline``
    :param n: derivative order; all zeros are returned when ``n > deg``
    :return: ``y`` or ``(y, cov_y)``

    NOTE(review): uses Python-2 ``xrange`` and the deprecated ``scipy``
    namespace aliases for NumPy functions — py2-era idioms.
    """
    C = scipy.asarray(C, dtype=float)
    t_int = scipy.asarray(t_int, dtype=float)
    if (t_int != scipy.sort(t_int)).any():
        raise ValueError("Knots must be in increasing order!")
    # Derivatives beyond the polynomial degree vanish identically.
    if n > deg:
        return scipy.zeros_like(x, dtype=float)
    if I_spline:
        if cov_C is not None:
            cov_C = scipy.asarray(cov_C)
            # Drop the leading "DC offset" coefficient's covariance rows/cols.
            if cov_C.ndim == 1:
                cov_C = cov_C[1:]
            elif cov_C.ndim == 2:
                cov_C = cov_C[1:, 1:]
        if n > 0:
            # d/dx of an I-spline is an M-spline of one lower derivative order.
            return spev(
                t_int, C[1:], deg - 1, x,
                cov_C=cov_C, M_spline=True, I_spline=False, n=n - 1
            )
        M_spline = True
    if n > 0:
        # Differentiate by recursing with difference coefficients.
        if M_spline:
            t = scipy.concatenate(([t_int[0]] * deg, t_int, [t_int[-1]] * deg))
            C = (deg + 1.0) * (
                C[1:] / (t[deg + 2:len(t_int) + 2 * deg] - t[1:len(t_int) + deg - 1]) -
                C[:-1] / (t[deg + 1:len(t_int) + 2 * deg - 1] - t[:len(t_int) + deg - 2])
            )
        else:
            C = C[1:] - C[:-1]
        return spev(
            t_int, C, deg - 1, x,
            cov_C=cov_C, M_spline=True, I_spline=False, n=n - 1
        )
    if len(C) != len(t_int) + deg - 1:
        raise ValueError("Length of C must be equal to M + deg - 1!")
    # Pad the knot vector with `deg` repeated boundary knots per side.
    t = scipy.concatenate(([t_int[0]] * deg, t_int, [t_int[-1]] * deg))
    B = scipy.zeros((deg + 1, len(t) - 1, len(x)))
    d = 0
    # Zeroth-order basis; the final interval is closed on the right so
    # x == t[-1] is included.
    for i in xrange(deg, deg + len(t_int) - 2 + 1):
        mask = (t[i] <= x) & (
            (x < t[i + 1]) | ((i == deg + len(t_int) - 2) & (x == t[-1]))
        )
        B[d, i, mask] = 1.0 / (t[i + 1] - t[i]) if M_spline else 1.0
    # Cox-de Boor recursion up to the requested degree.
    for d in xrange(1, deg + 1):
        for i in xrange(deg - d, deg + len(t_int) - 2 + 1):
            if t[i + d] != t[i]:
                v = (x - t[i]) * B[d - 1, i, :]
                if not M_spline:
                    v /= t[i + d] - t[i]
                B[d, i, :] += v
            if t[i + d + 1] != t[i + 1]:
                v = (t[i + d + 1] - x) * B[d - 1, i + 1, :]
                if not M_spline:
                    v /= t[i + d + 1] - t[i + 1]
                B[d, i, :] += v
            if M_spline and ((t[i + d] != t[i]) or (t[i + d + 1] != t[i + 1])):
                B[d, i, :] *= (d + 1) / (d * (t[i + d + 1] - t[i]))
    B = B[deg, 0:len(C), :].T
    if I_spline:
        # I-splines are running integrals of the M-spline basis.
        I = scipy.zeros_like(B)
        for i in xrange(0, len(C)):
            for m in xrange(i, len(C)):
                I[:, i] += (t[m + deg + 1] - t[m]) * B[:, m] / (deg + 1.0)
        B = I
    y = B.dot(C)
    if cov_C is not None:
        cov_C = scipy.asarray(cov_C)
        if cov_C.ndim == 1:
            cov_C = scipy.diag(cov_C)
        # Propagate coefficient covariance through the linear basis map.
        cov_y = B.dot(cov_C).dot(B.T)
        return (y, cov_y)
    else:
        return y
|
Evaluate a B-, M- or I-spline with the specified internal knots, order and coefficients.
`deg` boundary knots are appended at both sides of the domain.
The zeroth order basis functions are modified to ensure continuity at the
right-hand boundary.
Note that the I-splines include the :math:`i=0` case in order to have a "DC
offset". This way your functions do not have to start at zero. If you want
to not include this, simply set the first coefficient in `C` to zero.
Parameters
----------
t_int : array of float, (`M`,)
The internal knot locations. Must be in increasing order (this IS checked; a ValueError is raised otherwise).
C : array of float, (`M + deg - 1`,)
The coefficients applied to the basis functions.
deg : nonnegative int
The polynomial degree to use.
x : array of float, (`N`,)
The locations to evaluate the spline at.
cov_C : array of float, (`M + deg - 1`,) or (`M + deg - 1`, `M + deg - 1`), optional
The covariance matrix of the coefficients. If a 1d array is passed, this
is treated as the variance. If None, then the uncertainty is not
computed.
M_spline : bool, optional
If True, compute the M-spline instead of the B-spline. M-splines are
normalized to integrate to unity, as opposed to B-splines which sum to
unity at all points. Default is False (compute B-spline).
I_spline : bool, optional
If True, compute the I-spline instead of the B-spline. Note that this
will override `M_spline`. I-splines are the integrals of the M-splines,
and hence ensure curves are monotonic if all coefficients are of the
same sign. Note that the I-splines returned will be of polynomial degree
`deg` (i.e., the integral of what is returned from calling the function
with `deg=deg-1` and `M_spline=True`. Default is False (compute B-spline
or M-spline).
n : int, optional
The derivative order to compute. Default is 0. If `n>d`, all zeros are
returned (i.e., the discontinuities are not included).
Returns
-------
`y` or (`y`, `cov_y`): The values (and possibly uncertainties) of the spline
at the specified locations.
|
372,114
|
def _find_lib_path():
    """Find the mxnet shared library path.

    Tries an amalgamation build next to this file first, then
    ``mxnet.libinfo``, then a local libinfo script.

    NOTE(review): file-name literals, logging messages, dict keys and the
    compile mode were stripped during extraction; not runnable as-is.
    """
    curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
    amalgamation_lib_path = os.path.join(curr_path, )
    if os.path.exists(amalgamation_lib_path) and os.path.isfile(amalgamation_lib_path):
        lib_path = [amalgamation_lib_path]
        return lib_path
    else:
        logging.info()
        try:
            # Prefer the installed mxnet package's own locator.
            from mxnet.libinfo import find_lib_path
            lib_path = find_lib_path()
            return lib_path
        except ImportError:
            # Fall back to executing a local libinfo script.
            libinfo_path = os.path.join(curr_path, )
            if os.path.exists(libinfo_path) and os.path.isfile(libinfo_path):
                libinfo = {: libinfo_path}
                exec(compile(open(libinfo_path, "rb").read(), libinfo_path, ), libinfo, libinfo)
                lib_path = libinfo[]()
                return lib_path
            else:
                raise RuntimeError( % libinfo_path)
|
Find mxnet library.
|
372,115
|
def _fromiter(it, dtype, count, progress, log):
    """Load a NumPy array from an iterator, optionally reporting progress.

    :param it: source iterator of scalar values
    :param dtype: NumPy dtype of the resulting array
    :param count: expected item count, or None if unknown
    :param progress: report progress every `progress` items when > 0
    :param log: sink passed through to the progress wrapper
    :return: numpy array built from the iterator
    """
    # Wrap the iterator with progress reporting only when requested.
    source = _iter_withprogress(it, progress, log) if progress > 0 else it
    if count is None:
        return np.fromiter(source, dtype=dtype)
    return np.fromiter(source, dtype=dtype, count=count)
|
Utility function to load an array from an iterator.
|
372,116
|
def enrich_sentences_with_NLP(self, all_sentences):
    """Enrich fonduer Sentence objects with spaCy NLP features (generator).

    Merges and processes the text of all sentences together for higher
    efficiency, then yields each enriched sentence object.

    :param all_sentences: list of fonduer Sentence objects for one document
    :raises NotImplementedError: if the language lacks NLP support in spaCy
    """
    if not self.has_NLP_support():
        raise NotImplementedError(
            f"Language {self.lang} not available in spacy beyond tokenization"
        )
    if len(all_sentences) == 0:
        return
    # Swap spaCy's default sentencizer for a custom boundary detector that
    # preserves the pre-split sentence boundaries.
    if self.model.has_pipe("sentencizer"):
        self.model.remove_pipe("sentencizer")
        self.logger.debug(
            f"Removed sentencizer () from model. "
            f"Now in pipeline: {self.model.pipe_names}"
        )
    if self.model.has_pipe("sentence_boundary_detector"):
        self.model.remove_pipe(name="sentence_boundary_detector")
    self.model.add_pipe(
        set_custom_boundary, before="parser", name="sentence_boundary_detector"
    )
    # Batch sentences so no single doc exceeds spaCy's max_length.
    sentence_batches = self._split_sentences_by_char_limit(
        all_sentences, self.model.max_length
    )
    for sentence_batch in sentence_batches:
        # Tokenize with a tokenizer that keeps the original token spans.
        custom_tokenizer = TokenPreservingTokenizer(self.model.vocab)
        doc = custom_tokenizer(sentence_batch)
        doc.user_data = sentence_batch
        # Run every remaining pipeline component manually on the doc.
        for name, proc in self.model.pipeline:
            doc = proc(doc)
        try:
            assert doc.is_parsed
        except Exception:
            self.logger.exception(f"{doc} was not parsed")
        for sent, current_sentence_obj in zip(doc.sents, sentence_batch):
            parts = defaultdict(list)
            for i, token in enumerate(sent):
                parts["lemmas"].append(token.lemma_)
                parts["pos_tags"].append(token.tag_)
                parts["ner_tags"].append(
                    token.ent_type_ if token.ent_type_ else "O"
                )
                # Head index is 1-based relative to the sentence; 0 = root.
                head_idx = (
                    0 if token.head is token else token.head.i - sent[0].i + 1
                )
                parts["dep_parents"].append(head_idx)
                parts["dep_labels"].append(token.dep_)
            current_sentence_obj.pos_tags = parts["pos_tags"]
            current_sentence_obj.lemmas = parts["lemmas"]
            current_sentence_obj.ner_tags = parts["ner_tags"]
            current_sentence_obj.dep_parents = parts["dep_parents"]
            current_sentence_obj.dep_labels = parts["dep_labels"]
            yield current_sentence_obj
|
Enrich a list of fonduer Sentence objects with NLP features. We merge
and process the text of all Sentences for higher efficiency.
:param all_sentences: List of fonduer Sentence objects for one document
:return:
|
372,117
|
def emit(
        self,
        record):
    """Queue a log record for the helper thread to ship to Splunk.

    Falls back to publishing directly when shutting down or when the
    sleep interval is too small for the queue-based path.

    :param record: LogRecord to send to Splunk
        https://docs.python.org/3/library/logging.html

    NOTE(review): the log-message format literals were stripped during
    extraction; not runnable as-is.
    """
    self.debug_log()
    try:
        record = self.format_record(
            record)
    except Exception as e:
        # Formatting failed: log the error and drop the record.
        self.write_log(
            % str(e))
        self.write_log(
            traceback.format_exc())
        return
    if not self.is_shutting_down(shutdown_event=self.shutdown_event) \
            and self.sleep_interval > 0.1:
        try:
            self.debug_log(
                .format(
                    self.queue))
            self.queue.put(
                record)
        except Exception as e:
            self.write_log(
                )
    else:
        # Shutting down (or near-zero interval): publish synchronously.
        self.publish_to_splunk(
            payload=record)
    self.debug_log()
|
emit
Emit handler for queue-ing message for
the helper thread to send to Splunk on the ``self.sleep_interval``
:param record: LogRecord to send to Splunk
https://docs.python.org/3/library/logging.html
|
372,118
|
def preprocessFastqs(fastqFNs, seqFNPrefix, offsetFN, abtFN, areUniform, logger):
    """Extract, sort and merge read strings from FASTQ files for MSBWT construction.

    Reads are chunked into sorted temporary memmap files, merged with a
    heap, and finally handed to ``MSBWTGen.writeSeqsToFiles``.

    @param fastqFNs - a list of .fq filenames for parsing
    @param seqFNPrefix - always of the form '<DIR>/seqs.npy'
    @param offsetFN - always of the form '<DIR>/offsets.npy'
    @param abtFN - always of the form '<DIR>/about.npy'
    @param areUniform - True if all sequences are of uniform length
    @param logger - logger object for output

    NOTE(review): numerous string literals (file suffixes, open modes,
    dtype strings, log messages) were stripped during extraction, and the
    first body line is docstring residue. Not runnable as-is.
    """
    <DIR>/seqs.npy<DIR>/offsets.npy<DIR>/about.npy
    seqArray = []
    tempFileId = 0
    seqsPerFile = 1000000
    maxSeqLen = -1
    numSeqs = 0
    subSortFNs = []
    for fnID, fn in enumerate(fastqFNs):
        logger.info(\)
        # Transparently handle gzipped FASTQ input.
        if fn.endswith():
            fp = gzip.open(fn, )
        else:
            fp = open(fn, )
        i = -1
        for line in fp:
            # Every 4th FASTQ line (per the counter) carries the sequence.
            if i % 4 == 0:
                seqArray.append((line.strip()+, fnID, i/4))
                if len(seqArray) == seqsPerFile:
                    # Flush a sorted chunk to a temporary memmap file.
                    if not areUniform or maxSeqLen == -1:
                        maxSeqLen = 0
                        for seq, fID, seqID in seqArray:
                            if len(seq) > maxSeqLen:
                                maxSeqLen = len(seq)
                    tempFN = seqFNPrefix++str(tempFileId)+
                    subSortFNs.append(tempFN)
                    tempArray = np.lib.format.open_memmap(tempFN, , +str(maxSeqLen)+, (len(seqArray),))
                    tempArray[:] = sorted(seqArray)
                    numSeqs += len(seqArray)
                    del tempArray
                    tempFileId += 1
                    seqArray = []
            i += 1
        fp.close()
    # Flush any remaining partial chunk.
    if len(seqArray) > 0:
        if not areUniform or maxSeqLen == -1:
            maxSeqLen = 0
            for seq, fID, seqID in seqArray:
                if len(seq) > maxSeqLen:
                    maxSeqLen = len(seq)
        tempFN = seqFNPrefix++str(tempFileId)+
        subSortFNs.append(tempFN)
        tempArray = np.lib.format.open_memmap(tempFN, , +str(maxSeqLen)+, (len(seqArray),))
        tempArray[:] = sorted(seqArray)
        numSeqs += len(seqArray)
        del tempArray
        tempFileId += 1
        seqArray = []
    logger.info(+str(numSeqs)+)
    # k-way merge of all sorted chunks; record (file id, read id) per read.
    iters = []
    for fn in subSortFNs:
        iters.append(customiter(np.load(fn, )))
    tempFN = seqFNPrefix+
    fp = open(tempFN, )
    aboutFile = np.lib.format.open_memmap(abtFN, , , (numSeqs,))
    ind = 0
    for tup in heapq.merge(*iters):
        (seq, fID, seqID) = tup
        aboutFile[ind] = (fID, seqID)
        fp.write(seq)
        ind += 1
    fp.close()
    # Drop the per-chunk temporaries once merged.
    for fn in subSortFNs:
        os.remove(fn)
    del seqArray
    seqArray = np.memmap(tempFN)
    if areUniform:
        uniformLength = maxSeqLen
    else:
        uniformLength = 0
    logger.info()
    MSBWTGen.writeSeqsToFiles(seqArray, seqFNPrefix, offsetFN, uniformLength)
    del seqArray
    os.remove(tempFN)
|
This function does the grunt work behind string extraction for fastq files
@param fastqFNs - a list of .fq filenames for parsing
@param seqFNPrefix - this is always of the form '<DIR>/seqs.npy'
@param offsetFN - this is always of the form '<DIR>/offsets.npy'
@param abtFN - this is always of the form '<DIR>/about.npy'
@param areUniform - True if all sequences are of uniform length
@param logger - logger object for output
|
372,119
|
def parse_url(url):
    """Parse an Elk connection string into (scheme, host, port, ssl_context).

    Defaults ports to 2101 (plain), 2601 (TLS) and 115200 (serial baud).

    NOTE(review): the scheme literals and the split separators were
    stripped during extraction; not runnable as-is.
    """
    scheme, dest = url.split()
    host = None
    ssl_context = None
    if scheme == :
        host, port = dest.split() if in dest else (dest, 2101)
    elif scheme == :
        host, port = dest.split() if in dest else (dest, 2601)
        # TLS variant: build a non-verifying TLSv1 context.
        ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        ssl_context.verify_mode = ssl.CERT_NONE
    elif scheme == :
        host, port = dest.split() if in dest else (dest, 115200)
    else:
        raise ValueError("Invalid scheme " % scheme)
    return (scheme, host, int(port), ssl_context)
|
Parse a Elk connection string
|
372,120
|
def site_url(self, url):
    """URL setter and validator for the ``site_url`` property.

    Parameters:
        url (str): URL of a Moebooru/Danbooru based site.

    Raises:
        PybooruError: when the URL scheme or the URL itself is invalid.

    NOTE(review): the regex pattern strings (the bare ``r`` lines) and
    the scheme-check pattern were stripped during extraction; not
    runnable as-is.
    """
    regex = re.compile(
        r
        r
        r
        r
        r
        r
        r, re.IGNORECASE)
    if re.match(, url):
        if re.search(regex, url):
            self.__site_url = url
        else:
            raise PybooruError("Invalid URL: {0}".format(url))
    else:
        raise PybooruError(
            "Invalid URL scheme, use HTTP or HTTPS: {0}".format(url))
|
URL setter and validator for site_url property.
Parameters:
url (str): URL of on Moebooru/Danbooru based sites.
Raises:
PybooruError: When URL scheme or URL are invalid.
|
372,121
|
def get_version(extension, workflow_file):
    """Determine the version of a .py, .wdl, or .cwl workflow file.

    NOTE(review): the extension literals, return values, YAML key and
    search token were stripped during extraction; not runnable as-is.
    Also note ``yaml.load`` without an explicit Loader is unsafe on
    untrusted input — verify upstream.
    """
    if extension == and two_seven_compatible(workflow_file):
        return
    elif extension == :
        return yaml.load(open(workflow_file))[]
    else:
        try:
            # Scan lines for a version token; first match wins.
            return [l.lstrip() for l in workflow_file.splitlines() if in l.split()][0]
        except IndexError:
            return
|
Determines the version of a .py, .wdl, or .cwl file.
|
372,122
|
def main():
    """Alembic env entry point: set the DB URL and run online migrations.

    NOTE(review): ``config.get_main_option()`` is called without the
    option-name argument — the literal was likely stripped during
    extraction; confirm against the original source.
    """
    config = context.config
    config.set_main_option("sqlalchemy.url", config.get_main_option())
    run_migrations_online(config)
|
main.
|
372,123
|
def fetch_ticker(self) -> Ticker:
    """Fetch the market ticker for this market.

    NOTE(review): the endpoint literal passed to ``self._fetch`` was
    stripped during extraction; not runnable as-is.
    """
    return self._fetch(, self.market.code)(self._ticker)()
|
Fetch the market ticker.
|
372,124
|
def expand_to_one_hot(data,expand = True,use_alternative=False):
    """One-hot encode survey rows into integer feature vectors.

    When ``expand`` is True each categorical field (sex, alcohol abuse,
    race, release age, time served, prior arrests, release type, first
    arrest) is expanded into indicator columns; otherwise selected raw
    fields are copied and a first-arrest age in days is derived.

    NOTE(review): nearly every dict key and category literal in this
    snippet was stripped during extraction. The control flow is kept
    verbatim for reference, but the code is not runnable as-is. Note the
    bare ``except:`` around the date arithmetic — it swallows all errors.
    """
    header_dict = {:0,:1,:2,:3,:4,:5,:6,:7,:8,:9,:10}
    new_data = []
    for entry in data:
        temp = {}
        if expand == True:
            # Binary indicator fields.
            if entry[header_dict["SEX1"]] == "FEMALE":
                temp[] = 1
            else:
                temp[] = 0
            if entry[header_dict["ALCABUS"]] == :
                temp[] = 1
            else:
                temp[] = 0
            if entry[header_dict[]] == :
                temp[] = 1
            else:
                temp[] = 0
            if entry[header_dict[]] == :
                temp[] = 1
            else:
                temp[] = 0
            # Multi-category fields expanded to one indicator per category.
            race_cats = [,,,,,]
            for cat in race_cats:
                if entry[header_dict[]] == cat:
                    temp[+cat] = 1
                else:
                    temp[+cat] = 0
            release_age_cats = [,, , \
                ,,,]
            for cat in release_age_cats:
                if entry[header_dict[]] == cat:
                    temp[+cat] = 1
                else:
                    temp[+cat] = 0
            time_served_cats = [,,,,, \
                ,,,]
            for cat in time_served_cats:
                if entry[header_dict[]] == cat:
                    temp[+cat] = 1
                else:
                    temp[+cat] = 0
            prior_arrest_cats = [,,,,, \
                ,,,,]
            for cat in prior_arrest_cats:
                if entry[header_dict[]] == cat:
                    temp[+cat] = 1
                else:
                    temp[+cat] = 0
            # Release type collapsed into three mutually exclusive flags.
            conditional_release =[,, , \
                ]
            unconditional_release = [,,, \
                ]
            other_release = [,,,,,, \
                ,,,,,]
            if entry[header_dict[]] in conditional_release:
                temp[] = 1
                temp[] = 0
                temp[] = 0
            elif entry[header_dict[]] in unconditional_release:
                temp[] = 0
                temp[] = 1
                temp[] = 0
            else:
                temp[] = 0
                temp[] = 0
                temp[] = 1
            first_arrest_cats = [,,,,]
            for cat in first_arrest_cats:
                if entry[header_dict[]] == cat:
                    temp[+cat] = 1
                else:
                    temp[+cat] = 0
        else:
            # Non-expanded path: copy raw fields through.
            temp[] = entry[]
            temp[] = entry[]
            temp[] = entry[]
            temp[] = entry[]
            temp[] = entry[]
            temp[] = entry[]
            temp[] = entry[]
            temp[] = entry[]
            temp[] = entry[]
            try:
                # Derive first-arrest age (days) from birth/arrest dates.
                bdate = datetime.date(int(entry[]),int(entry[]), int(entry[]))
                first_arrest = datetime.date(int(entry[]),int(entry[]),int(entry[]))
                first_arrest_age = first_arrest - bdate
                temp[] = first_arrest_age.days
            except:
                temp[] = 0
        new_data.append(temp)
    fin = [[int(entry[key]) for key in entry.keys()] for entry in new_data]
    return fin
|
with open("brandon_testing/test_"+str(time.clock())+".csv","w") as f:
writer = csv.writer(f,delimiter=",")
for row in fin:
writer.writerow(row)
|
372,125
|
def create_game(
    self,
    map_name,
    bot_difficulty=sc_pb.VeryEasy,
    bot_race=sc_common.Random,
    bot_first=False):
    """Create a game: one remote agent versus the specified bot.

    Args:
        map_name: The map to use.
        bot_difficulty: The difficulty of the bot to play against.
        bot_race: The race for the bot.
        bot_first: Whether the bot should be player 1 (else player 2).
    """
    self._controller.ping()
    # Resolve the map and upload it once per controller.
    map_inst = maps.get(map_name)
    map_data = map_inst.data(self._run_config)
    if map_name not in self._saved_maps:
        self._controller.save_map(map_inst.path, map_data)
        self._saved_maps.add(map_name)
    request = sc_pb.RequestCreateGame(
        local_map=sc_pb.LocalMap(map_path=map_inst.path, map_data=map_data),
        disable_fog=False)
    # Slot order determines which side is player 1.
    if bot_first:
        request.player_setup.add(
            type=sc_pb.Computer, race=bot_race, difficulty=bot_difficulty)
        request.player_setup.add(type=sc_pb.Participant)
    else:
        request.player_setup.add(type=sc_pb.Participant)
        request.player_setup.add(
            type=sc_pb.Computer, race=bot_race, difficulty=bot_difficulty)
    self._controller.create_game(request)
|
Create a game, one remote agent vs the specified bot.
Args:
map_name: The map to use.
bot_difficulty: The difficulty of the bot to play against.
bot_race: The race for the bot.
bot_first: Whether the bot should be player 1 (else is player 2).
|
372,126
|
def dict_copy(func):
    """Decorator: shallow-copy all dict arguments before calling *func*.

    Protects the caller's dictionaries from mutation by the wrapped
    function. Note the copies are shallow — nested containers are still
    shared with the caller.

    Improvements over the original: ``functools.wraps`` preserves the
    wrapped function's name/docstring, and the manual copy loops are
    comprehensions.

    :param func: function whose dict arguments should be copied
    :return: wrapped function with the same call signature
    """
    from functools import wraps

    @wraps(func)
    def proxy(*args, **kwargs):
        # Copy only dict-typed arguments; pass everything else through.
        new_args = [dict(a) if isinstance(a, dict) else a for a in args]
        new_kwargs = {k: dict(v) if isinstance(v, dict) else v
                      for k, v in kwargs.items()}
        return func(*new_args, **new_kwargs)

    return proxy
|
copy dict args, to avoid modifying caller's copy
|
372,127
|
def get_json_result(results, n=10):
    """Return up to the top `n` results as a JSON list.

    Stops early when a result's score drops sharply relative to the
    previous one, or falls below an absolute floor.

    NOTE(review): the dict-key literals (each ``res[]`` — presumably a
    'probability' field) were stripped during extraction; not runnable
    as-is.
    """
    s = []
    last = -1
    for res in results[:min(len(results), n)]:
        # Relative drop-off cut: below half the previous score and small.
        if res[] < last*0.5 and res[] < 0.05:
            break
        # Absolute floor cut.
        if res[] < 0.01:
            break
        s.append(res)
        last = res[]
    return json.dumps(s)
|
Return the top `n` results as a JSON list.
>>> results = [{'probability': 0.65,
... 'whatever': 'bar'},
... {'probability': 0.21,
... 'whatever': 'bar'},
... {'probability': 0.05,
... 'whatever': 'bar'},]
>>> get_json_result(results, n=10)
[{'\\alpha': 0.65}, {'\\propto': 0.25}, {'\\varpropto': 0.0512}]
|
372,128
|
async def readline(self) -> bytes:
    """Read one line, polling until a complete line is available.

    Keeps waiting (yielding to the event loop between attempts) in case
    there is no linefeed in the buffer yet.

    :returns: bytes forming a line
    """
    while True:
        data = self._serial_instance.readline()
        if data:
            return data
        # Nothing buffered yet; yield control before polling again.
        await asyncio.sleep(self._asyncio_sleep_time)
|
Reads one line
>>> # Keeps waiting for a linefeed incase there is none in the buffer
>>> await test.readline()
:returns: bytes forming a line
|
372,129
|
def _normalize_tabular_data(tabular_data, headers):
    """Transform a supported data type to a list of lists plus a header list.

    Supported tabular data types: iterables of iterables, lists of named
    tuples, 2D NumPy arrays, NumPy record arrays, dicts of iterables and
    pandas DataFrames. ``headers="firstrow"`` consumes the first row as
    headers; ``headers="keys"`` derives headers from keys/fields/indices.

    NOTE(review): this snippet is corrupted — ``vals`` is referenced but
    never defined (lines were likely lost in extraction) and the
    ValueError message string is truncated, which also breaks the
    statement structure that followed it. Not runnable as-is.
    """
    if hasattr(tabular_data, "keys") and hasattr(tabular_data, "values"):
        # dict-like (or DataFrame-like) input.
        if hasattr(tabular_data.values, "__call__"):
            keys = tabular_data.keys()
            rows = list(izip_longest(*tabular_data.values()))
        elif hasattr(tabular_data, "index"):
            names = tabular_data.index
            rows = [[v]+list(row) for v,row in zip(names, vals)]
        else:
            raise ValueError("tabular data doesns a usual an iterable of iterables, or a NumPy array
    rows = list(tabular_data)
    if (headers == "keys" and
            hasattr(tabular_data, "dtype") and
            getattr(tabular_data.dtype, "names")):
        # NumPy record array: field names become headers.
        headers = tabular_data.dtype.names
    elif (headers == "keys"
          and len(rows) > 0
          and isinstance(rows[0], tuple)
          and hasattr(rows[0], "_fields")):
        # Named tuples: use the _fields of the first row.
        headers = list(map(_text_type, rows[0]._fields))
    elif headers == "keys" and len(rows) > 0:
        # Fall back to column indices as headers.
        headers = list(map(_text_type, range(len(rows[0]))))
    if headers == "firstrow" and len(rows) > 0:
        headers = list(map(_text_type, rows[0]))
        rows = rows[1:]
    headers = list(headers)
    rows = list(map(list,rows))
    if headers and len(rows) > 0:
        # Left-pad headers so their count matches the column count.
        nhs = len(headers)
        ncols = len(rows[0])
        if nhs < ncols:
            headers = [""]*(ncols - nhs) + headers
    return rows, headers
|
Transform a supported data type to a list of lists, and a list of headers.
Supported tabular data types:
* list-of-lists or another iterable of iterables
* list of named tuples (usually used with headers="keys")
* 2D NumPy arrays
* NumPy record arrays (usually used with headers="keys")
* dict of iterables (usually used with headers="keys")
* pandas.DataFrame (usually used with headers="keys")
The first row can be used as headers if headers="firstrow",
column indices can be used as headers if headers="keys".
|
372,130
|
def set_frequency(self, host, sem=None, interval=None):
    """Register a Frequency(sem, interval) entry for *host* and return it.

    Falls back to this object's default ``sem`` / ``interval`` when the
    arguments are not supplied.
    """
    # Preserve original semantics: falsy sem falls back to the default.
    sem = sem or self.sem
    interval = self.interval if interval is None else interval
    freq = Frequency(sem, interval, host)
    self.update_frequency({host: freq})
    return freq
|
Set frequency for host with sem and interval.
|
372,131
|
def unblock_pin(ctx, puk, new_pin):
    """Unblock the PIN by resetting it with the PUK code.

    Prompts interactively (hidden input) for any missing value.

    NOTE(review): the context key, prompt texts and defaults were
    stripped during extraction; not runnable as-is.
    """
    controller = ctx.obj[]
    if not puk:
        puk = click.prompt(
            , default=, show_default=False,
            hide_input=True, err=True)
    if not new_pin:
        new_pin = click.prompt(
            , default=,
            show_default=False, hide_input=True, err=True)
    controller.unblock_pin(puk, new_pin)
|
Unblock the PIN.
Reset the PIN using the PUK code.
|
372,132
|
def __get_html(self, body=None):
    """Build the complete HTML document around the given body content.

    :param body: body tag content; when None a default empty report
        body is generated
    :type body: unicode
    :return: assembled HTML document
    :rtype: unicode
    """
    fragments = ["<html>", "<head>"]
    # Inline each bundled script inside its own script tag.
    for javascript in (self.__jquery_javascript,
                       self.__crittercism_javascript,
                       self.__reporter_javascript):
        fragments.extend(["<script type=\"text/javascript\">",
                          javascript,
                          "</script>"])
    fragments.extend(["<style type=\"text/css\">",
                      self.__style,
                      "</style>",
                      "</head>"])
    if body is None:
        # Default body: an empty report container.
        fragments.extend(["<body>",
                          "<div id=\"report\">",
                          "</div>",
                          "</body>"])
    else:
        fragments.append(body)
    fragments.append("</html>")
    return "\n".join(fragments)
|
Returns the html content with given body tag content.
:param body: Body tag content.
:type body: unicode
:return: Html.
:rtype: unicode
|
372,133
|
def commitAndCloseEditor(self):
    """Commit the sending editor's data (when supported) and close it."""
    active_editor = self.sender()
    try:
        self.commitData.emit(active_editor)
    except AttributeError:
        # No commitData signal available; closing is still required.
        pass
    self.closeEditor.emit(active_editor, QAbstractItemDelegate.NoHint)
|
Commit and close editor
|
372,134
|
def _dictlist_to_lists(dl, *keys):
    """Convert a list of dictionaries to parallel lists of values, one per key.

    >>> dl = [{'a': 'test', 'b': 3}, {'a': 'zaz', 'b': 444},
    ...       {'a': 'wow', 'b': 300}]
    >>> _dictlist_to_lists(dl, 'a', 'b')
    [['test', 'zaz', 'wow'], [3, 444, 300]]

    Fixes over the original: the stray garbage expression line (docstring
    residue) is removed, and the Python-2-only ``unicode`` reference no
    longer raises NameError on Python 3.

    :param dl: list of dictionaries
    :param keys: keys to extract, one output list per key
    :return: list of lists of values, in the same order as *keys*
    """
    # On Python 2 convert unicode values to byte strings (original
    # behaviour); on Python 3 `unicode` does not exist and values pass
    # through unchanged.
    try:
        legacy_text = unicode
    except NameError:
        legacy_text = None
    lists = [[] for _ in keys]
    for item in dl:
        for i, key in enumerate(keys):
            x = item[key]
            if legacy_text is not None and isinstance(x, legacy_text):
                x = str(x)
            lists[i].append(x)
    return lists
|
convert a list of dictionaries to parallel lists of values, one list per requested key
>>> dl = [{'a': 'test', 'b': 3}, {'a': 'zaz', 'b': 444},
{'a': 'wow', 'b': 300}]
>>> _dictlist_to_lists(dl, 'a', 'b')
[['test', 'zaz', 'wow'], [3, 444, 300]]
|
372,135
|
def union_overlapping(intervals):
    """Union any overlapping intervals in the given sequence.

    Each interval is merged into the previously accumulated interval
    when they overlap, so overlapping neighbours collapse into one.

    :param intervals: iterable of interval objects exposing
        ``overlaps(other)`` and ``union(other)``
    :return: list of disjoint intervals
    """
    merged = []
    for current in intervals:
        if merged and merged[-1].overlaps(current):
            # Extend the most recent interval in place.
            merged[-1] = merged[-1].union(current)
        else:
            merged.append(current)
    return merged
|
Union any overlapping intervals in the given set.
|
372,136
|
def tqdm(self, desc, total, leave, initial=0):
    """Create a notebook progress bar.

    Extension point: override to pass custom options to the
    ``tqdm_notebook`` initializer.

    :param desc: description string
    :param total: total number of updates
    :param leave: leave the progress bar visible when done
    :param initial: initial counter state
    :return: new progress bar
    """
    return tqdm_notebook(desc=desc, total=total, leave=leave, initial=initial)
|
Extension point. Override to provide custom options to tqdm_notebook initializer.
:param desc: Description string
:param total: Total number of updates
:param leave: Leave progress bar when done
:return: new progress bar
:param initial: Initial counter state
|
372,137
|
def _variable_inputs(self, op):
    """Return a cached boolean mask over ``op.inputs``.

    An entry is True when that input depends on the model inputs
    (its producing op is in ``between_ops`` or the tensor itself is a
    model input). Results are memoized per op name in ``self._vinputs``.
    """
    if op.name not in self._vinputs:
        mask = [
            t.op in self.between_ops or t in self.model_inputs
            for t in op.inputs
        ]
        self._vinputs[op.name] = np.array(mask)
    return self._vinputs[op.name]
|
Return which inputs of this operation are variable (i.e. depend on the model inputs).
|
372,138
|
def move(self, x, y):
    """Move the drawing cursor to the specified position.

    Coordinates are doubled and rounded: the cursor is tracked at
    half-character resolution.

    :param x: the column (x coord) of the target location
    :param y: the line (y coord) of the target location
    """
    self._x, self._y = (int(round(coord * 2, 0)) for coord in (x, y))
|
Move the drawing cursor to the specified position.
:param x: The column (x coord) for the location to check.
:param y: The line (y coord) for the location to check.
|
372,139
|
def prepare_inventory(self):
    """Default ``self.inventory`` under ``private_data_dir`` when unset.

    Leaves any inventory supplied via the constructor untouched.
    """
    if self.inventory is not None:
        return
    self.inventory = os.path.join(self.private_data_dir, "inventory")
|
Prepares the inventory default under ``private_data_dir`` if it's not overridden by the constructor.
|
372,140
|
def _get_game_number(cls, gid_path):
    """Extract the game number from a game-logs directory path.

    :param gid_path: game logs directory path
    :return: game number (int)

    NOTE(review): the exception-message format string was stripped
    during extraction; not runnable as-is on the failure path.
    """
    # The second-to-last character normally carries the game number.
    game_number = str(gid_path[len(gid_path)-2:len(gid_path)-1])
    if game_number.isdigit():
        return int(game_number)
    else:
        # Fall back to the last digit found scanning from the end.
        for char in reversed(gid_path):
            if char.isdigit():
                return int(char)
        raise MlbAmException(.format(gid_path))
|
Game Number
:param gid_path: game logs directory path
:return: game number(int)
|
372,141
|
def get_context(request, context=None):
    """Return common context data for network topology views.

    Populates policy/quota flags for instance, network and router
    creation plus service-enablement flags.

    NOTE(review): every settings name and context key was stripped
    during extraction; not runnable as-is.
    """
    if context is None:
        context = {}
    network_config = getattr(settings, , {})
    # Instance creation: policy check plus quota availability.
    context[] = policy.check(
        (("compute", "os_compute_api:servers:create"),), request)
    context[] = _quota_exceeded(request, )
    # Network creation flags.
    context[] = policy.check(
        (("network", "create_network"),), request)
    context[] = _quota_exceeded(request, )
    # Router creation: config toggle and policy must both allow it.
    context[] = (
        network_config.get(, True) and
        policy.check((("network", "create_router"),), request))
    context[] = _quota_exceeded(request, )
    context[] = getattr(settings, , )
    context[] = (
        base.is_service_enabled(request, ) and
        getattr(settings, , True))
    context[] = (
        base.is_service_enabled(request, ) and
        getattr(settings, , False))
    return context
|
Returns common context data for network topology views.
|
372,142
|
def shutdown(self):
    """Close the socket immediately and mark this client disconnected."""
    active = self.sock
    if active:
        active.close()
        self.sock = None
    self.connected = False
|
close socket, immediately.
|
372,143
|
def get_writer_factory_for(self, name, *, format=None):
    """Return a callable that builds a writer for the given filename.

    :param name: filename
    :param format: optional format override
    :return: factory callable (type)
    """
    factory = self.get_factory_for(WRITER, name, format=format)
    return factory
|
Returns a callable to build a writer for the provided filename, eventually forcing a format.
:param name: filename
:param format: format
:return: type
|
372,144
|
def pvpc_procesa_datos_dia(_, response, verbose=True):
    """Parse the downloaded JSON payload into one day's PVPC dataframe.

    Returns ``(df, 0)`` on success or ``(None, -2)`` on any failure.

    NOTE(review): the response key and the error-format string were
    stripped during extraction; not runnable as-is.
    """
    try:
        d_data = response[]
        df = _process_json_pvpc_hourly_data(pd.DataFrame(d_data))
        return df, 0
    except Exception as e:
        # Broad catch: any parsing problem yields the error tuple.
        if verbose:
            print(.format(e))
        return None, -2
|
Procesa la información JSON descargada y forma el dataframe de los datos de un día.
|
372,145
|
def dumpLines(self):
for i, line in enumerate(self.lines):
logger.debug("Line %d:", i)
logger.debug(line.dumpFragments())
|
For debugging dump all line and their content
|
372,146
|
def check_exists(self):
response = self.repo.api.http_request(, self.uri)
self.status_code = response.status_code
if self.status_code == 200:
self.exists = True
elif self.status_code == 410:
self.exists = False
elif self.status_code == 404:
self.exists = False
return self.exists
|
Check if resource exists, update self.exists, returns
Returns:
None: sets self.exists
|
372,147
|
def acknowledge_time(self):
if (self.is_acknowledged and
self._proto.acknowledgeInfo.HasField()):
return parse_isostring(self._proto.acknowledgeInfo.acknowledgeTime)
return None
|
Processor time when the alarm was acknowledged.
:type: :class:`~datetime.datetime`
|
372,148
|
def long_click(self, duration=2.0):
try:
duration = float(duration)
except ValueError:
raise ValueError(.format(repr(duration)))
pos_in_percentage = self.get_position(self._focus or )
self.poco.pre_action(, self, pos_in_percentage)
ret = self.poco.long_click(pos_in_percentage, duration)
self.poco.post_action(, self, pos_in_percentage)
return ret
|
Perform the long click action on the UI element(s) represented by the UI proxy. If this UI proxy represents a
set of UI elements, the first one in the set is clicked and the anchor point of the UI element is used as the
default one. Similar to click but press the screen for the given time interval and then release.
Args:
duration (:py:obj:`float`): whole action duration.
Return:
the same as :py:meth:`poco.pocofw.Poco.long_click`, depending on poco agent implementation.
|
372,149
|
def quantize_weights(full_precision_model,
nbits,
quantization_mode="linear",
sample_data=None,
**kwargs):
qmode_mapping = {
"linear": _QUANTIZATION_MODE_LINEAR_QUANTIZATION,
"kmeans": _QUANTIZATION_MODE_LOOKUP_TABLE_KMEANS,
"linear_lut": _QUANTIZATION_MODE_LOOKUP_TABLE_LINEAR,
"custom_lut": _QUANTIZATION_MODE_CUSTOM_LOOKUP_TABLE,
"dequantization": _QUANTIZATION_MODE_DEQUANTIZE
}
try:
qmode = qmode_mapping[quantization_mode]
except KeyError:
raise Exception("Invalid quantization mode. Quantization mode must be "
"one of {}".format(qmode_mapping))
print("Quantizing using {} quantization".format(quantization_mode))
spec = full_precision_model.get_spec()
qspec = quantize_spec_weights(spec, nbits, qmode, **kwargs)
if macos_version() < (10, 14):
print("WARNING! Unable to return a quantized MLModel instance since OS != macOS 10.14 or later")
print("Returning quantized model specification instead")
return qspec
quantized_model = _get_model(qspec)
if not sample_data:
return quantized_model
compare_models(full_precision_model, quantized_model, sample_data)
return quantized_model
|
Utility function to convert a full precision (float) MLModel to a
nbit quantized MLModel (float16).
:param full_precision_model: MLModel
Model which will be converted to half precision. Currently conversion
for only neural network models is supported. If a pipeline model is
passed in then all embedded neural network models embedded within
will be converted.
:param nbits: Int
Number of bits per quantized weight. Only 8-bit and lower
quantization is supported
:param quantization_mode: str
One of:
"linear":
Simple linear quantization with scale and bias
"linear_lut":
Simple linear quantization represented as a lookup table
"kmeans_lut":
LUT based quantization, where LUT is generated by K-Means clustering
"custom_lut":
LUT quantization where LUT and quantized weight params are
calculated using a custom function. If this mode is selected then
a custom function must be passed in kwargs with key lut_function.
The function must have input params (nbits, wp) where nbits is the
number of quantization bits and wp is the list of weights for a
given layer. The function should return two parameters (lut, qw)
where lut is an array of length (2^nbits)containing LUT values and
qw is the list of quantized weight parameters. See
_get_linear_lookup_table_and_weight for a sample implementation.
:param sample_data: str | [dict]
Data used to characterize performance of the quantized model in
comparison to the full precision model. Either a list of sample input
dictionaries or an absolute path to a directory containing images.
Path to a directory containing images is only valid for models with
one image input. For all other models a list of sample inputs must be
provided.
:param **kwargs:
See below
:Keyword Arguments:
* *lut_function* (``callable function``) --
A callable function provided when quantization mode is set to
_QUANTIZATION_MODE_CUSTOM_LOOKUP_TABLE. See quantization_mode for
more details
Returns
-------
model: MLModel
The quantized MLModel instance if running on macOS 10.14 or later,
otherwise the quantized model specification is returned
Examples
--------
.. sourcecode:: python
>>> import coremltools
>>> from coremltools.models.neural_network import quantization_utils
>>> model = coremltools.models.MLModel('my_model.mlmodel')
>>> quantized_model = quantization_utils.quantize_weights(model, 8, "linear")
|
372,150
|
def exclude_matches(self, matches):
for match in matches:
for exclude_pattern in self.exclude_patterns:
if re.match(exclude_pattern, match) is not None:
break
else:
yield match
|
Filter any matches that match an exclude pattern.
:param matches: a list of possible completions
|
372,151
|
def max(self):
if "max" not in self.attrs.keys():
def f(dataset, s):
return np.nanmax(dataset[s])
self.attrs["max"] = np.nanmax(list(self.chunkwise(f).values()))
return self.attrs["max"]
|
Maximum, ignorning nans.
|
372,152
|
def sample(self, size=1):
samples = []
for i in range(size):
xi = self._r_xi_rv.rvs(size=1)
S_xi = skew(xi)
R_sample = scipy.linalg.expm(S_xi)
t_sample = self._t_rv.rvs(size=1)
samples.append(RigidTransform(rotation=R_sample,
translation=t_sample,
from_frame=self._from_frame,
to_frame=self._to_frame))
if size == 1 and len(samples) > 0:
return samples[0]
return samples
|
Sample rigid transform random variables.
Parameters
----------
size : int
number of sample to take
Returns
-------
:obj:`list` of :obj:`RigidTransform`
sampled rigid transformations
|
372,153
|
def subalignment(alnfle, subtype, alntype="fasta"):
aln = AlignIO.read(alnfle, alntype)
alnlen = aln.get_alignment_length()
nseq = len(aln)
subaln = None
subalnfile = alnfle.rsplit(".", 1)[0] + "_{0}.{1}".format(subtype, alntype)
if subtype == "synonymous":
for j in range( 0, alnlen, 3 ):
aa = None
for i in range(nseq):
codon = str(aln[i, j: j + 3].seq)
if codon not in CODON_TRANSLATION:
break
if aa and CODON_TRANSLATION[codon] != aa:
break
else:
aa = CODON_TRANSLATION[codon]
else:
if subaln is None:
subaln = aln[:, j: j + 3]
else:
subaln += aln[:, j: j + 3]
if subtype == "fourfold":
for j in range( 0, alnlen, 3 ):
for i in range(nseq):
codon = str(aln[i, j: j + 3].seq)
if codon not in FOURFOLD:
break
else:
if subaln is None:
subaln = aln[:, j: j + 3]
else:
subaln += aln[:, j: j + 3]
if subaln:
AlignIO.write(subaln, subalnfile, alntype)
return subalnfile
else:
print("No sites {0} selected.".format(subtype), file=sys.stderr)
return None
|
Subset synonymous or fourfold degenerate sites from an alignment
input should be a codon alignment
|
372,154
|
def _get_unit_factor(cls, unit):
try:
if isinstance(unit, str):
unit = cls.UNIT_FACTOR_NAMES[unit]
return cls.UNIT_FACTORS[unit]
except KeyError:
raise UnsupportedUnitError()
|
Returns the unit factor depending on the unit constant
:param int unit: the unit of the factor requested
:returns: a function to convert the raw sensor value to the given unit
:rtype: lambda function
:raises UnsupportedUnitError: if the unit is not supported
|
372,155
|
def certificate_issuer_id(self, certificate_issuer_id):
if certificate_issuer_id is None:
raise ValueError("Invalid value for `certificate_issuer_id`, must not be `None`")
if certificate_issuer_id is not None and len(certificate_issuer_id) > 32:
raise ValueError("Invalid value for `certificate_issuer_id`, length must be less than or equal to `32`")
self._certificate_issuer_id = certificate_issuer_id
|
Sets the certificate_issuer_id of this CreateCertificateIssuerConfig.
The ID of the certificate issuer.
:param certificate_issuer_id: The certificate_issuer_id of this CreateCertificateIssuerConfig.
:type: str
|
372,156
|
def count_replica(self, partition):
return sum(1 for b in partition.replicas if b in self.brokers)
|
Return count of replicas of given partition.
|
372,157
|
def frac_vol_floc_initial(ConcAluminum, ConcClay, coag, material):
return ((conc_precipitate(ConcAluminum, coag).magnitude/coag.PrecipDensity)
+ (ConcClay / material.Density))
|
Return the volume fraction of flocs initially present, accounting for both suspended particles and coagulant precipitates.
:param ConcAluminum: Concentration of aluminum in solution
:type ConcAluminum: float
:param ConcClay: Concentration of particle in suspension
:type ConcClay: float
:param coag: Type of coagulant in solution
:type coag: float
:param material: Type of particles in suspension, e.g. floc_model.Clay
:type material: floc_model.Material
:return: Volume fraction of particles initially present
:rtype: float
|
372,158
|
def _wrap_result(self, func):
def wrapper(*args):
result = func(*args)
if hasattr(result, ) and not isinstance(result, etree._Element):
return [self._wrap_element(element) for element in result]
else:
return self._wrap_element(result)
return wrapper
|
Wrap result in Parser instance
|
372,159
|
def has_length(self, value, q, strict=False):
value = stringify(value)
if value is not None:
if len(value) == q:
return
self.shout(, strict, value, q)
|
if value has a length of q
|
372,160
|
def _parse_keys(row, line_num):
link =
none_keys = [key for key in row.keys() if key is None]
if none_keys:
fail(
.format(line_num), link)
elif not row.get():
fail(
.format(line_num), link)
|
Perform some sanity checks on they keys
Each key in the row should not be named None cause
(that's an overrun). A key named `type` MUST be
present on the row & have a string value.
:param row: dict
:param line_num: int
|
372,161
|
def add_post_process(self, name, post_process, description=""):
self._pprocesses.append(PostProcess(name=name,
function=post_process,
description=description))
self._pprocesses[-1].function(self)
|
add a post-process
Parameters
----------
name : str
name of the post-traitment
post_process : callback (function of a class with a __call__ method
or a streamz.Stream).
this callback have to accept the simulation state as parameter
and return the modifield simulation state.
if a streamz.Stream is provided, it will me plugged_in with the
previous streamz (and ultimately to the initial_stream). All these
stream accept and return the simulation state.
description : str, optional, Default is "".
give extra information about the post-processing
|
372,162
|
def group_get(auth=None, **kwargs):
***
cloud = get_operator_cloud(auth)
kwargs = _clean_kwargs(**kwargs)
return cloud.get_group(**kwargs)
|
Get a single group
CLI Example:
.. code-block:: bash
salt '*' keystoneng.group_get name=group1
salt '*' keystoneng.group_get name=group2 domain_id=b62e76fbeeff4e8fb77073f591cf211e
salt '*' keystoneng.group_get name=0e4febc2a5ab4f2c8f374b054162506d
|
372,163
|
def __add_flag (rule_or_module, variable_name, condition, values):
assert isinstance(rule_or_module, basestring)
assert isinstance(variable_name, basestring)
assert is_iterable_typed(condition, property_set.PropertySet)
assert is_iterable(values) and all(
isinstance(v, (basestring, type(None))) for v in values)
f = Flag(variable_name, values, condition, rule_or_module)
m = __re_first_segment.match (rule_or_module)
assert m
module = m.group(1)
__module_flags.setdefault(module, []).append(f)
__flags.setdefault(rule_or_module, []).append(f)
|
Adds a new flag setting with the specified values.
Does no checking.
|
372,164
|
def do_down(self, arg):
if self.curindex + 1 == len(self.stack):
self.error()
return
try:
count = int(arg or 1)
except ValueError:
self.error( % arg)
return
if count < 0:
newframe = len(self.stack) - 1
else:
newframe = min(len(self.stack) - 1, self.curindex + count)
self._select_frame(newframe)
|
d(own) [count]
Move the current frame count (default one) levels down in the
stack trace (to a newer frame).
|
372,165
|
def FromBinary(cls, record_data, record_count=1):
if len(record_data) < ReflashTileRecord.RecordHeaderLength:
raise ArgumentError("Record was too short to contain a full reflash record header",
length=len(record_data), header_length=ReflashTileRecord.RecordHeaderLength)
offset, data_length, raw_target, hardware_type = struct.unpack_from("<LL8sB3x", record_data)
bindata = record_data[ReflashTileRecord.RecordHeaderLength:]
if len(bindata) != data_length:
raise ArgumentError("Embedded firmware length did not agree with actual length of embeded data",
length=len(bindata), embedded_length=data_length)
target = _parse_target(raw_target)
if target[]:
raise ArgumentError("Invalid targetting information, you "
"cannot reflash a controller with a ReflashTileRecord", target=target)
return ReflashTileRecord(target[], bindata, offset, hardware_type)
|
Create an UpdateRecord subclass from binary record data.
This should be called with a binary record blob (NOT including the
record type header) and it will decode it into a ReflashTileRecord.
Args:
record_data (bytearray): The raw record data that we wish to parse
into an UpdateRecord subclass NOT including its 8 byte record header.
record_count (int): The number of records included in record_data.
Raises:
ArgumentError: If the record_data is malformed and cannot be parsed.
Returns:
ReflashTileRecord: The decoded reflash tile record.
|
372,166
|
def check_bearer_validity(self, token: dict, connect_mtd) -> dict:
warnings.warn(
"Method is now executed as a decorator within the main SDK class. Will be removed in future versions.",
DeprecationWarning,
)
if datetime.now() < token.get("expires_at"):
token = connect_mtd
logging.debug("Token was about to expire, so has been renewed.")
else:
logging.debug("Token is still valid.")
pass
return token
|
Check API Bearer token validity.
Isogeo ID delivers authentication bearers which are valid during
a certain time. So this method checks the validity of the token
with a 30 mn anticipation limit, and renews it if necessary.
See: http://tools.ietf.org/html/rfc6750#section-2
FI: 24h = 86400 seconds, 30 mn = 1800, 5 mn = 300
:param tuple token: auth bearer to check.
Structure: (bearer, expiration_date)
:param isogeo_pysdk.connect connect_mtd: method herited
from Isogeo PySDK to get new bearer
|
372,167
|
def new_comment(self, string, start, end, line):
prefix = line[:start[1]]
if prefix.strip():
self.current_block.add(string, start, end, line)
else:
block = Comment(start[0], end[0], string)
self.blocks.append(block)
self.current_block = block
|
Possibly add a new comment.
Only adds a new comment if this comment is the only thing on the line.
Otherwise, it extends the noncomment block.
|
372,168
|
def is_usable(host, port, timeout=3):
try:
proxies = {
: % (host, port),
: % (host, port)
}
requests.get(, proxies=proxies, timeout=timeout)
except HTTPError:
print(, host, port)
return False
else:
print(, host, port)
return True
|
测试代理是否可用
params
----------
host ip地址
port 端口号
timeout 默认值为3,通过设置这个参数可以过滤掉一些速度慢的代理
example
----------
is_usable('222.180.24.13', '808', timeout=3)
|
372,169
|
def dollars_to_cents(s, allow_negative=False):
if not s:
return
if isinstance(s, string_types):
s = .join(RE_NUMBER.findall(s))
dollars = int(round(float(s) * 100))
if not allow_negative and dollars < 0:
raise ValueError()
return dollars
|
Given a string or integer representing dollars, return an integer of
equivalent cents, in an input-resilient way.
This works by stripping any non-numeric characters before attempting to
cast the value.
Examples::
>>> dollars_to_cents('$1')
100
>>> dollars_to_cents('1')
100
>>> dollars_to_cents(1)
100
>>> dollars_to_cents('1e2')
10000
>>> dollars_to_cents('-1$', allow_negative=True)
-100
>>> dollars_to_cents('1 dollar')
100
|
372,170
|
def get_credits_by_section_and_regid(section, regid):
deprecation("Use get_credits_by_reg_url")
except InvalidOperation:
pass
|
Returns a uw_sws.models.Registration object
for the section and regid passed in.
|
372,171
|
def access_func(self, id_, lineno, scope=None, default_type=None):
assert default_type is None or isinstance(default_type, symbols.TYPEREF)
result = self.get_entry(id_, scope)
if result is None:
if default_type is None:
if global_.DEFAULT_IMPLICIT_TYPE == TYPE.auto:
default_type = symbols.TYPEREF(self.basic_types[TYPE.auto], lineno, implicit=True)
else:
default_type = symbols.TYPEREF(self.basic_types[global_.DEFAULT_TYPE], lineno, implicit=True)
return self.declare_func(id_, lineno, default_type)
if not self.check_class(id_, CLASS.function, lineno, scope):
return None
return result
|
Since ZX BASIC allows access to undeclared functions, we must allow
and *implicitly* declare them if they are not declared already.
This function just checks if the id_ exists and returns its entry if so.
Otherwise, creates an implicit declared variable entry and returns it.
|
372,172
|
def append(self, cert):
if not isinstance(cert, x509.Certificate):
if not isinstance(cert, byte_cls):
raise TypeError(pretty_message(
,
type_name(cert)
))
if pem.detect(cert):
_, _, cert = pem.unarmor(cert)
cert = x509.Certificate.load(cert)
if cert.issuer_serial in self._cert_hashes:
raise DuplicateCertificateError()
self._cert_hashes.add(cert.issuer_serial)
self._certs.append(cert)
return self
|
Appends a cert to the path. This should be a cert issued by the last
cert in the path.
:param cert:
An asn1crypto.x509.Certificate object
:return:
The current ValidationPath object, for chaining
|
372,173
|
def process_user_input(self):
user_input = self.get_input()
try:
num = int(user_input)
except Exception:
return
if 0 < num < len(self.items) + 1:
self.current_option = num - 1
self.select()
return user_input
|
Gets the next single character and decides what to do with it
|
372,174
|
def truncate(text, length=50, ellipsis=):
text = nativestring(text)
return text[:length] + (text[length:] and ellipsis)
|
Returns a truncated version of the inputted text.
:param text | <str>
length | <int>
ellipsis | <str>
:return <str>
|
372,175
|
def minimum_pitch(self):
pitch = self.pitch
minimal_pitch = []
for p in pitch:
minimal_pitch.append(min(p))
return min(minimal_pitch)
|
Returns the minimal pitch between two neighboring nodes of the mesh in each direction.
:return: Minimal pitch in each direction.
|
372,176
|
def date_time(self, tzinfo=None, end_datetime=None):
return datetime(1970, 1, 1, tzinfo=tzinfo) + \
timedelta(seconds=self.unix_time(end_datetime=end_datetime))
|
Get a datetime object for a date between January 1, 1970 and now
:param tzinfo: timezone, instance of datetime.tzinfo subclass
:example DateTime('2005-08-16 20:39:21')
:return datetime
|
372,177
|
def advance(self, length):
new_position = self._position + length
if new_position < 0 or new_position > len(self._data):
raise Exception(
% (length, new_position))
self._position = new_position
|
Advance the cursor in data buffer 'length' bytes.
|
372,178
|
def send_command_return_multilines(self, obj, command, *arguments):
index_command = obj._build_index_command(command, *arguments)
return self.chassis_list[obj.chassis].sendQuery(index_command, True)
|
Send command and wait for multiple lines output.
|
372,179
|
def run_symmetrized_readout(self, program: Program, trials: int) -> np.ndarray:
flipped_program = _get_flipped_protoquil_program(program)
if trials % 2 != 0:
raise ValueError("Using symmetrized measurement functionality requires that you "
"take an even number of trials.")
half_trials = trials // 2
flipped_program = flipped_program.wrap_in_numshots_loop(shots=half_trials)
flipped_executable = self.compile(flipped_program)
executable = self.compile(program.wrap_in_numshots_loop(half_trials))
samples = self.run(executable)
flipped_samples = self.run(flipped_executable)
double_flipped_samples = np.logical_not(flipped_samples).astype(int)
results = np.concatenate((samples, double_flipped_samples), axis=0)
np.random.shuffle(results)
return results
|
Run a quil program in such a way that the readout error is made collectively symmetric
This means the probability of a bitstring ``b`` being mistaken for a bitstring ``c`` is
the same as the probability of ``not(b)`` being mistaken for ``not(c)``
A more general symmetrization would guarantee that the probability of ``b`` being
mistaken for ``c`` depends only on which bit of ``c`` are different from ``b``. This
would require choosing random subsets of bits to flip.
In a noisy device, the probability of accurately reading the 0 state might be higher
than that of the 1 state. This makes correcting for readout more difficult. This
function runs the program normally ``(trials//2)`` times. The other half of the time,
it will insert an ``X`` gate prior to any ``MEASURE`` instruction and then flip the
measured classical bit back.
See :py:func:`run` for this function's parameter descriptions.
|
372,180
|
def extract_ranges(index_list, range_size_limit=32):
if not index_list:
return [], []
first = index_list[0]
last = first
ranges = []
singles = []
for i in index_list[1:]:
if i == last + 1 and (last - first) <= range_size_limit:
last = i
else:
if last > first:
ranges.append([first, last])
else:
singles.append(first)
first = i
last = i
if last > first:
ranges.append([first, last])
else:
singles.append(first)
return ranges, singles
|
Extract consecutive ranges and singles from index_list.
Args:
index_list: List of monotone increasing non-negative integers.
range_size_limit: Largest size range to return. If a larger
consecutive range exists it will be returned as multiple
ranges.
Returns:
ranges, singles where ranges is a list of [first, last] pairs of
consecutive elements in index_list, and singles is all of the
other elements, in original order.
|
372,181
|
def arrow_(self, xloc, yloc, text, orientation="v", arrowstyle=):
try:
arrow = hv.Arrow(
xloc,
yloc,
text,
orientation,
arrowstyle=arrowstyle)
return arrow
except Exception as e:
self.err(e, self.arrow_, "Can not draw arrow chart")
|
Returns an arrow for a chart. Params: the text, xloc and yloc are
coordinates to position the arrow. Orientation is the way to display
the arrow: possible values are ``[<, ^, >, v]``. Arrow style is the
graphic style of the arrow:
possible values: ``[-, ->, -[, -|>, <->, <|-|>]``
|
372,182
|
def delete(self, loc):
self.values = np.delete(self.values, loc, 0)
self.mgr_locs = self.mgr_locs.delete(loc)
|
Delete given loc(-s) from block in-place.
|
372,183
|
def create_audio_mp3_profile(apps, schema_editor):
Profile = apps.get_model(, )
Profile.objects.get_or_create(profile_name=AUDIO_MP3_PROFILE)
|
Create audio_mp3 profile
|
372,184
|
def _make_association(self, clk=None, rst=None) -> None:
if clk is not None:
assert self._associatedClk is None
self._associatedClk = clk
if rst is not None:
assert self._associatedRst is None
self._associatedRst = rst
|
Associate this object with specified clk/rst
|
372,185
|
def _write(self, data):
total_sent = 0
length = len(data)
while total_sent < length:
try:
sent = self.socket.send(data[total_sent:])
except socket.error as (err, msg):
self.connected = False
raise ScratchError("[Errno %d] %s" % (err, msg))
if sent == 0:
self.connected = False
raise ScratchConnectionError("Connection broken")
total_sent += sent
|
Writes string data out to Scratch
|
372,186
|
def update(cls, cluster_id_label, cluster_info):
conn = Qubole.agent(version="v2")
return conn.put(cls.element_path(cluster_id_label), data=cluster_info)
|
Update the cluster with id/label `cluster_id_label` using information provided in
`cluster_info`.
|
372,187
|
def get_path_matching(name):
p = os.path.join(os.path.expanduser("~"), name)
if not os.path.isdir(p):
p = None
drive, folders = os.path.splitdrive(os.getcwd())
folders = folders.split(os.sep)
folders.insert(0, os.sep)
if name in folders:
p = os.path.join(drive, *folders[: folders.index(name) + 1])
return p
|
Get path matching a name.
Parameters
----------
name : string
Name to search for.
Returns
-------
string
Full filepath.
|
372,188
|
def _pypi_head_package(dependency):
if dependency.specs:
_, version = dependency.specs[0]
url = BASE_PYPI_URL_WITH_VERSION.format(name=dependency.project_name, version=version)
else:
url = BASE_PYPI_URL.format(name=dependency.project_name)
logger.debug("Doing HEAD requests against %s", url)
req = request.Request(url, method=)
try:
response = request.urlopen(req)
except HTTPError as http_error:
if http_error.code == HTTP_STATUS_NOT_FOUND:
return False
else:
raise
if response.status == HTTP_STATUS_OK:
logger.debug("%r exists in PyPI.", dependency)
return True
else:
logger.warning("Got a (unexpected) HTTP_STATUS=%r and reason=%r checking if %r exists",
response.status, response.reason, dependency)
return True
|
Hit pypi with a http HEAD to check if pkg_name exists.
|
372,189
|
def download(self, path, file):
resp = self._sendRequest("GET", path)
if resp.status_code == 200:
with open(file, "wb") as f:
f.write(resp.content)
else:
raise YaDiskException(resp.status_code, resp.content)
|
Download remote file to disk.
|
372,190
|
def _build_index(self):
datasets_index = {}
distributions_index = {}
fields_index = {}
for dataset_index, dataset in enumerate(self.datasets):
if "identifier" in dataset:
datasets_index[dataset["identifier"]] = {
"dataset_index": dataset_index
}
for distribution_index, distribution in enumerate(
dataset.get("distribution", [])):
if "identifier" in distribution:
distributions_index[distribution["identifier"]] = {
"distribution_index": distribution_index,
"dataset_identifier": dataset["identifier"]
}
for field_index, field in enumerate(
distribution.get("field", [])):
if "id" in field:
fields_index[field["id"]] = {
"field_index":
field_index,
"dataset_identifier":
dataset["identifier"],
"distribution_identifier":
distribution["identifier"]
}
setattr(self, "_distributions_index", distributions_index)
setattr(self, "_datasets_index", datasets_index)
setattr(self, "_fields_index", fields_index)
|
Itera todos los datasets, distribucioens y fields indexandolos.
|
372,191
|
def get_ajax(self, request, *args, **kwargs):
response_data = self.get_json_response_object(self._datatable)
response = HttpResponse(self.serialize_to_json(response_data),
content_type="application/json")
return response
|
Called when accessed via AJAX on the request method specified by the Datatable.
|
372,192
|
def create_network(self):
class_ = getattr(networks, self.network_class)
return class_(max_size=self.quorum)
|
Create a new network by reading the configuration file.
|
372,193
|
def __r1_hungarian(self, word, vowels, digraphs):
r1 = ""
if word[0] in vowels:
for digraph in digraphs:
if digraph in word[1:]:
r1 = word[word.index(digraph[-1])+1:]
return r1
for i in range(1, len(word)):
if word[i] not in vowels:
r1 = word[i+1:]
break
else:
for i in range(1, len(word)):
if word[i] in vowels:
r1 = word[i+1:]
break
return r1
|
Return the region R1 that is used by the Hungarian stemmer.
If the word begins with a vowel, R1 is defined as the region
after the first consonant or digraph (= two letters stand for
one phoneme) in the word. If the word begins with a consonant,
it is defined as the region after the first vowel in the word.
If the word does not contain both a vowel and consonant, R1
is the null region at the end of the word.
:param word: The Hungarian word whose region R1 is determined.
:type word: str or unicode
:param vowels: The Hungarian vowels that are used to determine
the region R1.
:type vowels: unicode
:param digraphs: The digraphs that are used to determine the
region R1.
:type digraphs: tuple
:return: the region R1 for the respective word.
:rtype: unicode
:note: This helper method is invoked by the stem method of the subclass
HungarianStemmer. It is not to be invoked directly!
|
372,194
|
def delete(self, photo, **kwds):
return self._client.post("/photo/%s/delete.json" %
self._extract_id(photo),
**kwds)["result"]
|
Endpoint: /photo/<id>/delete.json
Deletes a photo.
Returns True if successful.
Raises a TroveboxError if not.
|
372,195
|
def extern_project_multi(self, context_handle, val, field_str_ptr, field_str_len):
c = self._ffi.from_handle(context_handle)
obj = c.from_value(val[0])
field_name = self.to_py_str(field_str_ptr, field_str_len)
return c.vals_buf(tuple(c.to_value(p) for p in getattr(obj, field_name)))
|
Given a Key for `obj`, and a field name, project the field as a list of Keys.
|
372,196
|
def to_excel(self,
workbook=None,
worksheet=None,
xl_app=None,
clear=True,
rename=True,
resize_columns=True):
from win32com.client import Dispatch, constants, gencache
if xl_app is None:
if worksheet is not None:
xl_app = worksheet.Parent.Application
elif workbook is not None and hasattr(workbook.workbook_obj, "Application"):
xl_app = workbook.workbook_obj.Application
else:
xl_app = Dispatch("Excel.Application")
xl = xl_app = gencache.EnsureDispatch(xl_app)
return workbook.to_excel(xl_app=xl_app, resize_columns=resize_columns)
if rename:
self.__name = worksheet.Name
calculation = xl.Calculation
screen_updating = xl.ScreenUpdating
xl.Calculation = constants.xlCalculationManual
xl.ScreenUpdating = False
try:
if clear:
worksheet.Cells.ClearContents()
worksheet.Cells.Font.Bold = False
worksheet.Cells.Font.Size = 11
worksheet.Cells.Font.Color = 0x000000
worksheet.Cells.Interior.ColorIndex = 0
worksheet.Cells.NumberFormat = "General"
array_formula_tables = []
for table, (row, col) in self.__tables.values():
if isinstance(table, ArrayFormula):
array_formula_tables.append((row, col, row + table.height, col + table.width))
def _is_in_array_formula_table(row, col):
for top, left, bottom, right in array_formula_tables:
if bottom >= row >= top and left <= col <= right:
return True
return False
origin = worksheet.Range("A1")
xl_cell = origin
for r, row in enumerate(self.iterrows(workbook)):
row = _to_pywintypes(row)
if clear:
xl_row = worksheet.Range(xl_cell, xl_cell.Offset(1, len(row)))
xl_row.Value = row
else:
for c, value in enumerate(row):
if value is not None:
xl_cell.Offset(1, 1 + c).Value = value
for c, value in enumerate(row):
if isinstance(value, str):
if value.startswith("="):
formula_value = self.__formula_values.get((r, c), 0)
xl_cell.Offset(1, 1 + c).Value = formula_value
xl_cell.Offset(1, 1 + c).Formula = value
elif value.startswith("{=") \
and not _is_in_array_formula_table(r, c):
formula_value = self.__formula_values.get((r, c), 0)
xl_cell.Offset(1, 1 + c).Value = formula_value
xl_cell.Offset(1, 1 + c).FormulaArray = value
xl_cell = xl_cell.Offset(2, 1)
for table, (row, col) in self.__tables.values():
if isinstance(table, ArrayFormula):
data = table.get_data(workbook, row, col)
height, width = data.shape
upper_left = origin.Offset(row+1, col+1)
lower_right = origin.Offset(row + height, col + width)
xl_range = worksheet.Range(upper_left, lower_right)
xl_range.FormulaArray = table.formula.get_formula(workbook, row, col)
for (row, col), style in self._get_all_styles().items():
r = origin.Offset(1 + row, 1 + col)
if style.bold:
r.Font.Bold = True
if style.excel_number_format is not None:
r.NumberFormat = style.excel_number_format
if style.size is not None:
r.Font.Size = style.size
if style.text_color is not None:
r.Font.Color = _to_bgr(style.text_color)
if style.bg_color is not None:
r.Interior.Color = _to_bgr(style.bg_color)
if style.text_wrap or style.border:
raise Exception("text wrap and border not implemented")
for chart, (row, col) in self.__charts:
top_left = origin.Offset(1 + row, 1 + col)
xl_chart = worksheet.ChartObjects().Add(top_left.Left, top_left.Top, 360, 220).Chart
xl_chart.ChartType = _to_excel_chart_type(chart.type, chart.subtype)
if chart.title:
xl_chart.ChartTitle = chart.title
for series in chart.iter_series(self, row, col):
xl_series = xl_chart.SeriesCollection().NewSeries()
xl_series.Values = "=%s!%s" % (self.name, series["values"].lstrip("="))
if "categories" in series:
xl_series.XValues = "=%s!%s" % (self.name, series["categories"].lstrip("="))
if "name" in series:
xl_series.Name = series["name"]
finally:
xl.ScreenUpdating = screen_updating
xl.Calculation = calculation
if resize_columns:
try:
worksheet.Cells.EntireColumn.AutoFit()
except:
pass
|
Writes worksheet to an Excel Worksheet COM object.
Requires :py:module:`pywin32` to be installed.
:param workbook: xltable.Workbook this sheet belongs to.
:param worksheet: Excel COM Worksheet instance to write to.
:param xl_app: Excel COM Excel Application to write to.
:param bool clear: If a worksheet is provided, clear worksheet before writing.
:param bool rename: If a worksheet is provided, rename self to match the worksheet.
:param bool resize_columns: Resize sheet columns after writing.
|
372,197
|
def content(self):
if not self.payload:
return
if self.payload.is_multipart():
message_bytes = mime_to_bytes(
self.payload, 0).replace(b, b)
boundary = b + self.payload.get_boundary().encode()
temp = message_bytes.split(boundary)
temp.pop(0)
return boundary + boundary.join(temp)
else:
content = self.payload.get_payload()
if isinstance(content, str_cls):
content = content.encode()
return content
|
Function returns the body of the as2 payload as a bytes object
|
372,198
|
def parse(out):
jsonret = []
in_json = False
for ln_ in out.split():
if in ln_:
in_json = True
if in_json:
jsonret.append(ln_)
if in ln_:
in_json = False
return salt.utils.json.loads(.join(jsonret))
|
Extract json from out.
Parameter
out: Type string. The data returned by the
ssh command.
|
372,199
|
def filtered_context(context):
ctx = Context(context.opt)
for resource in context.resources():
if resource.child:
continue
if resource.filtered():
ctx.add(resource)
return ctx
|
Filters a context
This will return a new context with only the resources that
are actually available for use. Uses tags and command line
options to make determination.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.