Unnamed: 0
int64 0
389k
| code
stringlengths 26
79.6k
| docstring
stringlengths 1
46.9k
|
|---|---|---|
372,700
|
def printBlastRecord(record):
for key in sorted(record.__dict__.keys()):
if key not in [, , ]:
print( % (key, record.__dict__[key]))
print( % len(record.alignments))
for i, alignment in enumerate(record.alignments):
print( % (i + 1))
for attr in [, , , , ]:
print( % (attr, getattr(record.descriptions[i], attr)))
print( % (i + 1))
for attr in , , , , :
print( % (attr, getattr(alignment, attr)))
print( % len(alignment.hsps))
for hspIndex, hsp in enumerate(alignment.hsps, start=1):
print( % hspIndex)
printHSP(hsp, )
|
Print a BLAST record.
@param record: A BioPython C{Bio.Blast.Record.Blast} instance.
|
372,701
|
def postpro_fisher(data, report=None):
if not report:
report = {}
data[data < -0.99999999999999] = -1
data[data > 0.99999999999999] = 1
fisher_data = 0.5 * np.log((1 + data) / (1 - data))
report[] = {}
report[][] =
return fisher_data, report
|
Performs fisher transform on everything in data.
If report variable is passed, this is added to the report.
|
372,702
|
def scale_aphi(self, scale_parameter):
lg.info( + str(scale_parameter))
try:
self.a_phi = self.a_phi * scale_parameter
except:
lg.exception("Can't scale a_phi, check that it has been defined ")
|
Scale the spectra by multiplying by linear scaling factor
:param scale_parameter: Linear scaling factor
|
372,703
|
def get_leaf_certificates(certs):
    """Return the leaf certificates from *certs*.

    A leaf certificate is one whose subject common name never appears as
    an issuer common name among the given certificates.

    :param certs: iterable of x509 certificate objects.
    :return: list of the certificates judged to be leaves.
    """
    cn_oid = x509.NameOID.COMMON_NAME
    issuer_names = [cert.issuer.get_attributes_for_oid(cn_oid)
                    for cert in certs]
    return [
        cert for cert in certs
        if cert.subject.get_attributes_for_oid(cn_oid) not in issuer_names
    ]
|
Extracts the leaf certificates from a list of certificates. Leaf
certificates are ones whose subject does not appear as issuer among the
others.
|
372,704
|
def missing(self, field, last=True):
priceprice
if last:
self.append({field: {: }})
else:
self.append({field: {: }})
return self
|
Numeric fields support specific handling for missing fields in a doc.
The missing value can be _last, _first, or a custom value
(that will be used for missing docs as the sort value).
missing('price')
> {"price" : {"missing": "_last" } }
missing('price',False)
> {"price" : {"missing": "_first"} }
|
372,705
|
def n1ql_query(self, query, *args, **kwargs):
if not isinstance(query, N1QLQuery):
query = N1QLQuery(query)
itercls = kwargs.pop(, N1QLRequest)
return itercls(query, self, *args, **kwargs)
|
Execute a N1QL query.
This method is mainly a wrapper around the :class:`~.N1QLQuery`
and :class:`~.N1QLRequest` objects, which contain the inputs
and outputs of the query.
Using an explicit :class:`~.N1QLQuery`::
query = N1QLQuery(
'SELECT airportname FROM `travel-sample` WHERE city=$1', "Reno")
# Use this option for often-repeated queries
query.adhoc = False
for row in cb.n1ql_query(query):
print 'Name: {0}'.format(row['airportname'])
Using an implicit :class:`~.N1QLQuery`::
for row in cb.n1ql_query(
'SELECT airportname, FROM `travel-sample` WHERE city="Reno"'):
print 'Name: {0}'.format(row['airportname'])
With the latter form, *args and **kwargs are forwarded to the
N1QL Request constructor, optionally selected in kwargs['iterclass'],
otherwise defaulting to :class:`~.N1QLRequest`.
:param query: The query to execute. This may either be a
:class:`.N1QLQuery` object, or a string (which will be
implicitly converted to one).
:param kwargs: Arguments for :class:`.N1QLRequest`.
:return: An iterator which yields rows. Each row is a dictionary
representing a single result
|
372,706
|
def set_assessments(self, assessment_ids=None):
if assessment_ids is None:
raise NullArgument()
metadata = Metadata(**settings.METADATA[])
if metadata.is_read_only():
raise NoAccess()
if self._is_valid_input(assessment_ids, metadata, array=True):
for assessment_id in assessment_ids:
self._my_map[].append(str(assessment_id))
else:
raise InvalidArgument
|
Sets the assessments.
arg: assessmentIds (osid.id.Id): the assessment Ids
raise: INVALID_ARGUMENT - assessmentIds is invalid
raise: NullArgument - assessmentIds is null
raise: NoAccess - metadata.is_read_only() is true
compliance: mandatory - This method must be implemented.
|
372,707
|
def make_class(name, attrs, bases=(object,), **attributes_arguments):
    """Quickly create a new attrs class called *name* with *attrs*.

    :param str name: The name for the new class.
    :param attrs: A list of attribute names, or a dict mapping names to
        attribute definitions.  For a list (or ordered dict) the
        declaration order follows the iteration order.
    :param tuple bases: Base classes of the new class.
    :param attributes_arguments: Forwarded unmodified to :func:`attr.s`.
    :return: The freshly built class.
    :rtype: type
    """
    if isinstance(attrs, dict):
        cls_dict = attrs
    elif isinstance(attrs, (list, tuple)):
        cls_dict = {attr_name: attrib() for attr_name in attrs}
    else:
        raise TypeError("attrs argument must be a dict or a list.")
    # __attrs_post_init__ must live on the type itself, not be treated as
    # an attribute definition, so pull it out before building the class.
    post_init = cls_dict.pop("__attrs_post_init__", None)
    body = {} if post_init is None else {"__attrs_post_init__": post_init}
    type_ = type(name, bases, body)
    # Best effort: pin __module__ to the caller's module so reprs and
    # pickling behave; some interpreters lack sys._getframe.
    try:
        caller_globals = sys._getframe(1).f_globals
        type_.__module__ = caller_globals.get("__name__", "__main__")
    except (AttributeError, ValueError):
        pass
    return _attrs(these=cls_dict, **attributes_arguments)(type_)
|
A quick way to create a new class called *name* with *attrs*.
:param name: The name for the new class.
:type name: str
:param attrs: A list of names or a dictionary of mappings of names to
attributes.
If *attrs* is a list or an ordered dict (:class:`dict` on Python 3.6+,
:class:`collections.OrderedDict` otherwise), the order is deduced from
the order of the names or attributes inside *attrs*. Otherwise the
order of the definition of the attributes is used.
:type attrs: :class:`list` or :class:`dict`
:param tuple bases: Classes that the new class will subclass.
:param attributes_arguments: Passed unmodified to :func:`attr.s`.
:return: A new class with *attrs*.
:rtype: type
.. versionadded:: 17.1.0 *bases*
.. versionchanged:: 18.1.0 If *attrs* is ordered, the order is retained.
|
372,708
|
def del_edges(self, edges, *args, **kwargs):
    """Remove every edge in *edges* from the graph.

    Extra positional and keyword arguments are forwarded to
    ``DictGraph.del_edge`` for each removal.

    Arguments:
    - edges(iterable) Sequence of edges to remove from the ``DictGraph``.
    """
    remove = self.del_edge
    for current_edge in edges:
        remove(current_edge, *args, **kwargs)
|
Removes edges from the graph. Takes optional arguments for
``DictGraph.del_edge``.
Arguments:
- edges(iterable) Sequence of edges to be removed from the
``DictGraph``.
|
372,709
|
def preview(src_path):
    """Generate previews of *src_path*, one per page.

    Sketch files are tried with the sketch previewer first; anything it
    cannot handle falls back to Quick Look.

    :returns: A list of decoded preview paths, one for each page.
    """
    results = []
    if sketch.is_sketchfile(src_path):
        results = sketch.preview(src_path)
    if not results:
        results = quicklook.preview(src_path)
    return [safely_decode(item) for item in results]
|
Generates a preview of src_path in the requested format.
:returns: A list of preview paths, one for each page.
|
372,710
|
def write_info_file(tensorboard_info):
    """Write TensorBoardInfo to the current process's info file.

    This should be called by `main` once the server is ready. When the
    server shuts down, `remove_info_file` should be called.

    Args:
      tensorboard_info: A valid `TensorBoardInfo` object.

    Raises:
      ValueError: If any field on `tensorboard_info` is not of the
        correct type (raised by the serializer).
    """
    serialized = _info_to_string(tensorboard_info)
    with open(_get_info_file_path(), "w") as outfile:
        outfile.write("%s\n" % serialized)
|
Write TensorBoardInfo to the current process's info file.
This should be called by `main` once the server is ready. When the
server shuts down, `remove_info_file` should be called.
Args:
tensorboard_info: A valid `TensorBoardInfo` object.
Raises:
ValueError: If any field on `info` is not of the correct type.
|
372,711
|
def fraction(self, value: float) -> :
raise_not_number(value)
self.maximum = .format(value)
return self
|
Set the fraction of free space to use.
|
372,712
|
def get_app_prefs(app=None):
if app is None:
with Frame(stepback=1) as frame:
app = frame.f_globals[].split()[0]
prefs = get_prefs()
if app not in prefs:
return {}
return prefs[app]
|
Returns a dictionary with preferences for a certain app/module.
:param str|unicode app:
:rtype: dict
|
372,713
|
def _narrow_states(node, old_state, new_state, previously_widened_state):
l.debug(, previously_widened_state.ip)
s = previously_widened_state.copy()
narrowing_occurred = False
return s, narrowing_occurred
|
Try to narrow the state!
:param old_state:
:param new_state:
:param previously_widened_state:
:returns: The narrowed state, and whether a narrowing has occurred
|
372,714
|
def get_pastml_marginal_prob_file(method, model, column):
    """Get the filename where PastML marginal probabilities are saved.

    The file lives inside the work_dir given to the pastml_pipeline
    method.

    :param method: str, the ancestral state prediction method used.
    :param model: str, the state evolution model used.
    :param column: str, the column whose ancestral states are
        reconstructed.
    :return: str filename, or None when *method* is not marginal.
    """
    if not is_marginal(method):
        return None
    state, _method = get_column_method(column, method)
    return PASTML_MARGINAL_PROBS_TAB.format(state=state, model=model)
|
Get the filename where the PastML marginal probabilities of node states are saved (will be None for non-marginal methods).
This file is inside the work_dir that can be specified for the pastml_pipeline method.
:param method: str, the ancestral state prediction method used by PASTML.
:param model: str, the state evolution model used by PASTML.
:param column: str, the column for which ancestral states are reconstructed with PASTML.
:return: str, filename or None if the method is not marginal.
|
372,715
|
def pad_positive_wrapper(fmtfct):
def check_and_append(*args, **kwargs):
result = fmtfct(*args, **kwargs)
if fmtfct.parens and not result.endswith():
result +=
return result
return check_and_append
|
Ensure that numbers are aligned in table by appending a blank space to postive values if 'parenthesis' are
used to denote negative numbers
|
372,716
|
def poly_energies(samples_like, poly):
msg = ("poly_energies is deprecated and will be removed in dimod 0.9.0."
"In the future, use BinaryPolynomial.energies")
warnings.warn(msg, DeprecationWarning)
return BinaryPolynomial(poly, ).energies(samples_like)
|
Calculates energy of samples from a higher order polynomial.
Args:
sample (samples_like):
A collection of raw samples. `samples_like` is an extension of
NumPy's array_like structure. See :func:`.as_samples`.
poly (dict):
Polynomial as a dict of form {term: bias, ...}, where `term` is a
tuple of variables and `bias` the associated bias. Variable
labeling/indexing of terms in poly dict must match that of the
sample(s).
Returns:
list/:obj:`numpy.ndarray`: The energy of the sample(s).
|
372,717
|
def register(email):
data = __utils__[](
.format(_base_url()),
method=,
data=salt.utils.json.dumps({
: email,
: ,
}),
status=True,
decode=True,
decode_type=,
header_dict={
: ,
},
)
status = data[]
if six.text_type(status).startswith() or six.text_type(status).startswith():
raise CommandExecutionError(
.format(data[])
)
return data.get(, {})
|
Register a new user account
CLI Example:
.. code-block:: bash
salt-run venafi.register email@example.com
|
372,718
|
def _check_location_part(cls, val, regexp):
    """Deprecated. See CourseLocator._check_location_part.

    Emits this class's deprecation warning, then delegates validation of
    *val* against *regexp* to ``CourseLocator._check_location_part``.
    """
    cls._deprecation_warning()
    return CourseLocator._check_location_part(val, regexp)
|
Deprecated. See CourseLocator._check_location_part
|
372,719
|
def choose_form(self, number=None, xpath=None, name=None, **kwargs):
id_ = kwargs.pop(, None)
if id_ is not None:
try:
self._lxml_form = self.select( % id_).node()
except IndexError:
raise DataNotFound("There is no form with id: %s" % id_)
elif name is not None:
try:
self._lxml_form = self.select(
% name).node()
except IndexError:
raise DataNotFound( % name)
elif number is not None:
try:
self._lxml_form = self.tree.forms[number]
except IndexError:
raise DataNotFound( % number)
elif xpath is not None:
try:
self._lxml_form = self.select(xpath).node()
except IndexError:
raise DataNotFound(
% xpath)
else:
raise GrabMisuseError(
)
|
Set the default form.
:param number: number of form (starting from zero)
:param id: value of "id" attribute
:param name: value of "name" attribute
:param xpath: XPath query
:raises: :class:`DataNotFound` if form not found
:raises: :class:`GrabMisuseError`
if method is called without parameters
Selected form will be available via `form` attribute of `Grab`
instance. All form methods will work with default form.
Examples::
# Select second form
g.choose_form(1)
# Select by id
g.choose_form(id="register")
# Select by name
g.choose_form(name="signup")
# Select by xpath
g.choose_form(xpath='//form[contains(@action, "/submit")]')
|
372,720
|
def authorize(*args, **kwargs):
if request.method == :
client = Client.query.filter_by(
client_id=kwargs.get()
).first()
if not client:
abort(404)
scopes = current_oauth2server.scopes
ctx = dict(
client=client,
oauth_request=kwargs.get(),
scopes=[scopes[x] for x in kwargs.get(, [])],
)
return render_template(, **ctx)
confirm = request.form.get(, )
return confirm ==
|
View for rendering authorization request.
|
372,721
|
def validate_tzinfo(dummy, value):
    """Validate the tzinfo option.

    :param dummy: ignored (kept for the validator calling convention).
    :param value: candidate value; must be None or a datetime.tzinfo.
    :raises TypeError: if *value* is neither None nor a tzinfo instance.
    :return: *value*, unchanged.
    """
    if value is None or isinstance(value, datetime.tzinfo):
        return value
    raise TypeError("%s must be an instance of datetime.tzinfo" % value)
|
Validate the tzinfo option
|
372,722
|
def cli():
ch = logging.StreamHandler()
ch.setFormatter(logging.Formatter(
,
datefmt="%Y-%m-%d %H:%M:%S"
))
logger.addHandler(ch)
import argparse
parser = argparse.ArgumentParser(description="Search for hosts with a \
response to that matches ")
parser.add_argument(, help=)
parser.add_argument(, , help=,
default=)
parser.add_argument(, , help=,
dest=, default=)
parser.add_argument(, , help=, action=)
args = parser.parse_args()
print()
result = survey(**vars(args))
print(.format(len(result), if len(result)!=1 else ,
if args.pattern else , args.pattern, args.network))
for x in result:
print(x.hostname)
|
Command line interface
|
372,723
|
def _refresh_authentication_token(self):
if self.retry == self._MAX_RETRIES:
raise GeocoderAuthenticationFailure(
% self.retry
)
token_request_arguments = {
: self.username,
: self.password,
: self.referer,
: self.token_lifetime,
:
}
url = "?".join((self.auth_api, urlencode(token_request_arguments)))
logger.debug(
"%s._refresh_authentication_token: %s",
self.__class__.__name__, url
)
self.token_expiry = int(time()) + self.token_lifetime
response = self._base_call_geocoder(url)
if not in response:
raise GeocoderAuthenticationFailure(
%
(url, json.dumps(response))
)
self.retry = 0
self.token = response[]
|
POST to ArcGIS requesting a new token.
|
372,724
|
def Reload(self):
logger.info()
self._reload_called = True
with self._accumulators_mutex:
items = list(self._accumulators.items())
names_to_delete = set()
for name, accumulator in items:
try:
accumulator.Reload()
except (OSError, IOError) as e:
logger.error("Unable to reload accumulator : %s", name, e)
except directory_watcher.DirectoryDeletedError:
names_to_delete.add(name)
with self._accumulators_mutex:
for name in names_to_delete:
logger.warn("Deleting accumulator ", name)
del self._accumulators[name]
logger.info()
return self
|
Call `Reload` on every `EventAccumulator`.
|
372,725
|
def update(self, params):
dev_info = self.json_state.get()
dev_info.update({k: params[k] for k in params if dev_info.get(k)})
|
Update the dev_info data from a dictionary.
Only updates if it already exists in the device.
|
372,726
|
def query_by_grader(self, grader_id, end_time=None, start_time=None):
    """Query by grader: list grade change events for a given grader.

    :param grader_id: id of the grader whose grade changes to list.
    :param end_time: optional upper bound on event time.
    :param start_time: optional lower bound on event time.
    :return: result of the paginated GET request.
    """
    path = {"grader_id": grader_id}
    data = {}
    # Only forward the time bounds the caller actually supplied.
    params = {
        key: val
        for key, val in (("start_time", start_time), ("end_time", end_time))
        if val is not None
    }
    self.logger.debug("GET /api/v1/audit/grade_change/graders/{grader_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("GET", "/api/v1/audit/grade_change/graders/{grader_id}".format(**path), data=data, params=params, all_pages=True)
|
Query by grader.
List grade change events for a given grader.
|
372,727
|
def visit_module(self, node):
if not node.file_stream:
return
isFirstLineOfComment = True
isDocString = False
lines = node.stream().readlines()
for linenum, line in enumerate(lines):
if line.strip().startswith(b):
isDocString = not isDocString
if isDocString:
continue
matchedComment = COMMENT_RGX.search(line)
if matchedComment:
if isFirstLineOfComment:
comment = matchedComment.group()
if (comment.startswith(b"
not comment.startswith(b"
self.add_message(, line=linenum + 1, node=node)
strippedComment = comment.lstrip(b"
if strippedComment:
firstLetter = strippedComment[0:1]
if (firstLetter.isalpha() and
not firstLetter.isupper()):
self.add_message(, line=linenum + 1, node=node)
isFirstLineOfComment = False
else:
isFirstLineOfComment = True
|
A interface will be called when visiting a module.
@param node: node of current module
|
372,728
|
def reporter(metadata, analysistype, reportpath):
header =
data = str()
for sample in metadata:
try:
if sample[analysistype].blastresults != :
if sample.general.closestrefseqgenus == :
closestref = list(sample[analysistype].blastresults.items())[0][0]
coregenes = list(sample[analysistype].blastresults.items())[0][1][0]
try:
ref = glob(os.path.join(sample[analysistype].targetpath,
.format(fasta=closestref)))[0]
except IndexError:
closestref = closestref.replace(, )
ref = glob(os.path.join(sample[analysistype].targetpath,
.format(fasta=closestref)))[0]
totalcore = 0
for _ in SeqIO.parse(ref, ):
totalcore += 1
sample[analysistype].targetspresent = coregenes
sample[analysistype].totaltargets = totalcore
sample[analysistype].coreresults = .format(cg=coregenes,
tc=totalcore)
row = .format(sn=sample.name,
cr=closestref,
cg=coregenes,
tc=totalcore)
with open(os.path.join(sample[analysistype].reportdir,
.format(sn=sample.name,
at=analysistype)), ) as report:
report.write(header)
report.write(row)
data += row
else:
sample[analysistype].targetspresent =
sample[analysistype].totaltargets =
sample[analysistype].coreresults =
except KeyError:
sample[analysistype].targetspresent =
sample[analysistype].totaltargets =
sample[analysistype].coreresults =
with open(os.path.join(reportpath, ), ) as report:
report.write(header)
report.write(data)
|
Create the core genome report
:param metadata: type LIST: List of metadata objects
:param analysistype: type STR: Current analysis type
:param reportpath: type STR: Absolute path to folder in which the reports are to be created
:return:
|
372,729
|
def create(gandi):
contact = {}
for field, label, checks in FIELDS:
ask_field(gandi, contact, field, label, checks)
default_pwd = randomstring(16)
contact[] = click.prompt(,
hide_input=True,
confirmation_prompt=True,
default=default_pwd)
result = True
while result:
result = gandi.contact.create_dry_run(contact)
for err in result:
gandi.echo(err[])
field = err[]
if field not in FIELDS_POSITION:
return
desc = FIELDS[FIELDS_POSITION.get(field)]
ask_field(gandi, contact, *desc)
result = gandi.contact.create(contact)
handle = result[]
gandi.echo(
)
gandi.echo( %
handle)
webbrowser.open()
time.sleep(1)
apikey = None
while not apikey:
apikey = click.prompt()
caller = gandi.get()
if caller:
gandi.echo(
)
gandi.echo( % apikey)
else:
gandi.echo()
gandi.configure(True, , apikey)
return handle
|
Create a new contact.
|
372,730
|
def create_sparse_mapping(id_array, unique_ids=None):
    """Build a sparse CSR matrix mapping rows of *id_array* to column ids.

    ``mapping[i, j] == 1`` exactly when ``id_array[i] == unique_ids[j]``;
    every other entry is zero.  Rows whose id does not appear in
    *unique_ids* are all-zero.

    Parameters
    ----------
    id_array : 1D ndarray of ints.
        Each element is the id associated with the corresponding row.
    unique_ids : 1D ndarray of ints, or None, optional.
        Column ids, in the desired column order.  If None, derived from
        `id_array` in order of first appearance.

    Returns
    -------
    mapping : 2D scipy.sparse CSR matrix of shape
        (id_array.size, unique_ids.size), containing only zeros and ones.
    """
    if unique_ids is None:
        unique_ids = get_original_order_unique_ids(id_array)
    assert isinstance(unique_ids, np.ndarray)
    assert isinstance(id_array, np.ndarray)
    assert unique_ids.ndim == 1
    assert id_array.ndim == 1
    # Mask of rows whose id has a matching column at all.
    has_column = np.in1d(id_array, unique_ids)
    rows = np.arange(id_array.size)[has_column]
    # Map each unique id to its column position.
    column_of = {uid: pos for pos, uid in enumerate(unique_ids)}
    cols = np.array([column_of[i] for i in id_array[has_column]])
    ones = np.ones(rows.size, dtype=int)
    return csr_matrix((ones, (rows, cols)),
                      shape=(id_array.size, unique_ids.size))
|
Will create a scipy.sparse compressed-sparse-row matrix that maps
each row represented by an element in id_array to the corresponding
value of the unique ids in id_array.
Parameters
----------
id_array : 1D ndarray of ints.
Each element should represent some id related to the corresponding row.
unique_ids : 1D ndarray of ints, or None, optional.
If not None, each element should be present in `id_array`. The elements
in `unique_ids` should be present in the order in which one wishes them
to appear in the columns of the resulting sparse array. For the
`row_to_obs` and `row_to_mixers` mappings, this should be the order of
appearance in `id_array`. If None, then the unique_ids will be created
from `id_array`, in the order of their appearance in `id_array`.
Returns
-------
mapping : 2D scipy.sparse CSR matrix.
Will contain only zeros and ones. `mapping[i, j] == 1` where
`id_array[i] == unique_ids[j]`. The id's corresponding to each column
are given by `unique_ids`. The rows correspond to the elements of
`id_array`.
|
372,731
|
def main(argv=None):
    """Script execution.

    The project repo is cloned to a temporary directory and the desired
    branch, tag, or commit is checked out.  The application is then
    installed into a self-contained virtualenv environment, optionally
    running the test suite inside it.

    :param argv: command-line arguments (defaults to sys.argv).
    :return: 0 on success.
    """
    @contextmanager
    def tmpdir():
        # Temporary clone directory, removed even if a step fails.
        path = mkdtemp()
        try:
            yield path
        finally:
            rmtree(path)
        return
    def test():
        # NOTE: closes over `path` and `pip`, which are bound later in
        # main's body — only call after the virtualenv has been created.
        install = "{:s} install -r requirements-test.txt"
        check_call(install.format(pip).split())
        pytest = join(path, "bin", "py.test")
        test = "{:s} test/".format(pytest)
        check_call(test.split())
        # Leave the virtualenv clean of test-only dependencies.
        uninstall = "{:s} uninstall -y -r requirements-test.txt"
        check_call(uninstall.format(pip).split())
        return
    args = _cmdline(argv)
    # Final virtualenv location: <root>/<name>.
    path = join(abspath(args.root), args.name)
    with tmpdir() as tmp:
        clone = "git clone {:s} {:s}".format(args.repo, tmp)
        check_call(clone.split())
        chdir(tmp)
        checkout = "git checkout {:s}".format(args.checkout)
        check_call(checkout.split())
        virtualenv = "virtualenv {:s}".format(path)
        check_call(virtualenv.split())
        pip = join(path, "bin", "pip")
        install = "{:s} install -U -r requirements.txt .".format(pip)
        check_call(install.split())
        if args.test:
            test()
        return 0
|
Script execution.
The project repo will be cloned to a temporary directory, and the desired
branch, tag, or commit will be checked out. Then, the application will be
installed into a self-contained virtualenv environment.
|
372,732
|
def list_projects(self, dataset_name):
url = self.url() + "/nd/resource/dataset/{}".format(dataset_name)\
+ "/project/"
req = self.remote_utils.get_url(url)
if req.status_code is not 200:
raise RemoteDataNotFoundError(.format(req.text))
else:
return req.json()
|
Lists a set of projects related to a dataset.
Arguments:
dataset_name (str): Dataset name to search projects for
Returns:
dict: Projects found based on dataset query
|
372,733
|
def chunk(iterable, size):
    """Yield successive tuples of at most *size* items from *iterable*.

    chunk('ABCDEFG', 3) --> ABC DEF G

    The final tuple may be shorter than *size*.  A non-positive *size*
    yields nothing.  (Previously a negative size looped forever emitting
    empty tuples, because ``range(size)`` was empty while ``size`` stayed
    truthy.)

    :param iterable: any iterable to split into chunks.
    :param size: maximum number of items per chunk.
    """
    iterator = iter(iterable)
    # `size > 0` (not just truthiness) guards against the negative-size
    # infinite loop; the generator exits via `return` on exhaustion.
    while size > 0:
        result = []
        try:
            for _ in range(size):
                result.append(next(iterator))
            yield tuple(result)
        except StopIteration:
            if result:
                yield tuple(result)
            return
|
chunk('ABCDEFG', 3) --> ABC DEF G
|
372,734
|
def _validate_relations(relations, services, add_error):
if not relations:
return
for relation in relations:
if not islist(relation):
add_error(.format(relation))
continue
relation_str = .join(.format(i) for i in relation)
for endpoint in relation:
if not isstring(endpoint):
add_error(
.format(relation_str, endpoint))
continue
try:
service, _ = endpoint.split()
except ValueError:
service = endpoint
if service not in services:
add_error(
.format(relation_str, endpoint, service))
|
Validate relations, ensuring that the endpoints exist.
Receive the relations and services bundle sections.
Use the given add_error callable to register validation error.
|
372,735
|
def sina_xml_to_url_list(xml_data):
rawurl = []
dom = parseString(xml_data)
for node in dom.getElementsByTagName():
url = node.getElementsByTagName()[0]
rawurl.append(url.childNodes[0].data)
return rawurl
|
str->list
Convert XML to URL List.
From Biligrab.
|
372,736
|
def find_carbon_sources(model):
    """Find all active carbon source reactions.

    Parameters
    ----------
    model : Model
        A genome-scale metabolic model.

    Returns
    -------
    list
        The medium reactions with carbon input flux; empty when the
        model cannot be optimized.
    """
    try:
        model.slim_optimize(error_value=None)
    except OptimizationError:
        return []
    medium_reactions = model.reactions.get_by_any(list(model.medium))
    return [
        rxn for rxn in medium_reactions
        if total_components_flux(rxn.flux, reaction_elements(rxn),
                                 consumption=True) > 0
    ]
|
Find all active carbon source reactions.
Parameters
----------
model : Model
A genome-scale metabolic model.
Returns
-------
list
The medium reactions with carbon input flux.
|
372,737
|
def set_property(self, key, value):
    """Update a single entry in the properties dict and persist it.

    :param key: property name to set.
    :param value: new value for the property.
    """
    self.properties.update({key: value})
    self.sync_properties()
|
Update only one property in the dict
|
372,738
|
def canonical_url(app, pagename, templatename, context, doctree):
    """Build the canonical URL for a page.

    Appends the builder's target URI for *pagename* to the base URL in
    ``html_context["canonical_url"]`` and stores the result under
    ``html_context["page_canonical_url"]``.  Does nothing when no base
    URL is configured.
    """
    base = context.get("canonical_url")
    if base:
        context["page_canonical_url"] = base + app.builder.get_target_uri(pagename)
|
Build the canonical URL for a page. Appends the path for the
page to the base URL specified by the
``html_context["canonical_url"]`` config and stores it in
``html_context["page_canonical_url"]``.
|
372,739
|
def download(self, path, retry=5, timeout=10,
chunk_size=PartSize.DOWNLOAD_MINIMUM_PART_SIZE, wait=True,
overwrite=False):
if not overwrite and os.path.exists(path):
raise LocalFileAlreadyExists(message=path)
extra = {: self.__class__.__name__, : {
: self.id,
: path,
: overwrite,
: retry,
: timeout,
: chunk_size,
: wait,
}}
logger.info(, extra=extra)
info = self.download_info()
download = Download(
url=info.url, file_path=path, retry_count=retry, timeout=timeout,
part_size=chunk_size, api=self._api
)
if wait:
download.start()
download.wait()
else:
return download
|
Downloads the file and returns a download handle.
Download will not start until .start() method is invoked.
:param path: Full path to the new file.
:param retry: Number of retries if error occurs during download.
:param timeout: Timeout for http requests.
:param chunk_size: Chunk size in bytes.
:param wait: If true will wait for download to complete.
:param overwrite: If True will silently overwrite existing file.
:return: Download handle.
|
372,740
|
def bsp_resize(node: tcod.bsp.BSP, x: int, y: int, w: int, h: int) -> None:
    """Set the position and size of *node* in place.

    .. deprecated:: 2.0
        Assign directly to :any:`BSP` attributes instead.
    """
    node.x, node.y = x, y
    node.width, node.height = w, h
|
.. deprecated:: 2.0
Assign directly to :any:`BSP` attributes instead.
|
372,741
|
def send_error(self, code, message=None):
try:
short, long = self.responses[code]
except KeyError:
short, long = ,
if message is None:
message = short
explain = long
self.log_error("code %d, message %s", code, message)
content = (self.error_message_format %
{: code, : _quote_html(message), : explain})
self.send_response(code, message)
self.send_header("Content-Type", self.error_content_type)
self.send_header(, )
self.end_headers()
if self.command != and code >= 200 and code not in (204, 304):
self.wfile.write(content)
|
Send and log an error reply.
Arguments are the error code, and a detailed message.
The detailed message defaults to the short entry matching the
response code.
This sends an error response (so it must be called before any
output has been generated), logs the error, and finally sends
a piece of HTML explaining the error to the user.
|
372,742
|
def get_insertion(cls) -> str:
indent = 1
blanks = * (indent+4)
subs = []
for name in cls.get_modelnames():
subs.extend([
f,
f,
f,
f,
f,
f,
f,
f])
model = importtools.prepare_model(name)
subs.append(cls.get_modelinsertion(model, indent + 4))
subs.extend([
f,
f,
f,
f,
f
])
return .join(subs)
|
Return the complete string to be inserted into the string of the
template file.
>>> from hydpy.auxs.xmltools import XSDWriter
>>> print(XSDWriter.get_insertion()) # doctest: +ELLIPSIS
<element name="arma_v1"
substitutionGroup="hpcb:sequenceGroup"
type="hpcb:arma_v1Type"/>
<BLANKLINE>
<complexType name="arma_v1Type">
<complexContent>
<extension base="hpcb:sequenceGroupType">
<sequence>
<element name="fluxes"
minOccurs="0">
<complexType>
<sequence>
<element
name="qin"
minOccurs="0"/>
...
</complexType>
</element>
</sequence>
</extension>
</complexContent>
</complexType>
<BLANKLINE>
|
372,743
|
def set_run_on_node_mask(nodemask):
    """Runs the current thread and its children only on nodes specified in nodemask.

    They will not migrate to CPUs of other nodes until the node affinity is
    reset with a new call to L{set_run_on_node_mask}.

    @param nodemask: node mask
    @type nodemask: C{set}
    @raise RuntimeError: if the underlying libnuma call fails.
    """
    # Convert the Python set into libnuma's nodemask_t representation.
    mask = set_to_numa_nodemask(nodemask)
    tmp = bitmask_t()
    # bitmask_t carries a raw pointer to the mask words plus a bit count;
    # `mask` must stay alive for the duration of the call (it does — it
    # is a local referenced until the function returns).
    tmp.maskp = cast(byref(mask), POINTER(c_ulong))
    tmp.size = sizeof(nodemask_t) * 8
    if libnuma.numa_run_on_node_mask(byref(tmp)) < 0:
        raise RuntimeError()
|
Runs the current thread and its children only on nodes specified in nodemask.
They will not migrate to CPUs of other nodes until the node affinity is
reset with a new call to L{set_run_on_node_mask}.
@param nodemask: node mask
@type nodemask: C{set}
|
372,744
|
def get(self, resource_id=None):
if request.path.endswith():
return self._meta()
if resource_id is None:
error_message = is_valid_method(self.__model__)
if error_message:
raise BadRequestException(error_message)
if in request.args:
return self._export(self._all_resources())
return flask.jsonify({
self.__json_collection_name__: self._all_resources()
})
else:
resource = self._resource(resource_id)
error_message = is_valid_method(self.__model__, resource)
if error_message:
raise BadRequestException(error_message)
return jsonify(resource)
|
Return an HTTP response object resulting from an HTTP GET call.
If *resource_id* is provided, return just the single resource.
Otherwise, return the full collection.
:param resource_id: The value of the resource's primary key
|
372,745
|
def mangleIR(data, ignore_errors=False):
try:
except:
if not ignore_errors:
raise
|
Mangle a raw Kira data packet into shorthand
|
372,746
|
def initialize_path(self, path_num=None):
    """Initialize consumer for the next path.

    Resets ``self.state`` to a shallow copy of the initial state.

    :param path_num: index of the upcoming path (unused here).
    :return: the freshly reset state.
    """
    fresh_state = copy(self.initial_state)
    self.state = fresh_state
    return fresh_state
|
initialize consumer for next path
|
372,747
|
def create(obj: PersistedObject, obj_type: Type[Any], arg_name: str):
return MissingMandatoryAttributeFiles( + str(obj) +
+ get_pretty_type_str(obj_type) +
\
)
|
Helper method provided because we actually can't put that in the constructor, it creates a bug in Nose tests
https://github.com/nose-devs/nose/issues/725
:param obj:
:param obj_type:
:param arg_name:
:return:
|
372,748
|
def write(
    contents: str,
    path: Union[str, pathlib.Path],
    verbose: bool = False,
    logger_func=None,
) -> bool:
    """Writes ``contents`` to ``path``.

    Checks if ``path`` already exists and only writes out new contents if
    the old contents do not match.  Creates any intermediate missing
    directories.

    :param contents: the file contents to write
    :param path: the path to write to
    :param verbose: whether to print output
    :param logger_func: optional callable used instead of ``print``
    :return: True if the file was written, False if it was preserved
    """
    print_func = logger_func or print
    path = pathlib.Path(path)
    if path.exists():
        # Skip the write (and preserve mtime) when nothing changed.
        if path.read_text() == contents:
            if verbose:
                print_func("preserved {}".format(path))
            return False
        action = "rewrote {}"
    else:
        path.parent.mkdir(parents=True, exist_ok=True)
        action = "wrote {}"
    path.write_text(contents)
    if verbose:
        print_func(action.format(path))
    return True
|
Writes ``contents`` to ``path``.
Checks if ``path`` already exists and only write out new contents if the
old contents do not match.
Creates any intermediate missing directories.
:param contents: the file contents to write
:param path: the path to write to
:param verbose: whether to print output
|
372,749
|
def init_gl(self):
background = [.99, .99, .99, 1.0]
if in self.kwargs:
try:
background = to_rgba(self.kwargs[])
background = background.astype(np.float64) / 255.0
except BaseException:
log.error(,
exc_info=True)
self._gl_set_background(background)
self._gl_enable_depth(self.scene)
self._gl_enable_color_material()
self._gl_enable_blending()
self._gl_enable_smooth_lines()
self._gl_enable_lighting(self.scene)
|
Perform the magic incantations to create an
OpenGL scene using pyglet.
|
372,750
|
def doubleprox_dc(x, y, f, phi, g, K, niter, gamma, mu, callback=None):
r
primal_space = f.domain
dual_space = g.domain
if phi.domain != primal_space:
raise ValueError(
.format(primal_space, phi.domain))
if K.domain != primal_space:
raise ValueError(
.format(primal_space, K.domain))
if K.range != dual_space:
raise ValueError(
.format(dual_space, K.range))
g_convex_conj = g.convex_conj
for _ in range(niter):
f.proximal(gamma)(x.lincomb(1, x,
gamma, K.adjoint(y) - phi.gradient(x)),
out=x)
g_convex_conj.proximal(mu)(y.lincomb(1, y, mu, K(x)), out=y)
if callback is not None:
callback(x)
|
r"""Double-proxmial gradient d.c. algorithm of Banert and Bot.
This algorithm solves a problem of the form ::
min_x f(x) + phi(x) - g(Kx).
Parameters
----------
x : `LinearSpaceElement`
Initial primal guess, updated in-place.
y : `LinearSpaceElement`
Initial dual guess, updated in-place.
f : `Functional`
Convex functional. Needs to implement ``g.proximal``.
phi : `Functional`
Convex functional. Needs to implement ``phi.gradient``.
Convergence can be guaranteed if the gradient is Lipschitz continuous.
g : `Functional`
Convex functional. Needs to implement ``h.convex_conj.proximal``.
K : `Operator`
Linear operator. Needs to implement ``K.adjoint``
niter : int
Number of iterations.
gamma : positive float
Stepsize in the primal updates.
mu : positive float
Stepsize in the dual updates.
callback : callable, optional
Function called with the current iterate after each iteration.
Notes
-----
This algorithm is proposed in `[BB2016]
<https://arxiv.org/abs/1610.06538>`_ and solves the d.c. problem
.. math ::
\min_x f(x) + \varphi(x) - g(Kx)
together with its Toland dual
.. math ::
\min_y g^*(y) - (f + \varphi)^*(K^* y).
The iterations are given by
.. math ::
x_{n+1} &= \mathrm{Prox}_{\gamma f} (x_n + \gamma (K^* y_n
- \nabla \varphi(x_n))), \\
y_{n+1} &= \mathrm{Prox}_{\mu g^*} (y_n + \mu K x_{n+1}).
To guarantee convergence, the parameter :math:`\gamma` must satisfy
:math:`0 < \gamma < 2/L` where :math:`L` is the Lipschitz constant of
:math:`\nabla \varphi`.
References
----------
[BB2016] Banert, S, and Bot, R I. *A general double-proximal gradient
algorithm for d.c. programming*. arXiv:1610.06538 [math.OC] (2016).
See also
--------
dca :
Solver with subgradient steps for all the functionals.
prox_dca :
Solver with a proximal step for ``f`` and a subgradient step for ``g``.
|
372,751
|
def write_to(self, group, append=False):
data = self.data
if append is True:
try:
original = read_properties(group)
data = original + data
except EOFError:
pass
data = pickle.dumps(data).replace(b, b)
group[][...] = np.void(data)
|
Writes the properties to a `group`, or append it
|
372,752
|
def horizontal_headers(self, value):
if value is not None:
assert type(value) is OrderedDict, " attribute: type is not !".format(
"horizontal_headers", value)
self.__horizontal_headers = value
|
Setter for **self.__horizontal_headers** attribute.
:param value: Attribute value.
:type value: OrderedDict
|
372,753
|
def getLocalTime(date, time, *args, **kwargs):
if time is not None:
return getLocalDateAndTime(date, time, *args, **kwargs)[1]
|
Get the time in the local timezone from date and time
|
372,754
|
def activate(self):
payload = {
: ,
: ,
: self.scene_service
}
result = self.vera_request(**payload)
logger.debug("activate: "
"result of vera_request with payload %s: %s",
payload, result.text)
self._active = True
|
Activate a Vera scene.
This will call the Vera api to activate a scene.
|
372,755
|
def start(self, name):
d = self.boatd.post({: name}, endpoint=)
current = d.get()
if current is not None:
return .format(current)
else:
return
|
End the current behaviour and run a named behaviour.
:param name: the name of the behaviour to run
:type name: str
|
372,756
|
def getCSVReader(data, reader_type=csv.DictReader):
f = StringIO(data[:-4])
return reader_type(f)
|
Take a Rave CSV output ending with a line with just EOF on it and return a DictReader
|
372,757
|
def frames(self, most_recent=False):
if most_recent:
for i in xrange(4):
self._cap.grab()
for i in range(1):
if self._adjust_exposure:
try:
command = .format(self._device_id)
FNULL = open(os.devnull, )
subprocess.call(shlex.split(command), stdout=FNULL, stderr=subprocess.STDOUT)
except:
pass
ret, frame = self._cap.read()
rgb_data = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
return ColorImage(rgb_data, frame=self._frame), None, None
|
Retrieve a new frame from the PhoXi and convert it to a ColorImage,
a DepthImage, and an IrImage.
Parameters
----------
most_recent: bool
If true, the OpenCV buffer is emptied for the webcam before reading the most recent frame.
Returns
-------
:obj:`tuple` of :obj:`ColorImage`, :obj:`DepthImage`, :obj:`IrImage`, :obj:`numpy.ndarray`
The ColorImage, DepthImage, and IrImage of the current frame.
|
372,758
|
def add(self, si):
if self.o_chunk is None:
if os.path.exists(self.t_path):
os.remove(self.t_path)
self.o_chunk = streamcorpus.Chunk(self.t_path, mode=)
self.o_chunk.add(si)
logger.debug(, len(self.o_chunk))
if len(self.o_chunk) == self.chunk_max:
self.close()
|
puts `si` into the currently open chunk, which it creates if
necessary. If this item causes the chunk to cross chunk_max,
then the chunk closed after adding.
|
372,759
|
def incr(self, key, value):
returns = []
for server in self.servers:
returns.append(server.incr(key, value))
return returns[0]
|
Increment a key, if it exists, returns it's actual value, if it don't, return 0.
:param key: Key's name
:type key: six.string_types
:param value: Number to be incremented
:type value: int
:return: Actual value of the key on server
:rtype: int
|
372,760
|
def all_time(self):
if self._all_time is None:
self._all_time = AllTimeList(self._version, account_sid=self._solution[], )
return self._all_time
|
Access the all_time
:returns: twilio.rest.api.v2010.account.usage.record.all_time.AllTimeList
:rtype: twilio.rest.api.v2010.account.usage.record.all_time.AllTimeList
|
372,761
|
def add_to_pythonpath(self, path):
pathlist = self.get_pythonpath()
if path in pathlist:
return False
else:
pathlist.insert(0, path)
self.set_pythonpath(pathlist)
return True
|
Add path to project's PYTHONPATH
Return True if path was added, False if it was already there
|
372,762
|
def extract_ast_species(ast):
species_id = "None"
species_label = "None"
species = [
(species_id, species_label) for (species_id, species_label) in ast.species if species_id
]
if len(species) == 1:
(species_id, species_label) = species[0]
if not species_id:
species_id = "None"
species_label = "None"
log.debug(f"AST Species: {ast.species} Species: {species} SpeciesID: {species_id}")
return (species_id, species_label)
|
Extract species from ast.species set of tuples (id, label)
|
372,763
|
def swapColors(self):
rgba = self.color.get_0_255()
self.color = self.secondColor
self.secondColor = Color(rgba, )
|
Swaps the current :py:class:`Color` with the secondary :py:class:`Color`.
:rtype: Nothing.
|
372,764
|
def all_dataset_ids(self, reader_name=None, composites=False):
try:
if reader_name:
readers = [self.readers[reader_name]]
else:
readers = self.readers.values()
except (AttributeError, KeyError):
raise KeyError("No reader found in scene" % reader_name)
all_datasets = [dataset_id
for reader in readers
for dataset_id in reader.all_dataset_ids]
if composites:
all_datasets += self.all_composite_ids()
return all_datasets
|
Get names of all datasets from loaded readers or `reader_name` if
specified..
:return: list of all dataset names
|
372,765
|
def init(deb1, deb2=False):
global DEBUG
global DEBUGALL
DEBUG = deb1
DEBUGALL = deb2
|
Initialize DEBUG and DEBUGALL.
Allows other modules to set DEBUG and DEBUGALL, so their
call to dprint or dprintx generate output.
Args:
deb1 (bool): value of DEBUG to set
deb2 (bool): optional - value of DEBUGALL to set,
defaults to False.
|
372,766
|
def is_result_edition_allowed(self, analysis_brain):
if not self.is_analysis_edition_allowed(analysis_brain):
return False
obj = api.get_object(analysis_brain)
if not obj.getDetectionLimitOperand():
return True
if obj.getDetectionLimitSelector():
if not obj.getAllowManualDetectionLimit():
return False
return True
|
Checks if the edition of the result field is allowed
:param analysis_brain: Brain that represents an analysis
:return: True if the user can edit the result field, otherwise False
|
372,767
|
def update_option_set_by_id(cls, option_set_id, option_set, **kwargs):
kwargs[] = True
if kwargs.get():
return cls._update_option_set_by_id_with_http_info(option_set_id, option_set, **kwargs)
else:
(data) = cls._update_option_set_by_id_with_http_info(option_set_id, option_set, **kwargs)
return data
|
Update OptionSet
Update attributes of OptionSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.update_option_set_by_id(option_set_id, option_set, async=True)
>>> result = thread.get()
:param async bool
:param str option_set_id: ID of optionSet to update. (required)
:param OptionSet option_set: Attributes of optionSet to update. (required)
:return: OptionSet
If the method is called asynchronously,
returns the request thread.
|
372,768
|
def My_TreeTable(self, table, heads, heads2=None):
self.Define_TreeTable(heads, heads2)
self.Display_TreeTable(table)
|
Define and display a table
in which the values in first column form one or more trees.
|
372,769
|
def _set_rules(self, rules: dict, overwrite=True):
if not isinstance(rules, dict):
raise TypeError(
% type(rules))
if overwrite:
self.rules = Rules(rules, self.default_rule)
else:
self.rules.update(rules)
|
Created a new Rules object based on the provided dict of rules.
|
372,770
|
def start(self, timeout=None):
if self._running.isSet():
raise RuntimeError("Device client already started.")
self.ioloop = self._ioloop_manager.get_ioloop()
if timeout:
t0 = self.ioloop.time()
self._ioloop_manager.start(timeout)
self.ioloop.add_callback(self._install)
if timeout:
remaining_timeout = timeout - (self.ioloop.time() - t0)
self.wait_running(remaining_timeout)
|
Start the client in a new thread.
Parameters
----------
timeout : float in seconds
Seconds to wait for client thread to start. Do not specify a
timeout if start() is being called from the same ioloop that this
client will be installed on, since it will block the ioloop without
progressing.
|
372,771
|
def p_expression_land(self, p):
p[0] = Land(p[1], p[3], lineno=p.lineno(1))
p.set_lineno(0, p.lineno(1))
|
expression : expression LAND expression
|
372,772
|
def recognized_release(self):
_, _, rest = self.get_release_parts()
if rest == or re.match(r, rest):
return True
return False
|
Check if this Release value is something we can parse.
:rtype: bool
|
372,773
|
def __set_authoring_nodes(self, source, target):
editor = self.__script_editor.get_editor(source)
editor.set_file(target)
self.__script_editor.model.update_authoring_nodes(editor)
|
Sets given editor authoring nodes.
:param source: Source file.
:type source: unicode
:param target: Target file.
:type target: unicode
|
372,774
|
def getWindowByPID(self, pid, order=0):
if pid <= 0:
return None
EnumWindowsProc = ctypes.WINFUNCTYPE(
ctypes.c_bool,
ctypes.POINTER(ctypes.c_int),
ctypes.py_object)
def callback(hwnd, context):
if ctypes.windll.user32.IsWindowVisible(hwnd):
pid = ctypes.c_ulong()
ctypes.windll.user32.GetWindowThreadProcessId(hwnd, ctypes.byref(pid))
if context["pid"] == int(pid.value) and not context["handle"]:
if context["order"] > 0:
context["order"] -= 1
else:
context["handle"] = hwnd
return True
data = {"pid": pid, "handle": None, "order": order}
ctypes.windll.user32.EnumWindows(EnumWindowsProc(callback), ctypes.py_object(data))
return data["handle"]
|
Returns a handle for the first window that matches the provided PID
|
372,775
|
def get(self, url, headers=None, kwargs=None):
return self._request(
method=,
url=url,
headers=headers,
kwargs=kwargs
)
|
Make a GET request.
To make a GET request pass, ``url``
:param url: ``str``
:param headers: ``dict``
:param kwargs: ``dict``
|
372,776
|
def acl_remove_draft(self, id_vlan, type_acl):
parameters = dict(id_vlan=id_vlan, type_acl=type_acl)
uri = % parameters
return super(ApiVlan, self).get(uri)
|
Remove Acl draft by type
:param id_vlan: Identity of Vlan
:param type_acl: Acl type v4 or v6
:return: None
:raise VlanDoesNotExistException: Vlan Does Not Exist.
:raise InvalidIdVlanException: Invalid id for Vlan.
:raise NetworkAPIException: Failed to access the data source.
|
372,777
|
def parse_port_pin(name_str):
if len(name_str) < 3:
raise ValueError("Expecting pin name to be at least 3 characters")
if name_str[:2] != :
raise ValueError("Expecting pin name to start with GP")
if not name_str[2:].isdigit():
raise ValueError("Expecting numeric GPIO number")
port = int(int(name_str[2:]) / 8)
gpio_bit = 1 << int(int(name_str[2:]) % 8)
return (port, gpio_bit)
|
Parses a string and returns a (port, gpio_bit) tuple.
|
372,778
|
def get_member_class(resource):
reg = get_current_registry()
if IInterface in provided_by(resource):
member_class = reg.getUtility(resource, name=)
else:
member_class = reg.getAdapter(resource, IMemberResource,
name=)
return member_class
|
Returns the registered member class for the given resource.
:param resource: registered resource
:type resource: class implementing or instance providing or subclass of
a registered resource interface.
|
372,779
|
def position(self):
self._position, value = self.get_attr_int(self._position, )
return value
|
Returns the current position of the motor in pulses of the rotary
encoder. When the motor rotates clockwise, the position will increase.
Likewise, rotating counter-clockwise causes the position to decrease.
Writing will set the position to that value.
|
372,780
|
def merge_request(self, request_id):
request_url = "{}pull-request/{}/merge".format(self.create_basic_url(),
request_id)
return_value = self._call_api(request_url, method=)
LOG.debug(return_value)
|
Merge a pull request.
:param request_id: the id of the request
:return:
|
372,781
|
def info(environment, opts):
damaged = False
sites = environment.sites
if not environment.sites:
sites = []
damaged = True
if opts[]:
if damaged:
raise DatacatsError()
for site in sites:
environment.site_name = site
print .format(site, environment.web_address())
return
datadir = environment.datadir
if not environment.data_exists():
datadir =
elif damaged:
datadir +=
print + environment.name
print + environment.target
print + datadir
print + .join(environment.sites)
for site in environment.sites:
print
environment.site_name = site
print + site
print + .join(environment.containers_running())
sitedir = environment.sitedir + ( if not environment.data_complete() else )
print + sitedir
addr = environment.web_address()
if addr:
print + addr
|
Display information about environment and running containers
Usage:
datacats info [-qr] [ENVIRONMENT]
Options:
-q --quiet Echo only the web URL or nothing if not running
ENVIRONMENT may be an environment name or a path to an environment directory.
Default: '.'
|
372,782
|
def get_steps_branch_len(self, length):
return log(length/self.length, min(self.branches[0][0]))
|
Get, how much steps will needed for a given branch length.
Returns:
float: The age the tree must achieve to reach the given branch length.
|
372,783
|
def list_archive(archive, verbosity=1, program=None, interactive=True):
util.check_existing_filename(archive)
if verbosity >= 0:
util.log_info("Listing %s ..." % archive)
return _handle_archive(archive, , verbosity=verbosity,
interactive=interactive, program=program)
|
List given archive.
|
372,784
|
def chebyshev_neg(h1, h2):
r
h1, h2 = __prepare_histogram(h1, h2)
return min(scipy.absolute(h1 - h2))
|
r"""
Chebyshev negative distance.
Also Tchebychev distance, Minimum or :math:`L_{-\infty}` metric; equal to Minowski
distance with :math:`p=-\infty`. For the case of :math:`p=+\infty`, use `chebyshev`.
The Chebyshev distance between two histograms :math:`H` and :math:`H'` of size :math:`m` is
defined as:
.. math::
d_{-\infty}(H, H') = \min_{m=1}^M|H_m-H'_m|
*Attributes:*
- semimetric (triangle equation satisfied?)
*Attributes for normalized histograms:*
- :math:`d(H, H')\in[0, 1]`
- :math:`d(H, H) = 0`
- :math:`d(H, H') = d(H', H)`
*Attributes for not-normalized histograms:*
- :math:`d(H, H')\in[0, \infty)`
- :math:`d(H, H) = 0`
- :math:`d(H, H') = d(H', H)`
*Attributes for not-equal histograms:*
- not applicable
Parameters
----------
h1 : sequence
The first histogram.
h2 : sequence
The second histogram.
Returns
-------
chebyshev_neg : float
Chebyshev negative distance.
See also
--------
minowski, chebyshev
|
372,785
|
def get_node(self, name):
r
if self._validate_node_name(name):
raise RuntimeError("Argument `name` is not valid")
self._node_in_tree(name)
return self._db[name]
|
r"""
Get a tree node structure.
The structure is a dictionary with the following keys:
* **parent** (*NodeName*) Parent node name, :code:`''` if the
node is the root node
* **children** (*list of NodeName*) Children node names, an
empty list if node is a leaf
* **data** (*list*) Node data, an empty list if node contains no data
:param name: Node name
:type name: string
:rtype: dictionary
:raises:
* RuntimeError (Argument \`name\` is not valid)
* RuntimeError (Node *[name]* not in tree)
|
372,786
|
def generate_transit_lightcurve(
times,
mags=None,
errs=None,
paramdists={:sps.uniform(loc=0.1,scale=49.9),
:sps.uniform(loc=1.0e-4,scale=2.0e-2),
:sps.uniform(loc=0.01,scale=0.29)},
magsarefluxes=False,
):
transitperiodtransitdepthtransitdurationfrozenvartypeplanetparamstransitperiodtransitepochtransitdepthtransitdurationingressdurationtimesmagserrsvarperiodtransitperiodvaramplitudetransitdepth
if mags is None:
mags = np.full_like(times, 0.0)
if errs is None:
errs = np.full_like(times, 0.0)
epoch = npr.random()*(times.max() - times.min()) + times.min()
period = paramdists[].rvs(size=1)
depth = paramdists[].rvs(size=1)
duration = paramdists[].rvs(size=1)
ingduration = npr.random()*(0.5*duration - 0.05*duration) + 0.05*duration
if magsarefluxes and depth < 0.0:
depth = -depth
elif not magsarefluxes and depth > 0.0:
depth = -depth
modelmags, phase, ptimes, pmags, perrs = (
transits.trapezoid_transit_func([period, epoch, depth,
duration, ingduration],
times,
mags,
errs)
)
timeind = np.argsort(ptimes)
mtimes = ptimes[timeind]
mmags = modelmags[timeind]
merrs = perrs[timeind]
modeldict = {
:,
:{x:np.asscalar(y) for x,y in zip([,
,
,
,
],
[period,
epoch,
depth,
duration,
ingduration])},
:mtimes,
:mmags,
:merrs,
:period,
:depth
}
return modeldict
|
This generates fake planet transit light curves.
Parameters
----------
times : np.array
This is an array of time values that will be used as the time base.
mags,errs : np.array
These arrays will have the model added to them. If either is
None, `np.full_like(times, 0.0)` will used as a substitute and the model
light curve will be centered around 0.0.
paramdists : dict
This is a dict containing parameter distributions to use for the
model params, containing the following keys ::
{'transitperiod', 'transitdepth', 'transitduration'}
The values of these keys should all be 'frozen' scipy.stats distribution
objects, e.g.:
https://docs.scipy.org/doc/scipy/reference/stats.html#continuous-distributions
The variability epoch will be automatically chosen from a uniform
distribution between `times.min()` and `times.max()`.
The ingress duration will be automatically chosen from a uniform
distribution ranging from 0.05 to 0.5 of the transitduration.
The transitdepth will be flipped automatically as appropriate if
`magsarefluxes=True`.
magsarefluxes : bool
If the generated time series is meant to be a flux time-series, set this
to True to get the correct sign of variability amplitude.
Returns
-------
dict
A dict of the form below is returned::
{'vartype': 'planet',
'params': {'transitperiod': generated value of period,
'transitepoch': generated value of epoch,
'transitdepth': generated value of transit depth,
'transitduration': generated value of transit duration,
'ingressduration': generated value of transit ingress
duration},
'times': the model times,
'mags': the model mags,
'errs': the model errs,
'varperiod': the generated period of variability == 'transitperiod'
'varamplitude': the generated amplitude of
variability == 'transitdepth'}
|
372,787
|
def _force(self,z,t=0.):
return self._Pot.zforce(self._R,z,phi=self._phi,t=t,use_physical=False)\
-self._Pot.zforce(self._R,0.,phi=self._phi,t=t,use_physical=False)
|
NAME:
_force
PURPOSE:
evaluate the force
INPUT:
z
t
OUTPUT:
F_z(z,t;R)
HISTORY:
2010-07-13 - Written - Bovy (NYU)
|
372,788
|
def _generate_docstring(self, doc_type, quote):
docstring = None
self.quote3 = quote * 3
if quote == :
self.quote3_other = """"NumpydocGoogledoc':
docstring = self._generate_google_doc(func_info)
return docstring
|
Generate docstring.
|
372,789
|
def substitute_vars(template, replacements):
result = template
for (key, value) in replacements:
result = result.replace( + key + , value)
if in result:
logging.warning("A variable was not replaced in .", result)
return result
|
Replace certain keys with respective values in a string.
@param template: the string in which replacements should be made
@param replacements: a dict or a list of pairs of keys and values
|
372,790
|
def fetch_live(self, formatter=TableFormat):
fmt = formatter(self)
for results in self.execute():
if in results and results[].get():
yield fmt.formatted(results[][])
|
Fetch a live stream query. This is the equivalent of selecting
the "Play" option for monitoring fields within the SMC UI. Data will
be streamed back in real time.
:param formatter: Formatter type for data representation. Any type
in :py:mod:`smc_monitoring.models.formatters`.
:return: generator yielding results in specified format
|
372,791
|
def search_datasets(self, search_phrase, limit=None):
return self.backend.dataset_index.search(search_phrase, limit=limit)
|
Search for datasets.
|
372,792
|
def generate(env):
if not exists(env):
return
env[] = []
env[] = []
env[] =
env[].append( )
env[] = "$WIXLIGHT $WIXLIGHTFLAGS -out ${TARGET} ${SOURCES}"
env[] =
env[] =
object_builder = SCons.Builder.Builder(
action = ,
suffix = ,
src_suffix = )
linker_builder = SCons.Builder.Builder(
action = ,
src_suffix = ,
src_builder = object_builder)
env[][] = linker_builder
|
Add Builders and construction variables for WiX to an Environment.
|
372,793
|
def agg_iter(self, lower_limit=None, upper_limit=None):
lower_limit = lower_limit or self.get_bookmark().isoformat()
upper_limit = upper_limit or (
datetime.datetime.utcnow().replace(microsecond=0).isoformat())
aggregation_data = {}
self.agg_query = Search(using=self.client,
index=self.event_index).\
filter(, timestamp={
: self._format_range_dt(lower_limit),
: self._format_range_dt(upper_limit)})
for modifier in self.query_modifiers:
self.agg_query = modifier(self.agg_query)
hist = self.agg_query.aggs.bucket(
,
,
field=,
interval=self.aggregation_interval
)
terms = hist.bucket(
, , field=self.aggregation_field, size=0
)
top = terms.metric(
, , size=1, sort={: }
)
for dst, (metric, src, opts) in self.metric_aggregation_fields.items():
terms.metric(dst, metric, field=src, **opts)
results = self.agg_query.execute()
index_name = None
for interval in results.aggregations[].buckets:
interval_date = datetime.datetime.strptime(
interval[], )
for aggregation in interval[].buckets:
aggregation_data[] = interval_date.isoformat()
aggregation_data[self.aggregation_field] = aggregation[]
aggregation_data[] = aggregation[]
if self.metric_aggregation_fields:
for f in self.metric_aggregation_fields:
aggregation_data[f] = aggregation[f][]
doc = aggregation.top_hit.hits.hits[0][]
for destination, source in self.copy_fields.items():
if isinstance(source, six.string_types):
aggregation_data[destination] = doc[source]
else:
aggregation_data[destination] = source(
doc,
aggregation_data
)
index_name = .\
format(self.event,
interval_date.strftime(
self.index_name_suffix))
self.indices.add(index_name)
yield dict(_id=.
format(aggregation[],
interval_date.strftime(
self.doc_id_suffix)),
_index=index_name,
_type=self.aggregation_doc_type,
_source=aggregation_data)
self.last_index_written = index_name
|
Aggregate and return dictionary to be indexed in ES.
|
372,794
|
def next_block(self):
assert self.pos <= self.input_len
if self.pos == self.input_len:
return None
i = self.START_OVERSHOOT
while True:
try_size = int(self.bs * i)
size = self.check_request_size(try_size)
c, d = self.compress_next_chunk(size)
if size != try_size:
break
if len(d) < self.bs:
i += self.OVERSHOOT_INCREASE
else:
break
while True:
if len(d) <= self.bs:
self.c = c
crc32 = zlib.crc32(self.get_input(size), 0xffffffff) & 0xffffffff
self.pos += size
self.compressed_bytes += len(d)
return crc32, size, d
size -= 1
if size == 0:
return None
c, d = self.compress_next_chunk(size)
|
This could probably be improved; at the moment it starts by trying to overshoot the
desired compressed block size, then it reduces the input bytes one by one until it
has met the required block size
|
372,795
|
def F_to_K(self, F, method=):
Q2 = self.beta * self.theta
R2 = - self.R - dot(F.T, dot(self.Q, F))
A2 = self.A - dot(self.B, F)
B2 = self.C
lq = LQ(Q2, R2, A2, B2, beta=self.beta)
neg_P, neg_K, d = lq.stationary_values(method=method)
return -neg_K, -neg_P
|
Compute agent 2's best cost-minimizing response K, given F.
Parameters
----------
F : array_like(float, ndim=2)
A k x n array
method : str, optional(default='doubling')
Solution method used in solving the associated Riccati
equation, str in {'doubling', 'qz'}.
Returns
-------
K : array_like(float, ndim=2)
Agent's best cost minimizing response for a given F
P : array_like(float, ndim=2)
The value function for a given F
|
372,796
|
def copy(self):
t clone the serializers or deserializers and it won
try:
tmp = self.__class__()
except Exception:
tmp = self.__class__(self._pdict)
tmp._serializers = self._serializers
tmp.__deserializers = self.__deserializers
return tmp
|
makes a clone copy of the mapper. It won't clone the serializers or deserializers and it won't copy the events
|
372,797
|
def Setup(self):
self.Peers = []
self.DEAD_ADDRS = []
self.MissionsGlobal = []
self.NodeId = random.randint(1294967200, 4294967200)
|
Initialize the local node.
Returns:
|
372,798
|
def _truthtable(inputs, pcdata):
if len(inputs) == 0 and pcdata[0] in {PC_ZERO, PC_ONE}:
return {
PC_ZERO : TTZERO,
PC_ONE : TTONE
}[pcdata[0]]
elif len(inputs) == 1 and pcdata[0] == PC_ZERO and pcdata[1] == PC_ONE:
return inputs[0]
else:
return TruthTable(inputs, pcdata)
|
Return a truth table.
|
372,799
|
def stats(self, key=None):
returns = {}
for server in self.servers:
returns[server.server] = server.stats(key)
return returns
|
Return server stats.
:param key: Optional if you want status from a key.
:type key: six.string_types
:return: A dict with server stats
:rtype: dict
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.