Unnamed: 0 int64 0 389k | code stringlengths 26 79.6k | docstring stringlengths 1 46.9k |
|---|---|---|
385,600 | def push_channel(self, content, channel, content_url=None):
parameters = {
: self.app_key,
: self.app_secret,
: channel
}
return self._push(content, , parameters, content_url) | Push a notification to a Pushed channel.
Param: content -> content of Pushed notification message
channel -> string identifying a Pushed channel
content_url (optional) -> enrich message with URL
Returns Shipment ID as string |
def singleton(cls):
    """Class decorator implementing the Singleton design pattern.

    See http://www.oodesign.com/singleton-pattern.html for the pattern and
    http://stackoverflow.com/questions/6760685/creating-a-singleton-in-python
    for Python-specific approaches.

    Usage:
        @singleton
        class MyClass(object):
            pass
    """
    _instances = {}

    def get_instance(*args, **kwargs):
        # Lazily create the single instance on first call; later calls
        # ignore their arguments and return the cached object.
        try:
            return _instances[cls]
        except KeyError:
            _instances[cls] = cls(*args, **kwargs)
            return _instances[cls]

    return get_instance
385,602 | def remove_ancestors_of(self, node):
if isinstance(node, int):
warnings.warn(
,
DeprecationWarning, 2)
node = self._id_to_node[node]
anc = nx.ancestors(self._multi_graph, node)
for anc_node in... | Remove all of the ancestor operation nodes of node. |
385,603 | def _LegacyCheckHashesWithFileStore(self):
if not self.state.pending_hashes:
return
file_hashes = {}
hash_to_tracker = {}
for index, tracker in iteritems(self.state.pending_hashes):
if tracker.get("hash_obj") is None:
continue
hash_obj = tracker["... | Check all queued up hashes for existence in file store (legacy).
Hashes which do not exist in the file store will be downloaded. This
function flushes the entire queue (self.state.pending_hashes) in order to
minimize the round trips to the file store.
If a file was found in the file store it is copied... |
385,604 | def lookup(self, query=):
res = []
query = re.compile( % re.escape(query), self.reflags)
for name, email in self.get_contacts():
if query.match(name) or query.match(email):
res.append((name, email))
return res | looks up all contacts where name or address match query |
def _get_image_workaround_seek(self, idx):
    """Return frame `idx`, seeking through the video in steps beforehand.

    Same as ``__getitem__`` but touches every 50th frame up to `idx`
    first.  This is a workaround for an all-zero image returned by
    `imageio` when jumping straight to a frame.
    """
    warnings.warn("imageio workaround used!")
    cap = self.video_handle
    stride = 50
    # Read frames 0, 50, 100, ... strictly below idx's stride block.
    for marker in range(0, (idx // stride) * stride, stride):
        cap.get_data(marker)
    return cap.get_data(idx)
385,606 | def convert_units(self, desired, guess=False):
units._convert_units(self, desired, guess)
return self | Convert the units of the mesh into a specified unit.
Parameters
----------
desired : string
Units to convert to (eg 'inches')
guess : boolean
If self.units are not defined should we
guess the current units of the document and then convert? |
385,607 | def mine(self):
if PyFunceble.CONFIGURATION["mining"]:
try:
history = PyFunceble.requests.get(
self.to_get,
timeout=PyFunceble.CONFIGURATION["seconds_before_http_timeout"],
headers=... | Search for domain or URL related to the original URL or domain.
:return: The mined domains or URL.
:rtype: dict |
385,608 | def _write_comparison_plot_table(spid, models, options, core_results,
fit_results):
is_curve = in core_results[0][1]
df = core_results[spid][1]
df.rename(columns={: }, inplace=True)
if not is_curve:
x = np.arange(len(df)) + 1
df = df.sor... | Notes
-----
Only applies to analysis using functions from empirical in which models are
also given. |
385,609 | def _pload(offset, size):
output = []
indirect = offset[0] ==
if indirect:
offset = offset[1:]
I = int(offset)
if I >= 0:
I += 4 + (size % 2 if not indirect else 0)
ix_changed = (indirect or size < 5) and (abs(I) + size) > 127
if ix_changed:
output.ap... | Generic parameter loading.
Emits output code for setting IX at the right location.
size = Number of bytes to load:
1 => 8 bit value
2 => 16 bit value / string
4 => 32 bit value / f16 value
5 => 40 bit value |
385,610 | def iterfd(fd):
surrogatepass
unpk = msgpack.Unpacker(fd, **unpacker_kwargs)
for mesg in unpk:
yield mesg | Generator which unpacks a file object of msgpacked content.
Args:
fd: File object to consume data from.
Notes:
String objects are decoded using utf8 encoding. In order to handle
potentially malformed input, ``unicode_errors='surrogatepass'`` is set
to allow decoding bad input ... |
385,611 | def toggle(self, rows):
for r in Progress(rows, , total=len(self.rows)):
if not self.unselectRow(r):
self.selectRow(r) | Toggle selection of given `rows`. |
def get_index(self, field_name, catalog):
    """Return the index of the catalog for the given `field_name`, if any.

    When "Title" has no index of its own, fall back to the
    "sortable_title" index instead.
    """
    index = catalog.Indexes.get(field_name, None)
    if index or field_name != "Title":
        return index
    # No usable index for "Title" -> resolve via sortable_title.
    return self.get_index("sortable_title", catalog)
385,613 | def dirsplit(path):
r
parts = []
remain = path
part = True
while part != and remain != :
remain, part = split(remain)
parts.append(part)
parts = [p for p in parts if p != ]
if remain != :
parts.append(remain)
parts = parts[::-1]
return parts | r"""
Args:
path (str):
Returns:
list: components of the path
CommandLine:
python -m utool.util_path --exec-dirsplit
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> paths = []
>>> paths.append('E:/window file/foo')
... |
385,614 | def get_token_func():
print("{}: token updater was triggered".format(datetime.datetime.now()))
context = adal.AuthenticationContext(
str.format("https://login.microsoftonline.com/{}", settings.ACTIVE_DIRECTORY_TENANT_ID),
api_version=None, validate_aut... | This function makes a call to AAD to fetch an OAuth token
:return: the OAuth token and the interval to wait before refreshing it |
385,615 | def list_all_customers(cls, **kwargs):
kwargs[] = True
if kwargs.get():
return cls._list_all_customers_with_http_info(**kwargs)
else:
(data) = cls._list_all_customers_with_http_info(**kwargs)
return data | List Customers
Return a list of Customers
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.list_all_customers(async=True)
>>> result = thread.get()
:param async bool
:param int... |
385,616 | def libvlc_media_get_mrl(p_md):
f = _Cfunctions.get(, None) or \
_Cfunction(, ((1,),), string_result,
ctypes.c_void_p, Media)
return f(p_md) | Get the media resource locator (mrl) from a media descriptor object.
@param p_md: a media descriptor object.
@return: string with mrl of media descriptor object. |
385,617 | def update_url(self, url=None, regex=None):
if not url and not regex:
raise ValueError("Neither a url or regex was provided to update_url.")
headers = {
: self.token,
: ,
}
data = {
: settings.PRERENDER_TOKEN,
}
i... | Accepts a fully-qualified url, or regex.
Returns True if successful, False if not successful. |
385,618 | def _get_component_from_result(self, result, lookup):
for component in result[]:
if lookup[] in component[]:
return component.get(lookup[], )
return | Helper function to get a particular address component from a Google result.
Since the address components in results are an array of objects containing a types array,
we have to search for a particular component rather than being able to look it up directly.
Returns the first match, so this sho... |
385,619 | def update_machine_state(state_path):
charmhelpers.contrib.templating.contexts.juju_state_to_yaml(
salt_grains_path)
subprocess.check_call([
,
,
,
state_path,
]) | Update the machine state using the provided state declaration. |
385,620 | def friendly_load(parser, token):
bits = token.contents.split()
if len(bits) >= 4 and bits[-2] == "from":
name = bits[-1]
try:
lib = find_library(parser, name)
subset = load_from_library(lib, name, bits[1:-2])
parser.add_library(subset)
e... | Tries to load a custom template tag set. Non existing tag libraries
are ignored.
This means that, if used in conjunction with ``if_has_tag``, you can try to
load the comments template tag library to enable comments even if the
comments framework is not installed.
For example::
{% load fri... |
def _update_pwm(self):
    """Update the pwm values of the driver regarding the current state.

    When the light is off, every pin is driven with 0 instead of the
    computed channel values.
    """
    if not self._is_on:
        values = [0] * len(self._driver.pins)
    else:
        values = self._get_pwm_values()
    self._driver.set_pwm(values)
385,622 | def fill_package(app_name, build_dir=None, install_dir=None):
zip_path = os.path.join(install_dir, % app_name)
with zipfile.ZipFile(zip_path, ) as zip_file:
fill_package_zip(zip_file, os.path.dirname(build_dir), prefix=app_name)
return zip_path | Creates the theme package (.zip) from templates and optionally
assets installed in the ``build_dir``. |
def _path_to_id(path):
    """Return the basename of `path`, tolerating one trailing ``/``.

    The root directory's name is used as ``<packageid>`` in ``info.xml``;
    stripping a single trailing slash keeps :func:`os.path.basename` from
    returning an empty string for paths like ``/foo/bar/``.

    Args:
        path (str): Path to the root directory.

    Returns:
        str: Basename of the `path`.
    """
    trimmed = path[:-1] if path.endswith("/") else path
    return os.path.basename(trimmed)
385,624 | def as_dtype(type_value):
if isinstance(type_value, DType):
return type_value
try:
return _INTERN_TABLE[type_value]
except KeyError:
pass
try:
return _STRING_TO_TF[type_value]
except KeyError:
pass
try:
return _PYTHON_TO_TF[type_value]
... | Converts the given `type_value` to a `DType`.
Args:
type_value: A value that can be converted to a `tf.DType` object. This may
currently be a `tf.DType` object, a [`DataType`
enum](https://www.tensorflow.org/code/tensorflow/core/framework/types.proto),
a string type name, or a `numpy.... |
385,625 | def _make_concept(self, entity):
name = self._sanitize(entity[])
db_refs = _get_grounding(entity)
concept = Concept(name, db_refs=db_refs)
metadata = {arg[]: arg[][]
for arg in entity[]}
... | Return Concept from a Hume entity. |
385,626 | def populate(self, compound_dict=None, x=1, y=1, z=1):
error_dict = {0: , 1: , 2: }
try:
x = int(x)
y = int(y)
z = int(z)
except (ValueError, TypeError):
raise ValueError(
.format(... | Expand lattice and create compound from lattice.
populate will expand lattice based on user input. The user must also
pass in a dictionary that contains the keys that exist in the
basis_dict. The corresponding Compound will be the full lattice
returned to the user.
If no dictio... |
def projR(gamma, p):
    """Return the KL projection on the row constraints.

    Each row of `gamma` is rescaled so its sum matches the corresponding
    entry of `p`; row sums are floored at 1e-10 to avoid division by zero.
    """
    row_sums = np.maximum(np.sum(gamma, axis=1), 1e-10)
    return (gamma.T * (p / row_sums)).T
385,628 | def _normalize_histogram2d(self, counts, type):
counts = (255 * (counts - np.nanmin(counts)) /
(np.nanmax(counts) - np.nanmin(counts)))
if type == :
counts = 255 - counts
return counts.astype(np.uint8) | Normalize the values of the counts for a 2D histogram.
This normalizes the values of a numpy array to the range 0-255.
:param counts: a NumPy array which is to be rescaled.
:param type: either 'bw' or 'reverse_bw'. |
385,629 | def _buildElementTree(self,):
t_elt = ctree.Element(self.name)
for k,v in [ (key,value) for key,value in self.__dict__.items() if key != ]:
if v and v != :
t_elt.set(k if k != else , str(v).lower())
self._etree = t_elt
return t_elt | Turn object into an ElementTree |
385,630 | def _tarboton_slopes_directions(data, dX, dY, facets, ang_adj):
shp = np.array(data.shape) - 1
direction = np.full(data.shape, FLAT_ID_INT, )
mag = np.full(data.shape, FLAT_ID_INT, )
slc0 = [slice(1, -1), slice(1, -1)]
for ind in xrange(8):
e1 = facets[ind][1]
e2 = facets[in... | Calculate the slopes and directions based on the 8 sections from
Tarboton http://www.neng.usu.edu/cee/faculty/dtarb/96wr03137.pdf |
385,631 | def launch_tor(config, reactor,
tor_binary=None,
progress_updates=None,
connection_creator=None,
timeout=None,
kill_on_stderr=True,
stdout=None, stderr=None):
from .controller import launch
tor = yield ... | Deprecated; use launch() instead.
See also controller.py |
385,632 | def pupv_to_vRvz(pu,pv,u,v,delta=1.,oblate=False):
if oblate:
denom= delta*(sc.sinh(u)**2.+sc.cos(v)**2.)
vR= (pu*sc.sinh(u)*sc.sin(v)+pv*sc.cosh(u)*sc.cos(v))/denom
vz= (pu*sc.cosh(u)*sc.cos(v)-pv*sc.sinh(u)*sc.sin(v))/denom
else:
denom= delta*(sc.sinh(u)**2.+sc.sin(v)**2.)... | NAME:
pupv_to_vRvz
PURPOSE:
calculate cylindrical vR and vz from momenta in prolate or oblate confocal u and v coordinates for a given focal length delta
INPUT:
pu - u momentum
pv - v momentum
u - u coordinate
v - v coordinate
delta= focus
oblate... |
385,633 | def upload_to_s3(self, key, filename):
extra_args = {: self.acl}
guess = mimetypes.guess_type(filename)
content_type = guess[0]
encoding = guess[1]
if content_type:
extra_args[] = content_type
if (self.gzip and content_type in self... | Set the content type and gzip headers if applicable
and upload the item to S3 |
def get_cutoff(value: float, cutoff: Optional[float] = None) -> int:
    """Classify `value` against a symmetric cutoff band.

    Returns 1 when ``value > cutoff``, -1 when ``value < -cutoff`` and 0
    otherwise.  A `cutoff` of ``None`` is treated as 0.
    """
    threshold = 0 if cutoff is None else cutoff
    if value > threshold:
        return 1
    if value < -threshold:
        return -1
    return 0
385,635 | def _get_comments(session, group_or_user_id, wall_id):
return session.fetch_items("wall.getComments", Comment.from_json, count=100, owner_id=group_or_user_id, post_id=wall_id, need_likes=1) | https://vk.com/dev/wall.getComments |
def leaves(self):
    """Return a :class:`QuerySet` of all leaf nodes (nodes with no children).

    :return: A :class:`QuerySet` of all leaf nodes (nodes with no
        children).
    """
    self._ensure_parameters()
    # Exclude every node that appears as a parent of some node in the set.
    children_filter = "%s__id__in" % self.model._cte_node_children
    return self.exclude(**{children_filter: self.all()})
def union(self, *iterables):
    """Return a new SortedSet with elements from the set and all *iterables*."""
    combined = chain(iter(self), *iterables)
    return self.__class__(combined, key=self._key)
def _register(self, name):
    """@Api private
    Add a new :py:class:`TemplateHook` into the registry.

    :param str name: Hook name
    :return: Instance of :py:class:`TemplateHook`
    :rtype: :py:class:`TemplateHook`
    """
    hook = TemplateHook()
    self._registry[name] = hook
    return hook
def make_shell_logfile_data_url(host, shell_port, instance_id, offset, length):
    """Make the url for log-file data in heron-shell
    from the info stored in stmgr.
    """
    logfile = "%s.log.0" % instance_id
    return "http://%s:%d/filedata/log-files/%s?offset=%s&length=%s" % (
        host, shell_port, logfile, offset, length)
385,640 | def jinja_fragment_extension(tag, endtag=None, name=None, tag_only=False, allow_args=True, callblock_args=None):
if endtag is None:
endtag = "end" + tag
def decorator(f):
def parse(self, parser):
lineno = parser.stream.next().lineno
args = []
kwargs = []... | Decorator to easily create a jinja extension which acts as a fragment. |
385,641 | def align_epi_anat(anatomy,epi_dsets,skull_strip_anat=True):
if isinstance(epi_dsets,basestring):
epi_dsets = [epi_dsets]
if len(epi_dsets)==0:
nl.notify( % anatomy,level=nl.level.warning)
return
if all(os.path.exists(nl.suffix(x,)) for x in epi_dsets):
return
an... | aligns epis to anatomy using ``align_epi_anat.py`` script
:epi_dsets: can be either a string or list of strings of the epi child datasets
:skull_strip_anat: if ``True``, ``anatomy`` will be skull-stripped using the default method
The default output suffix is "_al" |
385,642 | def colors(palette):
all_colors = {
: [, , , ],
: [, , ]
}
if palette == :
result = all_colors
else:
result = {palette: all_colors.get(palette)}
return jsonify(result) | Example endpoint return a list of colors by palette
This is using docstring for specifications
---
tags:
- colors
parameters:
- name: palette
in: path
type: string
enum: ['all', 'rgb', 'cmyk']
required: true
default: all
description: Which pale... |
def mkCuttingStock(s):
    """mkCuttingStock: convert a bin packing instance into cutting stock format.

    Returns ``(w, q)``: the distinct item sizes of `s` in ascending order
    and the number of times each size occurs.
    """
    w, q = [], []
    for item in sorted(s):
        if w and item == w[-1]:
            # Same size as the previous item: bump its multiplicity.
            q[-1] += 1
        else:
            w.append(item)
            q.append(1)
    return w, q
385,644 | def add_listener(self, on_message=None):
request = topic_add_message_listener_codec.encode_request(self.name, False)
def handle(item, publish_time, uuid):
member = self._client.cluster.get_member_by_uuid(uuid)
item_event = TopicMessage(self.name, item, publish_time, mem... | Subscribes to this topic. When someone publishes a message on this topic, on_message() function is called if
provided.
:param on_message: (Function), function to be called when a message is published.
:return: (str), a registration id which is used as a key to remove the listener. |
385,645 | def build_query(self, case_id, query=None, variant_ids=None, category=):
query = query or {}
mongo_query = {}
gene_query = None
for criterion in FUNDAMENTAL_CRITERIA:
if criterion == :
LOG.debug("Building a mongo query for... | Build a mongo query
These are the different query options:
{
'genetic_models': list,
'chrom': str,
'thousand_genomes_frequency': float,
'exac_frequency': float,
'clingen_ngi': int,
'cadd_score': float,
... |
385,646 | def warn_quirks(message, recommend, pattern, index):
import traceback
import bs4
paths = (MODULE, sys.modules[].__path__[0])
tb = traceback.extract_stack()
previous = None
filename = None
lineno = None
for entry in tb:
if (PY35 and entry.filename.startswith(paths)) ... | Warn quirks. |
385,647 | def vsan_datastore_configured(name, datastore_name):
s VSAN datastore
WARNING: The VSAN datastore is created automatically after the first
ESXi host is added to the cluster; the state assumes that the datastore
exists and errors if it doesn
cluster_name, datacenter_name = \
__salt__[](... | Configures the cluster's VSAN datastore
WARNING: The VSAN datastore is created automatically after the first
ESXi host is added to the cluster; the state assumes that the datastore
exists and errors if it doesn't. |
385,648 | def TBH(cpu, dest):
base_addr = dest.get_mem_base_addr()
if dest.mem.base in (, ):
base_addr = cpu.PC
offset = cpu.read_int(base_addr + dest.get_mem_offset(), 16)
offset = Operators.ZEXTEND(offset, cpu.address_bit_size)
... | Table Branch Halfword causes a PC-relative forward branch using a table of single halfword offsets. A base
register provides a pointer to the table, and a second register supplies an index into the table. The branch
length is twice the value of the halfword returned from the table.
:param ARMv7... |
385,649 | def hashleftjoin(left, right, key=None, lkey=None, rkey=None, missing=None,
cache=True, lprefix=None, rprefix=None):
lkey, rkey = keys_from_args(left, right, key, lkey, rkey)
return HashLeftJoinView(left, right, lkey, rkey, missing=missing,
cache=cache, lprefix... | Alternative implementation of :func:`petl.transform.joins.leftjoin`,
where the join is executed by constructing an in-memory lookup for the
right hand table, then iterating over rows from the left hand table.
May be faster and/or more resource efficient where the right table is small
and the left t... |
385,650 | def write_kwargs_to_attrs(cls, attrs, **kwargs):
for arg, val in kwargs.items():
if val is None:
val = str(None)
if isinstance(val, dict):
attrs[arg] = val.keys()
cls.write_kwargs_to_attrs(attrs, **val)
... | Writes the given keywords to the given ``attrs``.
If any keyword argument points to a dict, the keyword will point to a
list of the dict's keys. Each key is then written to the attrs with its
corresponding value.
Parameters
----------
attrs : an HDF attrs
Th... |
385,651 | def search(self, title=None, libtype=None, **kwargs):
args = {}
if title:
args[] = title
if libtype:
args[] = utils.searchType(libtype)
for attr, value in kwargs.items():
args[attr] = value
key = % utils.joinArgs(args)
return ... | Searching within a library section is much more powerful. It seems certain
attributes on the media objects can be targeted to filter this search down
a bit, but I haven't found the documentation for it.
Example: "studio=Comedy%20Central" or "year=1999" "title=Kung Fu" all work. Other... |
385,652 | def create_linear(num_finite_buckets, width, offset):
if num_finite_buckets <= 0:
raise ValueError(_BAD_NUM_FINITE_BUCKETS)
if width <= 0.0:
raise ValueError(_BAD_FLOAT_ARG % (u, 0.0))
return sc_messages.Distribution(
bucketCounts=[0] * (num_finite_buckets + 2),
linearBu... | Creates a new instance of distribution with linear buckets.
Args:
num_finite_buckets (int): initializes number of finite buckets
width (float): initializes the width of each bucket
offset (float): initializes the offset
Return:
:class:`endpoints_management.gen.servicecontrol_v1_mes... |
385,653 | def uhstack(arrs):
v = np.hstack(arrs)
v = _validate_numpy_wrapper_units(v, arrs)
return v | Stack arrays in sequence horizontally while preserving units
This is a wrapper around np.hstack that preserves units.
Examples
--------
>>> from unyt import km
>>> a = [1, 2, 3]*km
>>> b = [2, 3, 4]*km
>>> print(uhstack([a, b]))
[1 2 3 2 3 4] km
>>> a = [[1],[2],[3]]*km
>>> b =... |
385,654 | def observable(operator, rho, unfolding, complex=False):
r
if len(rho.shape) == 2:
return np.array([observable(operator, i, unfolding) for i in rho])
Ne = unfolding.Ne
Mu = unfolding.Mu
obs = 0
if unfolding.normalized:
rho11 = 1 - sum([rho[Mu(1, i, i)] for i in range(1, Ne)])
... | r"""Return an observable amount.
INPUT:
- ``operator`` - An square matrix representing a hermitian operator \
in thesame basis as the density matrix.
- ``rho`` - A density matrix in unfolded format, or a list of such \
density matrices.
- ``unfolding`` - A mapping from matrix element indic... |
def intersection(self, other):
    """Returns a new tree of all intervals common to both self and
    other.
    """
    # Scan the smaller tree and probe the larger one for membership.
    shorter, longer = sorted((self, other), key=len)
    common = {iv for iv in shorter if iv in longer}
    return IntervalTree(common)
385,656 | def _consolidate_repo_sources(sources):
if not isinstance(sources, sourceslist.SourcesList):
raise TypeError(
{0}\{1}\.format(
type(sources),
sourceslist.SourcesList
)
)
consolidated = {}
delete_files = set()
base_file = sourc... | Consolidate APT sources. |
385,657 | def init(self):
"Initialize the message-digest and set all fields to zero."
self.length = 0L
self.input = []
self.A = 0x67452301L
self.B = 0xefcdab89L
self.C = 0x98badcfeL
self.D = 0x10325476L | Initialize the message-digest and set all fields to zero. |
def add_states(self, *states):
    """Add @states: register an EventManagerPlus for each given state."""
    for s in states:
        self.states[s] = EventManagerPlus(self)
385,659 | def load_data(filespec, idx=None, logger=None, **kwargs):
global loader_registry
info = iohelper.get_fileinfo(filespec)
filepath = info.filepath
if idx is None:
idx = info.numhdu
try:
typ, subtyp = iohelper.guess_filetype(filepath)
except Exception as e:
... | Load data from a file.
This call is used to load a data item from a filespec (path or URL)
Parameters
----------
filespec : str
The path of the file to load (can be a URL).
idx : int or string (optional)
The index or name of the data unit in the file (e.g. an HDU name)
logger... |
385,660 | def run_work(self):
if os.path.exists(LOCAL_EVAL_ROOT_DIR):
sudo_remove_dirtree(LOCAL_EVAL_ROOT_DIR)
self.run_attacks()
self.run_defenses() | Run attacks and defenses |
385,661 | def move_items(self, from_group, to_group):
if from_group not in self.keys() or len(self.groups[from_group]) == 0:
return
self.groups.setdefault(to_group, list()).extend(self.groups.get
(from_group, list()))
if from_gr... | Take all elements from the from_group and add it to the to_group. |
385,662 | def _attr_sort_func(model, iter1, iter2, attribute):
attr1 = getattr(model[iter1][0], attribute, None)
attr2 = getattr(model[iter2][0], attribute, None)
return cmp(attr1, attr2) | Internal helper |
385,663 | def _find_usage_security_groups(self):
vpc_count = 0
paginator = self.conn.get_paginator()
for page in paginator.paginate():
for group in page[]:
if in group and group[] is not None:
vpc_count += 1
self.limits[]._add_curr... | find usage for security groups |
385,664 | def diagnose_cluster(
self,
project_id,
region,
cluster_name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
if "diagnose_cluster" not in self._inner_api_calls:
... | Gets cluster diagnostic information. After the operation completes, the
Operation.response field contains ``DiagnoseClusterOutputLocation``.
Example:
>>> from google.cloud import dataproc_v1beta2
>>>
>>> client = dataproc_v1beta2.ClusterControllerClient()
... |
385,665 | def fetch_access_token(self):
return self._fetch_access_token(
url=,
params={
: ,
: self.appid,
: self.secret
}
) | 获取 access token
详情请参考 http://mp.weixin.qq.com/wiki/index.php?title=通用接口文档
:return: 返回的 JSON 数据包 |
def removi(item, inset):
    """Remove an item from an integer set.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/removi_c.html

    :param item: Item to be removed.
    :type item: int
    :param inset: Set to be updated.
    :type inset: spiceypy.utils.support_types.SpiceCell
    """
    assert isinstance(inset, stypes.SpiceCell)
    assert inset.dtype == 2  # dtype 2 is the integer cell type
    c_item = ctypes.c_int(item)
    libspice.removi_c(c_item, ctypes.byref(inset))
385,667 | def _get_representative(self, obj):
if obj not in self._parents:
self._parents[obj] = obj
self._weights[obj] = 1
self._prev_next[obj] = [obj, obj]
self._min_values[obj] = obj
return obj
path = [obj]
root = self._parents[obj]
... | Finds and returns the root of the set containing `obj`. |
385,668 | def set_finished(self):
component_name = self.get_component_name()
self.log(
logging.INFO,
"Component [%s] is being marked as finished.",
component_name)
existing_state = self.__get_state(component_name)
assert existing_state == fss.const... | This stores the number of items that have been pushed, and
transitions the current component to the FINISHED state (which precedes
the STOPPED state). The FINISHED state isn't really necessary unless
methods/hooks are overridden to depend on it, but the count must be
stored at one po... |
385,669 | def parse_keys_and_ranges(i_str, keyfunc, rangefunc):
while i_str:
m = _STREAM_ID_RE.match(i_str)
if m:
for retval in keyfunc(stream_id_to_kvlayer_key(m.group())):
yield retval
i_str = i_str[m.end():]
while i_str and ((i_str[0] ==... | Parse the :class:`from_kvlayer` input string.
This accepts two formats. In the textual format, it accepts any
number of stream IDs in timestamp-docid format, separated by ``,``
or ``;``, and processes those as individual stream IDs. In the
binary format, it accepts 20-byte key blobs (16 bytes md5 has... |
385,670 | def list_versions(self, layer_id):
target_url = self.client.get_url(, , , {: layer_id})
return base.Query(self, target_url, valid_filter_attributes=(,), valid_sort_attributes=()) | Filterable list of versions of a layer, always ordered newest to oldest.
If the version’s source supports revisions, you can get a specific revision using
``.filter(data__source__revision=value)``. Specific values depend on the source type.
Use ``data__source_revision__lt`` or ``data__source_re... |
385,671 | def sigma_clipping(date, mag, err, threshold=3, iteration=1):
if (len(date) != len(mag)) \
or (len(date) != len(err)) \
or (len(mag) != len(err)):
raise RuntimeError()
for i in range(int(iteration)):
mean = np.median(mag)
std = np.std(mag)
index ... | Remove any fluctuated data points by magnitudes.
Parameters
----------
date : array_like
An array of dates.
mag : array_like
An array of magnitudes.
err : array_like
An array of magnitude errors.
threshold : float, optional
Threshold for sigma-clipping.
itera... |
385,672 | def delete(self, id):
try:
response = yield self.client.delete(id)
if response.get("n") > 0:
self.write({"message": "Deleted %s object: %s" % (self.object_name, id) })
return
self.raise_error(404, "Resource not found")
excep... | Delete a resource by bson id
:raises: 404 Not Found
:raises: 400 Bad request
:raises: 500 Server Error |
385,673 | def get_sequence_rule_mdata():
return {
: {
: {
: ,
: str(DEFAULT_LANGUAGE_TYPE),
: str(DEFAULT_SCRIPT_TYPE),
: str(DEFAULT_FORMAT_TYPE),
},
: {
: ,
: str(DEFAULT_LANGUAGE... | Return default mdata map for SequenceRule |
385,674 | def down_capture(returns, factor_returns, **kwargs):
return down(returns, factor_returns, function=capture, **kwargs) | Compute the capture ratio for periods when the benchmark return is negative
Parameters
----------
returns : pd.Series or np.ndarray
Returns of the strategy, noncumulative.
- See full explanation in :func:`~empyrical.stats.cum_returns`.
factor_returns : pd.Series or np.ndarray
No... |
385,675 | def _setup_subpix(self,nside=2**16):
if hasattr(self,): return
self.roi_radius = self.config[][]
logger.info("Setup subpixels...")
self.nside_pixel = self.config[][]
self.nside_subpixel = self.nside_pixel * 2**4
epsilon = np.degrees... | Subpixels for random position generation. |
385,676 | def insert_from_segmentlistdict(self, seglists, name, version = None, comment = None, valid=None):
for instrument, segments in seglists.items():
if valid is None:
curr_valid = ()
else:
curr_valid = valid[instrument]
self.add(LigolwSegmentList(active = segments, instruments = set([instrument]), n... | Insert the segments from the segmentlistdict object
seglists as a new list of "active" segments into this
LigolwSegments object. The dictionary's keys are assumed
to provide the instrument name for each segment list. A
new entry will be created in the segment_definer table for
the segment lists, and the dic... |
def assign(self, attrs):
    """Merge new attributes: set every key/value in `attrs` on this object."""
    for name, value in attrs.items():
        setattr(self, name, value)
385,678 | def discretize_wd_style(N, q, F, d, Phi):
DEBUG = False
Ts = []
potential =
r0 = libphoebe.roche_pole(q, F, d, Phi)
pot_name = potential
dpdx = globals()[%(pot_name)]
dpdy = globals()[%(pot_name)]
dpdz = globals()[%(pot_name)]
if DEBUG:
import matplotlib.pyplo... | TODO: add documentation
New implementation. I'll make this work first, then document. |
385,679 | def on_service_departure(self, svc_ref):
with self._lock:
if svc_ref is self.reference:
self._value.unset_service()
self.reference = None
self._pending_ref = self._context.get_service_refere... | Called when a service has been unregistered from the framework
:param svc_ref: A service reference |
385,680 | def run_friedman_smooth(x, y, span):
N = len(x)
weight = numpy.ones(N)
results = numpy.zeros(N)
residuals = numpy.zeros(N)
mace.smooth(x, y, weight, span, 1, 1e-7, results, residuals)
return results, residuals | Run the FORTRAN smoother. |
385,681 | def forwards(self, orm):
"Write your forwards methods here."
db_table = orm[]._meta.db_table
db.execute(.format(db_table))
db.execute(.format(db_table))
cohorts = list(orm[].objects.all())
for c in cohorts:
db.execute(, [c.pk]) | Write your forwards methods here. |
385,682 | def ObsBandpass(obstring, graphtable=None, comptable=None, component_dict={}):
ob=ObservationMode(obstring,graphtable=graphtable,
comptable=comptable,component_dict=component_dict)
if len(ob) > 1:
return ObsModeBandpass(ob)
else:
return TabularSpectralEl... | Generate a bandpass object from observation mode.
If the bandpass consists of multiple throughput files
(e.g., "acs,hrc,f555w"), then `ObsModeBandpass` is returned.
Otherwise, if it consists of a single throughput file
(e.g., "johnson,v"), then `~pysynphot.spectrum.TabularSpectralElement`
is return... |
385,683 | def getLayout(kind=None,theme=None,title=,xTitle=,yTitle=,zTitle=,barmode=,bargap=None,bargroupgap=None,
margin=None, dimensions=None, width=None, height=None,
annotations=None,is3d=False,**kwargs):
for key in list(kwargs.keys()):
if key not in __LAYOUT_KWARGS:
raise Exception("Invalid keyword : ".f... | Generates a plotly Layout
Parameters:
-----------
theme : string
Layout Theme
solar
pearl
white
title : string
Chart Title
xTitle : string
X Axis Title
yTitle : string
Y Axis Title
zTitle : string
Z Axis Title
Applicable only for 3d charts
barmode : string
Mode when displ... |
385,684 | def memoized(maxsize=1024):
cache = SimpleCache(maxsize=maxsize)
def decorator(obj):
@wraps(obj)
def new_callable(*a, **kw):
def create_new():
return obj(*a, **kw)
key = (a, tuple(kw.items()))
return cache.get(key, create_new)
        re... | Memoization decorator for immutable classes and pure functions. |
def build_map_type_validator(item_validator):
    """Return a function which validates that the value is a mapping of
    items.  `item_validator` should return pairs of items that will be
    passed to the `dict` constructor.
    """
    def validate_mapping(value):
        pairs = (item_validator(entry) for entry in validate_list(value))
        return dict(pairs)
    return validate_mapping
def calc_remotedemand_v1(self):
    """Estimate the discharge demand of a cross section far downstream.

    Reads the minimum remote discharge for the current simulation time of
    year and subtracts the naturally available remote discharge; the demand
    is clipped at zero so it can never be negative.

    Writes the result to the `remotedemand` flux sequence.
    """
    control = self.parameters.control.fastaccess
    derived = self.parameters.derived.fastaccess
    fluxes = self.sequences.fluxes.fastaccess
    # Threshold for the current time of year (TOY index of this sim step).
    threshold = control.remotedischargeminimum[derived.toy[self.idx_sim]]
    fluxes.remotedemand = max(threshold - fluxes.naturalremotedischarge, 0.)
Required control parameter:
|RemoteDischargeMinimum|
Required derived parameters:
|dam_derived.TOY|
Required flux sequence:
|NaturalRemoteDischarge|
Calculated flux sequence:
|RemoteDemand|
Basic equation:
:... |
def create_frames(until=None):
    """Create frames available in the JPL files.

    Args:
        until (str): Name of the body you want to create the frame of
            (all frames in between are created as well). When falsy,
            every body listed in the .bsp files gets its frame created.
    """
    now = Date.now()
    if not until:
        # No target given: walk every body known to the ephemeris files.
        for body in list_bodies():
            get_orbit(body.name, now)
    else:
        get_orbit(until, now)
Args:
until (str): Name of the body you want to create the frame of, and all frames in between.
If ``None`` all the frames available in the .bsp files will be created
Example:
.. code-block:: python
# All frames between Earth ... |
def edit_item(self):
    """Open the editor on column 3 of the currently selected row."""
    current = self.currentIndex()
    if current.isValid():
        # NOTE(review): QModelIndex.child() appears deprecated in newer
        # Qt versions (sibling() is the replacement) — confirm before
        # upgrading the Qt binding.
        self.edit(current.child(current.row(), 3))
385,689 | def _find_cont_fitfunc(fluxes, ivars, contmask, deg, ffunc, n_proc=1):
nstars = fluxes.shape[0]
npixels = fluxes.shape[1]
cont = np.zeros(fluxes.shape)
if n_proc == 1:
for jj in range(nstars):
flux = fluxes[jj,:]
ivar = ivars[jj,:]
pix = np.arange(0, npi... | Fit a continuum to a continuum pixels in a segment of spectra
Functional form can be either sinusoid or chebyshev, with specified degree
Parameters
----------
fluxes: numpy ndarray of shape (nstars, npixels)
training set or test set pixel intensities
ivars: numpy ndarray of shape (nstars,... |
385,690 | def _polar(self):
try:
return self._hidden_polar_axes
except AttributeError:
fig = self.get_figure()
self._hidden_polar_axes = fig.add_axes(self.get_position(True),
frameon=False, projection=)... | The "hidden" polar axis used for azimuth labels. |
385,691 | def covar_plotter3d_plotly(embedding, rieman_metric, inspect_points_idx,
colors, **kwargs):
def rgb2hex(rgb):
return % tuple(rgb)
return [ plt_data for idx in inspect_points_idx
for plt_data in plot_ellipse_plotly(
rieman_metric[idx], embeddi... | 3 Dimensional Covariance plotter using matplotlib backend. |
def make_input_from_plain_string(sentence_id: SentenceId, string: str) -> TranslatorInput:
    """Build a TranslatorInput from a plain input string.

    :param sentence_id: Sentence id.
    :param string: An input string; it is tokenized via ``data_io.get_tokens``.
    :return: A TranslatorInput carrying the token list and no factors.
    """
    tokens = list(data_io.get_tokens(string))
    return TranslatorInput(sentence_id, tokens=tokens, factors=None)
:param sentence_id: Sentence id.
:param string: An input string.
:return: A TranslatorInput. |
def delete(self, name):
    """Delete the time series with the given name across all intervals.

    NOTE(review): the original docstring claims the number of deleted
    records is returned, but the method returns None — confirm against
    callers before changing that.
    """
    connection = self._client.connect()
    statement = self._table.delete().where(self._table.c.name == name)
    connection.execute(statement)
records deleted. |
def max_profit_optimized(prices):
    """Best single buy/sell profit, via Kadane's algorithm on day deltas.

    input: [7, 1, 5, 3, 6, 4]
    diff : [X, -6, 4, -2, 3, -2]

    :type prices: List[int]
    :rtype: int
    """
    best_ending_here = 0
    best_overall = 0
    # Maximum-sum subarray over consecutive price differences equals the
    # maximum profit of one buy-then-sell transaction.
    for prev, cur in zip(prices, prices[1:]):
        best_ending_here = max(0, best_ending_here + cur - prev)
        best_overall = max(best_overall, best_ending_here)
    return best_overall
return max_so_far | input: [7, 1, 5, 3, 6, 4]
diff : [X, -6, 4, -2, 3, -2]
:type prices: List[int]
:rtype: int |
385,695 | def find_view_function(module_name, function_name, fallback_app=None, fallback_template=None, verify_decorator=True):
dmp = apps.get_app_config()
try:
spec = find_spec(module_name)
except ValueError:
spec = None
if spec is None:
try:
retu... | Finds a view function, class-based view, or template view.
Raises ViewDoesNotExist if not found. |
385,696 | def get_timedelta_str(timedelta, exclude_zeros=False):
if timedelta == datetime.timedelta(0):
return
days = timedelta.days
hours, rem = divmod(timedelta.seconds, 3600)
minutes, seconds = divmod(rem, 60)
fmtstr_list = []
fmtdict = {}
def append_cases(unit, fmtlbl):
if n... | get_timedelta_str
Returns:
str: timedelta_str, formated time string
References:
http://stackoverflow.com/questions/8906926/formatting-python-timedelta-objects
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_time import * # NOQA
>>> timedelta = get_unix_timedelta... |
385,697 | def from_response(cls, response, attrs):
proj = response[]
index = cls(proj[], response[],
attrs[response[][1][]],
proj.get())
index.response = response
return index | Create an index from returned Dynamo data |
385,698 | def patch_clean_fields(model):
old_clean_fields = model.clean_fields
def new_clean_fields(self, exclude=None):
if hasattr(self, ):
for field_name, value in self._mt_form_pending_clear.items():
field = self... | Patch clean_fields method to handle different form types submission. |
385,699 | def get(self, name):
config = self.get_block( % name)
if not config:
return None
resource = super(EthernetInterface, self).get(name)
resource.update(dict(name=name, type=))
resource.update(self._parse_sflow(config))
resource.update(self._parse_flowc... | Returns an interface as a set of key/value pairs
Args:
name (string): the interface identifier to retrieve the from
the configuration
Returns:
A Python dictionary object of key/value pairs that represent
the current configuration for the specified no... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.