| Unnamed: 0 (int64, 0 to 389k) | code (string, lengths 26 to 79.6k) | docstring (string, lengths 1 to 46.9k) |
|---|---|---|
387,800 | def getmap(self, path, query=None):
code, data, ctype = self.get(path, query)
if ctype != "application/json":
self.log.error("Expecting JSON from GET of , got ", self.lastpath, ctype)
raise HttpError(code=400, content_type=, content=+ctype)
try:
result = json.loads(dat... | Performs a GET request where the response content type is required to be
"application/json" and the content is a JSON-encoded data structure.
The decoded structure is returned. |
387,801 | def iterfiles(self):
try:
for path in self.order:
yield self.files[path]
except:
for winfile in self.files.values():
yield winfile | Yield all WinFile objects. |
387,802 | def fov_for_height_and_distance(height, distance):
vfov_deg = np.degrees(2.0 * np.arctan(height * 0.5 / distance))
return vfov_deg | Calculate the FOV needed to get a given frustum height at a
given distance. |
387,803 | def data(self, **query):
data = self.gencloud.project_data(self.id)
query['case_ids__contains'] = self.id
ids = set(d['id'] for d in self.gencloud.api.dataid.get(**query)['objects'])
return [d for d in data if d.id in ids] | Query for Data object annotation. |
387,804 | def __create_channel_run(self, channel, username, token):
data = {
: channel.get_node_id().hex,
: self.__get_chef_name(),
: __version__,
: username,
: token,
: config.DOMAIN,
}
try:
response = requests.p... | Sends a post request to create the channel run. |
387,805 | def download_extract(url):
logger.info("Downloading %s", url)
request = urllib2.Request(url)
request.add_header(,
)
opener = urllib2.build_opener()
with tempfile.TemporaryFile(suffix=, dir=env.WEATHER_DATA_PATH) \
as local_file:
logger.debug(, local_fi... | Download and extract a file. |
387,806 | def save(self):
with open(self.filename, 'w') as file:
self.prune()
self.data['version'] = self.version
json.dump(self.data,
file,
sort_keys=True, indent=2) | Save data. |
387,807 | def _get_range(book, range_, sheet):
filename = None
if isinstance(book, str):
filename = book
book = opxl.load_workbook(book, data_only=True)
elif isinstance(book, opxl.Workbook):
pass
else:
raise TypeError
if _is_range_address(range_):
sheet_names = [... | Return a range as nested dict of openpyxl cells. |
387,808 | def forwards(apps, schema_editor):
Event = apps.get_model('spectator_events', 'Event')
Work = apps.get_model('spectator_events', 'Work')
WorkRole = apps.get_model('spectator_events', 'WorkRole')
WorkSelection = apps.get_model('spectator_events', 'WorkSelection')
for event in Event.objects.filter(kind='museum'):
work.slug = generate_slug(work.pk)
work.save()
... | Having added the new 'exhibition' Work type, we're going to assume that
every Event of type 'museum' should actually have one Exhibition attached.
So, we'll add one, with the same title as the Event.
And we'll move all Creators from the Event to the Exhibition. |
387,809 | def extract_transformers_from_source(source):
lines = source.split('\n')
linenumbers = []
for number, line in enumerate(lines):
if FROM_EXPERIMENTAL.match(line):
add_transformers(line)
linenumbers.insert(0, number)
for number in linenumbers:
del lines[numbe... | Scan a source for lines of the form
from __experimental__ import transformer1 [,...]
identifying transformers to be used. Each such line is passed to the
add_transformers function, after which it is removed from the
code to be executed. |
387,810 | def save_related(self, request, form, formsets, change):
super(MenuItemAdmin, self).save_related(request, form, formsets, change)
self.model.objects.rebuild() | Rebuilds the tree after saving items related to parent. |
387,811 | def _remove_redundancy_routers(self, context, router_ids, ports,
delete_ha_groups=False):
subnets_info = [{'subnet_id': port['fixed_ips'][0]['subnet_id']}
for port in ports]
for r_id in router_ids:
for i in range(len(subnets_info)):
self.rem... | Deletes all interfaces of the specified redundancy routers
and then the redundancy routers themselves. |
387,812 | def do_rename(argdict):
site = make_site_obj(argdict)
slug = argdict['slug']
newtitle = argdict['newtitle']
try:
site.rename_page(slug, newtitle)
print "Renamed page."
except ValueError:
print "Cannot rename. A page with the given slug does not exist."
sys.exit() | Rename a page. |
387,813 | def define(self, value, lineno, namespace=None):
if self.defined:
error(lineno, "label already defined at line %i" % (self.name, self.lineno))
self.value = value
self.lineno = lineno
self.namespace = NAMESPACE if namespace is None else namespace | Defines label value. It can be anything. Even an AST |
387,814 | def _genA(self):
p, df = self._p, self.df
A = np.zeros((p, p))
for i in range(p):
A[i, i] = sqrt(st.chi2.rvs(df - i))
for j in range(p-1):
for i in range(j+1, p):
A[i, j] = np.random.randn()
return A | Generate the matrix A in the Bartlett decomposition
A is a lower triangular matrix, with
A(i, j) ~ sqrt of Chisq(df - i + 1) when i == j
~ Normal() when i > j |
387,815 | def load_data(path, fmt=None, bg_data=None, bg_fmt=None,
meta_data={}, holo_kw={}, as_type="float32"):
path = pathlib.Path(path).resolve()
for kk in meta_data:
if kk not in qpimage.meta.DATA_KEYS:
msg = "Meta data key not allowed: {}".format(kk)
raise Valu... | Load experimental data
Parameters
----------
path: str
Path to experimental data file or folder
fmt: str
The file format to use (see `file_formats.formats`).
If set to `None`, the file format is guessed.
bg_data: str
Path to background data file or `qpimage.QPImage`
... |
387,816 | def format(self, record):
s = super(ANSIFormatter, self).format(record)
if hasattr(self.context, 'ansi'):
s = self.context.ansi(s, **self.get_sgr(record))
return s | Overridden method that applies SGR codes to log messages. |
387,817 | def _register_view(self, app, resource, *urls, **kwargs):
endpoint = kwargs.pop('endpoint', None) or resource.__name__.lower()
self.endpoints.add(endpoint)
if endpoint in getattr(app, 'view_functions', {}):
existing_view_class = app.view_functions[endpoint].__dict__['view_class']
if ex... | Bind resources to the app.
:param app: an actual :class:`flask.Flask` app
:param resource:
:param urls:
:param endpoint: endpoint name (defaults to :meth:`Resource.__name__.lower`).
Can be used to reference this route in :meth:`flask.url_for`
:type endpoint: str
... |
387,818 | def rm(self, container_alias):
title = '%s.rm' % self.__class__.__name__
input_fields = {
'container_alias': container_alias
}
for key, value in input_fields.items():
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, ob...
:param container_alias: string with name or id of container
:return: string with container id |
387,819 | def _decode_embedded_list(src):
output = []
for elem in src:
if isinstance(elem, dict):
elem = _decode_embedded_dict(elem)
elif isinstance(elem, list):
elem = _decode_embedded_list(elem)
elif isinstance(elem, bytes):
try:
elem = ... | Convert embedded bytes to strings if possible.
List helper. |
387,820 | def update_item(self, payload, last_modified=None):
to_send = self.check_items([payload])[0]
if last_modified is None:
modified = payload["version"]
else:
modified = last_modified
ident = payload["key"]
headers = {"If-Unmodified-Since-Version": st... | Update an existing item
Accepts one argument, a dict containing Item data |
387,821 | def from_local_name(acs, attr, name_format):
for aconv in acs:
if aconv.name_format == name_format:
return aconv.to_format(attr)
return attr | :param acs: List of AttributeConverter instances
:param attr: attribute name as string
:param name_format: Which name-format it should be translated to
:return: An Attribute instance |
387,822 | def getFilename(name):
name = re.sub(r"[^0-9a-zA-Z_\-\.]", "_", name)
while ".." in name:
name = name.replace("..", ".")
while "__" in name:
name = name.replace("__", "_")
if name.startswith((".", "-")):
name = name[1:]
return name | Get a filename from given name without dangerous or incompatible characters. |
387,823 | def _get_config_value(profile, config_name):
config = __salt__['config.option'](profile)
if not config:
raise CommandExe... | Helper function that returns a profile's configuration value based on
the supplied configuration name.
profile
The profile name that contains configuration information.
config_name
The configuration item's name to use to return configuration values. |
387,824 | def _get_record_attrs(out_keys):
if len(out_keys) == 1:
attr = list(out_keys.keys())[0]
if out_keys[attr]:
return attr, out_keys[attr]
return None, None | Check for records, a single key plus output attributes. |
387,825 | def AddClient(self, client):
client_id, keywords = self.AnalyzeClient(client)
self.AddKeywordsForName(client_id, keywords) | Adds a client to the index.
Args:
client: A VFSGRRClient record to add or update. |
387,826 | def pin(package, version, checks, marker, resolving, lazy, quiet):
root = get_root()
package_name = package.lower()
version = version.lower()
for check_name in sorted(os.listdir(root)):
pinned_reqs_file = os.path.join(root, check_name, 'requirements.in')
resolved_reqs_file = os.path.join(root, chec... | Pin a dependency for all checks that require it. This can
also resolve transient dependencies.
Setting the version to `none` will remove the package. You can
specify an unlimited number of additional checks to apply the
pin for via arguments. |
387,827 | def response_hook(self, r, **kwargs):
if r.status_code == 401:
www_authenticate = r.headers.get('www-authenticate', '').lower()
auth_type = _auth_type_from_header(www_authenticate)
if auth_type is not None:
return self.retry_using_http_NTLM_auth(
... | The actual hook handler. |
387,828 | def disable_multicolor(self):
for color in [, , ]:
self.multicolorscales[color].config(state=tk.DISABLED, bg=)
self.multicolorframes[color].config(bg=)
self.multicolorlabels[color].config(bg=)
self.multicolordropdowns[color].config(bg=, state=tk.... | swap from the multicolor image to the single color image |
387,829 | def AssignTasksToClient(self, client_id):
rules = self.Get(self.Schema.RULES)
if not rules:
return 0
if data_store.RelationalDBEnabled():
last_foreman_run = self._GetLastForemanRunTimeRelational(client_id)
else:
last_foreman_run = self._GetLastForemanRunTime(client_id)
lates... | Examines our rules and starts up flows based on the client.
Args:
client_id: Client id of the client for tasks to be assigned.
Returns:
Number of assigned tasks. |
387,830 | def update_hacluster_dns_ha(service, relation_data,
crm_ocf='ocf:maas:dns'):
assert_charm_supports_dns_ha()
settings = ['os-admin-hostname', 'os-internal-hostname',
'os-public-hostname', 'os-access-hostname']
hostname_group = []
for setting in settings:
hostname = config(setting)
if hostname is None:
log(
... | Configure DNS-HA resources based on provided configuration
@param service: Name of the service being configured
@param relation_data: Pointer to dictionary of relation data.
@param crm_ocf: Corosync Open Cluster Framework resource agent to use for
DNS HA |
387,831 | def binned_bitsets_from_list( list=[] ):
last_chrom = None
last_bitset = None
bitsets = dict()
for l in list:
chrom = l[0]
if chrom != last_chrom:
if chrom not in bitsets:
bitsets[chrom] = BinnedBitSet(MAX)
last_chrom = chrom
last_... | Read a list into a dictionary of bitsets |
387,832 | def RemoveDevice(self, object_path):
adapter = mockobject.objects[self.path]
adapter.EmitSignal(ADAPTER_IFACE, 'DeviceRemoved',
'o', [object_path])
387,833 | def is_contextfree(self):
for lhs, rhs in self.rules:
if len(lhs) != 1:
return False
if lhs[0] not in self.nonterminals:
return False
return True | Returns True iff the grammar is context-free. |
387,834 | def unregister(self, name):
try:
name = name.name
except AttributeError:
pass
return self.pop(name,None) | Unregister function by name. |
387,835 | def check_load(grid, mode):
crit_branches = {}
crit_stations = []
if mode == 'MV':
load_factor_mv_line_lc_normal = float(cfg_ding0.get('assumptions',
'load_factor_mv_line_lc_normal'))
load_factor_mv_cable_lc_normal = float(cfg_ding0.get('assumptions',
... | Checks for over-loading of branches and transformers for MV or LV grid.
Parameters
----------
grid : GridDing0
Grid identifier.
mode : str
Kind of grid ('MV' or 'LV').
Returns
-------
:obj:`dict`
Dict of critical branches with max. relative overloading, and the
... |
387,836 | def compile_relative_distances(self, sympy_accesses=None):
if sympy_accesses is None:
sympy_accesses = self.compile_sympy_accesses()
sympy_distances = defaultdict(list)
for var_name, accesses in sympy_accesses.items():
for i in range(1, len(accesses)):
... | Return load and store distances between accesses.
:param sympy_accesses: optionally restrict accesses, default from compile_sympy_accesses()
e.g. if accesses are to [+N, +1, -1, -N], relative distances are [N-1, 2, N-1]
returned is a dict of list of sympy expressions, for each variable |
387,837 | def get_pending_withdrawals(self, currency=None):
return self._api_query(path_dict={
API_V2_0: '/key/balance/getpendingwithdrawals'
}, options={'currencyname': currency} if currency else None,
protection=PROTECTION_PRV) | Used to view your pending withdrawals
Endpoint:
1.1 NO EQUIVALENT
2.0 /key/balance/getpendingwithdrawals
:param currency: String literal for the currency (ie. BTC)
:type currency: str
:return: pending withdrawals in JSON
:rtype : list |
387,838 | def new(cls, settings, *args, **kwargs):
logger.debug( % settings[])
cloud = settings['cloud']
if cloud == 'bare':
self = BareInstance(settings=settings, *args, **kwargs)
elif cloud == 'aws':
self = AWSInstance(settings=settings, *args, **kwargs)
elif cloud == :
... | Create a new Cloud instance based on the Settings |
387,839 | def compile(self, source, name=None, filename=None, raw=False,
defer_init=False):
source_hint = None
try:
if isinstance(source, string_types):
source_hint = source
source = self._parse(source, name, filename)
source = self.... | Compile a node or template source code. The `name` parameter is
the load name of the template after it was joined using
:meth:`join_path` if necessary, not the filename on the file system.
the `filename` parameter is the estimated filename of the template on
the file system. If the tem... |
387,840 | def decorator_handle(tokens):
defs = []
decorates = []
for i, tok in enumerate(tokens):
if "simple" in tok and len(tok) == 1:
decorates.append("@" + tok[0])
elif "test" in tok and len(tok) == 1:
varname = decorator_var + "_" + str(i)
defs.append(varna... | Process decorators. |
387,841 | def _extract_coeffs(self, imt):
C_HR = self.COEFFS_HARD_ROCK[imt]
C_BC = self.COEFFS_BC[imt]
C_SR = self.COEFFS_SOIL_RESPONSE[imt]
SC = self.COEFFS_STRESS[imt]
return C_HR, C_BC, C_SR, SC | Extract dictionaries of coefficients specific to required
intensity measure type. |
387,842 | def jtype(c):
ct = c[]
return ct if ct != else .format(ct, c.get()) | Return a string with the data type of a value, for JSON data |
387,843 | def _bool_segments(array, start=0, delta=1, minlen=1):
array = iter(array)
i = 0
while True:
try:
val = next(array)
except StopIteration:
return
if val:
n = 1
try:
while next(array):
n... | Yield segments of consecutive `True` values in a boolean array
Parameters
----------
array : `iterable`
An iterable of boolean-castable values.
start : `float`
The value of the first sample on the indexed axis
(e.g. the GPS start time of the array).
delta : `float`
... |
387,844 | def sticker_templates():
voc = DisplayList()
stickers = getStickerTemplates()
for sticker in stickers:
voc.add(sticker.get(), sticker.get())
if voc.index == 0:
logger.warning()
return voc | It returns the registered stickers in the system.
:return: a DisplayList object |
387,845 | def exclude(source, keys, *, transform=None):
check = keys if callable(keys) else lambda key: key in keys
return {key: transform(source[key]) if transform else source[key]
for key in source if not check(key)} | Returns a dictionary excluding keys from a source dictionary.
:source: a dictionary
:keys: a set of keys, or a predicate function that accepts a key
:transform: a function that transforms the values |
387,846 | def coerce(self, values):
if isinstance(values, compat.basestring):
values = tuple(value.strip() for value in values.split(','))
opt_iter = tuple(copy.deepcopy(self._option) for value in values)
for opt_obj, val in compat.zip(opt_iter, values):
... | Convert an iterable of literals to an iterable of options.
Args:
values (iterable or string): An iterable of raw values to convert
into options. If the value is a string is is assumed to be a
comma separated list and will be split before processing.
Returns:
... |
387,847 | def params_of_mean(value=array([-.005, 1.]), tau=.1, rate=4.):
def logp(value, tau, rate):
if value[1] > 0 and value[1] + value[0] * 110 > 0:
return normal_like(value[0], 0., tau) + \
exponential_like(value[1], rate)
else:
return -Inf
def random(tau... | Intercept and slope of rate stochastic of poisson distribution
Rate stochastic must be positive for t in [0,T]
p(intercept, slope|tau,rate) =
N(slope|0,tau) Exp(intercept|rate) 1(intercept>0) 1(intercept + slope * T>0) |
387,848 | def _get_color(self, r, g, b):
clr = (r, g, b)
return clr | Convert red, green and blue values specified in floats with
range 0-1 to whatever the native widget color object is. |
387,849 | def wrap(msg, indent, indent_first=True):
wrapper.width = 120
wrapper.initial_indent = indent
wrapper.subsequent_indent = indent
msg = wrapper.fill(msg)
return msg if indent_first else msg[len(indent):] | Helper function that wraps msg to 120-chars page width. All lines (except maybe 1st) will be prefixed with
string {indent}. First line is prefixed only if {indent_first} is True.
:param msg: string to indent
:param indent: string that will be used for indentation
:param indent_first: if True then ... |
387,850 | def toposort(data):
if len(data) == 0:
return
data = data.copy()
for k, v in data.items():
v.discard(k)
extra_items_in_deps = reduce(set.union, data.values()) - set(data.keys())
data.update(dict((item, set()) for item in extra_items_in_deps))
whil... | Dependencies are expressed as a dictionary whose keys are items
and whose values are a set of dependent items. Output is a list of
sets in topological order. The first set consists of items with no
dependences, each subsequent set consists of items that depend upon
items in the preceding sets.
:par... |
387,851 | def int_list_packer(term, values):
DENSITY = 10
MIN_RANGE = 20
singletons = set()
ranges = []
exclude = set()
sorted = jx.sort(values)
last = sorted[0]
curr_start = last
curr_excl = set()
for v in sorted[1::]:
if v <= last + 1:
pass
elif v... | return singletons, ranges and exclusions |
387,852 | def delete(self):
if lib.EnvDeleteInstance(self._env, self._ist) != 1:
raise CLIPSError(self._env) | Delete the instance. |
387,853 | def wrap_many(self, *args, strict=False):
for arg in args:
is_elem = arg and isinstance(arg, DOMElement)
is_elem_iter = (
not is_elem and arg and isinstance(arg, Iterable) and isinstance(iter(arg).__next__(), DOMElement)
)
if not (is_elem... | Wraps different copies of this element inside all empty tags
listed in params or param's (non-empty) iterators.
Returns list of copies of this element wrapped inside args
or None if not succeeded, in the same order and same structure,
i.e. args = (Div(), (Div())) -> value = (A(...), (A(... |
387,854 | def update(did):
required_attributes = [, , , , , ,
]
required_metadata_base_attributes = [, , , ,
, , , ]
required_metadata_curation_attributes = [, ]
assert isinstance(request.json, dict),
data = request.json
if not da... | Update DDO of an existing asset
---
tags:
- ddo
consumes:
- application/json
parameters:
- in: body
name: body
required: true
description: DDO of the asset.
schema:
type: object
required:
- "@context"
- created... |
387,855 | def check_in(self, url: str, new_status: Status,
increment_try_count: bool=True,
url_result: Optional[URLResult]=None):
| Update record for processed URL.
Args:
url: The URL.
new_status: Update the item status to `new_status`.
increment_try_count: Whether to increment the try counter
for the URL.
url_result: Additional values. |
387,856 | def cut_from_block(html_message):
block = html_message.xpath(
("//*[starts-with(mg:text_content(), )]|"
"//*[starts-with(mg:text_content(), )]"))
if block:
block = block[-1]
parent_div = None
while block.getparent() is not None:
if block.tag == 'div':
... | Cuts div tag which wraps block starting with "From:". |
387,857 | def get_help(obj, env, subcmds):
doc = txt.dedent(obj.__doc__ or "")
env = env.copy()
doc = doc.strip()
if not re.search(r"^usage:\s*$", doc, flags=re.IGNORECASE | re.MULTILINE):
doc += txt.dedent()
help_line = (" %%-%ds %%s"
% (max([5] + [len(a) for a in subcmds]... | Interpolate complete help doc of given object
Assumes that the given object has a specific interface:
obj.__doc__ is the basic help object.
obj.get_actions_titles() returns the subcommand if any. |
387,858 | def list_build_configuration_sets(page_size=200, page_index=0, sort="", q=""):
data = list_build_configuration_sets_raw(page_size, page_index, sort, q)
if data:
return utils.format_json_list(data) | List all build configuration sets |
387,859 | def tf_idf(text):
_raise_error_if_not_sarray(text, "text")
if len(text) == 0:
return _turicreate.SArray()
dataset = _turicreate.SFrame({'docs': text})
scores = _feature_engineering.TFIDF().fit_transform(dataset)
return scores['docs'] | Compute the TF-IDF scores for each word in each document. The collection
of documents must be in bag-of-words format.
.. math::
\mbox{TF-IDF}(w, d) = tf(w, d) * log(N / f(w))
where :math:`tf(w, d)` is the number of times word :math:`w` appeared in
document :math:`d`, :math:`f(w)` is the number... |
387,860 | def main():
import signal
signal.signal(signal.SIGINT, signal.SIG_DFL)
import argparse
parser = argparse.ArgumentParser(description=)
parser.add_argument(,,type=str, default=,help=)
parser.add_argument(,type=str, help=)
parser.add_argument(,type=str, help=)
pa... | Main entry point |
387,861 | def get_all_publications(return_namedtuples=True):
sources = [
ben_cz.get_publications,
grada_cz.get_publications,
cpress_cz.get_publications,
zonerpress_cz.get_publications,
]
publications = []
for source in sources:
publications.extend(
fi... | Get list publications from all available source.
Args:
return_namedtuples (bool, default True): Convert :class:`.Publication`
structures to namedtuples (used in AMQP
communication).
Returns:
list: List of :class:`.Publication` structures co... |
387,862 | def download_ts(self, path, chunk, process_last_line=True):
import glob
ret_chunk = []
partial_chunk = ''
lines = chunk.strip().split('\n')
if not process_last_line:
partial_chunk = lines.pop()
for line in lines:
if line.startswith():
... | This will look for a download ts link.
It will then download that file and replace the
link with the local file.
:param process_last_line:
:param path: str of the path to put the file
:param chunk: str of the chunk file, note this could have partial lines
:return: s... |
387,863 | def c(*args, **kwargs):
with Reflect.context(**kwargs) as r:
kwargs["args"] = args
instance = C_CLASS(r, stream, **kwargs)
instance() | kind of like od -c on the command line, basically it dumps each character and info
about that char
since -- 2013-5-9
*args -- tuple -- one or more strings to dump |
387,864 | def differences_between(self, current_files, parent_files, changes, prefixes):
parent_oid = None
if any(is_tree for _, is_tree, _ in changes):
if len(changes) == 1:
wanted_path = list(changes)[0][0]
parent_oid = frozenset([oid for path, is_tree, oid ... | yield (thing, changes, is_path)
If is_path is true, changes is None and thing is the path as a tuple.
If is_path is false, thing is the current_files and parent_files for
that changed treeentry and changes is the difference between current_files
and parent_files.
The code here... |
387,865 | def _rest_post(self, suburi, request_headers, request_body):
return self._rest_op('POST', suburi, request_headers, request_body) | REST POST operation.
The response body after the operation could be the new resource, or
ExtendedError, or it could be empty. |
387,866 | def on_for_seconds(self, left_speed, right_speed, seconds, brake=True, block=True):
if seconds < 0:
raise ValueError("seconds is negative ({})".format(seconds))
(left_speed_native_units, right_speed_native_units) = self._unpack_speeds_to_native_units(left_speed, right_speed)
... | Rotate the motors at 'left_speed & right_speed' for 'seconds'. Speeds
can be percentages or any SpeedValue implementation. |
387,867 | def install_remote(self):
package, pkg_folder = None, None
try:
package = self._download()
pkg_folder = self._extract(package)
napp_folder = self._get_local_folder(pkg_folder)
dst = self._installed / self.user / self.napp
self._check_m... | Download, extract and install NApp. |
387,868 | def getRelativePath(basepath, path):
basepath = splitpath(os.path.abspath(basepath))
path = splitpath(os.path.abspath(path))
afterCommon = False
for c in basepath:
if afterCommon or path[0] != c:
path.insert(0, os.path.pardir)
afterCommon = True
else:
... | Get a path that is relative to the given base path. |
387,869 | def _from_dict(cls, _dict):
args = {}
if 'customizations' in _dict:
args['customizations'] = [
LanguageModel._from_dict(x)
for x in (_dict.get('customizations'))
]
else:
raise ValueError(
'Required property \'customizations\' not present in LanguageModels JSON'
)
return cls(**args) | Initialize a LanguageModels object from a json dictionary. |
387,870 | def make_encoder(self,formula_dict,inter_list,param_dict):
X_dict = {}
Xcol_dict = {}
encoder_dict = {}
for key in formula_dict:
encoding,arg = formula_dict[key]
if in encoding:
drop_name = arg
... | make the encoder function |
387,871 | def associate_route_table(self, route_table_id, subnet_id):
params = {
'RouteTableId': route_table_id,
'SubnetId': subnet_id
}
result = self.get_object('AssociateRouteTable', params, ResultSet)
return result.associationId | Associates a route table with a specific subnet.
:type route_table_id: str
:param route_table_id: The ID of the route table to associate.
:type subnet_id: str
:param subnet_id: The ID of the subnet to associate with.
:rtype: str
:return: The ID of the association creat... |
387,872 | def export_throw_event_info(node_params, output_element):
definitions = node_params[consts.Consts.event_definitions]
for definition in definitions:
definition_id = definition[consts.Consts.id]
definition_type = definition[consts.Consts.definition_type]
output... | Adds EndEvent or IntermediateThrowingEvent attributes to exported XML element
:param node_params: dictionary with given intermediate throw event parameters,
:param output_element: object representing BPMN XML 'intermediateThrowEvent' element. |
387,873 | def _rapRperiAxiEq(R,E,L,pot):
return E-potentialAxi(R,pot)-L**2./2./R**2. | The vr=0 equation that needs to be solved to find apo- and pericenter |
387,874 | def get_measurement_id_options(self):
document = self._get_document_for_url(
self._get_url_for_measurements()
)
measurement_ids = self._get_measurement_ids(document)
return measurement_ids | Returns list of measurement choices. |
387,875 | def create_small_thumbnail(self, token, item_id):
parameters = dict()
parameters['token'] = token
parameters['itemId'] = item_id
response = self.request(
, parameters)
return response | Create a 100x100 small thumbnail for the given item. It is used for
preview purpose and displayed in the 'preview' and 'thumbnails'
sidebar sections.
:param token: A valid token for the user in question.
:type token: string
:param item_id: The item on which to set the thumbnail.... |
387,876 | def on(self, event, listener, *user_args):
self._listeners[event].append(
_Listener(callback=listener, user_args=user_args)) | Register a ``listener`` to be called on ``event``.
The listener will be called with any extra arguments passed to
:meth:`emit` first, and then the extra arguments passed to :meth:`on`
last.
If the listener function returns :class:`False`, it is removed and will
not be called th... |
387,877 | def get_image_grad(net, image, class_id=None):
return _get_grad(net, image, class_id, image_grad=True) | Get the gradients of the image.
Parameters:
----------
net: Block
Network to use for visualization.
image: NDArray
Preprocessed image to use for visualization.
class_id: int
Category ID this image belongs to. If not provided,
network's prediction will be used. |
387,878 | def determine_if_whitespace(self):
value = self.current.value
if value == '\n':
self.is_space = True
else:
self.is_space = False
if (value == '' or regexes['whitespace'].match(value)):
self.is_space = True | Set is_space if current token is whitespace
Is space if value is:
* Newline
* Empty String
* Something that matches regexes['whitespace'] |
387,879 | def authorized_default_handler(resp, remote, *args, **kwargs):
response_token_setter(remote, resp)
db.session.commit()
return redirect(url_for()) | Store access token in session.
Default authorized handler.
:param remote: The remote application.
:param resp: The response.
:returns: Redirect response. |
387,880 | def init_registry_from_json(mongo, filename, clear_collection=False):
with open(filename, 'r') as f:
models = json.load(f)
init_registry(mongo, models, clear_collection) | Initialize a model registry with a list of model definitions that are
stored in a given file in Json format.
Parameters
----------
mongo : scodata.MongoDBFactory
Connector for MongoDB
filename : string
Path to file containing model definitions
clear_collection : boolean
... |
387,881 | def write(self):
self._check()
cache = self._cache
pristine_cache = self._pristine_cache
self._pristine_cache = cache.copy()
changes = []
def apply_changes(content, stat):
del changes[:]
current = yaml.load... | Write object state to Zookeeper.
This will write the current state of the object to Zookeeper,
taking the final merged state as the new one, and resetting
any write buffers. |
387,882 | def wait_for_crm_operation(operation):
logger.info("wait_for_crm_operation: "
"Waiting for operation {} to finish...".format(operation))
for _ in range(MAX_POLLS):
result = crm.operations().get(name=operation["name"]).execute()
if "error" in result:
raise Except... | Poll for cloud resource manager operation until finished. |
387,883 | def attention_mask_same_segment(
query_segment, memory_segment=None, dtype=tf.float32):
memory_segment = rename_length_to_memory_length(
memory_segment or query_segment)
return mtf.cast(mtf.not_equal(query_segment, memory_segment), dtype) * -1e9 | Bias for attention where attention between segments is disallowed.
Args:
query_segment: a mtf.Tensor with shape [..., length_dim]
memory_segment: a mtf.Tensor with shape [..., memory_length_dim]
dtype: a tf.dtype
Returns:
a mtf.Tensor with shape [..., length_dim, memory_length_dim] |
387,884 | def _load(self):
with open(self._pickle_file, 'rb') as source:
pickler = pickle.Unpickler(source)
for attribute in self._pickle_attributes:
pickle_data = pickler.load()
setattr(self, attribute, pickle_data) | Load data from a pickle file. |
387,885 | def get_nsing(self,epsilon=1.0e-4):
mx = self.xtqx.shape[0]
nsing = mx - np.searchsorted(
np.sort((self.xtqx.s.x / self.xtqx.s.x.max())[:,0]),epsilon)
if nsing == mx:
self.logger.warn("optimal nsing=npar")
nsing = None
return nsing | get the number of solution space dimensions given
a ratio between the largest and smallest singular values
Parameters
----------
epsilon: float
singular value ratio
Returns
-------
nsing : float
number of singular components above the eps... |
387,886 | def defaults(self):
self.chart_style = {}
self.chart_opts = {}
self.style("color", "
self.width(900)
self.height(250) | Reset the chart options and style to defaults |
387,887 | def load_glove_df(filepath, **kwargs):
pdkwargs = dict(index_col=0, header=None, sep=r, skiprows=[0], verbose=False, engine=)
pdkwargs.update(kwargs)
return pd.read_csv(filepath, **pdkwargs) | Load a GloVE-format text file into a dataframe
>>> df = load_glove_df(os.path.join(BIGDATA_PATH, 'glove_test.txt'))
>>> df.index[:3]
Index(['the', ',', '.'], dtype='object', name=0)
>>> df.iloc[0][:3]
1 0.41800
2 0.24968
3 -0.41242
Name: the, dtype: float64 |
387,888 | def conv2d_fixed_padding(inputs,
filters,
kernel_size,
strides,
data_format="channels_first",
use_td=False,
targeting_rate=None,
keep_prob=None,
... | Strided 2-D convolution with explicit padding.
The padding is consistent and is based only on `kernel_size`, not on the
dimensions of `inputs` (as opposed to using `tf.layers.conv2d` alone).
Args:
inputs: `Tensor` of size `[batch, channels, height_in, width_in]`.
filters: `int` number of filters in the ... |
387,889 | def init_prior(self, R):
centers, widths = self.init_centers_widths(R)
prior = np.zeros(self.K * (self.n_dim + 1))
self.set_centers(prior, centers)
self.set_widths(prior, widths)
self.set_prior(prior)
return self | initialize prior for the subject
Returns
-------
TFA
Returns the instance itself. |
387,890 | def printSequences(x, formatString="%d"):
[seqLen, numElements] = x.shape
for i in range(seqLen):
s = ""
for j in range(numElements):
s += formatString % x[i][j]
print s | Print a bunch of sequences stored in a 2D numpy array. |
async def execute_all_with_names(self, subprocesses, container = None, retnames = ('',), forceclose = True):
if not subprocesses:
return []
subprocesses = list(subprocesses)
if len(subprocesses) == 1 and (container is None or container is self) and forceclose... | DEPRECATED Execute all subprocesses and get the return values.
:param subprocesses: sequence of subroutines (coroutines)
:param container: if specified, run subprocesses in another container.
:param retnames: DEPRECATED get return value from container.(name) for each n... |
387,892 | def _get_notifications_status(self, notifications):
if notifications:
size = len(notifications["activeNotifications"])
else:
size = 0
status = self.status_notif if size > 0 else self.status_no_notif
return (size, status) | Get the notifications status |
387,893 | def get_cached_moderated_reddits(self):
if self._mod_subs is None:
self._mod_subs = {'mod': self.reddit_session.get_subreddit('mod')}
for sub in self.reddit_session.get_my_moderation(limit=None):
self._mod_subs[six.text_type(sub).lower()] = sub
return self._mod_sub... | Return a cached dictionary of the user's moderated reddits.
This list is used internally. Consider using the `get_my_moderation`
function instead. |
387,894 | def contains(self, times, keep_inside=True, delta_t=DEFAULT_OBSERVATION_TIME):
current_max_order = self.max_order
new_max_order = TimeMOC.time_resolution_to_order(delta_t)
if new_max_order > current_max_order:
message = \
.format(
... | Get a mask array (e.g. a numpy boolean array) of times being inside (or outside) the
TMOC instance.
Parameters
----------
times : `astropy.time.Time`
astropy times to check whether they are contained in the TMOC or not.
keep_inside : bool, optional
True b... |
387,895 | def lookup(self, pathogenName, sampleName):
pathogenIndex = self._pathogens[pathogenName]
sampleIndex = self._samples[sampleName]
return self._readsFilenames[(pathogenIndex, sampleIndex)] | Look up a pathogen name, sample name combination and get its
FASTA/FASTQ file name and unique read count.
This method should be used instead of C{add} in situations where
you want an exception to be raised if a pathogen/sample combination has
not already been passed to C{add}.
... |
387,896 | def highlight_occurences(editor):
format = editor.language.theme.get("accelerator.occurence")
if not format:
return False
extra_selections = editor.extraSelections() or []
if not editor.isReadOnly():
word = editor.get_word_under_cursor()
if not word:
return Fal... | Highlights occurrences of the word under cursor in the given editor.
:param editor: Document editor.
:type editor: QWidget
:return: Method success.
:rtype: bool |
387,897 | def list(self, full_properties=False, filter_args=None):
resource_obj_list = []
uris = self.partition.get_property('virtual-function-uris')
if uris:
for uri in uris:
resource_obj = self.resource_class(
manager=self,
uri=uri,
... | List the Virtual Functions of this Partition.
Authorization requirements:
* Object-access permission to this Partition.
Parameters:
full_properties (bool):
Controls whether the full set of resource properties should be
retrieved, vs. only the short set as re... |
387,898 | def load_obs(self, mask_threshold=0.5):
print("Loading obs ", self.run_date, self.model_name, self.forecast_variable)
start_date = self.run_date + timedelta(hours=self.start_hour)
end_date = self.run_date + timedelta(hours=self.end_hour)
mrms_grid = MRMSGrid(start_date, end_dat... | Loads observations and masking grid (if needed).
Args:
mask_threshold: Values greater than the threshold are kept, others are masked. |
387,899 | def _normalise_weights(logZ, weights, ntrim=None):
logZ -= logZ.max()
Zs = numpy.exp(logZ)
weights = [w/w.sum()*Z for w, Z in zip(weights, Zs)]
wmax = max([w.max() for w in weights])
weights = [w/wmax for w in weights]
ntot = sum([w.sum() for w in weights])
if ntrim is not None an... | Correctly normalise the weights for trimming
This takes a list of log-evidences, and re-normalises the weights so that
the largest weight across all samples is 1, and the total weight in each
set of samples is proportional to the evidence.
Parameters
----------
logZ: array-like
log-evi... |
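
The header row reads like a pandas-style dump of a code/docstring corpus: an integer index plus two string columns. As a minimal sketch of how such a table could be loaded and inspected (assuming the rows are exported to a local CSV named `code_docstrings.csv`; that filename is hypothetical and not part of this dataset):

```python
import pandas as pd

# Minimal sketch: load a code/docstring table shaped like the one above.
# Assumes a local CSV with columns "Unnamed: 0", "code", "docstring";
# the filename is hypothetical.
df = pd.read_csv("code_docstrings.csv", index_col=0)

# Sanity checks mirroring the header stats: an int64 index, two string columns.
print(df.dtypes)
print(df["code"].str.len().describe())  # code lengths (26 to 79.6k per the header)
print(df["docstring"].iloc[0])          # first docstring in the slice
```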