Unnamed: 0
int64 0
389k
| code
stringlengths 26
79.6k
| docstring
stringlengths 1
46.9k
|
|---|---|---|
371,900
|
def folder_get(self, token, folder_id):
    """Get the attributes of the specified folder.

    NOTE(review): the parameter-key and route string literals were lost in
    extraction (``parameters[] = ...``, ``self.request(, ...)``) — restore
    them from the upstream source before use.

    :param token: A valid token for the user in question.
    :type token: string
    :param folder_id: The id of the requested folder.
    :type folder_id: int | long
    :returns: Dictionary of the folder attributes.
    :rtype: dict
    """
    parameters = dict()
    parameters[] = token
    parameters[] = folder_id
    response = self.request(, parameters)
    return response
|
Get the attributes of the specified folder.
:param token: A valid token for the user in question.
:type token: string
:param folder_id: The id of the requested folder.
:type folder_id: int | long
:returns: Dictionary of the folder attributes.
:rtype: dict
|
371,901
|
def cmd(send, msg, args):
    """Send *msg* converted to fullwidth characters.

    When no text is given, a randomly generated word is used instead.
    Syntax: {command} [text]
    """
    text = msg or gen_word()
    send(gen_fullwidth(text.upper()))
|
Converts text to fullwidth characters.
Syntax: {command} [text]
|
371,902
|
def transition_to_add(self):
    """Move the state machine into the ``add`` state.

    Only legal from ``init`` or ``add`` (re-entering ``add`` is allowed).
    """
    allowed = (AQStateMachineStates.init, AQStateMachineStates.add)
    assert self.state in allowed
    self.state = AQStateMachineStates.add
|
Transition to add
|
371,903
|
def activate(paths, skip_local, skip_shared):
if not paths:
ctx = click.get_current_context()
if cpenv.get_active_env():
ctx.invoke(info)
return
click.echo(ctx.get_help())
examples = (
)
click.echo(examples)
return
if skip_local:
cpenv.module_resolvers.remove(cpenv.resolver.module_resolver)
cpenv.module_resolvers.remove(cpenv.resolver.active_env_module_resolver)
if skip_shared:
cpenv.module_resolvers.remove(cpenv.resolver.modules_path_resolver)
try:
r = cpenv.resolve(*paths)
except cpenv.ResolveError as e:
click.echo( + str(e))
return
resolved = set(r.resolved)
active_modules = set()
env = cpenv.get_active_env()
if env:
active_modules.add(env)
active_modules.update(cpenv.get_active_modules())
new_modules = resolved - active_modules
old_modules = active_modules & resolved
if old_modules and not new_modules:
click.echo(
+ bold(.join([obj.name for obj in old_modules]))
)
return
if env and contains_env(new_modules):
click.echo()
return
click.echo()
click.echo(format_objects(r.resolved))
r.activate()
click.echo(blue())
modules = sorted(resolved | active_modules, key=_type_and_name)
prompt = .join([obj.name for obj in modules])
shell.launch(prompt)
|
Activate an environment
|
371,904
|
def value(self, obj):
if self.template_name:
t = loader.select_template([self.template_name])
return t.render(Context({: obj}))
if self.eval_func:
try:
return eval(self.eval_func)
except Exception as e:
raise type(e)(.format(unicode(self), self.eval_func, unicode(e)))
elif self.model_attr:
if isinstance(obj, dict):
return obj[self.model_attr]
current_obj = getattr(obj, self.model_attr)
if callable(current_obj):
return current_obj()
else:
return current_obj
else:
raise KeyError(
.format(unicode(self)))
|
Computes the value of this field to update the index.
:param obj: object instance, as a dictionary or as a model instance.
|
371,905
|
def factorize(cls, pq):
if pq % 2 == 0:
return 2, pq // 2
y, c, m = randint(1, pq - 1), randint(1, pq - 1), randint(1, pq - 1)
g = r = q = 1
x = ys = 0
while g == 1:
x = y
for i in range(r):
y = (pow(y, 2, pq) + c) % pq
k = 0
while k < r and g == 1:
ys = y
for i in range(min(m, r - k)):
y = (pow(y, 2, pq) + c) % pq
q = q * (abs(x - y)) % pq
g = cls.gcd(q, pq)
k += m
r *= 2
if g == pq:
while True:
ys = (pow(ys, 2, pq) + c) % pq
g = cls.gcd(abs(x - ys), pq)
if g > 1:
break
p, q = g, pq // g
return (p, q) if p < q else (q, p)
|
Factorizes the given large integer.
:param pq: the prime pair pq.
:return: a tuple containing the two factors p and q.
|
371,906
|
def delete_user(self, user_id, **kwargs):
    """Delete the user identified by *user_id*.

    Builds a ``DeleteUser`` command with this client's settings and invokes it.
    NOTE(review): ``kwargs`` is forwarded twice — to the ``DeleteUser``
    constructor and again to ``call`` — presumably both accept overlapping
    options; verify against the ``DeleteUser`` implementation.

    :param user_id: User ID
    :param kwargs: extra options forwarded to the command
    :return: result of ``DeleteUser.call``
    """
    return DeleteUser(settings=self.settings, **kwargs).call(user_id=user_id, **kwargs)
|
Delete user
:param user_id: User ID
:param kwargs:
:return:
|
371,907
|
def convert_markerstyle(inputstyle, mode, inputmode=None):
mode = mode.lower()
if mode not in (, ):
raise ValueError("`{0}` is not valid `mode`".format(mode))
if inputmode is None:
if inputstyle in markerstyles_root2mpl:
inputmode =
elif inputstyle in markerstyles_mpl2root or in str(inputstyle):
inputmode =
elif inputstyle in markerstyles_text2root:
inputmode =
inputstyle = markerstyles_text2root[inputstyle]
else:
raise ValueError(
"`{0}` is not a valid `markerstyle`".format(inputstyle))
if inputmode == :
if inputstyle not in markerstyles_root2mpl:
raise ValueError(
"`{0}` is not a valid ROOT `markerstyle`".format(
inputstyle))
if mode == :
return inputstyle
return markerstyles_root2mpl[inputstyle]
else:
if in str(inputstyle):
if mode == :
return 1
else:
return inputstyle
if inputstyle not in markerstyles_mpl2root:
raise ValueError(
"`{0}` is not a valid matplotlib `markerstyle`".format(
inputstyle))
if mode == :
return inputstyle
return markerstyles_mpl2root[inputstyle]
|
Convert *inputstyle* to ROOT or matplotlib format.
Output format is determined by *mode* ('root' or 'mpl'). The *inputstyle*
may be a ROOT marker style, a matplotlib marker style, or a description
such as 'star' or 'square'.
|
371,908
|
def _pairwise(iterable):
a, b = itertools.tee(iterable)
next(b, None)
if sys.version_info.major == 2:
return itertools.izip(a, b)
else:
return zip(a, b)
|
Wrapper on itertools for SVD_magnitude.
|
371,909
|
def calculate_checksum_on_iterator(
    itr, algorithm=d1_common.const.DEFAULT_CHECKSUM_ALGORITHM
):
    """Calculate the checksum over all chunks yielded by *itr*.

    Args:
        itr: iterable
            Object which supports the iterator protocol, yielding bytes chunks.
        algorithm: str
            Checksum algorithm designator (e.g. ``MD5`` or ``SHA1``).

    Returns:
        str: checksum as a hexadecimal string.
    """
    calculator = get_checksum_calculator_by_dataone_designator(algorithm)
    for block in itr:
        calculator.update(block)
    return calculator.hexdigest()
|
Calculate the checksum of an iterator.
Args:
itr: iterable
Object which supports the iterator protocol.
algorithm: str
Checksum algorithm, ``MD5`` or ``SHA1`` / ``SHA-1``.
Returns:
str : Checksum as a hexadecimal string, with length decided by the algorithm.
|
371,910
|
def _duplicate_queries(self, output):
if QC_SETTINGS[]:
for query, count in self.queries.most_common(QC_SETTINGS[]):
lines = [.format(count)]
lines += wrap(query)
lines = "\n".join(lines) + "\n"
output += self._colorize(lines, count)
return output
|
Appends the most common duplicate queries to the given output.
|
371,911
|
def get_forward_star(self, node):
    """Return a copy of *node*'s forward star.

    :param node: node to retrieve the forward-star of.
    :returns: set -- hyperedge ids in the node's forward star.
    :raises: ValueError -- if no such node exists.
    """
    if node in self._node_attributes:
        return self._forward_star[node].copy()
    raise ValueError("No such node exists.")
|
Given a node, get a copy of that node's forward star.
:param node: node to retrieve the forward-star of.
:returns: set -- set of hyperedge_ids for the hyperedges
in the node's forward star.
:raises: ValueError -- No such node exists.
|
371,912
|
def args_length(min_len, max_len, *args):
    """Validate that every argument's length lies in ``[min_len, max_len]``.

    Delegates null-checking to ``not_null`` first, then raises
    ``ValueError`` if any argument is out of range.
    """
    not_null(*args)
    for value in args:
        if not (min_len <= len(value) <= max_len):
            raise ValueError("Argument length must be between {0} and {1}!".format(min_len, max_len))
|
检查参数长度
|
371,913
|
def nearest_vertices(self, x, y, k=1, max_distance=np.inf):
    """Query the cKDtree for the nearest neighbours and Euclidean
    distance from x,y points.

    Returns 0, 0 if a cKDtree has not been constructed
    (switch tree=True if you need this routine).

    Parameters
    ----------
    x : 1D array of Cartesian x coordinates
    y : 1D array of Cartesian y coordinates
    k : number of nearest neighbours to return (default: 1)
    max_distance : maximum Euclidean distance to search
        for neighbours (default: inf)

    Returns
    -------
    d : Euclidean distance between each point and their
        nearest neighbour(s)
    vert : vertices of the nearest neighbour(s)
    """
    # Identity checks instead of `==`: comparing an arbitrary tree object
    # with `== False` may invoke overloaded equality (ambiguous on arrays),
    # while `is False` / `is None` tests exactly the sentinel values.
    if self.tree is False or self.tree is None:
        return 0, 0
    xy = np.column_stack([x, y])
    dxy, vertices = self._cKDtree.query(xy, k=k, distance_upper_bound=max_distance)
    if k == 1:
        # Keep a consistent 2D (n, 1) shape for the single-neighbour case.
        vertices = np.reshape(vertices, (-1, 1))
    return dxy, vertices
|
Query the cKDtree for the nearest neighbours and Euclidean
distance from x,y points.
Returns 0, 0 if a cKDtree has not been constructed
(switch tree=True if you need this routine)
Parameters
----------
x : 1D array of Cartesian x coordinates
y : 1D array of Cartesian y coordinates
k : number of nearest neighbours to return
(default: 1)
max_distance : maximum Euclidean distance to search
for neighbours (default: inf)
Returns
-------
d : Euclidean distance between each point and their
nearest neighbour(s)
vert : vertices of the nearest neighbour(s)
|
371,914
|
def _footer_start_thread(self, text, time):
footerwid = urwid.AttrMap(urwid.Text(text), )
self.top.footer = footerwid
load_thread = Thread(target=self._loading_thread, args=(time,))
load_thread.daemon = True
load_thread.start()
|
Display given text in the footer. Clears after <time> seconds
|
371,915
|
def do_build_reports(directory):
for cwd, dirs, files in os.walk(directory):
for f in sorted(files):
if f in (, , , ):
job_ini = os.path.join(cwd, f)
logging.info(job_ini)
try:
reportwriter.build_report(job_ini, cwd)
except Exception as e:
logging.error(str(e))
|
Walk the directory and builds pre-calculation reports for all the
job.ini files found.
|
371,916
|
def get_joke():
    """Fetch a random joke from the chucknorris.io API.

    Returns the joke text, or None if the request did not succeed.
    """
    response = requests.get("https://api.chucknorris.io/jokes/random")
    if response.status_code != 200:
        return None
    payload = json.loads(response.content.decode("UTF-8"))
    return payload["value"]
|
Returns a joke from the chucknorris.io random-joke API.
Returns None if unable to retrieve a joke.
|
371,917
|
def merge(args):
from jcvi.formats.base import DictFile
p = OptionParser(merge.__doc__)
opts, args = p.parse_args(args)
if len(args) != 3:
sys.exit(not p.print_help())
quartets, registry, lost = args
qq = DictFile(registry, keypos=1, valuepos=3)
lost = DictFile(lost, keypos=1, valuepos=0, delimiter=)
qq.update(lost)
fp = open(quartets)
cases = {
"AN,CN": 4,
"BO,AN,CN": 8,
"BO,CN": 2,
"BR,AN": 1,
"BR,AN,CN": 6,
"BR,BO": 3,
"BR,BO,AN": 5,
"BR,BO,AN,CN": 9,
"BR,BO,CN": 7,
}
ip = {
"syntenic_model": "Syntenic_model_excluded_by_OMG",
"complete": "Predictable",
"partial": "Truncated",
"pseudogene": "Pseudogene",
"random": "Match_random",
"real_ns": "Transposed",
"gmap_fail": "GMAP_fail",
"AN LOST": "AN_LOST",
"CN LOST": "CN_LOST",
"BR LOST": "BR_LOST",
"BO LOST": "BO_LOST",
"outside": "Outside_synteny_blocks",
"[NF]": "Not_found",
}
for row in fp:
atoms = row.strip().split("\t")
genes = atoms[:4]
tag = atoms[4]
a, b, c, d = [qq.get(x, ".").rsplit("-", 1)[-1] for x in genes]
qqs = [c, d, a, b]
for i, q in enumerate(qqs):
if atoms[i] != :
qqs[i] = "syntenic_model"
comment = "Case{0}".format(cases[tag])
dots = sum([1 for x in genes if x == ])
if dots == 1:
idx = genes.index(".")
status = qqs[idx]
status = ip[status]
comment += "-" + status
print(row.strip() + "\t" + "\t".join(qqs + [comment]))
|
%prog merge protein-quartets registry LOST
Merge protein quartets table with dna quartets registry. This is specific
to the napus project.
|
371,918
|
def add_dynamic_kb(kbname, tag, collection="", searchwith=""):
kb_id = add_kb(kb_name=kbname, kb_type=)
save_kb_dyn_config(kb_id, tag, searchwith, collection)
return kb_id
|
A convenience method.
|
371,919
|
def create(self, using=None, **kwargs):
self._get_connection(using).indices.create(index=self._name, body=self.to_dict(), **kwargs)
|
Creates the index in elasticsearch.
Any additional keyword arguments will be passed to
``Elasticsearch.indices.create`` unchanged.
|
371,920
|
def _phase_kuramoto(self, teta, t, argv):
index = argv;
phase = 0.0;
neighbors = self.get_neighbors(index);
for k in neighbors:
conn_weight = 1.0;
if (self._ena_conn_weight is True):
conn_weight = self._conn_weight[index][k];
phase += conn_weight * self._weight * math.sin(self._phases[k] - teta);
divider = len(neighbors);
if (divider == 0):
divider = 1.0;
return ( self._freq[index] + (phase / divider) );
|
!
@brief Overrided method for calculation of oscillator phase.
@param[in] teta (double): Current value of phase.
@param[in] t (double): Time (can be ignored).
@param[in] argv (uint): Index of oscillator whose phase represented by argument teta.
@return (double) New value of phase of oscillator with index 'argv'.
|
371,921
|
def series(self):
    """Generate the data column-by-column (no dates are included).

    Yields each column of ``self.values()`` as a 1D slice; yields nothing
    when there is no data.

    NOTE: the original raised StopIteration for empty data. Under PEP 479
    (Python 3.7+) raising StopIteration inside a generator surfaces as
    RuntimeError in callers; simply returning ends iteration identically.
    """
    data = self.values()
    if len(data):
        for c in range(self.count()):
            yield data[:, c]
|
Generator of single series data (no dates are included).
|
371,922
|
def requestSchema(self, nym, name, version, sender):
operation = { TARGET_NYM: nym,
TXN_TYPE: GET_SCHEMA,
DATA: {NAME : name,
VERSION: version}
}
req = Request(sender, operation=operation)
return self.prepReq(req)
|
Used to get a schema from Sovrin
:param nym: nym that schema is attached to
:param name: name of schema
:param version: version of schema
:return: req object
|
371,923
|
def is_read_only(p_command):
    """Returns True when the given command class is read-only.

    NOTE(review): two command-name string literals in the tuple were lost
    in extraction (``(, ) + READ_ONLY_COMMANDS``) — restore them from the
    upstream source before use.
    """
    read_only_commands = tuple(cmd for cmd
                               in (, ) + READ_ONLY_COMMANDS)
    return p_command.name() in read_only_commands
|
Returns True when the given command class is read-only.
|
371,924
|
def selection_error_control(self, form_info):
keys, names = self.return_selected_form_items(form_info[])
chosen_channels_number = len(keys)
if form_info[] and chosen_channels_number < 2:
return False, _(
u"You should choose at least two channel to merge operation at a new channel.")
elif form_info[] and chosen_channels_number == 0:
return False, _(
u"You should choose at least one channel to merge operation with existing channel.")
elif form_info[] and chosen_channels_number != 1:
return False, _(u"You should choose one channel for split operation.")
return True, None
|
It controls the selection from the form according
to the operations, and returns an error message
if it does not comply with the rules.
Args:
form_info: Channel or subscriber form from the user
Returns: True or False
error message
|
371,925
|
def add_node(self, node):
    """Adds a `node` to the hash ring (including a number of replicas).

    Each of ``self.replicas`` virtual points hashes ``"<node>:<i>"`` and
    maps the resulting ring key back to the node.

    NOTE(review): source indentation was flattened in extraction — the
    placement of ``self.sorted_keys.sort()`` (inside vs. after the loop)
    is ambiguous here; confirm against upstream. Shown after the loop.
    """
    self.nodes.append(node)
    for x in xrange(self.replicas):
        # `b(...)` presumably encodes the key to bytes for hash_method —
        # TODO confirm against the helper's definition.
        ring_key = self.hash_method(b("%s:%d" % (node, x)))
        self.ring[ring_key] = node
        self.sorted_keys.append(ring_key)
    self.sorted_keys.sort()
|
Adds a `node` to the hash ring (including a number of replicas).
|
371,926
|
def sorted_items(d, key=__identity, reverse=False):
    """Return the items of the dictionary *d* sorted by their keys.

    >>> sample = dict(foo=20, bar=42, baz=10)
    >>> tuple(sorted_items(sample))
    (('bar', 42), ('baz', 10), ('foo', 20))

    *key* transforms each dictionary key before comparison; *reverse*
    flips the sort order.
    """
    return sorted(d.items(), key=lambda pair: key(pair[0]), reverse=reverse)
|
Return the items of the dictionary sorted by the keys
>>> sample = dict(foo=20, bar=42, baz=10)
>>> tuple(sorted_items(sample))
(('bar', 42), ('baz', 10), ('foo', 20))
>>> reverse_string = lambda s: ''.join(reversed(s))
>>> tuple(sorted_items(sample, key=reverse_string))
(('foo', 20), ('bar', 42), ('baz', 10))
>>> tuple(sorted_items(sample, reverse=True))
(('foo', 20), ('baz', 10), ('bar', 42))
|
371,927
|
def table_path(cls, project, instance, table):
    """Return a fully-qualified table resource string."""
    template = "projects/{project}/instances/{instance}/tables/{table}"
    return google.api_core.path_template.expand(
        template,
        project=project,
        instance=instance,
        table=table,
    )
|
Return a fully-qualified table string.
|
371,928
|
def debug(self, *msg):
    """Print *msg* prefixed with a yellow ``DEBUG`` label."""
    self._msg(colors.yellow("DEBUG"), *msg)
|
Prints a debug message
|
371,929
|
def get_query_rows(self, job_id, offset=None, limit=None, timeout=0):
query_reply = self.get_query_results(job_id, offset=offset,
limit=limit, timeout=timeout)
if not query_reply[]:
logger.warning( % job_id)
raise UnfinishedQueryException()
schema = query_reply["schema"]["fields"]
rows = query_reply.get(, [])
page_token = query_reply.get("pageToken")
records = [self._transform_row(row, schema) for row in rows]
while page_token and (not limit or len(records) < limit):
query_reply = self.get_query_results(
job_id, offset=offset, limit=limit, page_token=page_token,
timeout=timeout)
page_token = query_reply.get("pageToken")
rows = query_reply.get(, [])
records += [self._transform_row(row, schema) for row in rows]
return records[:limit] if limit else records
|
Retrieve a list of rows from a query table by job id.
This method will append results from multiple pages together. If you
want to manually page through results, you can use `get_query_results`
method directly.
Parameters
----------
job_id : str
The job id that references a BigQuery query.
offset : int, optional
The offset of the rows to pull from BigQuery
limit : int, optional
The number of rows to retrieve from a query table.
timeout : float, optional
Timeout in seconds.
Returns
-------
list
A ``list`` of ``dict`` objects that represent table rows.
|
371,930
|
def from_perseus(network_table, networks):
graphs = []
for guid, graph_attr in zip(network_table[], network_table.values):
network = networks[guid]
edge_table = network[]
if edge_table[[, ]].duplicated().any():
warnings.warn(.format(network[]))
G = nx.from_pandas_edgelist(edge_table, , , True, create_using=nx.DiGraph())
for attr, value in zip(network_table.columns, graph_attr):
G.graph[attr] = value
node_table = network[]
if node_table[].duplicated().any():
warnings.warn(.format(network[]))
node_column = node_table[]
for name, attributes in zip(node_column, node_table.values):
if name not in G:
G.add_node(name)
for attr, value in zip(node_table.columns, attributes):
G.node[name][attr] = value
graphs.append(G)
return graphs
|
Create networkx graph from network tables
>>> from perseuspy import read_networks, nx
>>> network_table, networks = read_networks(folder)
>>> graphs = nx.from_perseus(network_table, networks)
|
371,931
|
def compute_key(cli, familly, discriminant=None):
    """Compute a unique key from all connection parameters.

    :param cli: client object providing ``host``, ``user``, ``password``
        and a ``log`` with a ``debug`` method.
    :param familly: family name namespacing the key (original spelling kept).
    :param discriminant: optional extra data — a single value, or a
        list/tuple whose ``None``/``False`` members are skipped.
    :return: SHA-256 hex digest string.
    """
    hash_key = hashlib.sha256()
    hash_key.update(familly)
    hash_key.update(cli.host)
    hash_key.update(cli.user)
    hash_key.update(cli.password)
    if discriminant:
        # The original had two byte-identical branches for list and tuple;
        # a single isinstance check covers both.
        if isinstance(discriminant, (list, tuple)):
            for item in discriminant:
                if item is not None and item is not False:
                    # NOTE(review): str(item) will TypeError on Python 3
                    # (update needs bytes-like) — confirm intended runtime.
                    hash_key.update(str(item))
        else:
            hash_key.update(discriminant)
    hash_key = hash_key.hexdigest()
    cli.log.debug("hash_key: " + hash_key)
    return hash_key
|
This function is used to compute a unique key from all connection parametters.
|
371,932
|
def normalize_url(url):
    """Return url after stripping trailing .json and trailing slashes.

    NOTE(review): the suffix string literals were lost in extraction
    (``url.endswith()``). The slice ``url[:-5]`` implies the first check
    was for ``'.json'`` and ``url[:-1]`` a single trailing character
    (presumably ``'/'``) — confirm against the upstream source.
    """
    if url.endswith():
        url = url[:-5]
    if url.endswith():
        url = url[:-1]
    return url
|
Return url after stripping trailing .json and trailing slashes.
|
371,933
|
def GetData(ID, season = None, cadence = , clobber = False, delete_raw = False,
aperture_name = None, saturated_aperture_name = None,
max_pixels = None, download_only = False, saturation_tolerance = None,
bad_bits = None, **kwargs):
raise NotImplementedError()
|
Returns a :py:obj:`DataContainer` instance with the raw data for the target.
:param int ID: The target ID number
:param int season: The observing season. Default :py:obj:`None`
:param str cadence: The light curve cadence. Default `lc`
:param bool clobber: Overwrite existing files? Default :py:obj:`False`
:param bool delete_raw: Delete the FITS TPF after processing it? Default :py:obj:`False`
:param str aperture_name: The name of the aperture to use. Select `custom` to call \
:py:func:`GetCustomAperture`. Default :py:obj:`None`
:param str saturated_aperture_name: The name of the aperture to use if the target is \
saturated. Default :py:obj:`None`
:param int max_pixels: Maximum number of pixels in the TPF. Default :py:obj:`None`
:param bool download_only: Download raw TPF and return? Default :py:obj:`False`
:param float saturation_tolerance: Target is considered saturated if flux is within \
this fraction of the pixel well depth. Default :py:obj:`None`
:param array_like bad_bits: Flagged :py:obj`QUALITY` bits to consider outliers when \
computing the model. Default :py:obj:`None`
|
371,934
|
def _from_docstring_rst(doc):
def format_fn(line, status):
if re_from_data.match(line):
line = re_from_data.sub(r"**\1** ", line)
status["add_line"] = True
line = re_from_defaults.sub(r"*\1*", line)
if status["listing"]:
if re_from_param.match(line):
m = re_from_param.match(line)
line = " - ``{}`` {}".format(m.group(1), m.group(3))
elif re_from_status.match(line):
m = re_from_status.match(line)
line = " - ``{}`` {}".format(m.group(1), m.group(3))
elif re_from_item.match(line):
line = re_from_item.sub(r" -", line)
else:
line = " " * 4 + line.lstrip()
line = re_lone_backtick.sub("``", line)
return line
return _reformat_docstring(doc, format_fn, code_newline="\n")
|
format from docstring to ReStructured Text
|
371,935
|
def insert(self, row):
data = self._convert_value(row)
self._service.InsertRow(data, self._ss.id, self.id)
|
Insert a new row. The row will be added to the end of the
spreadsheet. Before inserting, the field names in the given
row will be normalized and values with empty field names
removed.
|
371,936
|
def verify_psd_options_multi_ifo(opt, parser, ifos):
for ifo in ifos:
for opt_group in ensure_one_opt_groups:
ensure_one_opt_multi_ifo(opt, parser, ifo, opt_group)
if opt.psd_estimation[ifo]:
required_opts_multi_ifo(opt, parser, ifo,
[, ],
required_by = "--psd-estimation")
|
Parses the CLI options and verifies that they are consistent and
reasonable.
Parameters
----------
opt : object
Result of parsing the CLI with OptionParser, or any object with the
required attributes (psd_model, psd_file, asd_file, psd_estimation,
psd_segment_length, psd_segment_stride, psd_inverse_length, psd_output).
parser : object
OptionParser instance.
|
371,937
|
def create_ngram_set(input_list, ngram_value=2):
    """Extract the set of n-grams (as tuples) from a list of integers.

    >>> create_ngram_set([1, 4, 9, 4, 1, 4], ngram_value=2)
    {(4, 9), (4, 1), (1, 4), (9, 4)}
    """
    shifted = [input_list[offset:] for offset in range(ngram_value)]
    return set(zip(*shifted))
|
Extract a set of n-grams from a list of integers.
>>> create_ngram_set([1, 4, 9, 4, 1, 4], ngram_value=2)
{(4, 9), (4, 1), (1, 4), (9, 4)}
>>> create_ngram_set([1, 4, 9, 4, 1, 4], ngram_value=3)
{(1, 4, 9), (4, 9, 4), (9, 4, 1), (4, 1, 4)}
|
371,938
|
def get_proficiencies_by_genus_type(self, proficiency_genus_type):
collection = JSONClientValidated(,
collection=,
runtime=self._runtime)
result = collection.find(
dict({: str(proficiency_genus_type)},
**self._view_filter())).sort(, DESCENDING)
return objects.ProficiencyList(result, runtime=self._runtime, proxy=self._proxy)
|
Gets a ``ProficiencyList`` corresponding to the given proficiency genus ``Type`` which does not include proficiencies of types derived from the specified ``Type``.
arg: proficiency_genus_type (osid.type.Type): a proficiency
genus type
return: (osid.learning.ProficiencyList) - the returned
``Proficiency`` list
raise: NullArgument - ``proficiency_genus_type`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
|
371,939
|
def generator(self, output, target):
"Evaluate the `output` with the critic then uses `self.loss_funcG` to combine it with `target`."
fake_pred = self.gan_model.critic(output)
return self.loss_funcG(fake_pred, target, output)
|
Evaluate the `output` with the critic then uses `self.loss_funcG` to combine it with `target`.
|
371,940
|
def current_site_id():
if hasattr(override_current_site_id.thread_local, "site_id"):
return override_current_site_id.thread_local.site_id
from yacms.utils.cache import cache_installed, cache_get, cache_set
request = current_request()
site_id = getattr(request, "site_id", None)
if request and not site_id:
site_id = request.session.get("site_id", None)
if not site_id:
domain = request.get_host().lower()
if cache_installed():
bits = (settings.CACHE_MIDDLEWARE_KEY_PREFIX, domain)
cache_key = "%s.site_id.%s" % bits
site_id = cache_get(cache_key)
if not site_id:
try:
site = Site.objects.get(domain__iexact=domain)
except Site.DoesNotExist:
pass
else:
site_id = site.id
if cache_installed():
cache_set(cache_key, site_id)
if not site_id:
site_id = os.environ.get("YACMS_SITE_ID", settings.SITE_ID)
if request and site_id and not getattr(settings, "TESTING", False):
request.site_id = site_id
return site_id
|
Responsible for determining the current ``Site`` instance to use
when retrieving data for any ``SiteRelated`` models. If we're inside an
override_current_site_id context manager, return the overriding site ID.
Otherwise, try to determine the site using the following methods in order:
- ``site_id`` in session. Used in the admin so that admin users
can switch sites and stay on the same domain for the admin.
- The id of the Site object corresponding to the hostname in the current
request. This result is cached.
- ``YACMS_SITE_ID`` environment variable, so management
commands or anything else outside of a request can specify a
site.
- ``SITE_ID`` setting.
If a current request exists and the current site is not overridden, the
site ID is stored on the request object to speed up subsequent calls.
|
371,941
|
def config_path(self, value):
self._config_path = value or
if not isinstance(self._config_path, str):
raise BadArgumentError("config_path must be string: {}".format(
self._config_path))
|
Set config_path
|
371,942
|
def reset_coords(self, names=None, drop=False, inplace=None):
inplace = _check_inplace(inplace)
if names is None:
names = self._coord_names - set(self.dims)
else:
if isinstance(names, str):
names = [names]
self._assert_all_in_dataset(names)
bad_coords = set(names) & set(self.dims)
if bad_coords:
raise ValueError(
% bad_coords)
obj = self if inplace else self.copy()
obj._coord_names.difference_update(names)
if drop:
for name in names:
del obj._variables[name]
return obj
|
Given names of coordinates, reset them to become variables
Parameters
----------
names : str or list of str, optional
Name(s) of non-index coordinates in this dataset to reset into
variables. By default, all non-index coordinates are reset.
drop : bool, optional
If True, remove coordinates instead of converting them into
variables.
inplace : bool, optional
If True, modify this dataset inplace. Otherwise, create a new
object.
Returns
-------
Dataset
|
371,943
|
def lv_load_areas(self):
    """Returns a generator for iterating over load_areas.

    Yields
    ------
    The load areas one at a time, ordered by their ``repr``.
    """
    yield from sorted(self._lv_load_areas, key=repr)
|
Returns a generator for iterating over load_areas
Yields
------
int
generator for iterating over load_areas
|
371,944
|
def get_least_salient_words(vocab, topic_word_distrib, doc_topic_distrib, doc_lengths, n=None):
    """Order the words from `vocab` by "saliency score" (Chuang et al. 2012)
    from least to most salient; optionally return only the `n` least salient.

    Thin wrapper delegating to ``_words_by_salience_score`` with
    ``least_to_most=True``.
    """
    return _words_by_salience_score(vocab, topic_word_distrib, doc_topic_distrib, doc_lengths, n, least_to_most=True)
|
Order the words from `vocab` by "saliency score" (Chuang et al. 2012) from least to most salient. Optionally only
return the `n` least salient words.
J. Chuang, C. Manning, J. Heer 2012: "Termite: Visualization Techniques for Assessing Textual Topic Models"
|
371,945
|
def binary_regex(self):
regex = {: r,
: r,
: r,
: r,
:
r,
:
r,
}
return regex[self.platform] % {
: APPLICATIONS_TO_BINARY_NAME.get(self.application, self.application),
: self.extension,
: if self.is_stub_installer else ,
: if self.is_stub_installer else ,
: self.version,
}
|
Return the regex for the binary.
|
371,946
|
def gain_offsets(Idat,Qdat,Udat,Vdat,tsamp,chan_per_coarse,feedtype=,**kwargs):
if feedtype==:
I_OFF,I_ON = foldcal(Idat,tsamp,**kwargs)
Q_OFF,Q_ON = foldcal(Qdat,tsamp,**kwargs)
XX_ON = (I_ON+Q_ON)/2
XX_OFF = (I_OFF+Q_OFF)/2
YY_ON = (I_ON-Q_ON)/2
YY_OFF = (I_OFF-Q_OFF)/2
G = (XX_OFF-YY_OFF)/(XX_OFF+YY_OFF)
if feedtype==:
I_OFF,I_ON = foldcal(Idat,tsamp,**kwargs)
V_OFF,V_ON = foldcal(Vdat,tsamp,**kwargs)
RR_ON = (I_ON+V_ON)/2
RR_OFF = (I_OFF+V_OFF)/2
LL_ON = (I_ON-V_ON)/2
LL_OFF = (I_OFF-V_OFF)/2
G = (RR_OFF-LL_OFF)/(RR_OFF+LL_OFF)
return convert_to_coarse(G,chan_per_coarse)
|
Determines relative gain error in the X and Y feeds for an
observation given I and Q (I and V for circular basis) noise diode data.
|
371,947
|
def export_partlist_to_file(input, output, timeout=20, showgui=False):
input = norm_path(input)
output = norm_path(output)
commands = export_command(output=output, output_type=)
command_eagle(
input=input, timeout=timeout, commands=commands, showgui=showgui)
|
call eagle and export sch or brd to partlist text file
:param input: .sch or .brd file name
:param output: text file name
:param timeout: int
:param showgui: Bool, True -> do not hide eagle GUI
:rtype: None
|
371,948
|
def _from_dict(cls, _dict):
args = {}
if in _dict:
args[] = MessageContextGlobal._from_dict(
_dict.get())
if in _dict:
args[] = MessageContextSkills._from_dict(
_dict.get())
return cls(**args)
|
Initialize a MessageContext object from a json dictionary.
|
371,949
|
def fetch(self, fetch_notes=None):
if fetch_notes is None:
fetch_notes = self.fetch_notes
values, notes_index = get_sheet_values(self.name, self.sheet_name,
spreadsheet_service=self._spreadsheet_service,
get_notes=fetch_notes)
self.raw_values = values
self.values = [list(r) for r in zip(*itertools.zip_longest(*self.raw_values, fillvalue=))]
self.byCol = byCol(self.values, to_index=self.index_columns)
self.notes_index = notes_index
|
update remote values (called automatically at __init__)
|
371,950
|
def from_xy_array(cls, xy, shape):
    """Convert an ``(N, 2)`` array of xy-coordinates into a KeypointsOnImage.

    Parameters
    ----------
    xy : (N, 2) ndarray
        Coordinates of ``N`` keypoints on the original image.
    shape : tuple of int or ndarray
        Shape of the image on which the keypoints are placed.

    Returns
    -------
    KeypointsOnImage
        Object containing all keypoints from the array.
    """
    keypoints = [Keypoint(x=x, y=y) for x, y in xy]
    return KeypointsOnImage(keypoints, shape)
|
Convert an array (N,2) with a given image shape to a KeypointsOnImage object.
Parameters
----------
xy : (N, 2) ndarray
Coordinates of ``N`` keypoints on the original image, given
as ``(N,2)`` array of xy-coordinates.
shape : tuple of int or ndarray
Shape tuple of the image on which the keypoints are placed.
Returns
-------
KeypointsOnImage
KeypointsOnImage object that contains all keypoints from the array.
|
371,951
|
def with_bundler(self):
def gemfile_exists():
return os.path.exists()
if in os.environ:
print(colored(
s config system. To
set it globally, run this command anywhere:
git config --global git-up.bundler.check true
To set it within a project, run this command inside that
projecttruefalse, ))
if self.settings[]:
return gemfile_exists()
if ( in os.environ
and os.environ[] == ):
return gemfile_exists()
return False
|
Check, if bundler check is requested.
Check, if the user wants us to check for new gems and return True in
this case.
:rtype : bool
|
371,952
|
def setCol(self, x, l):
for i in xrange(0, self.__size):
self.setCell(x, i, l[i])
|
set the x-th column, starting at 0
|
371,953
|
def parse_cell(self, cell):
field =
if (isinstance(cell.value, (str, unicode)) and
cell.value.startswith() and
cell.value.endswith()):
field = cell.value[2:-2].strip()
value =
else:
value = cell.value
return value, field
|
Process cell field, the field format just like {{field}}
:param cell:
:return: value, field
|
371,954
|
def make_spark_lines(table,filename,sc,**kwargs):
spark_output = True
lines_out_count = False
extrema = False
for key,value in kwargs.iteritems():
if key == :
lines_out_count = value
if key == :
extrema = value
list = []
count = 0
for row in table.columns.values.tolist():
if in row:
list.append(count)
count += 1
table.drop(table.columns[list], axis=1, inplace=True)
if lines_out_count == False:
args = make_spark_args(table,25,lines_out = True,extrema=extrema)
else:
args = make_spark_args(table,25,lines_out_count=lines_out_count)
concurrent = sc.parallelize(args)
table = concurrent.map(map_spark_lines).collect()
alignment_fieldspark_output
lines%s.geojson
\t},\t},[%s/%s]
|
alignment_field = False
spark_output = True
if kwargs is not None:
for key,value in kwargs.iteritems():
if key == 'alignment_field':
alignment_field = value
if key == 'spark_output':
spark_output = value
#changing dataframe to list if dataframe
if isinstance(table,pd.DataFrame):
table=df2list(table)
header=table[0]
total = []
# making table the proper iterable for each input
if spark_output == True:
#table = sum(table,[])
pass
else:
table = table[1:]
|
371,955
|
def to_molden(cartesian_list, buf=None, sort_index=True,
overwrite=True, float_format=.format):
if sort_index:
cartesian_list = [molecule.sort_index() for molecule in cartesian_list]
give_header = ("[MOLDEN FORMAT]\n"
+ "[N_GEO]\n"
+ str(len(cartesian_list)) + "\n"
+
+
+
+
+ ).format
values = len(cartesian_list) *
energy = [str(m.metadata.get(, 1)) for m in cartesian_list]
energy = .join(energy) +
header = give_header(energy=energy, max_force=values, rms_force=values)
coordinates = [x.to_xyz(sort_index=sort_index, float_format=float_format)
for x in cartesian_list]
output = header + .join(coordinates)
if buf is not None:
if overwrite:
with open(buf, mode=) as f:
f.write(output)
else:
with open(buf, mode=) as f:
f.write(output)
else:
return output
|
Write a list of Cartesians into a molden file.
.. note:: Since it permamently writes a file, this function
is strictly speaking **not sideeffect free**.
The list to be written is of course not changed.
Args:
cartesian_list (list):
buf (str): StringIO-like, optional buffer to write to
sort_index (bool): If sort_index is true, the Cartesian
is sorted by the index before writing.
overwrite (bool): May overwrite existing files.
float_format (one-parameter function): Formatter function
to apply to column’s elements if they are floats.
The result of this function must be a unicode string.
Returns:
formatted : string (or unicode, depending on data and options)
|
371,956
|
def _dens(self,R,z,phi=0.,t=0.):
return 3./4./nu.pi*self._b2*(R**2.+z**2.+self._b2)**-2.5
|
NAME:
_dens
PURPOSE:
evaluate the density for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the density
HISTORY:
2015-06-15 - Written - Bovy (IAS)
|
371,957
|
def _rotate(lon, lat, theta, axis=):
    # NOTE(review): the ``axis`` default and the keys of ``lookup`` lost their
    # string literals during extraction — per the docstring they should be the
    # axis names 'x'/'y'/'z'; restore from version control.
    """Rotate lon/lat coords (in degrees) about *axis* by *theta* degrees,
    simulating rotating a physical stereonet; returns (lon, lat) in radians."""
    lon, lat = np.atleast_1d(lon, lat)
    lon, lat = map(np.radians, [lon, lat])
    theta = np.radians(theta)
    x, y, z = sph2cart(lon, lat)
    lookup = {:_rotate_x, :_rotate_y, :_rotate_z}
    X, Y, Z = lookup[axis](x, y, z, theta)
    lon, lat = cart2sph(X,Y,Z)
    return lon, lat
|
Rotate "lon", "lat" coords (in _degrees_) about the X-axis by "theta"
degrees. This effectively simulates rotating a physical stereonet.
Returns rotated lon, lat coords (in _radians_).
|
371,958
|
def get_de_novos_in_transcript(transcript, de_novos):
    """Get the de novos within the coding sequence of a transcript.

    Args:
        transcript: Transcript object defining the transcript coordinates.
        de_novos: list of chromosome sequence positions for de novo events.

    Returns:
        list of de novo positions found within the transcript.
    """
    # NOTE(review): no positional filtering is actually applied here — every
    # supplied site is returned in a new list; confirm whether filtering
    # against the transcript coordinates was intended.
    return list(de_novos)
|
get the de novos within the coding sequence of a transcript
Args:
transcript: Transcript object, which defines the transcript coordinates
de_novos: list of chromosome sequence positions for de novo events
Returns:
list of de novo positions found within the transcript
|
371,959
|
def less_than(self, less_than):
    """Add a new ``<`` condition.

    :param less_than: str or datetime-compatible object (naive UTC datetime
        or tz-aware datetime)
    :raise QueryTypeError: if ``less_than`` is of an unexpected type
    """
    # NOTE(review): the strftime format, the error-message literal and the
    # operator string passed to _add_condition were lost during extraction;
    # restore from version control.
    if hasattr(less_than, ):
        less_than = datetime_as_utc(less_than).strftime()
    elif isinstance(less_than, six.string_types):
        raise QueryTypeError( % type(less_than))
    return self._add_condition(, less_than, types=[int, str])
|
Adds new `<` condition
:param less_than: str or datetime compatible object (naive UTC datetime or tz-aware datetime)
:raise:
- QueryTypeError: if `less_than` is of an unexpected type
|
371,960
|
def read_object_from_yaml(desired_type: Type[Any], file_object: TextIOBase, logger: Logger,
                          fix_imports: bool = True, errors: str = , *args, **kwargs) -> Any:
    """Parse a yaml file from *file_object* and return the loaded object.

    The remaining parameters are accepted for interface compatibility and
    are not used by this implementation.
    """
    # NOTE(review): the ``errors`` default lost its string literal
    # (extraction artifact). Also, ``yaml.load`` without an explicit Loader
    # executes arbitrary tags and is unsafe on untrusted input — consider
    # ``yaml.safe_load`` (flagged, not changed, to preserve behaviour).
    return yaml.load(file_object)
|
Parses a yaml file.
:param desired_type:
:param file_object:
:param logger:
:param fix_imports:
:param errors:
:param args:
:param kwargs:
:return:
|
371,961
|
def populate_dataframe(index,columns, default_dict, dtype):
new_df = pd.DataFrame(index=index,columns=columns)
for fieldname,dt in zip(columns,dtype.descr):
default = default_dict[fieldname]
new_df.loc[:,fieldname] = default
new_df.loc[:,fieldname] = new_df.loc[:,fieldname].astype(dt[1])
return new_df
|
helper function to populate a generic Pst dataframe attribute. This
function is called as part of constructing a generic Pst instance
Parameters
----------
index : (varies)
something to use as the dataframe index
columns: (varies)
something to use as the dataframe columns
default_dict : (dict)
dictionary of default values for columns
dtype : numpy.dtype
dtype used to cast dataframe columns
Returns
-------
new_df : pandas.DataFrame
|
371,962
|
def fn_floor(self, value):
    """Return the floor of a number; for negatives this rounds down,
    e.g. ``floor(-2.5) == -3``.

    :param value: the number, or an ndarray/list/tuple of numbers.
    :return: the floor (elementwise for array-like inputs).
    """
    if not (is_ndarray(value) or isinstance(value, (list, tuple))):
        # scalar path
        return math.floor(value)
    # array-like path: coerce then floor elementwise
    return numpy.floor(self._to_ndarray(value))
|
Return the floor of a number. For negative numbers, floor returns a lower value. E.g., `floor(-2.5) == -3`
:param value: The number.
:return: The floor of the number.
|
371,963
|
def zan(self, id_reply):
    """Record a vote for a reply: create the user->reply record in the outer
    table first, then refresh the cached vote count on the reply row itself
    (denormalised on purpose, to avoid a join when reading)."""
    # NOTE(review): the logger format strings and the output-dict key lost
    # their string literals (extraction artifact). Also ``json.dump(output,
    # self)`` relies on the handler object being file-like — confirm that is
    # intended (``json.dumps`` + an explicit write is the usual pattern).
    logger.info(.format(id_reply))
    MReply2User.create_reply(self.userinfo.uid, id_reply)
    cur_count = MReply2User.get_voter_count(id_reply)
    if cur_count:
        MReply.update_vote(id_reply, cur_count)
        output = {: cur_count}
    else:
        output = {: 0}
    logger.info(.format(cur_count))
    return json.dump(output, self)
|
先在外部表中更新,然后更新内部表字段的值。
有冗余,但是查看的时候避免了联合查询
|
371,964
|
def get_template_debug(template_name, error):
    # NOTE(review): the function body is missing — only a stray ``}``
    # survived extraction. It is meant to build the structure Django expects
    # when errors occur in templates, so the debug error page can show a
    # stack trace; restore the implementation from version control.
    }
|
This structure is what Django wants when errors occur in templates.
It gives the user a nice stack trace in the error page during debug.
|
371,965
|
def update_report_collector(self, timestamp):
    """Push a CollectorUpdate for this pipeline stage onto the shared
    collector queue, when reporting is enabled.

    :param timestamp: timestamp attached to the update.
    """
    # NOTE(review): the dict keys / compared literals in the three
    # ``report_enabled`` checks and in the CollectorUpdate kwargs lost their
    # string literals during extraction; restore from version control.
    report_enabled = in self.information and self.information[] ==
    report_enabled = report_enabled and in self.information
    report_enabled = report_enabled and Event.collector_queue is not None
    if report_enabled:
        Event.collector_queue.put(CollectorUpdate(
            matrix=self.information[] if in self.information else ,
            stage=self.information[],
            status=self.status,
            timestamp=timestamp,
            information=self.information
        ))
|
Updating report collector for pipeline details.
|
371,966
|
def warm_spell_duration_index(tasmax, tx90, window=6, freq=):
    r
    # Warm spell duration index: count of days belonging to runs of at least
    # ``window`` consecutive days where tasmax exceeds the day-of-year 90th
    # percentile (tx90), resampled at ``freq``.
    # NOTE(review): the ``freq`` default, the 'dayofyear' coordinate strings
    # and the ``dim`` argument lost their literals during extraction; the
    # stray ``r`` above is the remnant of the raw-docstring prefix.
    if not in tx90.coords.keys():
        raise AttributeError("tx90 should have dayofyear coordinates.")
    doy = tasmax.indexes[].dayofyear
    # align tx90's day-of-year calendar with tasmax's calendar
    tx90 = utils.adjust_doy_calendar(tx90, tasmax)
    thresh = xr.full_like(tasmax, np.nan)
    thresh.data = tx90.sel(dayofyear=doy)
    above = (tasmax > thresh)
    return above.resample(time=freq).apply(rl.windowed_run_count, window=window, dim=)
|
r"""Warm spell duration index
Number of days with at least six consecutive days where the daily maximum temperature is above the 90th
percentile. The 90th percentile should be computed for a 5-day window centred on each calendar day in the
1961-1990 period.
Parameters
----------
tasmax : xarray.DataArray
Maximum daily temperature [℃] or [K]
tx90 : float
90th percentile of daily maximum temperature [℃] or [K]
window : int
Minimum number of days with temperature below threshold to qualify as a warm spell.
freq : str, optional
Resampling frequency
Returns
-------
xarray.DataArray
Count of days with at least six consecutive days where the daily maximum temperature is above the 90th
percentile [days].
References
----------
From the Expert Team on Climate Change Detection, Monitoring and Indices (ETCCDMI).
Used in Alexander, L. V., et al. (2006), Global observed changes in daily climate extremes of temperature and
precipitation, J. Geophys. Res., 111, D05109, doi: 10.1029/2005JD006290.
|
371,967
|
def fit(self, xy=False, **kwargs):
    """Write an xtc trajectory fitted to the tpr reference structure by
    running ``gromacs.trjconv`` with appropriate arguments.

    :param xy: if True do a rot+trans fit in the xy plane only
        (useful for membrane simulations).
    :returns: dict with the names of the tpr and the new xtc file.
    """
    # NOTE(review): many string literals (kwarg keys such as the input
    # structure/index/trajectory names, fit modes, default fitgroup and
    # filename infixes) were lost during extraction; restore from version
    # control before use.
    kwargs.setdefault(, self.tpr)
    kwargs.setdefault(, self.ndx)
    kwargs[] = self.xtc
    force = kwargs.pop(, self.force)
    if xy:
        fitmode =
        kwargs.pop(, None)
        infix_default =
    else:
        fitmode = kwargs.pop(, )
        infix_default =
    dt = kwargs.get()
    if dt:
        infix_default += .format(int(dt))
    kwargs.setdefault(, self.outfile(self.infix_filename(None, self.xtc, infix_default, )))
    fitgroup = kwargs.pop(, )
    kwargs.setdefault(, [fitgroup, "system"])
    if kwargs.get(, False):
        # centering combined with fitting is discouraged
        logger.warn("Transformer.fit(): center=%(center)r used: centering should not be combined with fitting.", kwargs)
        if len(kwargs[]) != 3:
            logger.error("If you insist on centering you must provide three groups in the kwarg: (center, fit, output)")
            raise ValuError("Insufficient index groups for centering,fitting,output")  # BUG(review): 'ValuError' is a typo for ValueError
    logger.info("Fitting trajectory %r to with xy=%r...", kwargs[], xy)
    logger.info("Fitting on index group %(fitgroup)r", vars())
    with utilities.in_dir(self.dirname):
        if self.check_file_exists(kwargs[], resolve="indicate", force=force):
            logger.warn("File %r exists; force regenerating it with force=True.", kwargs[])
        else:
            gromacs.trjconv(fit=fitmode, **kwargs)
            logger.info("Fitted trajectory (fitmode=%s): %r.", fitmode, kwargs[])
    return {: self.rp(kwargs[]), : self.rp(kwargs[])}
|
Write xtc that is fitted to the tpr reference structure.
Runs :class:`gromacs.tools.trjconv` with appropriate arguments
for fitting. The most important *kwargs* are listed
here but in most cases the defaults should work.
Note that the default settings do *not* include centering or
periodic boundary treatment as this often does not work well
with fitting. It is better to do this as a separate step (see
:meth:`center_fit` or :func:`gromacs.cbook.trj_fitandcenter`)
:Keywords:
*s*
Input structure (typically the default tpr file but can be set to
some other file with a different conformation for fitting)
*n*
Alternative index file.
*o*
Name of the output trajectory. A default name is created.
If e.g. *dt* = 100 is one of the *kwargs* then the default name includes
"_dt100ps".
*xy* : boolean
If ``True`` then only do a rot+trans fit in the xy plane
(good for membrane simulations); default is ``False``.
*force*
``True``: overwrite existing trajectories
``False``: throw a IOError exception
``None``: skip existing and log a warning [default]
*fitgroup*
index group to fit on ["backbone"]
.. Note:: If keyword *input* is supplied then it will override
*fitgroup*; *input* = ``[fitgroup, outgroup]``
*kwargs*
kwargs are passed to :func:`~gromacs.cbook.trj_xyfitted`
:Returns:
dictionary with keys *tpr*, *xtc*, which are the names of the
the new files
|
371,968
|
def usergroups_users_update(
    self, *, usergroup: str, users: List[str], **kwargs
) -> SlackResponse:
    """Update the list of users for a User Group.

    Args:
        usergroup (str): The encoded ID of the User Group to update,
            e.g. 'S0604QSJC'.
        users (list): User IDs representing the entire membership of the
            User Group, e.g. ['U060R4BJ4', 'U060RNRCZ'].
    """
    self._validate_xoxp_token()
    payload = dict(kwargs)
    payload["usergroup"] = usergroup
    payload["users"] = users
    return self.api_call("usergroups.users.update", json=payload)
|
Update the list of users for a User Group
Args:
usergroup (str): The encoded ID of the User Group to update.
e.g. 'S0604QSJC'
users (list): A list user IDs that represent the entire list of
users for the User Group. e.g. ['U060R4BJ4', 'U060RNRCZ']
|
371,969
|
def _bucket_key(self):
return "{}.size.{}".format(
self.prefix, (self._hashed_key//1000)
if self._hashed_key > 1000 else self._hashed_key)
|
Returns hash bucket key for the redis key
|
371,970
|
def func(nargs: Optional[int] = None, nouts: Optional[int] = None, ndefs: Optional[int] = None):
    """Decorator factory: wrap a plain function into a WrappedFunction with
    optional declared numbers of arguments, outputs and defaults."""
    def decorate(f):
        wrapped = WrappedFunction(f, nargs=nargs, nouts=nouts, ndefs=ndefs)
        return wraps(f)(wrapped)
    return decorate
|
decorates normal function to Function with (optional) number of arguments and outputs.
: func(nargs: Optional[int] = None, nouts: Optional[int] = None, ndefs: Optional[int] = None)
|
371,971
|
def from_backend(self, dagobah_id):
    """Reconstruct this Dagobah instance from the backend record identified
    by *dagobah_id*.

    :raises DagobahError: if no record exists for the given id.
    """
    # NOTE(review): the debug-log format string and the DagobahError message
    # lost their literals during extraction; restore from version control.
    logger.debug(.format(dagobah_id))
    rec = self.backend.get_dagobah_json(dagobah_id)
    if not rec:
        raise DagobahError(
            % dagobah_id)
    self._construct_from_json(rec)
|
Reconstruct this Dagobah instance from the backend.
|
371,972
|
def healthy(self):
    """Health-check endpoint: return ("OK", 200) when healthy, ("FAIL", 500)
    otherwise; exceptions are logged and returned as (str(e), 500).

    Override is_healthy() to change the health check.
    """
    try:
        ok = self.is_healthy()
    except Exception as e:
        self.app.logger.exception(e)
        return str(e), 500
    return ("OK", 200) if ok else ("FAIL", 500)
|
Return 200 is healthy, else 500.
Override is_healthy() to change the health check.
|
371,973
|
def max_insertion(seqs, gene, domain):
    """Return the length of the largest insertion recorded for the given
    (gene, domain) pair; defaults to 100 when no insertions are present."""
    lengths = [
        int(ins[2])
        for entry in seqs.values()
        if entry[0] == gene and entry[1] == domain and entry[2] != []
        for ins in entry[2]
    ]
    return max(lengths) if lengths else 100
|
length of largest insertion
|
371,974
|
def calc_svd(self, lapack_driver='gesdd'):
    """Return the SVD decomposition of the data.

    ``self.data`` is expected to be a 2D np.ndarray of shape (nt, nch):
    time as the first dimension, channels as the second.

    Parameters
    ----------
    lapack_driver : str
        Passed through to scipy.linalg.svd(); 'gesdd' is scipy's default.

    Returns
    -------
    chronos : np.ndarray
        First output (u) of scipy.linalg.svd(), shape (nt, nt) — the
        time-dependent part of the decomposition.
    s : np.ndarray
        Singular values, shape (nch,).
    topos : np.ndarray
        Third output (v) of scipy.linalg.svd(), shape (nch, nch) — the
        channel-dependent part of the decomposition.

    Raises
    ------
    Exception
        If the data is spectral (svd not implemented for that class).
    """
    if self._isSpectral():
        msg = "svd not implemented yet for spectral data class"
        raise Exception(msg)
    chronos, s, topos = _comp.calc_svd(self.data, lapack_driver=lapack_driver)
    # BUG FIX: the original returned the undefined names (u, s, v), which
    # raised NameError; return the values actually computed above.
    return chronos, s, topos
|
Return the SVD decomposition of data
The input data np.ndarray shall be of dimension 2,
with time as the first dimension, and the channels in the second
Hence data should be of shape (nt, nch)
Uses scipy.linalg.svd(), with:
full_matrices = True
compute_uv = True
overwrite_a = False
check_finite = True
See scipy online doc for details
Return
------
chronos: np.ndarray
First arg (u) returned by scipy.linalg.svd()
Contains the so-called 'chronos', of shape (nt, nt)
i.e.: the time-dependent part of the decomposition
s: np.ndarray
Second arg (s) returned by scipy.linalg.svd()
Contains the singular values, of shape (nch,)
i.e.: the channel-dependent part of the decomposition
topos: np.ndarray
Third arg (v) returned by scipy.linalg.svd()
Contains the so-called 'topos', of shape (nch, nch)
i.e.: the channel-dependent part of the decomposition
|
371,975
|
async def toggle(self):
    """Toggle the streamer between pause and resume."""
    # NOTE(review): the state literal compared against in the guard below was
    # lost during extraction (presumably the 'ready'/'playing' state name);
    # restore from version control.
    self.logger.debug("toggle command")
    if not self.state == :
        return
    if self.streamer is None:
        return
    try:
        if self.streamer.is_playing():
            await self.pause()
        else:
            await self.resume()
    except Exception as e:
        # best-effort: log and swallow errors from the player backend
        logger.error(e)
        pass
|
Toggles between pause and resume command
|
371,976
|
def __normalize_args(**keywds):
    """Implementation detail: normalise the keyword arguments in place and
    return them."""
    # NOTE(review): the keyword names indexed below lost their string
    # literals during extraction; the intent appears to be moving a callable
    # passed in one slot over to another slot when the latter is None.
    if isinstance(keywds[], Callable) and \
            None is keywds[]:
        keywds[] = keywds[]
        keywds[] = None
    return keywds
|
implementation details
|
371,977
|
def save_output_meta(self):
    """Save descriptive output meta data (self.OUTPUT_META_DICT) to a JSON
    file inside ``options.outputdir``."""
    # NOTE(review): the output file name and the open() mode lost their
    # string literals during extraction; restore from version control.
    options = self.options
    file_path = os.path.join(options.outputdir, )
    with open(file_path, ) as outfile:
        json.dump(self.OUTPUT_META_DICT, outfile)
|
Save descriptive output meta data to a JSON file.
|
371,978
|
def skull_strip(dset,suffix=,prefix=None,unifize=True):
    """Use FSL's bet to strip the skull from the given anatomy dataset."""
    # NOTE(review): several literals are missing (the suffix default, the
    # unifize suffix, command-name strings, the 3dcalc expression), and
    # ``bet2``/``fsl_dir`` are referenced but not defined in this block
    # (``cmd = bet2 if bet2 else`` is truncated) — restore from version
    # control before use.
    if prefix==None:
        prefix = nl.suffix(dset,suffix)
    unifize_dset = nl.suffix(dset,)
    cmd = bet2 if bet2 else
    if unifize:
        info = nl.dset_info(dset)
        if info==None:
            nl.notify( % dset,level=nl.level.error)
            return False
        cmd = os.path.join(fsl_dir,cmd) if fsl_dir else cmd
        # clip low-intensity voxels (5% of max) before running bet
        cutoff_value = nl.max(dset) * 0.05
        nl.run([,,unifize_dset,nl.calc(dset, % cutoff_value)],products=unifize_dset)
    else:
        unifize_dset = dset
    nl.run([cmd,unifize_dset,prefix,,0.5],products=prefix)
|
use bet to strip skull from given anatomy
|
371,979
|
def dist_calc(loc1, loc2):
    """Calculate the distance in km between two points using the flat-Earth
    approximation. Better tools exist for this (e.g. gdal).

    :type loc1: tuple
    :param loc1: (lat, lon, depth) in decimal degrees and km
    :type loc2: tuple
    :param loc2: (lat, lon, depth) in decimal degrees and km
    :returns: distance between the points in km
    :rtype: float
    """
    R = 6371.009  # mean Earth radius in km
    dlat = np.radians(abs(loc1[0] - loc2[0]))
    dlong = np.radians(abs(loc1[1] - loc2[1]))
    ddepth = abs(loc1[2] - loc2[2])
    mean_lat = np.radians((loc1[0] + loc2[0]) / 2)
    # horizontal great-circle-ish distance on the flat-Earth approximation
    horizontal = R * np.sqrt(dlat ** 2 + (np.cos(mean_lat) * dlong) ** 2)
    # combine with the vertical (depth) separation
    return np.sqrt(horizontal ** 2 + ddepth ** 2)
|
Function to calculate the distance in km between two points.
Uses the flat Earth approximation. Better things are available for this,
like `gdal <http://www.gdal.org/>`_.
:type loc1: tuple
:param loc1: Tuple of lat, lon, depth (in decimal degrees and km)
:type loc2: tuple
:param loc2: Tuple of lat, lon, depth (in decimal degrees and km)
:returns: Distance between points in km.
:rtype: float
|
371,980
|
def get_user_choice(items):
    """Return the item selected by the user from *items*, or None if 'q' was
    entered for quit."""
    # NOTE(review): Python 2 era code (raw_input, bare print); the stray
    # ``q`` line and the empty comparison literal are extraction artifacts —
    # the loop is meant to run until the user enters 'q'.
    q
    choice = raw_input()
    while choice != :
        try:
            item = items[int(choice)]
            print
            return item
        except ValueError:
            return None
|
Returns the selected item from provided items or None if 'q' was
entered for quit.
|
371,981
|
def shell_split(text):
    """Split *text* using shell-like syntax without breaking single/double
    quoted substrings; a unicode-safe near-equivalent of shlex.split
    (shlex did not support unicode until Python 2.7.3)."""
    # NOTE(review): the regex literal below is garbled (extraction artifact);
    # restore the original quoted-token pattern from version control.
    assert is_text_string(text)
    pattern = r.*?(?<!\\)\
    out = []
    for token in re.split(pattern, text):
        if token.strip():
            out.append(token.strip().strip("'"))
    return out
|
Split the string `text` using shell-like syntax
This avoids breaking single/double-quoted strings (e.g. containing
strings with spaces). This function is almost equivalent to the shlex.split
function (see standard library `shlex`) except that it is supporting
unicode strings (shlex does not support unicode until Python 2.7.3).
|
371,982
|
def write(self, buf):
    """Insert a string buffer as one record via the native record writer.

    Parameters
    ----------
    buf : string (python2), bytes (python3)
        Buffer to write.
    """
    assert self.writable
    self._check_pid(allow_reset=False)
    record_ptr = ctypes.c_char_p(buf)
    record_len = ctypes.c_size_t(len(buf))
    check_call(_LIB.MXRecordIOWriterWriteRecord(self.handle,
                                                record_ptr,
                                                record_len))
|
Inserts a string buffer as a record.
Examples
---------
>>> record = mx.recordio.MXRecordIO('tmp.rec', 'w')
>>> for i in range(5):
... record.write('record_%d'%i)
>>> record.close()
Parameters
----------
buf : string (python2), bytes (python3)
Buffer to write.
|
371,983
|
def groups(self, labels, collect=None):
    """Group rows by multiple columns; count rows per group, or aggregate the
    remaining columns with *collect*.

    Args:
        labels: list of column names (or indices) to group on.
        collect: optional function applied to the grouped values of each
            other column.

    Returns:
        A Table keyed by the unique combinations of the ``labels`` columns,
        followed by either a count column or the collected columns.
    """
    # single label: delegate to group()
    if not _is_non_string_iterable(labels):
        return self.group(labels, collect=collect)
    collect = _zero_on_type_error(collect)
    columns = []
    labels = self._as_labels(labels)
    for label in labels:
        if label not in self.labels:
            raise ValueError("All labels must exist in the table")
        columns.append(self._get_column(label))
    # group on tuples built from the selected columns' values
    grouped = self.group(list(zip(*columns)), lambda s: s)
    grouped._columns.popitem(last=False)
    counts = [len(v) for v in grouped[0]]
    # re-expand the grouped tuples back into the original label columns
    for label in labels[::-1]:
        grouped[label] = grouped.apply(_assert_same, label)
        grouped.move_to_start(label)
    if collect is None:
        # NOTE(review): the count-column name literal is missing (extraction
        # artifact) — presumably 'count'.
        count = if not in labels else self._unused_label()
        return grouped.select(labels).with_column(count, counts)
    else:
        for label in grouped.labels:
            if label in labels:
                continue
            column = [collect(v) for v in grouped[label]]
            del grouped[label]
            grouped[_collected_label(collect, label)] = column
        return grouped
|
Group rows by multiple columns, count or aggregate others.
Args:
``labels``: list of column names (or indices) to group on
``collect``: a function applied to values in other columns for each group
Returns: A Table with each row corresponding to a unique combination of values in
the columns specified in ``labels``, where the first columns are those
specified in ``labels``, followed by a column of counts for each of the unique
values. If ``collect`` is provided, a Table is returned with all original
columns, each containing values calculated by first grouping rows according to
to values in the ``labels`` column, then applying ``collect`` to each set of
grouped values in the other columns.
Note:
The grouped columns will appear first in the result table. If ``collect`` does not
accept arguments with one of the column types, that column will be empty in the resulting
table.
>>> marbles = Table().with_columns(
... "Color", make_array("Red", "Green", "Blue", "Red", "Green", "Green"),
... "Shape", make_array("Round", "Rectangular", "Rectangular", "Round", "Rectangular", "Round"),
... "Amount", make_array(4, 6, 12, 7, 9, 2),
... "Price", make_array(1.30, 1.30, 2.00, 1.75, 1.40, 1.00))
>>> marbles
Color | Shape | Amount | Price
Red | Round | 4 | 1.3
Green | Rectangular | 6 | 1.3
Blue | Rectangular | 12 | 2
Red | Round | 7 | 1.75
Green | Rectangular | 9 | 1.4
Green | Round | 2 | 1
>>> marbles.groups(["Color", "Shape"])
Color | Shape | count
Blue | Rectangular | 1
Green | Rectangular | 2
Green | Round | 1
Red | Round | 2
>>> marbles.groups(["Color", "Shape"], sum)
Color | Shape | Amount sum | Price sum
Blue | Rectangular | 12 | 2
Green | Rectangular | 15 | 2.7
Green | Round | 2 | 1
Red | Round | 11 | 3.05
|
371,984
|
def update(self, changed_state_model=None, with_expand=False):
    """Synchronise the state tree view with the model: rebuild everything when
    no model is given, otherwise refresh only the changed state's subtree.

    :param changed_state_model: model whose row(s) must be updated; None
        triggers a full rebuild from the selected state machine's root state.
    :param with_expand: expand flag forwarded to the recursive insert/update.
    """
    if not self.view_is_registered:
        return
    if changed_state_model is None:
        # full rebuild: clear caches and the tree store, start from the root
        parent_row_iter = None
        self.state_row_iter_dict_by_state_path.clear()
        self.tree_store.clear()
        if self._selected_sm_model:
            changed_state_model = self._selected_sm_model.root_state
        else:
            return
    else:
        # partial update: locate the tree row of the changed state's parent
        if changed_state_model.state.is_root_state:
            parent_row_iter = self.state_row_iter_dict_by_state_path[changed_state_model.state.get_path()]
        else:
            if changed_state_model.state.is_root_state_of_library:
                changed_upper_state_m = changed_state_model.parent.parent
            else:
                changed_upper_state_m = changed_state_model.parent
            # climb until an ancestor already present in the tree is found
            while changed_upper_state_m.state.get_path() not in self.state_row_iter_dict_by_state_path:
                logger.warning("Take a parent state because this is not in.")
                changed_upper_state_m = changed_upper_state_m.parent
            parent_row_iter = self.state_row_iter_dict_by_state_path[changed_upper_state_m.state.get_path()]
    self.insert_and_update_recursively(parent_row_iter, changed_state_model, with_expand)
|
Checks if all states are in tree and if tree has states which were deleted
:param changed_state_model: Model that row has to be updated
:param with_expand: The expand flag for the tree
|
371,985
|
def get_available_devices(self):
    """Get available devices using mbedls plus self.available_edbg_ports.

    :return: list of connected devices as dictionaries.
    """
    connected_devices = self.mbeds.list_mbeds() if self.mbeds else []
    edbg_ports = self.available_edbg_ports()
    for port in edbg_ports:
        connected_devices.append({
            "platform_name": "SAM4E",
            "serial_port": port,
            "mount_point": None,
            "target_id": None,
            "baud_rate": 460800
        })
    # NOTE(review): the dict key being set below lost its string literal
    # (extraction artifact) — likely a state/type field defaulted to
    # "unknown"; restore from version control.
    for dev in connected_devices:
        dev[] = "unknown"
    return connected_devices
|
Gets available devices using mbedls and self.available_edbg_ports.
:return: List of connected devices as dictionaries.
|
371,986
|
def make_directory(self, directory_name, *args, **kwargs):
    """:meth:`.WNetworkClientProto.make_directory` method implementation:
    create *directory_name* under the current session path via the DAV
    client."""
    target = self.join_path(self.session_path(), directory_name)
    self.dav_client().mkdir(target)
|
:meth:`.WNetworkClientProto.make_directory` method implementation
|
371,987
|
def server(self):
    """UDP server to listen for responses; lazily created on first access and
    cached on the instance as ``_server``."""
    cached = getattr(self, "_server", None)
    if cached is not None:
        return cached
    log.debug("Binding datagram server to %s", self.bind)
    self._server = DatagramServer(self.bind, self._response_received)
    return self._server
|
UDP server to listen for responses.
|
371,988
|
def get_randomness_stream(self, decision_point: str, for_initialization: bool=False) -> RandomnessStream:
    """Provide a new source of random numbers for the given decision point.

    Parameters
    ----------
    decision_point :
        Unique identifier for a stream of random numbers, typically a
        per-time-step decision such as 'moves_left' or 'gets_disease'.
    for_initialization :
        Whether this stream generates key initialization information for the
        Common Random Number framework; such streams cannot be copied.

    Raises
    ------
    RandomnessError :
        If a randomness stream with this identifier already exists.
    """
    if decision_point in self._decision_points:
        raise RandomnessError(f"Two separate places are attempting to create "
                              f"the same randomness stream for {decision_point}")
    new_stream = RandomnessStream(key=decision_point, clock=self._clock, seed=self._seed,
                                  index_map=self._key_mapping, manager=self,
                                  for_initialization=for_initialization)
    self._decision_points[decision_point] = new_stream
    return new_stream
|
Provides a new source of random numbers for the given decision point.
Parameters
----------
decision_point :
A unique identifier for a stream of random numbers. Typically represents
a decision that needs to be made each time step like 'moves_left' or
'gets_disease'.
for_initialization :
A flag indicating whether this stream is used to generate key initialization information
that will be used to identify simulants in the Common Random Number framework. These streams
cannot be copied and should only be used to generate the state table columns specified
in ``builder.configuration.randomness.key_columns``.
Raises
------
RandomnessError :
If another location in the simulation has already created a randomness stream
with the same identifier.
|
371,989
|
def get_array_dimensions(data):
    """Check that *data* is a regular (rectangular) nested array and return
    its dimensions as a tuple, e.g. [[1, 2, 3], [4, 5, 6]] -> (2, 3).

    Raises:
        ValidationError: if sub-arrays at any depth disagree on their length.
    """
    depths_and_dimensions = get_depths_and_dimensions(data, 0)
    # group observed dimensions by nesting depth
    grouped_by_depth = {
        depth: tuple(dimension for depth, dimension in group)
        for depth, group in groupby(depths_and_dimensions, itemgetter(0))
    }
    # depths at which the sub-arrays have inconsistent lengths
    invalid_depths_dimensions = tuple(
        (depth, dimensions)
        for depth, dimensions in grouped_by_depth.items()
        if len(set(dimensions)) != 1
    )
    if invalid_depths_dimensions:
        # NOTE(review): the join-separator string literal is missing
        # (extraction artifact).
        raise ValidationError(
            .join(
                [
                    "Depth {0} of array data has more than one dimensions: {1}".
                    format(depth, dimensions)
                    for depth, dimensions in invalid_depths_dimensions
                ]
            )
        )
    dimensions = tuple(
        toolz.first(set(dimensions))
        for depth, dimensions in sorted(grouped_by_depth.items())
    )
    return dimensions
|
Given an array type data item, check that it is an array and
return the dimensions as a tuple.
Ex: get_array_dimensions([[1, 2, 3], [4, 5, 6]]) returns (2, 3)
|
371,990
|
def prepare_socket(bind_addr, family, type, proto, nodelay, ssl_adapter):
    """Create and prepare the listening socket object: reuse-addr, optional
    TCP_NODELAY, optional SSL wrapping, and dual-stack IPv6 opt-in."""
    # NOTE(review): the hasattr() attribute name and the wildcard-host tuple
    # lost their string literals during extraction — presumably
    # 'IPPROTO_IPV6' and the IPv6 wildcard addresses; restore from VCS.
    sock = socket.socket(family, type, proto)
    prevent_socket_inheritance(sock)
    host, port = bind_addr[:2]
    IS_EPHEMERAL_PORT = port == 0
    if not (IS_WINDOWS or IS_EPHEMERAL_PORT):
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    if nodelay and not isinstance(bind_addr, str):
        sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
    if ssl_adapter is not None:
        sock = ssl_adapter.bind(sock)
    # dual-stack: allow IPv4 connections on an IPv6 wildcard socket
    listening_ipv6 = (
        hasattr(socket, )
        and family == socket.AF_INET6
        and host in (, , )
    )
    if listening_ipv6:
        try:
            sock.setsockopt(
                socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 0,
            )
        except (AttributeError, socket.error):
            # the platform may not expose the option; best-effort
            pass
    return sock
|
Create and prepare the socket object.
|
371,991
|
def batch_fetch_labels(ids):
    """Fetch all rdfs:label assertions for a set of CURIEs; ids with no label
    are omitted from the returned mapping."""
    labels = {}
    for curie in ids:
        fetched = anyont_fetch_label(curie)
        if fetched is None:
            continue
        labels[curie] = fetched
    return labels
|
fetch all rdfs:label assertions for a set of CURIEs
|
371,992
|
def add_project(self, ):
    """Add the currently selected project and store it in self.projects.

    The project is attached to the atype or dep when one is set, otherwise
    the current user is added to the project's users.

    :returns: None
    :rtype: None
    :raises: None
    """
    index = self.prj_tablev.currentIndex()
    item = index.internalPointer()
    if not item:
        return
    project = item.internal_data()
    if self._atype:
        self._atype.projects.add(project)
    elif self._dep:
        self._dep.projects.add(project)
    else:
        project.users.add(self._user)
    self.projects.append(project)
    item.set_parent(None)
|
Add a project and store it in the self.projects
:returns: None
:rtype: None
:raises: None
|
371,993
|
def peek_at(iterable: Iterable[T]) -> Tuple[T, Iterator[T]]:
    """Return the first value from *iterable*, together with a new iterator
    that replays the full content of the original iterable.

    Raises StopIteration if the iterable is empty.
    """
    source = iter(iterable)
    first = next(source)
    replay = itertools.chain([first], source)
    return first, replay
|
Returns the first value from iterable, as well as a new iterator with
the same content as the original iterable
|
371,994
|
def main_passpersist(self):
    """Handle one request of snmpd's pass_persist protocol; called by the
    start method, direct calls are unnecessary."""
    # NOTE(review): the command literals compared with ``in line`` (the
    # PING / getnext / get / set / DUMP keywords) lost their strings during
    # extraction; restore from version control.
    line = sys.stdin.readline().strip()
    if not line:
        raise EOFError()
    if in line:
        print("PONG")
    elif in line:
        # getnext: empty OID means walk from the first entry
        oid = self.cut_oid(sys.stdin.readline().strip())
        if oid is None:
            print("NONE")
        elif oid == "":
            print(self.get_first())
        else:
            print(self.get_next(oid))
    elif in line:
        oid = self.cut_oid(sys.stdin.readline().strip())
        if oid is None:
            print("NONE")
        else:
            print(self.get(oid))
    elif in line:
        # set: reads the OID line then the "type value" line
        oid = sys.stdin.readline().strip()
        typevalue = sys.stdin.readline().strip()
        self.set(oid, typevalue)
    elif in line:
        from pprint import pprint
        pprint(self.data)
    else:
        print("NONE")
    sys.stdout.flush()
|
Main function that handle SNMP's pass_persist protocol, called by
the start method.
Direct call is unnecessary.
|
371,995
|
def before_all(ctx):
    """Test-suite setup hook: attach a docker client to *ctx* and ensure
    IMAGE is present locally, pulling it only when inspect reports it
    missing."""
    client = get_client()
    ctx.client = client
    try:
        client.inspect_image(IMAGE)
    except NotFound:
        client.pull(IMAGE)
|
Pulls down busybox:latest before anything is tested.
|
371,996
|
def alternative_short_name(self, name=None, entry_name=None, limit=None, as_df=False):
    """Query :class:`.models.AlternativeShortName` objects in the database.

    :param name: alternative short name(s); str, tuple(str) or None
    :param entry_name: name(s) in :class:`.models.Entry`; str, tuple(str) or None
    :param limit: int for a plain limit, tuple(page_number, results_per_page)
        for paging, or None for all results
    :param bool as_df: if True return a :class:`pandas.DataFrame` instead of
        a list of model instances
    :return: list of models or a DataFrame, per *as_df*
    """
    query = self.session.query(models.AlternativeShortName)
    query = self.get_model_queries(
        query, ((name, models.AlternativeShortName.name),))
    query = self.get_one_to_many_queries(
        query, ((entry_name, models.Entry.name),))
    return self._limit_and_df(query, limit, as_df)
|
Method to query :class:`.models.AlternativeShortlName` objects in database
:param name: alternative short name(s)
:type name: str or tuple(str) or None
:param entry_name: name(s) in :class:`.models.Entry`
:type entry_name: str or tuple(str) or None
:param limit:
- if `isinstance(limit,int)==True` -> limit
- if `isinstance(limit,tuple)==True` -> format:= tuple(page_number, results_per_page)
- if limit == None -> all results
:type limit: int or tuple(int) or None
:param bool as_df: if `True` results are returned as :class:`pandas.DataFrame`
:return:
- if `as_df == False` -> list(:class:`.models.AlternativeShortName`)
- if `as_df == True` -> :class:`pandas.DataFrame`
:rtype: list(:class:`.models.AlternativeShortName`) or :class:`pandas.DataFrame`
|
371,997
|
def clear_cache(backend=None):
    """Clear the fileserver cache for VCS fileserver backends (git, hg, svn);
    *backend* narrows which backends are cleared, and a leading ``-``
    excludes a backend.

    CLI Example: ``salt-run fileserver.clear_cache backend=git,hg``
    """
    # NOTE(review): the result-dict keys and the "nothing cleared" return
    # value lost their string literals during extraction; restore from VCS.
    fileserver = salt.fileserver.Fileserver(__opts__)
    cleared, errors = fileserver.clear_cache(back=backend)
    ret = {}
    if cleared:
        ret[] = cleared
    if errors:
        ret[] = errors
    if not ret:
        return
    return ret
|
.. versionadded:: 2015.5.0
Clear the fileserver cache from VCS fileserver backends (:mod:`git
<salt.fileserver.gitfs>`, :mod:`hg <salt.fileserver.hgfs>`, :mod:`svn
<salt.fileserver.svnfs>`). Executing this runner with no arguments will
clear the cache for all enabled VCS fileserver backends, but this
can be narrowed using the ``backend`` argument.
backend
Only clear the update lock for the specified backend(s). If all passed
backends start with a minus sign (``-``), then these backends will be
excluded from the enabled backends. However, if there is a mix of
backends with and without a minus sign (ex: ``backend=-roots,git``)
then the ones starting with a minus sign will be disregarded.
CLI Example:
.. code-block:: bash
salt-run fileserver.clear_cache
salt-run fileserver.clear_cache backend=git,hg
salt-run fileserver.clear_cache hg
salt-run fileserver.clear_cache -roots
|
371,998
|
def handleNotification(self, handle, data):
    """Dispatch a Bluetooth (GATT) notification to the callback registered
    for *handle*, if any."""
    # NOTE(review): the codecs.encode() encoding argument lost its string
    # literal (extraction artifact) — presumably 'hex' for debug logging.
    _LOGGER.debug("Got notification from %s: %s", handle, codecs.encode(data, ))
    if handle in self._callbacks:
        self._callbacks[handle](data)
|
Handle Callback from a Bluetooth (GATT) request.
|
371,999
|
def array_addunique(path, value, create_parents=False, **kwargs):
    """Add *value* to the array at *path* only if it is not already present.

    :param path: The path to the array.
    :param value: Value to add if absent; restricted to primitives
        (strings, numbers, booleans, None).
    :param create_parents: Create the array if it does not exist.

    Only valid in ``mutate_in``; the position of the new item within the
    array is unspecified.
    """
    spec_kwargs = dict(kwargs, create_path=create_parents)
    return _gen_4spec(LCB_SDCMD_ARRAY_ADD_UNIQUE, path, value, **spec_kwargs)
|
Add a new value to an array if the value does not exist.
:param path: The path to the array
:param value: Value to add to the array if it does not exist.
Currently the value is restricted to primitives: strings, numbers,
booleans, and `None` values.
:param create_parents: Create the array if it does not exist
.. note::
The actual position of the new item is unspecified. This means
it may be at the beginning, end, or middle of the existing
array)
This operation is only valid in :cb_bmeth:`mutate_in`.
.. seealso:: :func:`array_append`, :func:`upsert`
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.