code
stringlengths 75
104k
| docstring
stringlengths 1
46.9k
|
|---|---|
def get_signature(self, req):
    """Calculate the signature of the OSS request.

    Builds the canonicalized string from the x-oss-* headers, the
    bucket/resource path and the standard headers, then signs it with
    HMAC-SHA1 using the account secret key.

    :param req: prepared request object (must expose ``url``, ``method``
        and ``headers``)
    :returns: base64-encoded signature string
    """
    oss_url = url.URL(req.url)
    # Collect only the x-oss-* headers; lower_items() lower-cases the
    # header names as the signing scheme requires.
    oss_headers = [
        "{0}:{1}\n".format(key, val)
        for key, val in req.headers.lower_items()
        if key.startswith(self.X_OSS_PREFIX)
    ]
    # Headers must be joined in lexicographic order.
    canonicalized_headers = "".join(sorted(oss_headers))
    logger.debug(
        "canonicalized header : [{0}]".format(canonicalized_headers)
    )
    # Keep only sub-resource / override query parameters; everything else
    # is excluded from the canonicalized resource.
    oss_url.params = {
        key: val
        for key, val in oss_url.params.items()
        if key in self.SUB_RESOURCES or key in self.OVERRIDE_QUERIES
    }
    # Rebuild the URL with the filtered params sorted by parameter name.
    oss_url.forge(key=lambda x: x[0])
    canonicalized_str = "{0}/{1}{2}".format(
        canonicalized_headers,
        self.get_bucket(oss_url.host),
        oss_url.uri
    )
    str_to_sign = "\n".join([
        req.method,
        req.headers["content-md5"],
        req.headers["content-type"],
        req.headers["date"],
        canonicalized_str
    ])
    logger.debug(
        "signature str is \n{0}\n{1}\n{0}\n".format("#" * 20, str_to_sign)
    )
    # hmac needs bytes; encode when we were handed a text string
    # (requests.compat.str is ``str``/``unicode`` depending on Python 2/3).
    if isinstance(str_to_sign, requests.compat.str):
        str_to_sign = str_to_sign.encode("utf8")
    signature_bin = hmac.new(self._secret_key, str_to_sign, hashlib.sha1)
    signature = base64.b64encode(signature_bin.digest()).decode("utf8")
    logger.debug("signature is [{0}]".format(signature))
    return signature
|
calculate the signature of the oss request
Returns the signature
|
def pydeps2reqs(deps):
    """Convert a deps instance into requirements.

    :param deps: mapping of module name to metadata dicts with at least
        ``path`` and ``imported_by`` keys
    :returns: newline-joined requirement lines, one per detected package
    """
    reqs = defaultdict(set)
    # ``sys.real_prefix`` only exists inside old-style virtualenvs; fall
    # back to ``sys.base_prefix`` (PEP 405 venvs and plain installs) so
    # this does not crash with AttributeError outside a virtualenv.
    stdlib_prefix = getattr(sys, 'real_prefix', sys.base_prefix)
    for k, v in list(deps.items()):
        p = v['path']
        # skip built-ins (no path) and the interpreter's own stdlib
        if p and not (p.startswith(stdlib_prefix) and 'site-packages' not in p):
            if p.startswith(sys.prefix) and 'site-packages' in p:
                if not p.endswith('.pyd'):
                    if '/win32/' in p.replace('\\', '/'):
                        # the pywin32 distribution exposes many top-level
                        # modules; collapse them into one requirement
                        reqs['win32'] |= set(v['imported_by'])
                    else:
                        name = k.split('.', 1)[0]
                        if name not in skiplist:
                            reqs[name] |= set(v['imported_by'])
    # '_dummy' is an artificial node; never emit it as a requirement
    if '_dummy' in reqs:
        del reqs['_dummy']
    return '\n'.join(dep2req(name, reqs[name]) for name in sorted(reqs))
|
Convert a deps instance into requirements.
|
def parse_url(url):
    """Parse *url* into a urlparse result, defaulting the scheme to http.

    :param url: URL string, with or without an explicit scheme
    :return: parse result for the (possibly scheme-defaulted) URL
    """
    # Guard clause: no recognizable scheme prefix -> join onto http://
    if not url.startswith(('http', 'https', '//')):
        return urlparse.urlparse(urlparse.urljoin('http://', url))
    # Scheme-relative URLs get an explicit http default.
    if url.startswith('//'):
        return urlparse.urlparse(url, scheme='http')
    return urlparse.urlparse(url)
|
Return a clean URL. Remove the prefix for the Auth URL if Found.
:param url:
:return aurl:
|
def is_valid_regex(string):
    """
    Report whether the re module can compile the given regular expression.

    Parameters
    ----------
    string: str
        Candidate regular expression.

    Returns
    -------
    boolean
        True if the pattern compiles, False otherwise.
    """
    try:
        re.compile(string)
    except re.error:
        return False
    return True
|
Checks whether the re module can compile the given regular expression.
Parameters
----------
string: str
Returns
-------
boolean
|
def yaml_to_str(data: Mapping) -> str:
    """
    Return the given config as a YAML string.

    Uses ruamel's RoundTripDumper, which preserves comments and key order
    from a round-tripped document.

    :param data: configuration mapping to serialize
    :return: the configuration rendered as a YAML string
    """
    return yaml.dump(data, Dumper=ruamel.yaml.RoundTripDumper)
|
Return the given config as YAML str.
:param data: configuration dict
:return: given configuration as yaml str
|
def sv_variant(store, institute_id, case_name, variant_id=None, variant_obj=None, add_case=True,
               get_overlapping=True):
    """Pre-process an SV variant entry for detail page.

    Adds information to display variant.

    Args:
        store(scout.adapter.MongoAdapter)
        institute_id(str)
        case_name(str)
        variant_id(str)
        variant_obj(dict)
        add_case(bool): If information about case files should be added
        get_overlapping(bool): If overlapping SNVs should be fetched

    Returns:
        detailed_information(dict): {
            'institute': <institute_obj>,
            'case': <case_obj>,
            'variant': <variant_obj>,
            'overlapping_snvs': <overlapping_snvs>,
            'manual_rank_options': MANUAL_RANK_OPTIONS,
            'dismiss_variant_options': DISMISS_VARIANT_OPTIONS
        }
    """
    institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
    if not variant_obj:
        variant_obj = store.variant(variant_id)
    if add_case:
        # fill in information for pileup view
        variant_case(store, case_obj, variant_obj)
    # frequencies shown on the detail page
    variant_obj['frequencies'] = [
        ('1000G', variant_obj.get('thousand_genomes_frequency')),
        ('1000G (left)', variant_obj.get('thousand_genomes_frequency_left')),
        ('1000G (right)', variant_obj.get('thousand_genomes_frequency_right')),
        ('ClinGen CGH (benign)', variant_obj.get('clingen_cgh_benign')),
        ('ClinGen CGH (pathogenic)', variant_obj.get('clingen_cgh_pathogenic')),
        ('ClinGen NGI', variant_obj.get('clingen_ngi')),
        ('SweGen', variant_obj.get('swegen')),
        ('Decipher', variant_obj.get('decipher')),
    ]
    variant_obj['callers'] = callers(variant_obj, category='sv')
    overlapping_snvs = []
    if get_overlapping:
        # lazy generator; consumed when the template iterates it
        overlapping_snvs = (parse_variant(store, institute_obj, case_obj, variant)
                            for variant in store.overlapping(variant_obj))
    # parse_gene function is not called for SVs, but a link to ensembl gene
    # is required; guard against variants without a 'genes' entry
    for gene_obj in variant_obj.get('genes') or []:
        if gene_obj.get('common'):
            ensembl_id = gene_obj['common']['ensembl_id']
            try:
                build = int(gene_obj['common'].get('build', '37'))
            except Exception:
                # unknown or malformed build defaults to GRCh37
                build = 37
            gene_obj['ensembl_link'] = ensembl(ensembl_id, build=build)
    variant_obj['comments'] = store.events(institute_obj, case=case_obj,
                                           variant_id=variant_obj['variant_id'],
                                           comments=True)
    case_clinvars = store.case_to_clinVars(case_obj.get('display_name'))
    if variant_id in case_clinvars:
        variant_obj['clinvar_clinsig'] = case_clinvars[variant_id]['clinsig']
    # older variants may lack an explicit end chromosome
    if 'end_chrom' not in variant_obj:
        variant_obj['end_chrom'] = variant_obj['chromosome']
    return {
        'institute': institute_obj,
        'case': case_obj,
        'variant': variant_obj,
        'overlapping_snvs': overlapping_snvs,
        'manual_rank_options': MANUAL_RANK_OPTIONS,
        'dismiss_variant_options': DISMISS_VARIANT_OPTIONS
    }
|
Pre-process an SV variant entry for detail page.
Adds information to display variant
Args:
store(scout.adapter.MongoAdapter)
institute_id(str)
case_name(str)
variant_id(str)
variant_obj(dict)
add_case(bool): If information about case files should be added
Returns:
detailed_information(dict): {
'institute': <institute_obj>,
'case': <case_obj>,
'variant': <variant_obj>,
'overlapping_snvs': <overlapping_snvs>,
'manual_rank_options': MANUAL_RANK_OPTIONS,
'dismiss_variant_options': DISMISS_VARIANT_OPTIONS
}
|
def z__update(self):
    """Triple quoted baseline representation.

    Return string with multiple triple quoted baseline strings when
    baseline had been compared multiple times against varying strings.

    Each recorded update is rendered as a (possibly raw) triple quoted
    string literal; the literals are sorted for determinism and indented
    to ``self._indent``.

    :returns: source file baseline replacement text
    :rtype: str
    """
    updates = []
    for text in self._updates:
        if self._AVOID_RAW_FORM:
            # raw literals disabled: always use the escaped representation
            text_repr = multiline_repr(text)
            raw_char = ''
        else:
            text_repr = multiline_repr(text, RAW_MULTILINE_CHARS)
            if len(text_repr) == len(text):
                # representation unchanged -> safe as a raw literal; the
                # 'r' prefix is only needed when backslashes are present
                raw_char = 'r' if '\\' in text_repr else ''
            else:
                # must have special characters that required added backslash
                # escaping, use normal representation to get backslashes right
                text_repr = multiline_repr(text)
                raw_char = ''
        # use triple double quote, except use triple single quote when
        # triple double quote is present to avoid syntax errors
        quotes = '"""'
        if quotes in text:
            quotes = "'''"
        # Wrap with blank lines when multi-line or when text ends with
        # characters that would otherwise result in a syntax error in
        # the formatted representation.
        multiline = self._indent or ('\n' in text)
        if multiline or text.endswith('\\') or text.endswith(quotes[0]):
            update = raw_char + quotes + '\n' + text_repr + '\n' + quotes
        else:
            update = raw_char + quotes + text_repr + quotes
        updates.append(update)
    # sort updates so Python hash seed has no impact on regression test
    update = '\n'.join(sorted(updates))
    indent = ' ' * self._indent
    # indent only non-empty lines so blank lines stay truly empty
    lines = ((indent + line) if line else '' for line in update.split('\n'))
    return '\n'.join(lines).lstrip()
|
Triple quoted baseline representation.
Return string with multiple triple quoted baseline strings when
baseline had been compared multiple times against varying strings.
:returns: source file baseline replacement text
:rtype: str
|
def _set_comment(self, section, comment, key=None):
    """
    Attach a comment to a section, or to a key within a section.

    :param str section: Section to add the comment to
    :param str comment: Comment text (may span multiple lines)
    :param str key: Optional key within the section to comment on
    """
    # Prefix every line so the whole comment renders as '# ...' lines.
    rendered = '# ' + comment.replace('\n', '\n# ')
    target = (section, key) if key else section
    self._comments[target] = rendered
|
Set a comment for section or key
:param str section: Section to add comment to
:param str comment: Comment to add
:param str key: Key to add comment to
|
def largest_compartment_id_met(model):
    """
    Return the ID of the compartment with the most metabolites.

    Parameters
    ----------
    model : cobra.Model
        The metabolic model under investigation.

    Returns
    -------
    string
        Compartment ID of the compartment with the most metabolites.

    Raises
    ------
    RuntimeError
        If two compartments are tied for the largest amount of metabolites.
    """
    # Sort compartments by decreasing metabolite count.
    sized = sorted(
        ((c, len(metabolites_per_compartment(model, c)))
         for c in model.compartments), reverse=True, key=itemgetter(1))
    # A single compartment is trivially the largest; the previous
    # two-element unpacking raised ValueError in that case.
    if len(sized) == 1:
        return sized[0][0]
    candidate, second = sized[:2]
    # Compare the size of the two largest compartments.
    if candidate[1] == second[1]:
        raise RuntimeError("There is a tie for the largest compartment. "
                           "Compartment {} and {} have equal amounts of "
                           "metabolites.".format(candidate[0], second[0]))
    return candidate[0]
|
Return the ID of the compartment with the most metabolites.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
Returns
-------
string
Compartment ID of the compartment with the most metabolites.
|
def fit(self,
        target_type,
        target,
        adjust_thickness=False,
        adjust_site_atten=False,
        adjust_source_vel=False):
    """
    Fit to a target crustal amplification or site term.

    The fitting process adjusts the velocity, site attenuation, and layer
    thickness (if enabled) to fit target values. The frequency range is
    specified by the input motion.

    Parameters
    ----------
    target_type: str
        Options are 'crustal_amp' to only fit to the crustal amplification,
        or 'site_term' to fit both the velocity and the site attenuation
        parameter.
    target: `array_like`
        Target values.
    adjust_thickness: bool (optional)
        If the thickness of the layers is adjusted as well, default: False.
    adjust_site_atten: bool (optional)
        If the site attenuation is adjusted as well, default: False.
    adjust_source_vel: bool (optional)
        If the source velocity should be adjusted, default: False.

    Returns
    -------
    tuple
        ``(motion, profile, loc_input)`` where ``profile`` is the profile
        optimized to fit the target amplification.
        NOTE(review): the original docstring claimed only the profile was
        returned, but the code returns this 3-tuple — confirm callers.
    """
    density = self.profile.density
    nl = len(density)
    # Slowness bounds
    slowness = self.profile.slowness
    thickness = self.profile.thickness
    # remember the starting site attenuation; err() penalizes deviation
    # from it when adjust_site_atten is enabled
    site_atten = self._site_atten
    # Slowness
    initial = slowness
    # slowness bounds are the reciprocal of a 100-4000 velocity range
    bounds = 1 / np.tile((4000, 100), (nl, 1))
    if not adjust_source_vel:
        # pin the last (source/half-space) layer slowness to its initial value
        bounds[-1] = (initial[-1], initial[-1])
    # Thickness bounds
    if adjust_thickness:
        # each thickness may shrink to half or grow to double
        bounds = np.r_[bounds, [[t / 2, 2 * t] for t in thickness]]
        initial = np.r_[initial, thickness]
    # Site attenuation bounds
    if adjust_site_atten:
        bounds = np.r_[bounds, [[0.0001, 0.200]]]
        initial = np.r_[initial, self.site_atten]
    def calc_rmse(this, that):
        # mean squared relative error between two arrays
        return np.mean(((this - that) / that) ** 2)
    def err(x):
        # unpack the optimizer's parameter vector: slowness first, then
        # optional thicknesses, then optional site attenuation (last slot)
        _slowness = x[0:nl]
        if adjust_thickness:
            _thickness = x[nl:(2 * nl)]
        else:
            _thickness = thickness
        if adjust_site_atten:
            self._site_atten = x[-1]
        crustal_amp, site_term = self._calc_amp(density, _thickness,
                                                _slowness)
        calc = crustal_amp if target_type == 'crustal_amp' else site_term
        err = 10 * calc_rmse(target, calc)
        # Prefer the original values so add the difference to the error
        err += calc_rmse(slowness, _slowness)
        if adjust_thickness:
            err += calc_rmse(thickness, _thickness)
        if adjust_site_atten:
            err += calc_rmse(self._site_atten, site_atten)
        return err
    res = minimize(err, initial, method='L-BFGS-B', bounds=bounds)
    slowness = res.x[0:nl]
    if adjust_thickness:
        thickness = res.x[nl:(2 * nl)]
    # rebuild the profile from the optimized slowness/thickness values
    profile = Profile([
        Layer(l.soil_type, t, 1 / s)
        for l, t, s in zip(self.profile, thickness, slowness)
    ], self.profile.wt_depth)
    # Update the calculated amplification
    return (self.motion, profile, self.loc_input)
|
Fit to a target crustal amplification or site term.
The fitting process adjusts the velocity, site attenuation, and layer
thickness (if enabled) to fit a target values. The frequency range is
specified by the input motion.
Parameters
----------
target_type: str
Options are 'crustal_amp' to only fit to the crustal amplification,
or 'site_term' to fit both the velocity and the site attenuation
parameter.
target: `array_like`
Target values.
adjust_thickness: bool (optional)
If the thickness of the layers is adjusted as well, default: False.
adjust_site_atten: bool (optional)
If the site attenuation is adjusted as well, default: False.
adjust_source_vel: bool (optional)
If the source velocity should be adjusted, default: False.
Returns
-------
profile: `pyrsa.site.Profile`
profile optimized to fit a target amplification.
|
def name(value):
    """Get the string title for a particular type.

    Given a value, get an appropriate string title for the type that can
    be used to re-cast the value later.
    """
    # None matches any type.
    if value is None:
        return 'any'
    # Walk the registered (type, title) pairs; first match wins.
    for type_check, type_title in TESTS:
        if isinstance(value, type_check):
            return type_title
    # Fall back to a plain string when no test matched.
    return 'string'
|
Get the string title for a particular type.
Given a value, get an appropriate string title for the type that can
be used to re-cast the value later.
|
def is_valid_intensity_measure_types(self):
    """
    If the IMTs and levels are extracted from the risk models,
    they must not be set directly. Moreover, if
    `intensity_measure_types_and_levels` is set directly,
    `intensity_measure_types` must not be set.
    """
    if self.ground_motion_correlation_model:
        # correlation models only support SA and PGA
        for imt in self.imtls:
            if imt != 'PGA' and not imt.startswith('SA'):
                raise ValueError(
                    'Correlation model %s does not accept IMT=%s' % (
                        self.ground_motion_correlation_model, imt))
    if self.risk_files:  # IMTLs extracted from the risk files
        return (self.intensity_measure_types is None and
                self.intensity_measure_types_and_levels is None)
    # otherwise valid only when IMTLs were provided some other way
    return hasattr(self, 'hazard_imtls') or hasattr(self, 'risk_imtls')
|
If the IMTs and levels are extracted from the risk models,
they must not be set directly. Moreover, if
`intensity_measure_types_and_levels` is set directly,
`intensity_measure_types` must not be set.
|
def get_iam_policy(self):
    """Gets the access control policy for an instance resource.

    For example:

    .. literalinclude:: snippets.py
        :start-after: [START bigtable_get_iam_policy]
        :end-before: [END bigtable_get_iam_policy]

    :rtype: :class:`google.cloud.bigtable.policy.Policy`
    :returns: The current IAM policy of this instance
    """
    admin_client = self._client.instance_admin_client
    policy_pb = admin_client.get_iam_policy(resource=self.name)
    return Policy.from_pb(policy_pb)
|
Gets the access control policy for an instance resource.
For example:
.. literalinclude:: snippets.py
:start-after: [START bigtable_get_iam_policy]
:end-before: [END bigtable_get_iam_policy]
:rtype: :class:`google.cloud.bigtable.policy.Policy`
:returns: The current IAM policy of this instance
|
def reorder(self, indices: mx.nd.NDArray) -> None:
    """
    Reorders the avoid list according to the selected row indices.

    This can produce duplicates, but this is fixed if state changes occur in consume().

    :param indices: An mx.nd.NDArray containing indices of hypotheses to select.
    """
    if self.global_avoid_states:
        order = indices.asnumpy()
        self.global_avoid_states = [self.global_avoid_states[i] for i in order]
    if self.local_avoid_states:
        order = indices.asnumpy()
        self.local_avoid_states = [self.local_avoid_states[i] for i in order]
|
Reorders the avoid list according to the selected row indices.
This can produce duplicates, but this is fixed if state changes occur in consume().
:param indices: An mx.nd.NDArray containing indices of hypotheses to select.
|
def create(container, portal_type, *args, **kwargs):
    """Creates an object in Bika LIMS

    This code uses most of the parts from the TypesTool
    see: `Products.CMFCore.TypesTool._constructInstance`

    :param container: container
    :type container: ATContentType/DexterityContentType/CatalogBrain
    :param portal_type: The portal type to create, e.g. "Client"
    :type portal_type: string
    :param title: The title for the new content object
    :type title: string
    :returns: The new created object
    """
    from bika.lims.utils import tmpID
    # default the title when the caller did not supply one
    if kwargs.get("title") is None:
        kwargs["title"] = "New {}".format(portal_type)
    # generate a temporary ID
    tmp_id = tmpID()
    # get the fti
    types_tool = get_tool("portal_types")
    fti = types_tool.getTypeInfo(portal_type)
    if fti.product:
        # oldstyle (product-based) factory
        obj = _createObjectByType(portal_type, container, tmp_id)
    else:
        # newstyle factory
        factory = getUtility(IFactory, fti.factory)
        obj = factory(tmp_id, *args, **kwargs)
        if hasattr(obj, '_setPortalTypeName'):
            obj._setPortalTypeName(fti.getId())
        notify(ObjectCreatedEvent(obj))
        # notifies ObjectWillBeAddedEvent, ObjectAddedEvent and
        # ContainerModifiedEvent
        container._setObject(tmp_id, obj)
        # we get the object here with the current object id, as it might be
        # renamed already by an event handler
        obj = container._getOb(obj.getId())
    # handle AT Content
    if is_at_content(obj):
        obj.processForm()
    # Edit after processForm; processForm does AT unmarkCreationFlag.
    obj.edit(**kwargs)
    # explicit notification
    modified(obj)
    return obj
|
Creates an object in Bika LIMS
This code uses most of the parts from the TypesTool
see: `Products.CMFCore.TypesTool._constructInstance`
:param container: container
:type container: ATContentType/DexterityContentType/CatalogBrain
:param portal_type: The portal type to create, e.g. "Client"
:type portal_type: string
:param title: The title for the new content object
:type title: string
:returns: The new created object
|
def comments(self):
    """Fetch every comment under this answer.

    :return: all comments of the answer, yielded lazily
    :rtype: Comments.Iterable
    """
    import math
    from .author import Author, ANONYMOUS
    from .comment import Comment
    api_url = Get_Answer_Comment_URL.format(self.aid)
    page = pages = 1
    while page <= pages:
        res = self._session.get(api_url + '?page=' + str(page))
        if page == 1:
            # the first page carries the total count; the API serves
            # 30 comments per page
            total = int(res.json()['paging']['totalCount'])
            if total == 0:
                return
            pages = math.ceil(total / 30)
        page += 1
        comment_items = res.json()['data']
        for comment_item in comment_items:
            comment_id = comment_item['id']
            content = comment_item['content']
            upvote_num = comment_item['likesCount']
            # keep only the seconds-precision part of the timestamp
            time_string = comment_item['createdTime'][:19]
            time = datetime.strptime(time_string, "%Y-%m-%dT%H:%M:%S")
            if comment_item['author'].get('url') is not None:
                a_url = comment_item['author']['url']
                a_name = comment_item['author']['name']
                photo_url_tmp = comment_item['author']['avatar']['template']
                photo_url_id = comment_item['author']['avatar']['id']
                # fill the avatar template with the id and drop the size token
                a_photo_url = photo_url_tmp.replace(
                    '{id}', photo_url_id).replace('_{size}', '')
                author_obj = Author(a_url, a_name, photo_url=a_photo_url,
                                    session=self._session)
            else:
                # no author url -> anonymous commenter
                author_obj = ANONYMOUS
            yield Comment(comment_id, self, author_obj, upvote_num, content, time)
|
获取答案下的所有评论.
:return: 答案下的所有评论,返回生成器
:rtype: Comments.Iterable
|
def compute_dosage(expec, alt=None):
    r""" Compute dosage from allele expectation.

    Parameters
    ----------
    expec : array_like
        Allele expectations encoded as a samples-by-alleles matrix.
    alt : array_like, optional
        Alternative allele index. If ``None``, the allele having the minor
        allele frequency for the provided ``expec`` is used as the alternative.
        Defaults to ``None``.

    Returns
    -------
    :class:`numpy.ndarray`
        Dosage encoded as an array of size equal to the number of samples.

    Examples
    --------
    .. code-block:: python
        :caption: First a quick-start example.

        >>> from bgen_reader import allele_expectation, compute_dosage
        >>> from bgen_reader import example_files, read_bgen
        >>>
        >>> # Download an example.
        >>> example = example_files("example.32bits.bgen")
        >>> filepath = example.filepath
        >>>
        >>> # Read the example.
        >>> bgen = read_bgen(filepath, verbose=False)
        >>>
        >>> # Extract the allele expectations of the fourth variant.
        >>> variant_idx = 3
        >>> e = allele_expectation(bgen, variant_idx)
        >>>
        >>> # Compute the dosage when considering the first allele
        >>> # as the reference/alternative one.
        >>> alt_allele_idx = 1
        >>> d = compute_dosage(e, alt=alt_allele_idx)
        >>>
        >>> # Print the dosage of the first five samples only.
        >>> print(d[:5])
        [1.96185308 0.00982666 0.01745552 1.00347899 1.01153563]
        >>>
        >>> # Clean-up the example
        >>> example.close()

    .. code-block:: python
        :caption: Genotype probabilities, allele expectations and frequencies.

        >>> from bgen_reader import (
        ...     allele_expectation,
        ...     allele_frequency,
        ...     compute_dosage,
        ...     example_files,
        ...     read_bgen,
        ... )
        >>> from pandas import DataFrame
        >>> from xarray import DataArray
        >>>
        >>> # Download an example
        >>> example = example_files("example.32bits.bgen")
        >>> filepath = example.filepath
        >>>
        >>> # Open the bgen file.
        >>> bgen = read_bgen(filepath, verbose=False)
        >>> variants = bgen["variants"]
        >>> genotype = bgen["genotype"]
        >>> samples = bgen["samples"]
        >>>
        >>> variant_idx = 3
        >>> variant = variants.loc[variant_idx].compute()
        >>> # Print the metadata of the fourth variant.
        >>> print(variant)
                id    rsid chrom   pos  nalleles allele_ids  vaddr
        3  SNPID_5  RSID_5    01  5000         2        A,G  16034
        >>> geno = bgen["genotype"][variant_idx].compute()
        >>> metageno = DataFrame({k: geno[k] for k in ["ploidy", "missing"]},
        ...                      index=samples)
        >>> metageno.index.name = "sample"
        >>> print(metageno) # doctest: +IGNORE_EXCEPTION_DETAIL, +NORMALIZE_WHITESPACE
                    ploidy  missing
        sample
        sample_001       2    False
        sample_002       2    False
        sample_003       2    False
        sample_004       2    False
        ...            ...      ...
        sample_497       2    False
        sample_498       2    False
        sample_499       2    False
        sample_500       2    False
        <BLANKLINE>
        [500 rows x 2 columns]
        >>> p = DataArray(
        ...     geno["probs"],
        ...     name="probability",
        ...     coords={"sample": samples},
        ...     dims=["sample", "genotype"],
        ... )
        >>> # Print the genotype probabilities.
        >>> print(p.to_series().unstack(level=-1)) # doctest: +IGNORE_EXCEPTION_DETAIL, +NORMALIZE_WHITESPACE
        genotype          0        1        2
        sample
        sample_001  0.00488  0.02838  0.96674
        sample_002  0.99045  0.00928  0.00027
        sample_003  0.98932  0.00391  0.00677
        sample_004  0.00662  0.98328  0.01010
        ...             ...      ...      ...
        sample_497  0.00137  0.01312  0.98550
        sample_498  0.00552  0.99423  0.00024
        sample_499  0.01266  0.01154  0.97580
        sample_500  0.00021  0.98431  0.01547
        <BLANKLINE>
        [500 rows x 3 columns]
        >>> alleles = variant["allele_ids"].item().split(",")
        >>> e = DataArray(
        ...     allele_expectation(bgen, variant_idx),
        ...     name="expectation",
        ...     coords={"sample": samples, "allele": alleles},
        ...     dims=["sample", "allele"],
        ... )
        >>> # Print the allele expectations.
        >>> print(e.to_series().unstack(level=-1)) # doctest: +IGNORE_EXCEPTION_DETAIL, +NORMALIZE_WHITESPACE
        allele            A        G
        sample
        sample_001  0.03815  1.96185
        sample_002  1.99017  0.00983
        sample_003  1.98254  0.01746
        sample_004  0.99652  1.00348
        ...             ...      ...
        sample_497  0.01587  1.98413
        sample_498  1.00528  0.99472
        sample_499  0.03687  1.96313
        sample_500  0.98474  1.01526
        <BLANKLINE>
        [500 rows x 2 columns]
        >>> rsid = variant["rsid"].item()
        >>> chrom = variant["chrom"].item()
        >>> variant_name = f"{chrom}:{rsid}"
        >>> f = DataFrame(allele_frequency(e), columns=[variant_name], index=alleles)
        >>> f.index.name = "allele"
        >>> # Allele frequencies.
        >>> print(f) # doctest: +IGNORE_EXCEPTION_DETAIL, +NORMALIZE_WHITESPACE
                01:RSID_5
        allele
        A       305.97218
        G       194.02782
        >>> alt = f.idxmin().item()
        >>> alt_idx = alleles.index(alt)
        >>> d = compute_dosage(e, alt=alt_idx).to_series()
        >>> d = DataFrame(d.values, columns=[f"alt={alt}"], index=d.index)
        >>> # Dosages when considering G as the alternative allele.
        >>> print(d) # doctest: +IGNORE_EXCEPTION_DETAIL, +NORMALIZE_WHITESPACE
                      alt=G
        sample
        sample_001  1.96185
        sample_002  0.00983
        sample_003  0.01746
        sample_004  1.00348
        ...             ...
        sample_497  1.98413
        sample_498  0.99472
        sample_499  1.96313
        sample_500  1.01526
        <BLANKLINE>
        [500 rows x 1 columns]
        >>>
        >>> # Clean-up the example
        >>> example.close()
    """
    if alt is None:
        # NOTE(review): the docstring says the minor-frequency allele is
        # chosen when alt is None, but the code simply takes the last
        # allele column — confirm which behavior is intended.
        return expec[..., -1]
    try:
        return expec[:, alt]
    except NotImplementedError:
        # fall back for array backends that do not implement this fancy
        # indexing: coerce both index and data to plain numpy arrays
        alt = asarray(alt, int)
        return asarray(expec, float)[:, alt]
|
r""" Compute dosage from allele expectation.
Parameters
----------
expec : array_like
Allele expectations encoded as a samples-by-alleles matrix.
alt : array_like, optional
Alternative allele index. If ``None``, the allele having the minor
allele frequency for the provided ``expec`` is used as the alternative.
Defaults to ``None``.
Returns
-------
:class:`numpy.ndarray`
Dosage encoded as an array of size equal to the number of samples.
Examples
--------
.. code-block:: python
:caption: First a quick-start example.
>>> from bgen_reader import allele_expectation, compute_dosage
>>> from bgen_reader import example_files, read_bgen
>>>
>>> # Download an example.
>>> example = example_files("example.32bits.bgen")
>>> filepath = example.filepath
>>>
>>> # Read the example.
>>> bgen = read_bgen(filepath, verbose=False)
>>>
>>> # Extract the allele expectations of the fourth variant.
>>> variant_idx = 3
>>> e = allele_expectation(bgen, variant_idx)
>>>
>>> # Compute the dosage when considering the first allele
>>> # as the reference/alternative one.
>>> alt_allele_idx = 1
>>> d = compute_dosage(e, alt=alt_allele_idx)
>>>
>>> # Print the dosage of the first five samples only.
>>> print(d[:5])
[1.96185308 0.00982666 0.01745552 1.00347899 1.01153563]
>>>
>>> # Clean-up the example
>>> example.close()
.. code-block:: python
:caption: Genotype probabilities, allele expectations and frequencies.
>>> from bgen_reader import (
... allele_expectation,
... allele_frequency,
... compute_dosage,
... example_files,
... read_bgen,
... )
>>> from pandas import DataFrame
>>> from xarray import DataArray
>>>
>>> # Download an example
>>> example = example_files("example.32bits.bgen")
>>> filepath = example.filepath
>>>
>>> # Open the bgen file.
>>> bgen = read_bgen(filepath, verbose=False)
>>> variants = bgen["variants"]
>>> genotype = bgen["genotype"]
>>> samples = bgen["samples"]
>>>
>>> variant_idx = 3
>>> variant = variants.loc[variant_idx].compute()
>>> # Print the metadata of the fourth variant.
>>> print(variant)
id rsid chrom pos nalleles allele_ids vaddr
3 SNPID_5 RSID_5 01 5000 2 A,G 16034
>>> geno = bgen["genotype"][variant_idx].compute()
>>> metageno = DataFrame({k: geno[k] for k in ["ploidy", "missing"]},
... index=samples)
>>> metageno.index.name = "sample"
>>> print(metageno) # doctest: +IGNORE_EXCEPTION_DETAIL, +NORMALIZE_WHITESPACE
ploidy missing
sample
sample_001 2 False
sample_002 2 False
sample_003 2 False
sample_004 2 False
... ... ...
sample_497 2 False
sample_498 2 False
sample_499 2 False
sample_500 2 False
<BLANKLINE>
[500 rows x 2 columns]
>>> p = DataArray(
... geno["probs"],
... name="probability",
... coords={"sample": samples},
... dims=["sample", "genotype"],
... )
>>> # Print the genotype probabilities.
>>> print(p.to_series().unstack(level=-1)) # doctest: +IGNORE_EXCEPTION_DETAIL, +NORMALIZE_WHITESPACE
genotype 0 1 2
sample
sample_001 0.00488 0.02838 0.96674
sample_002 0.99045 0.00928 0.00027
sample_003 0.98932 0.00391 0.00677
sample_004 0.00662 0.98328 0.01010
... ... ... ...
sample_497 0.00137 0.01312 0.98550
sample_498 0.00552 0.99423 0.00024
sample_499 0.01266 0.01154 0.97580
sample_500 0.00021 0.98431 0.01547
<BLANKLINE>
[500 rows x 3 columns]
>>> alleles = variant["allele_ids"].item().split(",")
>>> e = DataArray(
... allele_expectation(bgen, variant_idx),
... name="expectation",
... coords={"sample": samples, "allele": alleles},
... dims=["sample", "allele"],
... )
>>> # Print the allele expectations.
>>> print(e.to_series().unstack(level=-1)) # doctest: +IGNORE_EXCEPTION_DETAIL, +NORMALIZE_WHITESPACE
allele A G
sample
sample_001 0.03815 1.96185
sample_002 1.99017 0.00983
sample_003 1.98254 0.01746
sample_004 0.99652 1.00348
... ... ...
sample_497 0.01587 1.98413
sample_498 1.00528 0.99472
sample_499 0.03687 1.96313
sample_500 0.98474 1.01526
<BLANKLINE>
[500 rows x 2 columns]
>>> rsid = variant["rsid"].item()
>>> chrom = variant["chrom"].item()
>>> variant_name = f"{chrom}:{rsid}"
>>> f = DataFrame(allele_frequency(e), columns=[variant_name], index=alleles)
>>> f.index.name = "allele"
>>> # Allele frequencies.
>>> print(f) # doctest: +IGNORE_EXCEPTION_DETAIL, +NORMALIZE_WHITESPACE
01:RSID_5
allele
A 305.97218
G 194.02782
>>> alt = f.idxmin().item()
>>> alt_idx = alleles.index(alt)
>>> d = compute_dosage(e, alt=alt_idx).to_series()
>>> d = DataFrame(d.values, columns=[f"alt={alt}"], index=d.index)
>>> # Dosages when considering G as the alternative allele.
>>> print(d) # doctest: +IGNORE_EXCEPTION_DETAIL, +NORMALIZE_WHITESPACE
alt=G
sample
sample_001 1.96185
sample_002 0.00983
sample_003 0.01746
sample_004 1.00348
... ...
sample_497 1.98413
sample_498 0.99472
sample_499 1.96313
sample_500 1.01526
<BLANKLINE>
[500 rows x 1 columns]
>>>
>>> # Clean-up the example
>>> example.close()
|
def normalize(self, mode="max", value=1):
    """
    Normalize the spectrum's y values in place.

    Args:
        mode (str): Normalization mode. Supported modes are "max" (set the
            max y value to value, e.g., in XRD patterns), "sum" (set the
            sum of y to a value, i.e., like a probability density).
        value (float): Value to normalize to. Defaults to 1.

    Raises:
        ValueError: If an unknown mode is given.
    """
    mode_key = mode.lower()
    if mode_key == "sum":
        scale = np.sum(self.y, axis=0)
    elif mode_key == "max":
        scale = np.max(self.y, axis=0)
    else:
        raise ValueError("Unsupported normalization mode %s!" % mode)
    # Divide in place so the spectrum object is updated directly.
    self.y /= scale / value
|
Normalize the spectrum with respect to the sum of intensity
Args:
mode (str): Normalization mode. Supported modes are "max" (set the
max y value to value, e.g., in XRD patterns), "sum" (set the
sum of y to a value, i.e., like a probability density).
value (float): Value to normalize to. Defaults to 1.
|
def get_jobs(self, project, **params):
    """
    Fetch jobs from a project, filtered by the given parameters.

    :param project: project (repository name) to query data for
    :param params: keyword arguments to filter results
    """
    endpoint = self.JOBS_ENDPOINT
    return self._get_json_list(endpoint, project, **params)
|
Gets jobs from project, filtered by parameters
:param project: project (repository name) to query data for
:param params: keyword arguments to filter results
|
def score_cosine(self, term1, term2, **kwargs):
    """
    Compute a weighting score based on the cosine distance between the
    kernel density estimates of two terms.

    Args:
        term1 (str)
        term2 (str)

    Returns: float
    """
    kde1 = self.kde(term1, **kwargs)
    kde2 = self.kde(term2, **kwargs)
    # cosine() is a distance; subtract from 1 to get a similarity score
    return 1 - distance.cosine(kde1, kde2)
|
Compute a weighting score based on the cosine distance between the
kernel density estimates of two terms.
Args:
term1 (str)
term2 (str)
Returns: float
|
def get_config_path():
    """
    Determine the path to the config file. This will return, in this order of
    precedence:
    - the value of $BUGWARRIORRC if set
    - $XDG_CONFIG_HOME/bugwarrior/bugwarriorrc if it exists
    - ~/.bugwarriorrc if it exists
    - <dir>/bugwarrior/bugwarriorrc if it exists, for dir in $XDG_CONFIG_DIRS
    - $XDG_CONFIG_HOME/bugwarrior/bugwarriorrc otherwise
    """
    rc_from_env = os.environ.get(BUGWARRIORRC)
    if rc_from_env:
        return rc_from_env
    config_home = (
        os.environ.get('XDG_CONFIG_HOME') or os.path.expanduser('~/.config'))
    config_dirs = (
        (os.environ.get('XDG_CONFIG_DIRS') or '/etc/xdg').split(':'))
    # candidates in decreasing order of precedence
    candidates = [
        os.path.join(config_home, 'bugwarrior', 'bugwarriorrc'),
        os.path.expanduser("~/.bugwarriorrc")]
    candidates.extend(
        os.path.join(d, 'bugwarrior', 'bugwarriorrc') for d in config_dirs)
    for candidate in candidates:
        if os.path.exists(candidate):
            return candidate
    # nothing exists yet: default to the XDG_CONFIG_HOME location
    return candidates[0]
|
Determine the path to the config file. This will return, in this order of
precedence:
- the value of $BUGWARRIORRC if set
- $XDG_CONFIG_HOME/bugwarrior/bugwarriorc if exists
- ~/.bugwarriorrc if exists
- <dir>/bugwarrior/bugwarriorc if exists, for dir in $XDG_CONFIG_DIRS
- $XDG_CONFIG_HOME/bugwarrior/bugwarriorc otherwise
|
def get_limits(self):
    """
    Return all known limits for this service, as a dict of their names
    to :py:class:`~.AwsLimit` objects.

    Limits from:
    docs.aws.amazon.com/Route53/latest/DeveloperGuide/DNSLimitations.html

    :returns: dict of limit names to :py:class:`~.AwsLimit` objects
    :rtype: dict
    """
    # cached after the first call
    if self.limits:
        return self.limits
    limits = {}
    for spec in (self.MAX_RRSETS_BY_ZONE, self.MAX_VPCS_ASSOCIATED_BY_ZONE):
        limits[spec["name"]] = AwsLimit(
            spec["name"],
            self,
            spec["default_limit"],
            self.warning_threshold,
            self.critical_threshold,
            limit_type='AWS::Route53::HostedZone',
            limit_subtype=spec["name"]
        )
    self.limits = limits
    return self.limits
|
Return all known limits for this service, as a dict of their names
to :py:class:`~.AwsLimit` objects.
Limits from:
docs.aws.amazon.com/Route53/latest/DeveloperGuide/DNSLimitations.html
:returns: dict of limit names to :py:class:`~.AwsLimit` objects
:rtype: dict
|
def _pos(self, k):
    """
    Description:
        Position k breaking: build an (m, m) matrix with a 1 wherever
        exactly one index of the pair (i, j) equals ``k`` and the other
        is strictly greater than ``k``; all other entries are 0.
    Parameters:
        k: position k is used for the breaking (must be >= 2)
    Raises:
        ValueError: if ``k`` is smaller than 2
    """
    if k < 2:
        raise ValueError("k smaller than 2")
    G = np.zeros((self.m, self.m))
    # Only row k and column k receive non-zero entries, restricted to the
    # partner index being > k (the diagonal entry G[k][k] stays 0).
    for other in range(k + 1, self.m):
        G[k][other] = 1
        G[other][k] = 1
    return G
|
Description:
Position k breaking
Parameters:
k: position k is used for the breaking
|
def __xd_iterator_pass_on(arr, view, fun):
    """
    Like xd_iterator, but the fun return values are always passed on to the
    next call and only the last one is returned.

    Parameters
    ----------
    arr : ndarray
        Array to iterate over.
    view : container of int
        Dimensions taken whole in every sub-volume; all other dimensions
        are iterated index by index.
    fun : callable
        Called as ``fun(subvolume, passon)``; its return value is handed to
        the next invocation and finally returned.
    """
    # create list of iterations: viewed dims contribute a single None
    # (take the whole axis), others one entry per index
    iterations = [[None] if dim in view else list(range(arr.shape[dim]))
                  for dim in range(arr.ndim)]
    # iterate, create slicer, execute function and collect results
    passon = None
    for indices in itertools.product(*iterations):
        # BUG FIX: the slicer must be a tuple -- indexing with a *list* of
        # slices was deprecated in NumPy 1.15 and is rejected by modern
        # NumPy versions.
        slicer = tuple(slice(None) if idx is None else slice(idx, idx + 1)
                       for idx in indices)
        # NOTE(review): scipy.squeeze is a legacy NumPy alias removed from
        # recent SciPy releases -- consider numpy.squeeze; kept for parity.
        passon = fun(scipy.squeeze(arr[slicer]), passon)
    return passon
|
Like xd_iterator, but the fun return values are always passed on to the next and only the last returned.
|
def register_producer(cls, producer):
    """
    Register a default producer for events to use.

    The first registered producer wins: if one is already set on the
    class it is kept unchanged.

    :param producer: the default producer to to dispatch events on.
    """
    log.info('@Registry.register_producer `{}`'
             .format(producer.__class__.__name__))
    if not cls._producer:
        cls._producer = producer
|
Register a default producer for events to use.
:param producer: the default producer to to dispatch events on.
|
def get_trending_daily_not_starred(self):
    """Gets trending repositories NOT starred by user

    :return: List of daily-trending repositories which are not starred
    """
    trending = self.get_trending_daily()  # repos trending daily
    starred = self.get_starred_repos()  # repos starred by user
    return [repo for repo in trending if repo not in starred]
|
Gets trending repositories NOT starred by user
:return: List of daily-trending repositories which are not starred
|
def check_token(token):
    '''Verify HTTP header token authentication.

    Returns the matching user, or ``None`` when the API key is unknown.
    '''
    matched = models.User.objects(api_key=token).first()
    if matched:
        return matched
    return None
|
Verify HTTP header token authentication
|
def loginfo(logger, msg, *args, **kwargs):
    '''
    Logs messages as INFO,
    unless esgfpid.defaults.LOG_INFO_TO_DEBUG,
    (then it logs messages as DEBUG).
    '''
    emit = logger.debug if esgfpid.defaults.LOG_INFO_TO_DEBUG else logger.info
    emit(msg, *args, **kwargs)
|
Logs messages as INFO,
unless esgfpid.defaults.LOG_INFO_TO_DEBUG,
(then it logs messages as DEBUG).
|
def yield_figs(self, **kwargs):  # pragma: no cover
    """
    This function *generates* a predefined list of matplotlib figures
    (densities, waves, projectors) with minimal input from the user.
    """
    for plot, title in (
        (self.plot_densities, "PAW densities"),
        (self.plot_waves, "PAW waves"),
        (self.plot_projectors, "PAW projectors"),
    ):
        yield plot(title=title, show=False)
|
This function *generates* a predefined list of matplotlib figures with minimal input from the user.
|
def _get_tree_properties(root):
    """Inspect the binary tree and return its properties (e.g. height).

    :param root: Root node of the binary tree.
    :type root: binarytree.Node
    :return: Binary tree properties (height, size, heap/strict/complete
        flags, leaf count, min/max node values and leaf depths).
    :rtype: dict
    """
    # Ordering flags feed the min/max-heap checks at the end.
    is_descending = True
    is_ascending = True
    min_node_value = root.value
    max_node_value = root.value
    size = 0
    leaf_count = 0
    min_leaf_depth = 0
    max_leaf_depth = -1
    is_strict = True
    is_complete = True
    current_nodes = [root]
    non_full_node_seen = False
    # Breadth-first traversal: each pass of the while-loop processes one
    # level, so max_leaf_depth tracks the depth of the current level.
    while len(current_nodes) > 0:
        max_leaf_depth += 1
        next_nodes = []
        for node in current_nodes:
            size += 1
            value = node.value
            min_node_value = min(value, min_node_value)
            max_node_value = max(value, max_node_value)
            # Node is a leaf.
            if node.left is None and node.right is None:
                if min_leaf_depth == 0:
                    # First (shallowest) leaf encountered in BFS order.
                    min_leaf_depth = max_leaf_depth
                leaf_count += 1
            if node.left is not None:
                if node.left.value > value:
                    is_descending = False
                elif node.left.value < value:
                    is_ascending = False
                next_nodes.append(node.left)
                # Completeness fails if a child appears after an earlier
                # node was seen with a missing child.
                is_complete = not non_full_node_seen
            else:
                non_full_node_seen = True
            if node.right is not None:
                if node.right.value > value:
                    is_descending = False
                elif node.right.value < value:
                    is_ascending = False
                next_nodes.append(node.right)
                is_complete = not non_full_node_seen
            else:
                non_full_node_seen = True
            # If we see a node with only one child, it is not strict
            is_strict &= (node.left is None) == (node.right is None)
        current_nodes = next_nodes
    return {
        'height': max_leaf_depth,
        'size': size,
        'is_max_heap': is_complete and is_descending,
        'is_min_heap': is_complete and is_ascending,
        'is_perfect': leaf_count == 2 ** max_leaf_depth,
        'is_strict': is_strict,
        'is_complete': is_complete,
        'leaf_count': leaf_count,
        'min_node_value': min_node_value,
        'max_node_value': max_node_value,
        'min_leaf_depth': min_leaf_depth,
        'max_leaf_depth': max_leaf_depth,
    }
|
Inspect the binary tree and return its properties (e.g. height).
:param root: Root node of the binary tree.
:rtype: binarytree.Node
:return: Binary tree properties.
:rtype: dict
|
def embed(self, rel, other, wrap=False):
    """Embeds a document inside this document.

    Arguments:

    - ``rel``: a string specifying the link relationship type of the
      embedded resource. ``rel`` should be a well-known link relation name
      from the IANA registry
      (http://www.iana.org/assignments/link-relations/link-relations.xml),
      a full URI, or a CURIE.
    - ``other``: a ``Document`` instance that will be embedded in this
      document. If ``other`` is identical to this document, this method
      will silently fail.
    - ``wrap``: Defaults to False, but if True, specifies that the embedded
      resource object should be initally wrapped in a JSON array even if it
      is the first embedded resource for the given ``rel``.

    Calling code should use this method to add embedded resources instead
    of modifying ``embedded`` directly.

    This method embeds the given document in this document with the given
    ``rel``. If one or more documents have already been embedded for that
    ``rel``, the new document will be embedded in addition to those
    documents.
    """
    if other == self:
        return
    # Resources already embedded, wrapped so rel lookups are canonical
    # (CURIE/URI spellings compare equal).
    embedded = self.o.setdefault(EMBEDDED_KEY, {})
    collected_embedded = CanonicalRels(embedded,
                                       self.curies,
                                       self.base_uri)
    if rel not in collected_embedded:
        # First resource for this rel: wrap in a list only when requested.
        if wrap:
            embedded[rel] = [other.as_object()]
        else:
            embedded[rel] = other.as_object()
    else:
        # Additional resource for an existing rel: promote a single object
        # to a list, reusing the key spelling already in the document.
        original_rel = collected_embedded.original_key(rel)
        current_embedded = embedded[original_rel]
        if isinstance(current_embedded, list):
            current_embedded.append(other.as_object())
        else:
            embedded[original_rel] = [current_embedded, other.as_object()]
    # Depending on the draft, a companion link is added automatically
    # (controlled by the draft's automatic_link flag).
    if not self.draft.automatic_link:
        return
    url = other.url()
    if not url:
        return
    # Skip the link if one with the same target already exists for rel.
    if url in (link.url() for link in self.links.get(rel, [])):
        return
    self.add_link(rel, other, wrap=wrap)
|
Embeds a document inside this document.
Arguments:
- ``rel``: a string specifying the link relationship type of the
embedded resource. ``rel`` should be a well-known link relation name
from the IANA registry
(http://www.iana.org/assignments/link-relations/link-relations.xml),
a full URI, or a CURIE.
- ``other``: a ``Document`` instance that will be embedded in this
document. If ``other`` is identical to this document, this method
will silently fail.
- ``wrap``: Defaults to False, but if True, specifies that the embedded
resource object should be initally wrapped in a JSON array even if it
is the first embedded resource for the given ``rel``.
Calling code should use this method to add embedded resources instead
of modifying ``embedded`` directly.
This method embeds the given document in this document with the given
``rel``. If one or more documents have already been embedded for that
``rel``, the new document will be embedded in addition to those
documents.
|
def config(self, kw=None, **kwargs):
    """configure redirect to support additional options

    Pops the extra ``themebg``, ``toplevel`` and ``theme`` options from
    *kwargs*, applies the corresponding state changes, then delegates the
    remaining options to ``tk.Tk.config``.
    """
    themebg = kwargs.pop("themebg", self._themebg)
    toplevel = kwargs.pop("toplevel", self._toplevel)
    theme = kwargs.pop("theme", self.current_theme)
    color = self._get_bg_color()
    if themebg != self._themebg:
        # Toggle between a plain white background and the theme's color.
        if themebg is False:
            self.configure(bg="white")
        else:
            self.configure(bg=color)
        self._themebg = themebg
    if toplevel != self._toplevel:
        # Install the Toplevel hook, or restore the saved original
        # Toplevel.__init__ when the option is turned off.
        if toplevel is True:
            self._setup_toplevel_hook(color)
        else:
            tk.Toplevel.__init__ = self.__init__toplevel
        self._toplevel = toplevel
    if theme != self.current_theme:
        self.set_theme(theme)
    return tk.Tk.config(self, kw, **kwargs)
|
configure redirect to support additional options
|
def CopyRecord(record, **field_overrides):
    """Copies a record and its fields, recursing into any field that is a
    Record.

    For records that have nested mutable fields, use copy.deepcopy.

    Args:
      record: A Record instance to be copied.
      **field_overrides: Fields and their values to override in the new copy.

    Returns: A copy of the given record with any fields overridden.
    """
    fields = dict(field_overrides)
    for name in record.__slots__:
        if name in fields:
            continue
        current = getattr(record, name)
        if isinstance(current, RecordClass):
            fields[name] = CopyRecord(current)  # recurse into nested records
        else:
            fields[name] = copy.copy(current)
    return type(record)(**fields)
|
Copies a record and its fields, recurses for any field that is a Record.
For records that have nested mutable fields, use copy.deepcopy.
Args:
record: A Record instance to be copied.
**field_overrides: Fields and their values to override in the new copy.
Returns: A copy of the given record with any fields overridden.
|
def get_child_bin_ids(self, bin_id):
    """Gets the child ``Ids`` of the given bin.

    arg:    bin_id (osid.id.Id): the ``Id`` to query
    return: (osid.id.IdList) - the children of the bin
    raise:  NotFound - ``bin_id`` not found
    raise:  NullArgument - ``bin_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for
    # osid.resource.BinHierarchySession.get_child_bin_ids
    if self._catalog_session is None:
        return self._hierarchy_session.get_children(id_=bin_id)
    return self._catalog_session.get_child_catalog_ids(catalog_id=bin_id)
|
Gets the child ``Ids`` of the given bin.
arg: bin_id (osid.id.Id): the ``Id`` to query
return: (osid.id.IdList) - the children of the bin
raise: NotFound - ``bin_id`` not found
raise: NullArgument - ``bin_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
|
def table(
        self,
        dirPath=None):
    """*Render the results as ascii tables*

    **Key Arguments:**
        - ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*

    **Return:**
        - `tableSources` -- the top-level transient data
        - `tablePhot` -- all photometry associated with the transients
        - `tableSpec` -- all spectral data associated with the transients
        - `tableFiles` -- all files associated with the matched transients found on the tns

    **Usage:**

        To render the results in ascii table format:

        .. code-block:: python

            tableSources, tablePhot, tableSpec, tableFiles = tns.table()

        Passing a directory path also writes each flavour of data (sources,
        photometry, spectra, files) to its own ascii file; all rows can be
        associated with their transient source via the unique `TNSId`.

        .. code-block:: python

            tns.table("~/tns")
    """
    flavours = (
        (self.sourceResults, "sources"),
        (self.photResults, "phot"),
        (self.specResults, "spec"),
        (self.relatedFilesResults, "relatedFiles"),
    )
    if dirPath:
        prefix = self._file_prefix()
        rendered = [
            results.table(filepath=dirPath + "/" + prefix + suffix + ".ascii")
            for results, suffix in flavours
        ]
    else:
        rendered = [results.table() for results, _ in flavours]
    return tuple(rendered)
|
*Render the results as an ascii table*
**Key Arguments:**
- ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*
**Return:**
- `tableSources` -- the top-level transient data
- `tablePhot` -- all photometry associated with the transients
- `tableSpec` -- all spectral data associated with the transients
- `tableFiles` -- all files associated with the matched transients found on the tns
**Usage:**
To render the results in ascii table format:
.. code-block:: python
tableSources, tablePhot, tableSpec, tableFiles = tns.table()
print tableSources
.. code-block:: text
+----------+------------+----------------+-------------+--------------+---------------+-----------+----------+----------------+-----------+----------+----------------+----------------------+-----------------------------------------------+---------------+---------------+-------------------+------------------------+-----------------------+
| TNSId | TNSName | discoveryName | discSurvey | raSex | decSex | raDeg | decDeg | transRedshift | specType | discMag | discMagFilter | discDate | objectUrl | hostName | hostRedshift | separationArcsec | separationNorthArcsec | separationEastArcsec |
+----------+------------+----------------+-------------+--------------+---------------+-----------+----------+----------------+-----------+----------+----------------+----------------------+-----------------------------------------------+---------------+---------------+-------------------+------------------------+-----------------------+
| 2016asf | SN2016asf | ASASSN-16cs | ASAS-SN | 06:50:36.73 | +31:06:45.36 | 102.6530 | 31.1126 | 0.021 | SN Ia | 17.1 | V-Johnson | 2016-03-06 08:09:36 | http://wis-tns.weizmann.ac.il/object/2016asf | KUG 0647+311 | | 0.66 | 0.65 | -0.13 |
+----------+------------+----------------+-------------+--------------+---------------+-----------+----------+----------------+-----------+----------+----------------+----------------------+-----------------------------------------------+---------------+---------------+-------------------+------------------------+-----------------------+
You can save the results to file by passing in a directory path within which to save the files to. The four flavours of data (sources, photometry, spectra and files) are saved to separate files but all data can be assoicated with its transient source using the transient's unique `TNSId`.
.. code-block:: python
tns.table("~/tns")
.. image:: https://i.imgur.com/m09M0ho.png
:width: 800px
:alt: ascii files
|
def fmt_repr(obj):
    """Print an orphaned string representation of an object without the
    clutter of its parent object.
    """
    pairs = ("%s = %r" % pair for pair in exclude_fields(obj).items())
    return "<%s: {%s}>" % (obj.__class__.__name__, ", ".join(pairs))
|
Print an orphaned string representation of an object without the
clutter of its parent object.
|
def _shape_text(self, text, colsep=u"\t", rowsep=u"\n",
                transpose=False, skiprows=0, comments='#'):
    """Decode the shape of the given text.

    Splits *text* into rows on *rowsep* (skipping the first *skiprows*
    rows, blank rows and rows starting with *comments*), splits each row
    on *colsep*, parses every cell, and pads ragged rows so the result
    is rectangular. Returns the table transposed when *transpose* is
    True.
    """
    assert colsep != rowsep
    out = []
    text_rows = text.split(rowsep)[skiprows:]
    for row in text_rows:
        stripped = to_text_string(row).strip()
        if len(stripped) == 0 or stripped.startswith(comments):
            continue
        line = to_text_string(row).split(colsep)
        line = [try_to_parse(to_text_string(x)) for x in line]
        out.append(line)
    # Replace missing elements with np.nan's or None's
    # (zip_longest also transposes rows into columns as a side effect)
    if programs.is_module_installed('numpy'):
        from numpy import nan
        out = list(zip_longest(*out, fillvalue=nan))
    else:
        out = list(zip_longest(*out, fillvalue=None))
    # Transpose the last result back to get the expected row-major table
    out = [[r[col] for r in out] for col in range(len(out[0]))]
    if transpose:
        return [[r[col] for r in out] for col in range(len(out[0]))]
    return out
|
Decode the shape of the given text
|
def open_resource(self, filename, mode='r'):
    """Open a file and also save it as a resource.

    Opens a file, reports it to the observers as a resource, and returns
    the opened file.

    In Sacred terminology a resource is a file that the experiment needed
    to access during a run. In case of a MongoObserver that means making
    sure the file is stored in the database (but avoiding duplicates) along
    its path and md5 sum.

    This function can only be called during a run, and just calls the
    :py:meth:`sacred.run.Run.open_resource` method.

    Parameters
    ----------
    filename: str
        name of the file that should be opened
    mode : str
        mode that file will be open

    Returns
    -------
    file
        the opened file-object
    """
    run = self.current_run
    assert run is not None, "Can only be called during a run."
    return run.open_resource(filename, mode)
|
Open a file and also save it as a resource.
Opens a file, reports it to the observers as a resource, and returns
the opened file.
In Sacred terminology a resource is a file that the experiment needed
to access during a run. In case of a MongoObserver that means making
sure the file is stored in the database (but avoiding duplicates) along
its path and md5 sum.
This function can only be called during a run, and just calls the
:py:meth:`sacred.run.Run.open_resource` method.
Parameters
----------
filename: str
name of the file that should be opened
mode : str
mode that file will be open
Returns
-------
file
the opened file-object
|
def argmin(self, values):
    """return the index into values corresponding to the minimum value of the group

    Parameters
    ----------
    values : array_like, [keys]
        values to pick the argmin of per group

    Returns
    -------
    unique: ndarray, [groups]
        unique keys
    argmin : ndarray, [groups]
        index into value array, representing the argmin per group
    """
    keys, minima = self.min(values)
    # Broadcast each group's minimum back onto all of its members.
    minima = minima[self.inverse]
    # select the first occurence of the minimum in each group
    # NOTE(review): relies on as_index grouping by (group, is-minimum);
    # the last `groups` segments are presumably the is-minimum==True runs,
    # whose first elements are the per-group argmins -- confirm against
    # the as_index implementation.
    index = as_index((self.inverse, values == minima))
    return keys, index.sorter[index.start[-self.groups:]]
|
return the index into values corresponding to the minimum value of the group
Parameters
----------
values : array_like, [keys]
values to pick the argmin of per group
Returns
-------
unique: ndarray, [groups]
unique keys
argmin : ndarray, [groups]
index into value array, representing the argmin per group
|
def _validate(self):
    """Assure this is a valid VLAN header instance.

    Raises UnpackException if the TPID is neither the VLAN nor the
    VLAN QinQ EtherType.
    """
    if self.tpid.value not in (EtherType.VLAN, EtherType.VLAN_QINQ):
        raise UnpackException
    return
|
Assure this is a valid VLAN header instance.
|
def answering_questions(self, attempt, validation_token, quiz_submission_id, access_code=None, quiz_questions=None):
    """
    Answering questions.

    Provide or update an answer to one or more QuizQuestions.

    :param attempt: latest attempt number of the quiz submission being
        taken (questions of earlier attempts cannot be modified).
    :param validation_token: unique token received when the Quiz
        Submission was created.
    :param quiz_submission_id: ID of the quiz submission.
    :param access_code: optional access code for the Quiz.
    :param quiz_questions: optional set of question IDs and answer values.
    """
    path = {"quiz_submission_id": quiz_submission_id}
    params = {}
    # Required form fields.
    data = {
        "attempt": attempt,
        "validation_token": validation_token,
    }
    # Optional form fields.
    if access_code is not None:
        data["access_code"] = access_code
    if quiz_questions is not None:
        data["quiz_questions"] = quiz_questions
    self.logger.debug("POST /api/v1/quiz_submissions/{quiz_submission_id}/questions with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("POST", "/api/v1/quiz_submissions/{quiz_submission_id}/questions".format(**path), data=data, params=params, all_pages=True)
|
Answering questions.
Provide or update an answer to one or more QuizQuestions.
|
def session_from_client_config(client_config, scopes, **kwargs):
    """Creates a :class:`requests_oauthlib.OAuth2Session` from client
    configuration loaded from a Google-format client secrets file.

    Args:
        client_config (Mapping[str, Any]): The client
            configuration in the Google `client secrets`_ format.
        scopes (Sequence[str]): The list of scopes to request during the
            flow.
        kwargs: Any additional parameters passed to
            :class:`requests_oauthlib.OAuth2Session`

    Raises:
        ValueError: If the client configuration is not in the correct
            format.

    Returns:
        Tuple[requests_oauthlib.OAuth2Session, Mapping[str, Any]]: The new
            oauthlib session and the validated client configuration.

    .. _client secrets:
        https://developers.google.com/api-client-library/python/guide
        /aaa_client_secrets
    """
    # A client-secrets mapping carries its config under either key.
    for app_kind in ('web', 'installed'):
        if app_kind in client_config:
            config = client_config[app_kind]
            break
    else:
        raise ValueError(
            'Client secrets must be for a web or installed app.')
    if not _REQUIRED_CONFIG_KEYS.issubset(config.keys()):
        raise ValueError('Client secrets is not in the correct format.')
    session = requests_oauthlib.OAuth2Session(
        client_id=config['client_id'],
        scope=scopes,
        **kwargs)
    return session, client_config
|
Creates a :class:`requests_oauthlib.OAuth2Session` from client
configuration loaded from a Google-format client secrets file.
Args:
client_config (Mapping[str, Any]): The client
configuration in the Google `client secrets`_ format.
scopes (Sequence[str]): The list of scopes to request during the
flow.
kwargs: Any additional parameters passed to
:class:`requests_oauthlib.OAuth2Session`
Raises:
ValueError: If the client configuration is not in the correct
format.
Returns:
Tuple[requests_oauthlib.OAuth2Session, Mapping[str, Any]]: The new
oauthlib session and the validated client configuration.
.. _client secrets:
https://developers.google.com/api-client-library/python/guide
/aaa_client_secrets
|
def is_checked(self) -> bool:
    """One task ran (checked)."""
    key = self.redis_key_checked
    if not key:
        return False
    return bool(self._red.get(key))
|
One task ran (checked).
|
def get_sla_template_path(service_type=ServiceTypes.ASSET_ACCESS):
    """
    Get the template for a ServiceType.

    :param service_type: ServiceTypes
    :return: Path of the template, str
    :raises ValueError: for an unsupported service agreement type
    """
    for candidate, name in (
        (ServiceTypes.ASSET_ACCESS, 'access_sla_template.json'),
        (ServiceTypes.CLOUD_COMPUTE, 'compute_sla_template.json'),
        (ServiceTypes.FITCHAIN_COMPUTE, 'fitchain_sla_template.json'),
    ):
        if service_type == candidate:
            break
    else:
        raise ValueError(f'Invalid/unsupported service agreement type {service_type}')
    # Resolve the template next to this module on an absolute path.
    parts = os.path.realpath(__file__).split(os.path.sep)[1:-1]
    return os.path.join(os.path.sep, *parts, name)
|
Get the template for a ServiceType.
:param service_type: ServiceTypes
:return: Path of the template, str
|
def get_aside(self, aside_usage_id):
    """
    Create an XBlockAside in this runtime.

    The `aside_usage_id` is used to find the Aside class and data.
    """
    reader = self.id_reader
    aside_type = reader.get_aside_type_from_usage(aside_usage_id)
    usage_id = reader.get_usage_id_from_aside(aside_usage_id)
    definition_id = reader.get_definition_id(usage_id)
    aside_def_id, aside_usage_id = self.id_generator.create_aside(
        definition_id, usage_id, aside_type)
    scope_ids = ScopeIds(self.user_id, aside_type, aside_def_id, aside_usage_id)
    return self.create_aside(aside_type, scope_ids)
|
Create an XBlockAside in this runtime.
The `aside_usage_id` is used to find the Aside class and data.
|
def unstash_index(self, sync=False, branch=None):
    """Returns an unstash index if one is available.

    Scans ``git stash list`` for an auto-stash created by Legit/GitHub on
    the given branch (the current branch when *branch* is None) for the
    current operation ('syncing' when *sync* is true, else 'switching').

    :param sync: look for a 'syncing' stash instead of a 'switching' one.
    :param branch: branch name to match; defaults to the current branch.
    :return: the stash index as a string, or None when nothing matches.
    """
    import re
    stash_list = self.git_exec(['stash', 'list'], no_verbose=True)
    if branch is None:
        branch = self.get_current_branch_name()
    verb = 'syncing' if sync else 'switching'
    branch_tag = 'On {0}:'.format(branch)
    for stash in stash_list.splitlines():
        if branch_tag in stash and verb in stash and (
                'Legit' in stash or 'GitHub' in stash):
            # BUG FIX: the old code returned stash[7] -- the single
            # character at a fixed offset -- which is wrong for stash
            # indices >= 10. Parse the index out of the "stash@{N}" prefix
            # instead.
            match = re.match(r'stash@\{(\d+)\}', stash)
            if match:
                return match.group(1)
|
Returns an unstash index if one is available.
|
def flush_headers(self, sync: bool = False) -> None:
    """
    Write the status line and all response headers (asynchronously by
    default, synchronously when *sync* is true).

    Does nothing if the headers were already sent; otherwise marks them
    as sent, fills in the default headers and serialises the status line
    followed by one line per header value.
    """
    if self._headers_sent:
        return
    self._headers_sent = True
    self.handel_default()
    self.write(
        b"HTTP/%s %d %s\r\n" % (
            encode_str(self._version),
            self._status,
            self._message,
        ),
        sync,
    )
    for name, value in self._headers.items():
        name_byte = encode_str(name)
        # A header value may be a single string or a list of strings
        # (e.g. repeated Set-Cookie headers); emit one line per value.
        # This also removes the dead `val = value` assignment and the
        # duplicated write logic of the original.
        values = value if isinstance(value, list) else [value]
        for val in values:
            self.write(
                b"%s: %s\r\n" % (name_byte, encode_str(val)),
                sync,
            )
    # Blank line terminates the header section.
    self.write(b"\r\n", sync)
|
通过异步写入 header
|
def runSearchRnaQuantificationSets(self, request):
    """
    Returns a SearchRnaQuantificationSetsResponse for the specified
    SearchRnaQuantificationSetsRequest object.
    """
    request_cls = protocol.SearchRnaQuantificationSetsRequest
    response_cls = protocol.SearchRnaQuantificationSetsResponse
    generator = self.rnaQuantificationSetsGenerator
    return self.runSearchRequest(request, request_cls, response_cls, generator)
|
Returns a SearchRnaQuantificationSetsResponse for the specified
SearchRnaQuantificationSetsRequest object.
|
def scaffold_hits(searches, fasta, max_hits):
    """
    get hits from each search against each RP

    Returns two dicts:
        scaffolds[scaffold] = # ORFs
        s2rp[scaffold] = {rp:[hits]}
    """
    # initialize
    ## scaffolds[scaffold] = # ORFs
    scaffolds = {}
    for seq in parse_fasta(fasta):
        # header ">scaffold_orf ..." -> scaffold name (ORF suffix removed)
        scaffold = seq[0].split()[0].split('>', 1)[1].rsplit('_', 1)[0]
        if scaffold not in scaffolds:
            scaffolds[scaffold] = 0
        scaffolds[scaffold] += 1
    # one empty hit list per (scaffold, rp) pair
    s2rp = {s: {r[0]: []
                for r in searches}
            for s in scaffolds}
    # get hits from blast
    for search in searches:
        rp, blast = search
        # NOTE(review): evalue_thresh and bit_thresh are read from module
        # scope -- confirm they are defined wherever this is used.
        hits = [i for i in numblast(open(blast), max_hits, evalue_thresh, bit_thresh)]
        for hit in hits:
            s = hit[0].split()[0].rsplit('_', 1)[0]
            # coerce the e-value and bit-score columns to floats
            hit[10], hit[11] = float(hit[10]), float(hit[11])
            s2rp[s][rp].append(hit)
    return scaffolds, s2rp
|
get hits from each search against each RP
scaffolds[scaffold] = # ORFs
s2rp[scaffold] = {rp:[hits]}
|
def querysets_from_title_prefix(title_prefix=None, model=DEFAULT_MODEL, app=DEFAULT_APP):
    """Return a list of Querysets from a list of model numbers.

    Args:
        title_prefix: None, a comma-separated string of prefixes, a
            list/tuple of prefixes, or a ready-made filter dict.
        model: default model name used to resolve the model class.
        app: app label used with ``model`` in ``get_model``.

    Returns:
        list of QuerySets, one per accumulated (filter dict, model) pair.
    """
    if title_prefix is None:
        title_prefix = [None]
    filter_dicts = []
    model_list = []
    if isinstance(title_prefix, basestring):
        title_prefix = title_prefix.split(',')
    elif not isinstance(title_prefix, dict):
        title_prefix = title_prefix  # NOTE(review): no-op; likely meant util.listify
    if isinstance(title_prefix, (list, tuple)):
        for i, title_prefix in enumerate(title_prefix):
            if isinstance(title_prefix, basestring):
                if title_prefix.lower().endswith('sales'):
                    title_prefix = title_prefix[:-5].strip('_')
                # NOTE(review): appending a list to a str raises TypeError;
                # this branch looks broken -- confirm intended behavior.
                title_prefix += [title_prefix]
                model_list += ['WikiItem']
            else:
                model_list += [DEFAULT_MODEL]
            filter_dicts += [{'model__startswith': title_prefix}]
    elif isinstance(title_prefix, dict):
        filter_dicts = [title_prefix]
    elif isinstance(title_prefix, (list, tuple)):
        filter_dicts = util.listify(title_prefix)
    model = get_model(model, app)
    querysets = []
    # NOTE(review): the loop variable shadows `model` resolved above and
    # binds the raw names from model_list -- verify callers' expectations.
    for filter_dict, model in zip(filter_dicts, model_list):
        filter_dict = filter_dict or {}
        querysets += [model.objects.filter(**filter_dict)]
    # BUG FIX: the docstring promises a list of QuerySets but the original
    # function never returned it (implicitly returned None).
    return querysets
|
Return a list of Querysets from a list of model numbers
|
def iodp_kly4s_lore(kly4s_file, meas_out='measurements.txt',
                    spec_infile='specimens.txt', spec_out='specimens.txt', instrument='IODP-KLY4S',
                    actual_volume="", dir_path='.', input_dir_path=''):
    """
    Converts ascii files generated by SUFAR ver.4.0 and downloaded from the LIMS online
    repository to MagIC (datamodel 3) files

    Parameters
    ----------
    kly4s_file : str
        input LORE downloaded csv file, required
    meas_out : str
        measurement output filename, default "measurements.txt"
    spec_infile : str
        specimen infile, default "specimens.txt"
        [file created by iodp_samples_csv from LORE downloaded sample file]
    spec_out : str
        specimen outfile, default "specimens.txt"
    instrument : str
        instrument name, default "IODP-KLY4S"
        (NOTE(review): currently unused -- the instrument code is
        hard-coded below)
    actual_volume : float
        the nominal volume is assumed to be 8cc or even 10cc, depending on the shipboard
        software used, actual_vol is the actual specimen volume in cc
    dir_path : str
        output directory, default "."
    input_dir_path : str
        input file directory IF different from dir_path, default ""

    Returns
    --------
    type - Tuple : (True or False indicating if conversion was successful, file name written)
    """
    # initialize defaults
    version_num = pmag.get_version()
    # format variables
    input_dir_path, output_dir_path = pmag.fix_directories(input_dir_path, dir_path)
    # set up required columns
    meas_reqd_columns = ['specimen', 'measurement', 'experiment', 'sequence', 'quality', 'method_codes',
                         'instrument_codes', 'citations',
                         'treat_temp', 'treat_ac_field', 'treat_dc_field',
                         'treat_dc_field_phi', 'treat_dc_field_theta', 'meas_temp',
                         'dir_dec', 'dir_inc', 'magn_moment', 'magn_volume',
                         'description', 'timestamp', 'software_packages',
                         'external_database_ids', 'experiments', 'treat_step_num']
    spec_reqd_columns = ['specimen', 'sample', 'result_quality', 'method_codes', 'volume',
                         'specimen_name_alternatives', 'citations',
                         'aniso_type', 'aniso_s_n_measurements',
                         'azimuth', 'dip', 'aniso_s_sigma',
                         'aniso_s_unit', 'aniso_tilt_correction', ]
    # sort out file paths
    kly4s_file = pmag.resolve_file_name(kly4s_file, input_dir_path)
    spec_out = pmag.resolve_file_name(spec_out, dir_path)
    spec_file = pmag.resolve_file_name(spec_infile, dir_path)
    meas_out = pmag.resolve_file_name(meas_out, dir_path)
    # read in necessary data:
    specs = pd.read_csv(spec_file, sep='\t', header=1)  # read in existing specimen table
    if len(specs) == 0:
        print('you must download and process the samples table from LORE prior to using this')
        print('see convert_2_magic.iodp_samples_csv for help')
        return False
    LORE_specimens = list(specs.specimen.unique())
    in_df = pd.read_csv(kly4s_file)
    if len(in_df) == 0:
        print('you must download a csv file from the LIMS database and place it in your input_dir_path')
        return False
    measurements_df = pd.DataFrame(columns=meas_reqd_columns)
    specimens_df = pd.DataFrame(columns=spec_reqd_columns)
    hole, kly4s_specimens = iodp_sample_names(in_df)
    # warn about specimens in the KLY4S file that are missing from LORE
    for spec in list(kly4s_specimens.unique()):
        if spec not in LORE_specimens:
            print(spec, ' not found in specimen table')
    # set up defaults
    # specimens table defaults
    specimens_df['specimen'] = kly4s_specimens
    specimens_df['sample'] = kly4s_specimens
    specimens_df['result_quality'] = 'g'
    specimens_df['citations'] = 'This study'
    specimens_df['aniso_type'] = 'AMS'
    specimens_df['azimuth'] = 0
    specimens_df['dip'] = 0
    specimens_df['aniso_s_n_measurements'] = 192
    specimens_df['aniso_tilt_correction'] = 0
    specimens_df['aniso_s_unit'] = 'SI'
    specimens_df['aniso_s_sigma'] = ''
    specimens_df['method_codes'] = "LP-X:AE-H:LP-AN-MS"
    specimens_df['experiments'] = specimens_df['specimen'].astype('str')+'_' + "LP-AN-MS"
    # measurements table
    measurements_df['specimen'] = kly4s_specimens
    measurements_df['quality'] = 'g'
    measurements_df['citations'] = 'This study'
    measurements_df['meas_temp'] = 273
    measurements_df['software_packages'] = version_num
    measurements_df["treat_temp"] = '%8.3e' % (273)  # room temp in kelvin
    measurements_df["meas_temp"] = '%8.3e' % (273)  # room temp in kelvin
    measurements_df["treat_ac_field"] = '0'
    measurements_df["treat_dc_field"] = '0'
    measurements_df["treat_dc_field_phi"] = '0'
    measurements_df["treat_dc_field_theta"] = '0'
    measurements_df["treat_step_num"] = '1'
    measurements_df["standard"] = 'u'  # assume all data are "good"
    # NOTE(review): the `instrument` parameter is ignored here -- the code
    # is hard-coded below; confirm intended behavior.
    measurements_df['instrument_codes'] = "IODP-KLY4S"  # assume all measurements on shipboard KLY4S
    measurements_df['description'] = 'Bulk sucsecptibility measurement'
    # NOTE(review): "sucsecptibility" typo above is a runtime string that is
    # written to the output file -- fixing it would change written data.
    measurements_df['method_codes'] = 'LP-X'
    measurements_df['experiment'] = measurements_df['specimen'].astype('str')+'_' +\
        measurements_df['method_codes'].astype('str')
    meas_num = range(len(kly4s_specimens))
    measurements_df['sequence'] = meas_num
    measurements_df['measurement'] = measurements_df['experiment'].astype('str')+'-' +\
        measurements_df['sequence'].astype('str')
    # parse the measurement data into columns
    nominal_volume = in_df['Sample volume (CC)']*1e-6  # convert cc to m^3
    if actual_volume:
        actual_volume = (1e-6*actual_volume)
        # scale measured values from the nominal to the actual volume
        factor = nominal_volume/actual_volume
    else:
        actual_volume = nominal_volume
        factor = 1
    measurements_df['susc_chi_volume'] = in_df['Bulk susceptibility(SI)']*factor
    measurements_df['external_database_ids'] = 'LORE['+in_df['Test No.'].astype('str')+']'
    # parse specimen data into columns
    specimens_df['specimen_name_alternatives'] = in_df['Text ID']
    specimens_df['volume'] = actual_volume
    # six-element normalized susceptibility tensor
    s1 = in_df['Normalized tensor K11']
    s2 = in_df['Normalized tensor K22']
    s3 = in_df['Normalized tensor K33']
    s4 = in_df['Normalized tensor K12']
    s5 = in_df['Normalized tensor K23']
    s6 = in_df['Normalized tensor K13']
    if 'Standard deviation(SI)' in in_df.columns:
        specimens_df['aniso_s_sigma'] = in_df['Standard deviation(SI)']
    #if (s1+s2+s3)>.35: # AGICO format
    #    s1=s1/3
    #    s2=s2/3
    #    s3=s3/3
    #    s4=s4/3
    #    s5=s5/3
    #    s6=s6/3
    specimens_df['aniso_s'] = s1.astype('str')+':' + s2.astype('str')+':'+s3.astype('str')+':' +\
        s4.astype('str')+':' + s5.astype('str')+':' + s6.astype('str')
    # eigenvalues (divided by 3 to normalize) and eigenvector directions
    tau1 = in_df['Kmax susceptibility (SI)']/3
    v1_dec = in_df['Kmax dec (deg)']
    v1_inc = in_df['Kmax inc (deg)']
    specimens_df['aniso_v1'] = tau1.astype('str')+":"+v1_dec.astype('str')+":"+v1_inc.astype('str')
    tau2 = in_df['Kint susceptibility (SI)']/3
    v2_dec = in_df['Kint dec (deg)']
    v2_inc = in_df['Kint inc (deg)']
    specimens_df['aniso_v2'] = tau2.astype('str')+":"+v2_dec.astype('str')+":"+v2_inc.astype('str')
    tau3 = in_df['Kmin susceptibility (SI)']/3
    v3_dec = in_df['Kmin dec (deg)']
    v3_inc = in_df['Kmin inc (deg)']
    specimens_df['aniso_v3'] = tau3.astype('str')+":"+v3_dec.astype('str')+":"+v3_inc.astype('str')
    # output data files
    measurements_df.fillna("", inplace=True)
    meas_dicts = measurements_df.to_dict('records')
    pmag.magic_write(meas_out, meas_dicts, 'measurements')
    specimens_df.fillna("", inplace=True)
    spec_dicts = specimens_df.to_dict('records')
    pmag.magic_write(spec_out, spec_dicts, 'specimens')
    return True
|
Converts ascii files generated by SUFAR ver.4.0 and downloaded from the LIMS online
repository to MagIC (datamodel 3) files
Parameters
----------
kly4s_file : str
input LORE downloaded csv file, required
meas_output : str
measurement output filename, default "measurements.txt"
spec_infile : str
specimen infile, default specimens.txt
[file created by iodp_samples_csv from LORE downloaded sample file]
spec_outfile : str
specimen outfile, default "kly4s_specimens.txt"
instrument : str
instrument name, default ""
actual_volume : float
the nominal volume is assumed to be 8cc or even 10cc, depending on the shipboard
software used, actual_vol is the actual specimen volume in cc
dir_path : str
output directory, default "."
input_dir_path : str
input file directory IF different from dir_path, default ""
Returns
--------
type - Tuple : (True or False indicating if conversion was successful, file name written)
|
def list(self):
    """Return all registered device management extension packages."""
    response = self._apiClient.get("api/v0002/mgmt/custom/bundle")
    # Anything other than a 200 is surfaced to the caller as an ApiException.
    if response.status_code != 200:
        raise ApiException(response)
    return response.json()
|
List all device management extension packages
|
def sort_response(response: Dict[str, Any]) -> OrderedDict:
    """
    Sort the keys in a JSON-RPC response object.

    This has no effect other than making it nicer to read. Useful in Python 3.5 only,
    dictionaries are already sorted in newer Python versions.

    Example::

        >>> json.dumps(sort_response({'id': 2, 'result': 5, 'jsonrpc': '2.0'}))
        {"jsonrpc": "2.0", "result": 5, "id": 2}

    Args:
        response: Deserialized JSON-RPC response.

    Returns:
        The same response, sorted in an OrderedDict.
    """
    root_order = ["jsonrpc", "result", "error", "id"]
    error_order = ["code", "message", "data"]
    req = OrderedDict(sorted(response.items(), key=lambda k: root_order.index(k[0])))
    if "error" in response:
        # The "error" member is itself an object; give it a canonical order too.
        req["error"] = OrderedDict(
            sorted(response["error"].items(), key=lambda k: error_order.index(k[0]))
        )
    return req
|
Sort the keys in a JSON-RPC response object.
This has no effect other than making it nicer to read. Useful in Python 3.5 only,
dictionaries are already sorted in newer Python versions.
Example::
>>> json.dumps(sort_response({'id': 2, 'result': 5, 'jsonrpc': '2.0'}))
{"jsonrpc": "2.0", "result": 5, "id": 2}
Args:
response: Deserialized JSON-RPC response.
Returns:
The same response, sorted in an OrderedDict.
|
def register_handler(self, name, handler, esc_strings):
    """Register *handler* under *name* and map every escape string to it."""
    self._handlers[name] = handler
    self._esc_handlers.update({esc: handler for esc in esc_strings})
|
Register a handler instance by name with esc_strings.
|
def finalize(self, process_row = None):
	"""
	Restore the LigolwSegmentList objects to the XML tables in
	preparation for output. All segments from all segment
	lists are inserted into the tables in time order, but this
	is NOT behaviour external applications should rely on.
	This is done simply in the belief that it might assist in
	constructing well balanced indexed databases from the
	resulting files. If that proves not to be the case, or for
	some reason this behaviour proves inconvenient to preserve,
	then it might be discontinued without notice. You've been
	warned.

	process_row, if given, supplies the process_id with which
	the new rows are stamped; otherwise the .process attribute
	set at construction time is used, and ValueError is raised
	if neither is available. Note: this pops every
	LigolwSegmentList off self, so it can only be done once.
	"""
	if process_row is not None:
		process_id = process_row.process_id
	elif self.process is not None:
		process_id = self.process.process_id
	else:
		raise ValueError("must supply a process row to .__init__()")
	#
	# ensure ID generators are synchronized with table contents
	#
	self.segment_def_table.sync_next_id()
	self.segment_table.sync_next_id()
	self.segment_sum_table.sync_next_id()
	#
	# put all segment lists in time order
	#
	self.sort()
	#
	# generator function to convert segments into row objects,
	# each paired with the table to which the row is to be
	# appended
	#
	def row_generator(segs, target_table, process_id, segment_def_id):
		id_column = target_table.next_id.column_name
		for seg in segs:
			row = target_table.RowType()
			row.segment = seg
			row.process_id = process_id
			row.segment_def_id = segment_def_id
			setattr(row, id_column, target_table.get_next_id())
			# not every target table carries a comment column
			if 'comment' in target_table.validcolumns:
				row.comment = None
			yield row, target_table
	#
	# populate the segment_definer table from the list of
	# LigolwSegmentList objects and construct a matching list
	# of table row generators. empty ourselves to prevent this
	# process from being repeated
	#
	row_generators = []
	while self:
		ligolw_segment_list = self.pop()
		segment_def_row = self.segment_def_table.RowType()
		segment_def_row.process_id = process_id
		segment_def_row.segment_def_id = self.segment_def_table.get_next_id()
		segment_def_row.instruments = ligolw_segment_list.instruments
		segment_def_row.name = ligolw_segment_list.name
		segment_def_row.version = ligolw_segment_list.version
		segment_def_row.comment = ligolw_segment_list.comment
		self.segment_def_table.append(segment_def_row)
		# .valid goes to segment_summary, .active to segment
		row_generators.append(row_generator(ligolw_segment_list.valid, self.segment_sum_table, process_id, segment_def_row.segment_def_id))
		row_generators.append(row_generator(ligolw_segment_list.active, self.segment_table, process_id, segment_def_row.segment_def_id))
	#
	# populate segment and segment_summary tables by pulling
	# rows from the generators in time order
	#
	for row, target_table in iterutils.inorder(*row_generators):
		target_table.append(row)
|
Restore the LigolwSegmentList objects to the XML tables in
preparation for output. All segments from all segment
lists are inserted into the tables in time order, but this
is NOT behaviour external applications should rely on.
This is done simply in the belief that it might assist in
constructing well balanced indexed databases from the
resulting files. If that proves not to be the case, or for
some reason this behaviour proves inconvenient to preserve,
then it might be discontinued without notice. You've been
warned.
|
def delete(queue, items):
    '''
    Delete an item or items from a queue

    queue
        name of the sqlite table to delete from. NOTE(review): the table
        name is interpolated directly into the SQL; assumed to come from
        trusted configuration, not user input -- confirm.
    items
        a str (single item), a list of strs, or a dict (matched against
        its JSON serialization). Always returns True.
    '''
    con = _conn(queue)
    with con:
        cur = con.cursor()
        if isinstance(items, six.string_types):
            # Single item: value is escaped via _quote_escape and then
            # interpolated into the statement (not parameterized).
            items = _quote_escape(items)
            cmd = """DELETE FROM {0} WHERE name = '{1}'""".format(queue, items)
            log.debug('SQL Query: %s', cmd)
            cur.execute(cmd)
            return True
        if isinstance(items, list):
            # List of items: parameterized executemany, one
            # single-element tuple per item.
            items = [_quote_escape(el) for el in items]
            cmd = 'DELETE FROM {0} WHERE name = ?'.format(queue)
            log.debug('SQL Query: %s', cmd)
            newitems = []
            for item in items:
                newitems.append((item,))
            # we need a list of one item tuples here
            cur.executemany(cmd, newitems)
        if isinstance(items, dict):
            # Dict item: matched against its JSON form with double quotes
            # swapped for single quotes -- presumably mirroring how dict
            # payloads were stored on insert; verify against the insert path.
            items = salt.utils.json.dumps(items).replace('"', "'")
            items = _quote_escape(items)
            cmd = ("""DELETE FROM {0} WHERE name = '{1}'""").format(queue, items)  # future lint: disable=blacklisted-function
            log.debug('SQL Query: %s', cmd)
            cur.execute(cmd)
            return True
        return True
|
Delete an item or items from a queue
|
def rasterize(path,
              pitch,
              origin,
              resolution=None,
              fill=True,
              width=None):
    """
    Rasterize a Path2D object into a boolean image ("mode 1").

    Parameters
    ------------
    path: Path2D object
    pitch: float, length in model space of a pixel edge
    origin: (2,) float, origin position in model space
    resolution: (2,) int, resolution in pixel space
    fill: bool, if True will return closed regions as filled
    width: int, if not None will draw outline this wide (pixels)

    Returns
    ------------
    raster: PIL.Image object, mode 1
    """
    # check inputs
    pitch = float(pitch)
    origin = np.asanyarray(origin, dtype=np.float64)

    # if resolution is None make it larger than path
    if resolution is None:
        span = np.vstack((path.bounds,
                          origin)).ptp(
            axis=0)
        resolution = np.ceil(span / pitch) + 2
    resolution = np.asanyarray(resolution,
                               dtype=np.int64)
    resolution = tuple(resolution.tolist())

    # convert all discrete paths to pixel space
    # NOTE: ``np.int`` was a deprecated alias for the builtin ``int``
    # and was removed in NumPy 1.24; use an explicit dtype instead.
    discrete = [((i - origin) / pitch).astype(np.int64)
                for i in path.discrete]

    # draw the exteriors
    exteriors = Image.new(mode='1', size=resolution)
    edraw = ImageDraw.Draw(exteriors)

    # if a width is specified draw the outline
    if width is not None:
        width = int(width)
        for coords in discrete:
            edraw.line(coords.flatten().tolist(),
                       fill=1,
                       width=width)
        # if we are not filling the polygon exit
        if not fill:
            del edraw
            return exteriors

    # the path indexes that are exteriors
    # needed to know what to fill/empty but expensive
    roots = path.root

    # draw the interiors
    interiors = Image.new(mode='1', size=resolution)
    idraw = ImageDraw.Draw(interiors)
    for i, points in enumerate(discrete):
        # draw the polygon on either the exterior or
        # interior image buffer
        if i in roots:
            edraw.polygon(points.flatten().tolist(),
                          fill=1)
        else:
            idraw.polygon(points.flatten().tolist(),
                          fill=1)
    # clean up the draw objects
    # this is in their examples, I have no idea if
    # it is actually necessary
    del edraw
    del idraw
    # the final result is the exteriors minus the interiors
    raster = ImageChops.subtract(exteriors, interiors)
    return raster
|
Rasterize a Path2D object into a boolean image ("mode 1").
Parameters
------------
path: Path2D object
pitch: float, length in model space of a pixel edge
origin: (2,) float, origin position in model space
resolution: (2,) int, resolution in pixel space
fill: bool, if True will return closed regions as filled
width: int, if not None will draw outline this wide (pixels)
Returns
------------
raster: PIL.Image object, mode 1
|
def remove_import_statements(code):
    """Strip import statements from *code*.

    Args:
        code: The code to be stripped.

    Returns:
        The code without import statements, with any completely empty
        lines at the start and end removed.
    """
    kept = [
        line for line in code.splitlines()
        if not line.lstrip().startswith(('import ', 'from '))
    ]
    # Trim empty lines left dangling at either end after the removal.
    while kept and kept[0] == '':
        kept.pop(0)
    while kept and kept[-1] == '':
        kept.pop()
    return '\n'.join(kept)
|
Removes lines with import statements from the code.
Args:
code: The code to be stripped.
Returns:
The code without import statements.
|
def config_stop(args):
    '''Abort a task (method configuration) by submission ID in given space'''
    resp = fapi.abort_submission(args.project, args.workspace,
                                 args.submission_id)
    # FireCloud answers an abort with 204 No Content on success.
    fapi._check_response_code(resp, 204)
    return "Aborted {0} in {1}/{2}".format(args.submission_id,
                                           args.project,
                                           args.workspace)
|
Abort a task (method configuration) by submission ID in given space
|
def getprop(self, prop_name):
    """Get a property of the device.

    This is a convenience wrapper for "adb shell getprop xxx".

    Args:
        prop_name: A string that is the name of the property to get.

    Returns:
        A string that is the value of the property, or None if the property
        doesn't exist.
    """
    raw = self.shell(['getprop', prop_name],
                     timeout=DEFAULT_GETPROP_TIMEOUT_SEC)
    return raw.decode('utf-8').strip()
|
Get a property of the device.
This is a convenience wrapper for "adb shell getprop xxx".
Args:
prop_name: A string that is the name of the property to get.
Returns:
A string that is the value of the property, or None if the property
doesn't exist.
|
def parse_if(self):
    """Parse an if construct.

    Consumes the token stream from the ``if`` tag through the matching
    ``endif`` and returns the root :class:`nodes.If`.  Each ``elif``
    branch becomes its own If node appended to the root's ``elif_``
    list; an ``else`` body is stored on the root's ``else_``.
    """
    node = result = nodes.If(lineno=self.stream.expect('name:if').lineno)
    while 1:
        # Fill in the test/body of the current branch node.
        node.test = self.parse_tuple(with_condexpr=False)
        node.body = self.parse_statements(('name:elif', 'name:else',
                                           'name:endif'))
        node.elif_ = []
        node.else_ = []
        token = next(self.stream)
        if token.test('name:elif'):
            # Start a fresh node for this branch; its test and body are
            # parsed on the next loop iteration.
            node = nodes.If(lineno=self.stream.current.lineno)
            result.elif_.append(node)
            continue
        elif token.test('name:else'):
            result.else_ = self.parse_statements(('name:endif',),
                                                 drop_needle=True)
        # Reached for both 'else' (after its body) and 'endif'.
        break
    return result
|
Parse an if construct.
|
def camel_to_underscore(string):
    """Convert camelcase to lowercase and underscore.

    Recipe from http://stackoverflow.com/a/1176023

    Args:
        string (str): The string to convert.

    Returns:
        str: The converted string.
    """
    # Two passes: first split runs like "HTTPServer", then the rest.
    partial = FIRST_CAP_RE.sub(r'\1_\2', string)
    underscored = ALL_CAP_RE.sub(r'\1_\2', partial)
    return underscored.lower()
|
Convert camelcase to lowercase and underscore.
Recipe from http://stackoverflow.com/a/1176023
Args:
string (str): The string to convert.
Returns:
str: The converted string.
|
def json_as_html(self):
    """ Print out self.json in a nice way. """
    # Imported here rather than at module level to avoid a circular import.
    from cspreports import utils
    pretty = utils.format_report(self.json)
    return mark_safe("<pre>\n%s</pre>" % escape(pretty))
|
Print out self.json in a nice way.
|
def as_dict(self, verbosity=1, fmt=None, **kwargs):
    """
    Dict representation of Structure.

    Args:
        verbosity (int): Verbosity level. Default of 1 includes both
            direct and cartesian coordinates for all sites, lattice
            parameters, etc. Useful for reading and for insertion into a
            database. Set to 0 for an extremely lightweight version
            that only includes sufficient information to reconstruct the
            object.
        fmt (str): Specifies a format for the dict. Defaults to None,
            which is the default format used in pymatgen. Other options
            include "abivars".
        **kwargs: Allow passing of other kwargs needed for certain
            formats, e.g., "abivars".

    Returns:
        JSON serializable dict representation.
    """
    if fmt == "abivars":
        # Alternative representation: a dict of ABINIT input variables.
        from pymatgen.io.abinit.abiobjects import structure_to_abivars
        return structure_to_abivars(self, **kwargs)

    latt_dict = self._lattice.as_dict(verbosity=verbosity)
    # The MSONable markers are re-added at the top level, so strip them
    # from the nested lattice dict.
    for key in ("@module", "@class"):
        del latt_dict[key]

    sites = []
    for site in self:
        site_dict = site.as_dict(verbosity=verbosity)
        # Sites share this structure's lattice; drop the redundant copy
        # along with the nested MSONable markers.
        for key in ("lattice", "@module", "@class"):
            del site_dict[key]
        sites.append(site_dict)

    return {"@module": self.__class__.__module__,
            "@class": self.__class__.__name__,
            "charge": self._charge,
            "lattice": latt_dict,
            "sites": sites}
|
Dict representation of Structure.
Args:
verbosity (int): Verbosity level. Default of 1 includes both
direct and cartesian coordinates for all sites, lattice
parameters, etc. Useful for reading and for insertion into a
database. Set to 0 for an extremely lightweight version
that only includes sufficient information to reconstruct the
object.
fmt (str): Specifies a format for the dict. Defaults to None,
which is the default format used in pymatgen. Other options
include "abivars".
**kwargs: Allow passing of other kwargs needed for certain
formats, e.g., "abivars".
Returns:
JSON serializable dict representation.
|
def set_plugins(self, input_plugins):
    """Set the plugin list according to the Glances server.

    Each name in *input_plugins* is imported as a ``glances_<name>``
    module and instantiated into ``self._plugins[name]``.  Names whose
    module cannot be imported on the client side are logged and skipped.
    """
    header = "glances_"
    for item in input_plugins:
        # Import the plugin
        try:
            plugin = __import__(header + item)
        except ImportError:
            # Server plugin can not be imported from the client side
            logger.error("Can not import {} plugin. Please upgrade your Glances client/server version.".format(item))
        else:
            # Add the plugin to the dictionary
            # The key is the plugin name
            # for example, the file glances_xxx.py
            # generate self._plugins_list["xxx"] = ...
            logger.debug("Server uses {} plugin".format(item))
            self._plugins[item] = plugin.Plugin(args=self.args)
    # Restoring system path
    # NOTE(review): ``sys_path`` is presumably a module-level copy of the
    # original ``sys.path`` taken before the plugin directory was added;
    # confirm it is defined in this module.
    sys.path = sys_path
|
Set the plugin list according to the Glances server.
|
def greenhall_sx(t, F, alpha):
    """ Eqn (8) from Greenhall2004 """
    if F == float('inf'):
        # Unfiltered limit: reduces to s_w evaluated with alpha+2.
        return greenhall_sw(t, alpha+2)
    step = 1.0 / float(F)
    center = 2 * greenhall_sw(t, alpha)
    left = greenhall_sw(t - step, alpha)
    right = greenhall_sw(t + step, alpha)
    return pow(F, 2) * (center - left - right)
|
Eqn (8) from Greenhall2004
|
def _get_images_dir():
    '''
    Extract the images dir from the configuration. First attempts to
    find legacy virt.images, then tries virt:images.
    '''
    img_dir = __salt__['config.option']('virt.images')
    if not img_dir:
        img_dir = __salt__['config.get']('virt:images')
        log.debug('Image directory from config option `virt:images`'
                  ' is %s', img_dir)
    else:
        # Legacy option found: warn that it is deprecated.
        salt.utils.versions.warn_until(
            'Sodium',
            '\'virt.images\' has been deprecated in favor of '
            '\'virt:images\'. \'virt.images\' will stop '
            'being used in {version}.')
    return img_dir
|
Extract the images dir from the configuration. First attempts to
find legacy virt.images, then tries virt:images.
|
def configure(conf, channel=False, group=False, fm_integration=False):
    """Guide user to set up the bot, saves configuration at `conf`.

    # Arguments

    conf (str): Path where to save the configuration file. May contain `~` for
        user's home.
    channel (Optional[bool]): Configure a channel.
    group (Optional[bool]): Configure a group.
    fm_integration (Optional[bool]): Setup file manager integration.
    """
    conf = expanduser(conf) if conf else get_config_path()
    prompt = "❯ " if not sys.platform.startswith("win32") else "> "
    contact_url = "https://telegram.me/"
    print("Talk with the {} on Telegram ({}), create a bot and insert the token"
          .format(markup("BotFather", "cyan"), contact_url + "BotFather"))
    try:
        token = input(markup(prompt, "magenta")).strip()
    except UnicodeEncodeError:
        # some users can only display ASCII
        prompt = "> "
        token = input(markup(prompt, "magenta")).strip()
    try:
        bot = telegram.Bot(token)
        bot_name = bot.get_me().username
    except Exception:
        # Narrowed from a bare ``except`` so KeyboardInterrupt still aborts.
        print(markup("Something went wrong, please try again.\n", "red"))
        # Bug fix: the retry used to call ``configure()`` with no arguments,
        # silently dropping the conf path and the channel/group/
        # fm_integration settings the user originally asked for.
        return configure(conf, channel=channel, group=group,
                         fm_integration=fm_integration)
    print("Connected with {}.\n".format(markup(bot_name, "cyan")))
    if channel:
        print("Do you want to send to a {} or a {} channel? [pub/priv]"
              .format(markup("public", "bold"), markup("private", "bold")))
        channel_type = input(markup(prompt, "magenta")).strip()
        if channel_type.startswith("pub"):
            print("\nEnter your channel's public name or link:")
            chat_id = input(markup(prompt, "magenta")).strip()
            # Normalize to the "@channelname" form.
            if "/" in chat_id:
                chat_id = "@" + chat_id.split("/")[-1]
            elif chat_id.startswith("@"):
                pass
            else:
                chat_id = "@" + chat_id
        else:
            print("\nOpen https://web.telegram.org in your browser, sign in and open your private channel."
                  "\nNow copy the URL in the address bar and enter it here:")
            url = input(markup(prompt, "magenta")).strip()
            # Raw string (identical pattern) avoids invalid-escape warnings.
            chat_id = "-100" + re.match(r".+web\.telegram\.org\/#\/im\?p=c(\d+)", url).group(1)
        authorized = False
        while not authorized:
            try:
                bot.send_chat_action(chat_id=chat_id, action="typing")
                authorized = True
            except (telegram.error.Unauthorized, telegram.error.BadRequest):
                # Telegram returns a BadRequest when a non-admin bot tries to send to a private channel
                input("Please add {} as administrator to your channel and press Enter"
                      .format(markup(bot_name, "cyan")))
        print(markup("\nCongratulations! telegram-send can now post to your channel!", "green"))
    else:
        password = "".join([str(randint(0, 9)) for _ in range(5)])
        bot_url = contact_url + bot_name
        fancy_bot_name = markup(bot_name, "cyan")
        if group:
            password = "/{}@{}".format(password, bot_name)
            print("Please add {} to your group\nand send the following message to the group: {}\n"
                  .format(fancy_bot_name, markup(password, "bold")))
        else:
            print("Please add {} on Telegram ({})\nand send it the password: {}\n"
                  .format(fancy_bot_name, bot_url, markup(password, "bold")))
        update, update_id = None, None

        def get_user():
            # Poll for the message containing the password; returns either
            # (matching_update, None) or (None, next_poll_offset).
            updates = bot.get_updates(offset=update_id, timeout=10)
            for update in updates:
                if update.message:
                    if update.message.text == password:
                        return update, None
            if len(updates) > 0:
                return None, updates[-1].update_id + 1
            else:
                return None, None

        while update is None:
            try:
                update, update_id = get_user()
            except Exception as e:
                print("Error! {}".format(e))
        chat_id = update.message.chat_id
        user = update.message.from_user.username or update.message.from_user.first_name
        m = ("Congratulations {}! ".format(user), "\ntelegram-send is now ready for use!")
        ball = "🎊"
        print(markup("".join(m), "green"))
        bot.send_message(chat_id=chat_id, text=ball + " " + m[0] + ball + m[1])
    config = configparser.ConfigParser()
    config.add_section("telegram")
    config.set("telegram", "TOKEN", token)
    config.set("telegram", "chat_id", str(chat_id))
    # above 3 lines in py3: config["telegram"] = {"TOKEN": token, "chat_id": chat_id}
    conf_dir = dirname(conf)
    if conf_dir:
        makedirs_check(conf_dir)
    with open(conf, "w") as f:
        config.write(f)
    if fm_integration:
        if not sys.platform.startswith("win32"):
            return integrate_file_manager()
|
Guide user to set up the bot, saves configuration at `conf`.
# Arguments
conf (str): Path where to save the configuration file. May contain `~` for
user's home.
channel (Optional[bool]): Configure a channel.
group (Optional[bool]): Configure a group.
fm_integration (Optional[bool]): Setup file manager integration.
|
def gen_batch(data, batch_size, maxiter=np.inf, random_state=None):
    """
    Create random batches for Stochastic gradients.

    Batch index generator for SGD that will yield random batches for a
    defined number of iterations, which can be infinite. This generator makes
    consecutive passes through the data, drawing without replacement on each
    pass.

    Parameters
    ----------
    data : ndarray or sequence of ndarrays
        The data, can be a matrix X, (X,y) tuples etc
    batch_size : int
        number of data points in each batch.
    maxiter : int, optional
        The number of iterations
    random_state : int or RandomState, optional
        random seed

    Yields
    ------
    ndarray or sequence :
        with each array length ``batch_size``, i.e. a subset of data.
    """
    index_stream = endless_permutations(_len_data(data), random_state)
    iteration = 0
    while iteration < maxiter:
        iteration += 1
        batch_ind = np.array([next(index_stream) for _ in range(batch_size)])
        yield _split_data(data, batch_ind)
|
Create random batches for Stochastic gradients.
Batch index generator for SGD that will yield random batches for a
defined number of iterations, which can be infinite. This generator makes
consecutive passes through the data, drawing without replacement on each
pass.
Parameters
----------
data : ndarray or sequence of ndarrays
The data, can be a matrix X, (X,y) tuples etc
batch_size : int
number of data points in each batch.
maxiter : int, optional
The number of iterations
random_state : int or RandomState, optional
random seed
Yields
------
ndarray or sequence :
with each array length ``batch_size``, i.e. a subset of data.
|
def get_file(profile, branch, file_path):
    """Get a file from a branch.

    Args:

      profile
        A profile generated from ``simplygithub.authentication.profile``.
        Such profiles tell this module (i) the ``repo`` to connect to,
        and (ii) the ``token`` to connect with.

      branch
        The name of a branch.

      file_path
        The path of the file to fetch.

    Returns:
        The (UTF-8 encoded) content of the file, as a string.

    Raises:
        FileNotFoundError: if ``file_path`` is not present in the branch.
    """
    branch_sha = get_branch_sha(profile, branch)
    tree = get_files_in_branch(profile, branch_sha)
    # Find the tree entry matching the requested path.
    match = next((item for item in tree if item.get("path") == file_path),
                 None)
    if match is None:
        # Bug fix: previously this fell through to ``None.get("sha")``
        # and crashed with an opaque AttributeError.
        raise FileNotFoundError(
            "No file {0} in branch {1}".format(file_path, branch))
    file_sha = match.get("sha")
    blob = blobs.get_blob(profile, file_sha)
    content = blob.get("content")
    decoded_content = b64decode(content)
    return decoded_content.decode("utf-8")
|
Get a file from a branch.
Args:
profile
A profile generated from ``simplygithub.authentication.profile``.
Such profiles tell this module (i) the ``repo`` to connect to,
and (ii) the ``token`` to connect with.
branch
The name of a branch.
file_path
The path of the file to fetch.
Returns:
The (UTF-8 encoded) content of the file, as a string.
|
def bfs_conditional(G, source, reverse=False, keys=True, data=False,
                    yield_nodes=True, yield_if=None,
                    continue_if=None, visited_nodes=None,
                    yield_source=False):
    """
    Produce edges in a breadth-first-search starting at source, but only return
    nodes that satisfy a condition, and only iterate past a node if it
    satisfies a different condition.

    conditions are callables that take (G, child, edge) and return true or false

    CommandLine:
        python -m utool.util_graph bfs_conditional

    Example:
        >>> # DISABLE_DOCTEST
        >>> import networkx as nx
        >>> import utool as ut
        >>> G = nx.Graph()
        >>> G.add_edges_from([(1, 2), (1, 3), (2, 3), (2, 4)])
        >>> continue_if = lambda G, child, edge: True
        >>> result = list(ut.bfs_conditional(G, 1, yield_nodes=False))
        >>> print(result)
        [(1, 2), (1, 3), (2, 1), (2, 3), (2, 4), (3, 1), (3, 2), (4, 2)]

    Example:
        >>> # ENABLE_DOCTEST
        >>> import networkx as nx
        >>> import utool as ut
        >>> G = nx.Graph()
        >>> continue_if = lambda G, child, edge: (child % 2 == 0)
        >>> yield_if = lambda G, child, edge: (child % 2 == 1)
        >>> G.add_edges_from([(0, 1), (1, 3), (3, 5), (5, 10),
        >>>                   (4, 3), (3, 6),
        >>>                   (0, 2), (2, 4), (4, 6), (6, 10)])
        >>> result = list(ut.bfs_conditional(G, 0, continue_if=continue_if,
        >>>                                  yield_if=yield_if))
        >>> print(result)
        [1, 3, 5]
    """
    if reverse and hasattr(G, 'reverse'):
        G = G.reverse()
    if isinstance(G, nx.Graph):
        neighbors = functools.partial(G.edges, data=data)
    else:
        # presumably a MultiGraph/MultiDiGraph, whose edges() also
        # accepts a ``keys`` argument -- confirm
        neighbors = functools.partial(G.edges, keys=keys, data=data)
    queue = collections.deque([])
    if visited_nodes is None:
        visited_nodes = set([])
    else:
        visited_nodes = set(visited_nodes)
    if source not in visited_nodes:
        if yield_nodes and yield_source:
            yield source
        visited_nodes.add(source)
        new_edges = neighbors(source)
        if isinstance(new_edges, list):
            new_edges = iter(new_edges)
        # queue holds (parent, iterator-of-its-edges) pairs
        queue.append((source, new_edges))
    while queue:
        parent, edges = queue[0]
        for edge in edges:
            child = edge[1]
            if yield_nodes:
                if child not in visited_nodes:
                    if yield_if is None or yield_if(G, child, edge):
                        yield child
            else:
                # edge mode: yield_if is consulted per-edge, so the same
                # child can appear in multiple yielded edges
                if yield_if is None or yield_if(G, child, edge):
                    yield edge
            if child not in visited_nodes:
                visited_nodes.add(child)
                # Add new children to queue if the condition is satisfied
                if continue_if is None or continue_if(G, child, edge):
                    new_edges = neighbors(child)
                    if isinstance(new_edges, list):
                        new_edges = iter(new_edges)
                    queue.append((child, new_edges))
        queue.popleft()
|
Produce edges in a breadth-first-search starting at source, but only return
nodes that satisfy a condition, and only iterate past a node if it
satisfies a different condition.
conditions are callables that take (G, child, edge) and return true or false
CommandLine:
python -m utool.util_graph bfs_conditional
Example:
>>> # DISABLE_DOCTEST
>>> import networkx as nx
>>> import utool as ut
>>> G = nx.Graph()
>>> G.add_edges_from([(1, 2), (1, 3), (2, 3), (2, 4)])
>>> continue_if = lambda G, child, edge: True
>>> result = list(ut.bfs_conditional(G, 1, yield_nodes=False))
>>> print(result)
[(1, 2), (1, 3), (2, 1), (2, 3), (2, 4), (3, 1), (3, 2), (4, 2)]
Example:
>>> # ENABLE_DOCTEST
>>> import networkx as nx
>>> import utool as ut
>>> G = nx.Graph()
>>> continue_if = lambda G, child, edge: (child % 2 == 0)
>>> yield_if = lambda G, child, edge: (child % 2 == 1)
>>> G.add_edges_from([(0, 1), (1, 3), (3, 5), (5, 10),
>>> (4, 3), (3, 6),
>>> (0, 2), (2, 4), (4, 6), (6, 10)])
>>> result = list(ut.bfs_conditional(G, 0, continue_if=continue_if,
>>> yield_if=yield_if))
>>> print(result)
[1, 3, 5]
|
def set_home_location(self):
    '''set home location from last map click'''
    # Last clicked (lat, lon) from the map module; bail out if no map loaded.
    try:
        latlon = self.module('map').click_position
    except Exception:
        print("No map available")
        return
    lat = float(latlon[0])
    lon = float(latlon[1])
    # Waypoint 0 is the home position; create it first if the list is empty.
    if self.wploader.count() == 0:
        self.wploader.add_latlonalt(lat, lon, 0)
    w = self.wploader.wp(0)
    w.x = lat
    w.y = lon
    self.wploader.set(w, 0)
    self.loading_waypoints = True
    self.loading_waypoint_lasttime = time.time()
    # Push only waypoint 0 to the vehicle (partial mission write, range 0..0).
    self.master.mav.mission_write_partial_list_send(self.target_system,
                                                    self.target_component,
                                                    0, 0)
|
set home location from last map click
|
def post(ctx, uri, input_file):
    """POST file data to a specific URI

    Note that POST is not used for most web services URIs. Instead,
    PUT is used for creating resources.
    """
    client = get_wva(ctx).get_http_client()
    result = client.post(uri, input_file.read())
    cli_pprint(result)
|
POST file data to a specific URI
Note that POST is not used for most web services URIs. Instead,
PUT is used for creating resources.
|
def make_password(password, salt=None, hasher='default'):
    """
    Turn a plain-text password into a hash for database storage

    Same as encode() but generate a new random salt. If password is None then
    return a concatenation of UNUSABLE_PASSWORD_PREFIX and a random string,
    which disallows logins. Additional random string reduces chances of gaining
    access to staff or superuser accounts. See ticket #20079 for more info.
    """
    if password is None:
        return UNUSABLE_PASSWORD_PREFIX + get_random_string(UNUSABLE_PASSWORD_SUFFIX_LENGTH)
    # NOTE(review): the ``hasher`` argument is effectively ignored -- it is
    # unconditionally replaced with bCryptPasswordHasher below, presumably to
    # force bcrypt for all passwords; confirm this is intentional.
    hasher = bCryptPasswordHasher
    if not salt:
        salt = hasher.salt()
    return hasher.encode(password, salt)
|
Turn a plain-text password into a hash for database storage
Same as encode() but generate a new random salt. If password is None then
return a concatenation of UNUSABLE_PASSWORD_PREFIX and a random string,
which disallows logins. Additional random string reduces chances of gaining
access to staff or superuser accounts. See ticket #20079 for more info.
|
def deactivate(self):
    """Remove any builtins which might have been added by add_builtins, or
    restore overwritten ones to their previous values."""
    remove_builtin = self.remove_builtin
    # ``dict.iteritems`` exists only on Python 2; ``items`` works on
    # both Python 2 and 3 (a list copy on 2, a view on 3 -- safe here
    # since remove_builtin does not mutate this dict).
    for key, val in self._orig_builtins.items():
        remove_builtin(key, val)
    self._orig_builtins.clear()
    self._builtins_added = False
|
Remove any builtins which might have been added by add_builtins, or
restore overwritten ones to their previous values.
|
def switch_led_on(self, ids):
    """ Switches on the LED of the motors with the specified ids. """
    self._set_LED({motor_id: True for motor_id in ids})
|
Switches on the LED of the motors with the specified ids.
|
def azureContainerSAS(self, *args, **kwargs):
    """
    Get Shared-Access-Signature for Azure Container

    Get a shared access signature (SAS) string for use with a specific Azure
    Blob Storage container.

    The `level` parameter can be `read-write` or `read-only` and determines
    which type of credentials are returned. If level is read-write, it will create the
    container if it doesn't already exist.

    This method gives output: ``v1/azure-container-response.json#``

    This method is ``stable``
    """
    # Auto-generated API binding: argument handling is delegated entirely
    # to _makeApiCall using the endpoint metadata in self.funcinfo.
    return self._makeApiCall(self.funcinfo["azureContainerSAS"], *args, **kwargs)
|
Get Shared-Access-Signature for Azure Container
Get a shared access signature (SAS) string for use with a specific Azure
Blob Storage container.
The `level` parameter can be `read-write` or `read-only` and determines
which type of credentials are returned. If level is read-write, it will create the
container if it doesn't already exist.
This method gives output: ``v1/azure-container-response.json#``
This method is ``stable``
|
def container_fs_usage_bytes(self, metric, scraper_config):
    """
    Number of bytes that are consumed by the container on this filesystem.
    """
    # Reject metric types this check does not know how to process.
    if metric.type not in METRIC_TYPES:
        self.log.error("Metric type %s unsupported for metric %s" % (metric.type, metric.name))
        return
    metric_name = scraper_config['namespace'] + '.filesystem.usage'
    self._process_usage_metric(metric_name, metric, self.fs_usage_bytes, scraper_config)
|
Number of bytes that are consumed by the container on this filesystem.
|
def start(self):
    '''Begin fetching the next request.

    Coroutine. Opens a fresh HTTP session, pulls the next request from
    next_request(), attaches a Basic-Auth header when the URL carries a
    password or the host previously required auth, then starts the
    session and returns the Response after _process_response() has
    recorded it.
    '''
    self._current_session = session = self._http_client.session()
    request = self.next_request()
    assert request
    if request.url_info.password or \
            request.url_info.hostname_with_port in self._hostnames_with_auth:
        self._add_basic_auth_header(request)
    response = yield from session.start(request)
    self._process_response(response)
    return response
|
Begin fetching the next request.
|
def _fill(self):
"""Advance the iterator without returning the old head."""
try:
self._head = self._iterable.next()
except StopIteration:
self._head = None
|
Advance the iterator without returning the old head.
|
def evaluate(self, item):
    """Walk the configured field path over *item*; return None on failure."""
    try:
        for token in PATH_PATTERN.finditer(self.field):
            segment = token.group(0)
            if segment[0] == "[":
                # Selecting an index of an array: Dynamo will usually
                # return a one-element array rather than the whole
                # array, so fall back to element 0.
                try:
                    item = item[int(token.group(1))]
                except IndexError:
                    item = item[0]
            else:
                item = item.get(segment)
    except (IndexError, TypeError, AttributeError):
        return None
    return item
|
Pull the field off the item
|
def check_tag_data(data):
    "Raise a ValueError if DATA doesn't seem to be a well-formed ID3 tag."
    if len(data) < 10:
        raise ValueError("Tag too short")
    if not data.startswith(b"ID3"):
        raise ValueError("Missing ID3 identifier")
    major, revision = data[3], data[4]
    # Only ID3v2.x with x < 5 and revision 0 is recognized here.
    if major >= 5 or revision != 0:
        raise ValueError("Unknown ID3 version")
    # Declared size (syncsafe int) plus the 10-byte header must match.
    expected_length = stagger.conversion.Syncsafe.decode(data[6:10]) + 10
    if len(data) != expected_length:
        raise ValueError("Tag size mismatch")
|
Raise a ValueError if DATA doesn't seem to be a well-formed ID3 tag.
|
def experiment_group_post_delete(sender, **kwargs):
    """Signal handler: audit the group deletion and drop its bookmarks."""
    group = kwargs['instance']
    auditor.record(event_type=EXPERIMENT_GROUP_DELETED, instance=group)
    remove_bookmarks(object_id=group.id, content_type='experimentgroup')
|
Delete all group outputs.
|
def _make_resource(self):
    """Create a new resource in the first free queue slot.

    Returns the ``_ResourceTracker`` wrapping the freshly built
    resource; raises ``PoolFullError`` when no slot is available.
    """
    with self._lock:
        for slot in self._unavailable_range():
            if self._reference_queue[slot] is not None:
                continue
            resource = self._factory(**self._factory_arguments)
            tracker = _ResourceTracker(resource)
            self._reference_queue[slot] = tracker
            self._size += 1
            return tracker
        raise PoolFullError
|
Returns a resource instance.
|
def get_internal_instances(self, phase=None):
    """Return the internal (non-external) instances, optionally filtered.

    When *phase* is None, every internal instance is returned; otherwise
    only those running in that phase and not scheduled for restart.

    :param phase: phase to filter on, or None for no filtering
    :return: internal instances list
    :rtype: list
    """
    internals = [inst for inst in self.instances if not inst.is_external]
    if phase is None:
        return internals
    return [inst for inst in internals
            if phase in inst.phases and inst not in self.to_restart]
|
Get a list of internal instances (in a specific phase)
If phase is None, return all internal instances whatever the phase
:param phase: phase to filter (never used)
:type phase:
:return: internal instances list
:rtype: list
|
def _get_template_dirs():
"""existing directories where to search for jinja2 templates. The order
is important. The first found template from the first found dir wins!"""
return filter(lambda x: os.path.exists(x), [
# user dir
os.path.join(os.path.expanduser('~'), '.py2pack', 'templates'),
# system wide dir
os.path.join('/', 'usr', 'share', 'py2pack', 'templates'),
# usually inside the site-packages dir
os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates'),
])
|
existing directories where to search for jinja2 templates. The order
is important. The first found template from the first found dir wins!
|
def listRemoteDatawraps(location = conf.pyGeno_REMOTE_LOCATION) :
    """List all the datawraps available from a remote location."""
    index_url = location + "/datawraps.json"
    raw = urllib2.urlopen(index_url).read()
    return json.loads(raw)
|
Lists all the datawraps available from a remote location.
|
def _load_data(self, band):
    """Load the bandpass table for *band* ('nuv' or 'fuv').

    From Morrissey+ 2005, with the actual data coming from
    http://www.astro.caltech.edu/~capak/filters/. According to the
    latter these are in QE units and thus need to be multiplied by the
    wavelength when integrating per-energy.
    """
    frame = bandpass_data_frame('filter_galex_' + band + '.dat', 'wlen resp')
    frame.resp *= frame.wlen  # QE -> EE response convention.
    return frame
|
From Morrissey+ 2005, with the actual data coming from
http://www.astro.caltech.edu/~capak/filters/. According to the latter,
these are in QE units and thus need to be multiplied by the wavelength
when integrating per-energy.
|
def delete_all_objects(self, async_=False):
    """Remove every object stored in this container.

    With ``async_=False`` (the default) the call blocks until all
    objects have been deleted and returns a dict describing the
    outcome, with the keys:

        deleted - the number of objects deleted
        not_found - the number of objects not found
        status - the HTTP return status code. '200 OK' indicates success
        errors - a list of any errors returned by the bulk delete call

    With ``async_=True`` the method returns immediately with an object
    that can be used to follow the progress of the deletion; once
    complete, its 'results' attribute is populated with that same dict.
    """
    names = self.list_object_names(full_listing=True)
    return self.object_manager.delete_all_objects(names, async_=async_)
|
Deletes all objects from this container.
By default the call will block until all objects have been deleted. By
passing True for the 'async_' parameter, this method will not block, and
instead return an object that can be used to follow the progress of the
deletion. When deletion is complete the bulk deletion object's
'results' attribute will be populated with the information returned
from the API call. In synchronous mode this is the value that is
returned when the call completes. It is a dictionary with the following
keys:
deleted - the number of objects deleted
not_found - the number of objects not found
status - the HTTP return status code. '200 OK' indicates success
errors - a list of any errors returned by the bulk delete call
|
def disclaim_key_flags():
  """Declares that the current module will not define any more key flags.
  Normally, the module that calls the DEFINE_xxx functions claims the
  flag to be its key flag.  This is undesirable for modules that
  define additional DEFINE_yyy functions with its own flag parsers and
  serializers, since that module will accidentally claim flags defined
  by DEFINE_yyy as its key flags.  After calling this function, the
  module disclaims flag definitions thereafter, so the key flags will
  be correctly attributed to the caller of DEFINE_yyy.
  After calling this function, the module will not be able to define
  any more flags.  This function will affect all FlagValues objects.
  """
  # Frame depth matters: _getframe(1) resolves to the module that called
  # this function, so this lookup must not be moved into a helper.
  globals_for_caller = sys._getframe(1).f_globals  # pylint: disable=protected-access
  module, _ = _helpers.get_module_object_and_name(globals_for_caller)
  # Recording the module id makes subsequent DEFINE_* calls skip this
  # module when attributing key flags.
  _helpers.disclaim_module_ids.add(id(module))
|
Declares that the current module will not define any more key flags.
Normally, the module that calls the DEFINE_xxx functions claims the
flag to be its key flag. This is undesirable for modules that
define additional DEFINE_yyy functions with its own flag parsers and
serializers, since that module will accidentally claim flags defined
by DEFINE_yyy as its key flags. After calling this function, the
module disclaims flag definitions thereafter, so the key flags will
be correctly attributed to the caller of DEFINE_yyy.
After calling this function, the module will not be able to define
any more flags. This function will affect all FlagValues objects.
|
def find_replace_string(obj, find, replace):
    """Performs a string replace on the input object.

    Args:
        obj (object): The object to find/replace. It will be cast to ``str``.
        find (str): The string to search for.
        replace (str): The string to replace with.
    Returns:
        str: The replaced string, or the original ``obj`` unchanged (same
        type) when nothing was replaced.
    """
    try:
        strobj = str(obj)
        # str.replace() replaces the string.replace() module function,
        # which was removed in Python 3.
        newStr = strobj.replace(find, replace)
        # Preserve the original object (and its type) when no
        # substitution occurred.
        if newStr == strobj:
            return obj
        return newStr
    except Exception:
        # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
        # still propagate; the useless 'finally: pass' was removed.
        line, filename, synerror = trace()
        raise ArcRestHelperError({
            "function": "find_replace_string",
            "line": line,
            "filename": filename,
            "synerror": synerror,
        }
        )
|
Performs a string.replace() on the input object.
Args:
obj (object): The object to find/replace. It will be cast to ``str``.
find (str): The string to search for.
replace (str): The string to replace with.
Returns:
str: The replaced string.
|
def _processJobsWithRunningServices(self):
    """Collect jobs whose services have all started."""
    while True:
        jobGraph = self.serviceManager.getJobGraphWhoseServicesAreRunning(0)
        # The manager returns None once no more jobs are ready.
        if jobGraph is None:
            return
        logger.debug('Job: %s has established its services.', jobGraph.jobStoreID)
        jobGraph.services = []
        self.toilState.updatedJobs.add((jobGraph, 0))
|
Get jobs whose services have started
|
def get_random_url(ltd="com"):
    """Build a random https URL ending in the given ltd.

    Args:
        ltd (str): The ltd to use (e.g. com).
    Returns:
        str: The random url.
    """
    host = RandomInputHelper.get_random_value(8, [string.ascii_lowercase])
    return "https://" + host + "." + ltd
|
Get a random url with the given ltd.
Args:
ltd (str): The ltd to use (e.g. com).
Returns:
str: The random url.
|
def get(self, project):
    """Query the project status.

    Returns a ``CLAMData`` instance or raises an exception according to
    the returned HTTP Status code.
    """
    # The previous ``try: ... except: raise`` wrapper was a no-op (it
    # re-raised everything unchanged) and has been removed; exceptions
    # from self.request still propagate to the caller.
    data = self.request(project + '/')
    if not isinstance(data, clam.common.data.CLAMData):
        raise Exception("Unable to retrieve CLAM Data")
    return data
|
Query the project status. Returns a ``CLAMData`` instance or raises an exception according to the returned HTTP Status code
|
def pixy_set_servos(self, s0, s1):
    """Send the setServos Pixy command.

    Positions the pan/tilt servos that are plugged into Pixy's two
    servo ports, blocking until the command completes.

    :param s0: value 0 to 1000
    :param s1: value 0 to 1000
    :returns: No return value.
    """
    future = asyncio.ensure_future(self.core.pixy_set_servos(s0, s1))
    self.loop.run_until_complete(future)
|
Sends the setServos Pixy command.
This method sets the pan/tilt servos that are plugged into Pixy's two servo ports.
:param s0: value 0 to 1000
:param s1: value 0 to 1000
:returns: No return value.
|
def qteMacroNameMangling(self, macroCls):
    """
    Convert the class name of a macro class into a macro name.

    Every capital letter except a leading one is prefixed with a
    hyphen, and the whole string is lower-cased; e.g. the class name
    'ThisIsAMacro' becomes 'this-is-a-macro'.

    |Args|

    * ``macroCls`` (**QtmacsMacro**): ``QtmacsMacro``- or derived
      class (not an instance!)

    |Returns|

    **str**: the name mangled string or **None** if an error occurred.

    |Raises|

    * **QtmacsArgumentError** if at least one argument has an invalid type.
    """
    hyphenated = re.sub(r"([A-Z])", r'-\1', macroCls.__name__)
    # A leading capital letter produces a spurious leading hyphen; drop it.
    if hyphenated[0] == '-':
        hyphenated = hyphenated[1:]
    return hyphenated.lower()
|
Convert the class name of a macro class to macro name.
The name mangling inserts a '-' character after every capital
letter and then lowers the entire string.
Example: if the class name of ``macroCls`` is 'ThisIsAMacro'
then this method will return 'this-is-a-macro', ie. every
capital letter (except the first) will be prefixed with a
hyphen and changed to lower case.
The method returns the name mangled macro name or **None**
if an error occurred.
|Args|
* ``macroCls`` (**QtmacsMacro**): ``QtmacsMacro``- or derived
class (not an instance!)
|Returns|
**str**: the name mangled string or **None** if an error occurred.
|Raises|
* **QtmacsArgumentError** if at least one argument has an invalid type.
|
def run(cmd, stdout=None, stderr=None, **kwargs):
    """
    A blocking wrapper around subprocess.Popen(), but with a simpler interface
    for the stdout/stderr arguments:
    stdout=False / stderr=False
      stdout/stderr will be redirected to /dev/null (or discarded in some
      other suitable manner)
    stdout=True / stderr=True
      stdout/stderr will be captured and returned as a list of lines.
    stdout=None
      stdout will be redirected to the python process's stdout, which may be
      a tty (same as using stdout=subprocess.None)
    stderr=None:
      stderr will be redirected to the python process's stderr, which may be
      a tty (same as using stderr=subprocess.None)
    stderr="STDOUT"
      Same as using stderr=subprocess.STDOUT
    The return value will be a tuple of (exitcode, stdout, stderr)
    If stdout and/or stderr were not captured, they will be None instead.
    """
    devnull = None
    try:
        stdoutfilter = None
        stderrfilter = None
        wantstdout = False
        wantstderr = False
        # Translate the convenience value for stdout into subprocess
        # semantics.  A callable acts as a streaming filter over PIPE.
        if stdout is False:
            devnull = open('/dev/null', 'w')
            stdout = devnull
        elif stdout is True:
            stdout = subprocess.PIPE
            wantstdout = True
        elif callable(stdout):
            stdoutfilter = partial(stdout)
            stdout = subprocess.PIPE
        else:
            assert stdout is None, "Invalid stdout %r" % stdout
        # Same translation for stderr, plus the "STDOUT" merge option.
        # The /dev/null handle is shared with stdout when both discard.
        if stderr is False:
            if devnull is None:
                devnull = open('/dev/null', 'w')
            stderr = devnull
        elif stderr is True:
            stderr = subprocess.PIPE
            wantstderr = True
        elif stderr == "STDOUT":
            stderr = subprocess.STDOUT
        elif callable(stderr):
            stderrfilter = partial(stderr)
            stderr = subprocess.PIPE
        else:
            assert stderr is None, "Invalid stderr %r" % stderr
        # NOTE(review): 'asyncio' here is a module-level name that may be
        # None when async support is unavailable — confirm at file top.
        if (stdoutfilter or stderrfilter) and asyncio:
            # run background process asynchronously and filter output as
            # it is running
            exitcode, out, err, = _runasync(stdoutfilter,
                                            stderrfilter,
                                            cmd,
                                            stdout=stdout,
                                            stderr=stderr,
                                            **kwargs)
            if not wantstdout:
                out = None
            if not wantstderr:
                err = None
            return exitcode, out, err
        # Synchronous fallback: run to completion, then apply any filters
        # once over the full captured output (final=True).
        proc = subprocess.Popen(cmd, stdout=stdout, stderr=stderr, **kwargs)
        out, err = proc.communicate()
        if not wantstdout:
            if stdoutfilter:
                stdoutfilter(out, True)
            out = None
        if not wantstderr:
            if stderrfilter:
                stderrfilter(err, True)
            err = None
        return proc.returncode, out, err
    finally:
        # Close the shared /dev/null handle regardless of how we exit.
        if devnull is not None:
            devnull.close()
|
A blocking wrapper around subprocess.Popen(), but with a simpler interface
for the stdout/stderr arguments:
stdout=False / stderr=False
stdout/stderr will be redirected to /dev/null (or discarded in some
other suitable manner)
stdout=True / stderr=True
stdout/stderr will be captured and returned as a list of lines.
stdout=None
stdout will be redirected to the python process's stdout, which may be
a tty (same as using stdout=subprocess.None)
stderr=None:
stderr will be redirected to the python process's stderr, which may be
a tty (same as using stderr=subprocess.None)
stderr="STDOUT"
Same as using stderr=subprocess.STDOUT
The return value will be a tuple of (exitcode, stdout, stderr)
If stdout and/or stderr were not captured, they will be None instead.
|
def bind(self, cube):
    """Bind this aggregate to *cube*: apply the SQL aggregation function
    to the measure column, or to the fact key when no measure is set."""
    if self.measure:
        table, column = self.measure.bind(cube)
    else:
        # No measure: count-style aggregates operate on the fact key.
        table, column = cube.fact_table, cube.fact_pk
    aggregated = getattr(func, self.function)(column).label(self.ref)
    aggregated.quote = True
    return table, aggregated
|
When one column needs to match, use the key.
|
def get_data(self, url='', headers=None, date=None,
             dict_to_store=None, type='', repo_name=''):
    """
    Retrieves traffic data from JSON and stores it in the supplied dict.
    Accepts 'clones' or 'views' as type.

    :param url: base API url; '/traffic/<type>' is appended to it
    :param headers: request headers (defaults to a fresh empty dict)
    :param date: ISO date string to exclude (defaults to today's date,
        now computed at call time instead of frozen at import time)
    :param dict_to_store: mapping of timestamp -> (count, uniques),
        updated in place (defaults to a fresh dict per call)
    :param type: 'clones' or 'views'
    :param repo_name: key under which the raw JSON is cached on self
    """
    # Fix mutable/eager default arguments: the old '{}' defaults were
    # shared across calls, and the old date default was evaluated once
    # at import time.
    if headers is None:
        headers = {}
    if date is None:
        date = str(datetime.date.today())
    if dict_to_store is None:
        dict_to_store = {}
    # JSON
    url = url + '/traffic/' + type
    response = requests.get(url, headers=headers)
    payload = response.json()
    if type == 'views':
        self.views_json[repo_name] = payload
    elif type == 'clones':
        self.clones_json[repo_name] = payload
    # CSV
    for day in payload[type]:
        timestamp_seconds = day['timestamp'] / 1000
        try:
            date_timestamp = datetime.datetime.utcfromtimestamp(
                timestamp_seconds).strftime('%Y-%m-%d')
            # Do not add today's date: some views might not be recorded yet.
            if date_timestamp != date:
                increment = (day['count'], day['uniques'])
                # KeyError from this lookup falls through to the except
                # branch, which seeds the entry.
                previous = dict_to_store[timestamp_seconds]
                dict_to_store[timestamp_seconds] = (
                    previous[0] + increment[0],
                    previous[1] + increment[1])
        except KeyError:
            dict_to_store[timestamp_seconds] = (day['count'],
                                                day['uniques'])
|
Retrieves data from json and stores it in the supplied dict. Accepts
'clones' or 'views' as type.
|
def concat(objs, axis=0, join='outer', join_axes=None, ignore_index=False,
           keys=None, levels=None, names=None, verify_integrity=False,
           sort=None, copy=True):
    """
    Concatenate pandas objects along a particular axis with optional set
    logic along the other axes, optionally adding a layer of hierarchical
    indexing on the concatenation axis (useful when labels are the same
    or overlapping on the passed axis number).

    Parameters
    ----------
    objs : a sequence or mapping of Series, DataFrame, or Panel objects
        If a dict is passed, the sorted keys will be used as the `keys`
        argument, unless it is passed, in which case the values will be
        selected (see below). Any None objects will be dropped silently
        unless they are all None in which case a ValueError will be raised.
    axis : {0/'index', 1/'columns'}, default 0
        The axis to concatenate along.
    join : {'inner', 'outer'}, default 'outer'
        How to handle indexes on other axis (or axes).
    join_axes : list of Index objects
        Specific indexes to use for the other n - 1 axes instead of
        performing inner/outer set logic.
    ignore_index : bool, default False
        If True, do not use the index values along the concatenation
        axis; the resulting axis will be labeled 0, ..., n - 1. Index
        values on the other axes are still respected in the join.
    keys : sequence, default None
        Construct a hierarchical index using the passed keys as the
        outermost level (tuples if multiple levels are passed).
    levels : list of sequences, default None
        Specific levels (unique values) for constructing a MultiIndex;
        otherwise inferred from the keys.
    names : list, default None
        Names for the levels in the resulting hierarchical index.
    verify_integrity : bool, default False
        Check whether the new concatenated axis contains duplicates
        (potentially expensive relative to the concatenation itself).
    sort : bool, default None
        Sort the non-concatenation axis if it is not already aligned
        when `join` is 'outer'. The current default of sorting is
        deprecated and will change to not-sorting in a future version
        of pandas; pass ``sort=True``/``sort=False`` explicitly to
        silence the warning. No effect when ``join='inner'``.

        .. versionadded:: 0.23.0
    copy : bool, default True
        If False, do not copy data unnecessarily.

    Returns
    -------
    object, type of objs
        A ``Series`` when concatenating all ``Series`` along the index
        (axis=0); a ``DataFrame`` when ``objs`` contains at least one
        ``DataFrame`` or when concatenating along the columns (axis=1).

    See Also
    --------
    Series.append : Concatenate Series.
    DataFrame.append : Concatenate DataFrames.
    DataFrame.join : Join DataFrames using indexes.
    DataFrame.merge : Merge DataFrames by indexes or columns.

    Notes
    -----
    The keys, levels, and names arguments are all optional. A
    walkthrough of how this method fits in with other tools for
    combining pandas objects can be found `here
    <http://pandas.pydata.org/pandas-docs/stable/merging.html>`__.

    Examples
    --------
    >>> s1 = pd.Series(['a', 'b'])
    >>> s2 = pd.Series(['c', 'd'])
    >>> pd.concat([s1, s2])
    0    a
    1    b
    0    c
    1    d
    dtype: object
    >>> pd.concat([s1, s2], ignore_index=True)
    0    a
    1    b
    2    c
    3    d
    dtype: object
    >>> pd.concat([s1, s2], keys=['s1', 's2'])
    s1  0    a
        1    b
    s2  0    c
        1    d
    dtype: object
    >>> df5 = pd.DataFrame([1], index=['a'])
    >>> df6 = pd.DataFrame([2], index=['a'])
    >>> pd.concat([df5, df6], verify_integrity=True)
    Traceback (most recent call last):
        ...
    ValueError: Indexes have overlapping values: ['a']
    """
    # All of the work is delegated to the internal _Concatenator helper.
    concatenator = _Concatenator(objs, axis=axis, join_axes=join_axes,
                                 ignore_index=ignore_index, join=join,
                                 keys=keys, levels=levels, names=names,
                                 verify_integrity=verify_integrity,
                                 copy=copy, sort=sort)
    return concatenator.get_result()
|
Concatenate pandas objects along a particular axis with optional set logic
along the other axes.
Can also add a layer of hierarchical indexing on the concatenation axis,
which may be useful if the labels are the same (or overlapping) on
the passed axis number.
Parameters
----------
objs : a sequence or mapping of Series, DataFrame, or Panel objects
If a dict is passed, the sorted keys will be used as the `keys`
argument, unless it is passed, in which case the values will be
selected (see below). Any None objects will be dropped silently unless
they are all None in which case a ValueError will be raised.
axis : {0/'index', 1/'columns'}, default 0
The axis to concatenate along.
join : {'inner', 'outer'}, default 'outer'
How to handle indexes on other axis (or axes).
join_axes : list of Index objects
Specific indexes to use for the other n - 1 axes instead of performing
inner/outer set logic.
ignore_index : bool, default False
If True, do not use the index values along the concatenation axis. The
resulting axis will be labeled 0, ..., n - 1. This is useful if you are
concatenating objects where the concatenation axis does not have
meaningful indexing information. Note the index values on the other
axes are still respected in the join.
keys : sequence, default None
If multiple levels passed, should contain tuples. Construct
hierarchical index using the passed keys as the outermost level.
levels : list of sequences, default None
Specific levels (unique values) to use for constructing a
MultiIndex. Otherwise they will be inferred from the keys.
names : list, default None
Names for the levels in the resulting hierarchical index.
verify_integrity : bool, default False
Check whether the new concatenated axis contains duplicates. This can
be very expensive relative to the actual data concatenation.
sort : bool, default None
Sort non-concatenation axis if it is not already aligned when `join`
is 'outer'. The current default of sorting is deprecated and will
change to not-sorting in a future version of pandas.
Explicitly pass ``sort=True`` to silence the warning and sort.
Explicitly pass ``sort=False`` to silence the warning and not sort.
This has no effect when ``join='inner'``, which already preserves
the order of the non-concatenation axis.
.. versionadded:: 0.23.0
copy : bool, default True
If False, do not copy data unnecessarily.
Returns
-------
object, type of objs
When concatenating all ``Series`` along the index (axis=0), a
``Series`` is returned. When ``objs`` contains at least one
``DataFrame``, a ``DataFrame`` is returned. When concatenating along
the columns (axis=1), a ``DataFrame`` is returned.
See Also
--------
Series.append : Concatenate Series.
DataFrame.append : Concatenate DataFrames.
DataFrame.join : Join DataFrames using indexes.
DataFrame.merge : Merge DataFrames by indexes or columns.
Notes
-----
The keys, levels, and names arguments are all optional.
A walkthrough of how this method fits in with other tools for combining
pandas objects can be found `here
<http://pandas.pydata.org/pandas-docs/stable/merging.html>`__.
Examples
--------
Combine two ``Series``.
>>> s1 = pd.Series(['a', 'b'])
>>> s2 = pd.Series(['c', 'd'])
>>> pd.concat([s1, s2])
0 a
1 b
0 c
1 d
dtype: object
Clear the existing index and reset it in the result
by setting the ``ignore_index`` option to ``True``.
>>> pd.concat([s1, s2], ignore_index=True)
0 a
1 b
2 c
3 d
dtype: object
Add a hierarchical index at the outermost level of
the data with the ``keys`` option.
>>> pd.concat([s1, s2], keys=['s1', 's2'])
s1 0 a
1 b
s2 0 c
1 d
dtype: object
Label the index keys you create with the ``names`` option.
>>> pd.concat([s1, s2], keys=['s1', 's2'],
... names=['Series name', 'Row ID'])
Series name Row ID
s1 0 a
1 b
s2 0 c
1 d
dtype: object
Combine two ``DataFrame`` objects with identical columns.
>>> df1 = pd.DataFrame([['a', 1], ['b', 2]],
... columns=['letter', 'number'])
>>> df1
letter number
0 a 1
1 b 2
>>> df2 = pd.DataFrame([['c', 3], ['d', 4]],
... columns=['letter', 'number'])
>>> df2
letter number
0 c 3
1 d 4
>>> pd.concat([df1, df2])
letter number
0 a 1
1 b 2
0 c 3
1 d 4
Combine ``DataFrame`` objects with overlapping columns
and return everything. Columns outside the intersection will
be filled with ``NaN`` values.
>>> df3 = pd.DataFrame([['c', 3, 'cat'], ['d', 4, 'dog']],
... columns=['letter', 'number', 'animal'])
>>> df3
letter number animal
0 c 3 cat
1 d 4 dog
>>> pd.concat([df1, df3], sort=False)
letter number animal
0 a 1 NaN
1 b 2 NaN
0 c 3 cat
1 d 4 dog
Combine ``DataFrame`` objects with overlapping columns
and return only those that are shared by passing ``inner`` to
the ``join`` keyword argument.
>>> pd.concat([df1, df3], join="inner")
letter number
0 a 1
1 b 2
0 c 3
1 d 4
Combine ``DataFrame`` objects horizontally along the x axis by
passing in ``axis=1``.
>>> df4 = pd.DataFrame([['bird', 'polly'], ['monkey', 'george']],
... columns=['animal', 'name'])
>>> pd.concat([df1, df4], axis=1)
letter number animal name
0 a 1 bird polly
1 b 2 monkey george
Prevent the result from including duplicate index values with the
``verify_integrity`` option.
>>> df5 = pd.DataFrame([1], index=['a'])
>>> df5
0
a 1
>>> df6 = pd.DataFrame([2], index=['a'])
>>> df6
0
a 2
>>> pd.concat([df5, df6], verify_integrity=True)
Traceback (most recent call last):
...
ValueError: Indexes have overlapping values: ['a']
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.