docstring
stringlengths 52
499
| function
stringlengths 67
35.2k
| __index_level_0__
int64 52.6k
1.16M
|
|---|---|---|
See Also: createGroupResponse()
Args:
group:
vendorSpecific:
Returns:
|
def createGroup(self, group, vendorSpecific=None):
    """Create a DataONE Group and return the call's boolean result.

    See Also: createGroupResponse()

    Args:
        group:
        vendorSpecific:

    Returns:
        bool
    """
    return self._read_boolean_response(
        self.createGroupResponse(group, vendorSpecific)
    )
| 693,194
|
CNIdentity.addGroupMembers(session, groupName, members) β boolean
https://releases.dataone.org/online/api-
documentation-v2.0.1/apis/CN_APIs.html#CNIdentity.addGroupMembers.
Args:
group:
vendorSpecific:
Returns:
|
def updateGroupResponse(self, group, vendorSpecific=None):
    """CNIdentity.updateGroup. Issue the PUT request against the groups endpoint.

    Args:
        group: Group PyXB object, serialized into the multipart body.
        vendorSpecific: Custom HTTP headers to include in the request.

    Returns:
        Raw HTTP response.
    """
    fields = {'group': ('group.xml', group.toxml('utf-8'))}
    return self.PUT('groups', fields=fields, headers=vendorSpecific)
| 693,195
|
See Also: updateGroupResponse()
Args:
group:
vendorSpecific:
Returns:
|
def updateGroup(self, group, vendorSpecific=None):
    """Update a DataONE Group and return the call's boolean result.

    See Also: updateGroupResponse()

    Args:
        group:
        vendorSpecific:

    Returns:
        bool
    """
    return self._read_boolean_response(
        self.updateGroupResponse(group, vendorSpecific)
    )
| 693,196
|
CNReplication.setReplicationStatus(session, pid, nodeRef, status, failure) β
boolean https://releases.dataone.org/online/api-documentatio
n-v2.0.1/apis/CN_APIs.html#CNReplication.setReplicationStatus.
Args:
pid:
nodeRef:
status:
dataoneError:
vendorSpecific:
Returns:
|
def setReplicationStatusResponse(
    self, pid, nodeRef, status, dataoneError=None, vendorSpecific=None
):
    """CNReplication.setReplicationStatus. PUT to replicaNotifications/{pid}.

    Args:
        pid:
        nodeRef:
        status:
        dataoneError: Optional DataONE error; serialized into a "failure" part.
        vendorSpecific: Custom HTTP headers to include in the request.

    Returns:
        Raw HTTP response.
    """
    fields = {'nodeRef': nodeRef, 'status': status}
    if dataoneError is not None:
        fields['failure'] = ('failure.xml', dataoneError.serialize_to_transport())
    return self.PUT(
        ['replicaNotifications', pid], fields=fields, headers=vendorSpecific
    )
| 693,197
|
See Also: setReplicationStatusResponse()
Args:
pid:
nodeRef:
status:
dataoneError:
vendorSpecific:
Returns:
|
def setReplicationStatus(
    self, pid, nodeRef, status, dataoneError=None, vendorSpecific=None
):
    """Set the replication status and return the call's boolean result.

    See Also: setReplicationStatusResponse()

    Args:
        pid:
        nodeRef:
        status:
        dataoneError:
        vendorSpecific:

    Returns:
        bool
    """
    return self._read_boolean_response(
        self.setReplicationStatusResponse(
            pid, nodeRef, status, dataoneError, vendorSpecific
        )
    )
| 693,198
|
CNReplication.updateReplicationMetadata(session, pid, replicaMetadata,
serialVersion) β boolean https://releases.dataone.org/online/api-
documentation-v2.0.1/apis/CN_AP Is.html#CNReplication.updateReplicationMetadata
Not implemented.
Args:
pid:
replicaMetadata:
serialVersion:
vendorSpecific:
Returns:
|
def updateReplicationMetadataResponse(
    self, pid, replicaMetadata, serialVersion, vendorSpecific=None
):
    """CNReplication.updateReplicationMetadata. PUT to replicaMetadata/{pid}.

    Args:
        pid:
        replicaMetadata: PyXB object, serialized into the multipart body.
        serialVersion:
        vendorSpecific: Custom HTTP headers to include in the request.

    Returns:
        Raw HTTP response.
    """
    fields = {
        'replicaMetadata': ('replicaMetadata.xml', replicaMetadata.toxml('utf-8')),
        'serialVersion': str(serialVersion),
    }
    return self.PUT(['replicaMetadata', pid], fields=fields, headers=vendorSpecific)
| 693,199
|
See Also: updateReplicationMetadataResponse()
Args:
pid:
replicaMetadata:
serialVersion:
vendorSpecific:
Returns:
|
def updateReplicationMetadata(
    self, pid, replicaMetadata, serialVersion, vendorSpecific=None
):
    """Update replication metadata and return the call's boolean result.

    See Also: updateReplicationMetadataResponse()

    Args:
        pid:
        replicaMetadata:
        serialVersion:
        vendorSpecific:

    Returns:
        bool
    """
    return self._read_boolean_response(
        self.updateReplicationMetadataResponse(
            pid, replicaMetadata, serialVersion, vendorSpecific
        )
    )
| 693,200
|
CNReplication.setReplicationPolicy(session, pid, policy, serialVersion) β
boolean https://releases.dataone.org/online/api-docume
ntation-v2.0.1/apis/CN_APIs.html#CNReplication.setReplicationPolicy.
Args:
pid:
policy:
serialVersion:
vendorSpecific:
Returns:
|
def setReplicationPolicyResponse(
    self, pid, policy, serialVersion, vendorSpecific=None
):
    """CNReplication.setReplicationPolicy. PUT to replicaPolicies/{pid}.

    Args:
        pid:
        policy: PyXB object, serialized into the multipart body.
        serialVersion:
        vendorSpecific: Custom HTTP headers to include in the request.

    Returns:
        Raw HTTP response.
    """
    fields = {
        'policy': ('policy.xml', policy.toxml('utf-8')),
        'serialVersion': str(serialVersion),
    }
    return self.PUT(['replicaPolicies', pid], fields=fields, headers=vendorSpecific)
| 693,201
|
See Also: setReplicationPolicyResponse()
Args:
pid:
policy:
serialVersion:
vendorSpecific:
Returns:
|
def setReplicationPolicy(self, pid, policy, serialVersion, vendorSpecific=None):
    """Set the replication policy and return the call's boolean result.

    See Also: setReplicationPolicyResponse()

    Args:
        pid:
        policy:
        serialVersion:
        vendorSpecific:

    Returns:
        bool
    """
    return self._read_boolean_response(
        self.setReplicationPolicyResponse(pid, policy, serialVersion, vendorSpecific)
    )
| 693,202
|
CNReplication.isNodeAuthorized(session, targetNodeSubject, pid,
replicatePermission) β boolean() https://releases.dataone.org/online/api-
documentation-v2.0.1/apis/CN_APIs.html#CNReplication.isNodeAuthorized.
Args:
targetNodeSubject:
pid:
vendorSpecific:
Returns:
|
def isNodeAuthorizedResponse(self, targetNodeSubject, pid, vendorSpecific=None):
    """CNReplication.isNodeAuthorized. GET replicaAuthorizations/{pid}.

    Args:
        targetNodeSubject: Subject of the node to check, sent as a query param.
        pid:
        vendorSpecific: Custom HTTP headers to include in the request.

    Returns:
        Raw HTTP response.
    """
    return self.GET(
        ['replicaAuthorizations', pid],
        query={'targetNodeSubject': targetNodeSubject},
        headers=vendorSpecific,
    )
| 693,203
|
See Also: isNodeAuthorizedResponse()
Args:
targetNodeSubject:
pid:
vendorSpecific:
Returns:
|
def isNodeAuthorized(self, targetNodeSubject, pid, vendorSpecific=None):
    """Check node replication authorization; boolean result.

    See Also: isNodeAuthorizedResponse()

    Args:
        targetNodeSubject:
        pid:
        vendorSpecific:

    Returns:
        bool
    """
    # Note: 401 is a valid (negative) answer here, hence the _401 reader.
    return self._read_boolean_401_response(
        self.isNodeAuthorizedResponse(targetNodeSubject, pid, vendorSpecific)
    )
| 693,204
|
CNReplication.deleteReplicationMetadata(session, pid, policy, serialVersion)
β boolean https://releases.dataone.org/online/api-docume
ntation-v2.0.1/apis/CN_APIs.html#CNReplication.deleteReplicationMetadat a.
Args:
pid:
nodeId:
serialVersion:
vendorSpecific:
Returns:
|
def deleteReplicationMetadataResponse(
    self, pid, nodeId, serialVersion, vendorSpecific=None
):
    """CNReplication.deleteReplicationMetadata. PUT to removeReplicaMetadata/{pid}.

    Args:
        pid:
        nodeId:
        serialVersion:
        vendorSpecific: Custom HTTP headers to include in the request.

    Returns:
        Raw HTTP response.
    """
    fields = {'nodeId': nodeId, 'serialVersion': str(serialVersion)}
    return self.PUT(
        ['removeReplicaMetadata', pid], fields=fields, headers=vendorSpecific
    )
| 693,205
|
See Also: deleteReplicationMetadataResponse()
Args:
pid:
nodeId:
serialVersion:
vendorSpecific:
Returns:
|
def deleteReplicationMetadata(
    self, pid, nodeId, serialVersion, vendorSpecific=None
):
    """Delete replication metadata and return the call's boolean result.

    See Also: deleteReplicationMetadataResponse()

    Args:
        pid:
        nodeId:
        serialVersion:
        vendorSpecific:

    Returns:
        bool
    """
    return self._read_boolean_response(
        self.deleteReplicationMetadataResponse(
            pid, nodeId, serialVersion, vendorSpecific
        )
    )
| 693,206
|
CNRegister.updateNodeCapabilities(session, nodeId, node) β boolean
https://releases.dataone.org/online/api-documentation-v2.0.1/apis/CN_AP
Is.html#CNRegister.updateNodeCapabilities.
Args:
nodeId:
node:
vendorSpecific:
Returns:
|
def updateNodeCapabilitiesResponse(self, nodeId, node, vendorSpecific=None):
    """CNRegister.updateNodeCapabilities. PUT to node/{nodeId}.

    Args:
        nodeId:
        node: Node PyXB object, serialized into the multipart body.
        vendorSpecific: Custom HTTP headers to include in the request.

    Returns:
        Raw HTTP response.
    """
    fields = {'node': ('node.xml', node.toxml('utf-8'))}
    return self.PUT(['node', nodeId], fields=fields, headers=vendorSpecific)
| 693,207
|
See Also: updateNodeCapabilitiesResponse()
Args:
nodeId:
node:
vendorSpecific:
Returns:
|
def updateNodeCapabilities(self, nodeId, node, vendorSpecific=None):
    """Update node capabilities and return the call's boolean result.

    See Also: updateNodeCapabilitiesResponse()

    Args:
        nodeId:
        node:
        vendorSpecific:

    Returns:
        bool
    """
    return self._read_boolean_response(
        self.updateNodeCapabilitiesResponse(nodeId, node, vendorSpecific)
    )
| 693,208
|
CNRegister.register(session, node) β NodeReference
https://releases.dataone.org/online/api-
documentation-v2.0.1/apis/CN_APIs.html#CNRegister.register.
Args:
node:
vendorSpecific:
Returns:
|
def registerResponse(self, node, vendorSpecific=None):
    """CNRegister.register. POST to the node endpoint.

    Args:
        node: Node PyXB object, serialized into the multipart body.
        vendorSpecific: Custom HTTP headers to include in the request.

    Returns:
        Raw HTTP response.
    """
    fields = {'node': ('node.xml', node.toxml('utf-8'))}
    return self.POST('node', fields=fields, headers=vendorSpecific)
| 693,209
|
See Also: registerResponse()
Args:
node:
vendorSpecific:
Returns:
|
def register(self, node, vendorSpecific=None):
    """Register a node and return the call's boolean result.

    See Also: registerResponse()

    Args:
        node:
        vendorSpecific:

    Returns:
        bool

    NOTE(review): the CNRegister.register API is documented as returning a
    NodeReference, but this reads the response as a boolean -- confirm.
    """
    return self._read_boolean_response(
        self.registerResponse(node, vendorSpecific)
    )
| 693,210
|
Calculate the checksum of a stream.
Args:
f: file-like object
Only requirement is a ``read()`` method that returns ``bytes``.
algorithm: str
Checksum algorithm, ``MD5`` or ``SHA1`` / ``SHA-1``.
Returns:
Populated Checksum PyXB object.
|
def create_checksum_object_from_stream(
    f, algorithm=d1_common.const.DEFAULT_CHECKSUM_ALGORITHM
):
    """Calculate the checksum of a stream and wrap it in a Checksum PyXB object.

    Args:
        f: file-like object
            Only requirement is a ``read()`` method that returns ``bytes``.
        algorithm: str
            Checksum algorithm, ``MD5`` or ``SHA1`` / ``SHA-1``.

    Returns:
        Populated Checksum PyXB object.
    """
    checksum_pyxb = d1_common.types.dataoneTypes.checksum(
        calculate_checksum_on_stream(f, algorithm)
    )
    checksum_pyxb.algorithm = algorithm
    return checksum_pyxb
| 693,390
|
Calculate the checksum of an iterator.
Args:
itr: iterable
Object which supports the iterator protocol.
algorithm: str
Checksum algorithm, ``MD5`` or ``SHA1`` / ``SHA-1``.
Returns:
Populated Checksum PyXB object.
|
def create_checksum_object_from_iterator(
    itr, algorithm=d1_common.const.DEFAULT_CHECKSUM_ALGORITHM
):
    """Calculate the checksum of an iterator and wrap it in a Checksum PyXB object.

    Args:
        itr: iterable
            Object which supports the iterator protocol.
        algorithm: str
            Checksum algorithm, ``MD5`` or ``SHA1`` / ``SHA-1``.

    Returns:
        Populated Checksum PyXB object.
    """
    checksum_pyxb = d1_common.types.dataoneTypes.checksum(
        calculate_checksum_on_iterator(itr, algorithm)
    )
    checksum_pyxb.algorithm = algorithm
    return checksum_pyxb
| 693,391
|
Calculate the checksum of ``bytes``.
Warning:
This method requires the entire object to be buffered in (virtual) memory, which
should normally be avoided in production code.
Args:
b: bytes
Raw bytes
algorithm: str
Checksum algorithm, ``MD5`` or ``SHA1`` / ``SHA-1``.
Returns:
Populated PyXB Checksum object.
|
def create_checksum_object_from_bytes(
    b, algorithm=d1_common.const.DEFAULT_CHECKSUM_ALGORITHM
):
    """Calculate the checksum of ``bytes`` and wrap it in a Checksum PyXB object.

    Warning:
        This method requires the entire object to be buffered in (virtual) memory,
        which should normally be avoided in production code.

    Args:
        b: bytes
            Raw bytes
        algorithm: str
            Checksum algorithm, ``MD5`` or ``SHA1`` / ``SHA-1``.

    Returns:
        Populated PyXB Checksum object.
    """
    checksum_pyxb = d1_common.types.dataoneTypes.checksum(
        calculate_checksum_on_bytes(b, algorithm)
    )
    checksum_pyxb.algorithm = algorithm
    return checksum_pyxb
| 693,392
|
Calculate the checksum of a stream.
Args:
f: file-like object
Only requirement is a ``read()`` method that returns ``bytes``.
algorithm: str
Checksum algorithm, ``MD5`` or ``SHA1`` / ``SHA-1``.
chunk_size : int
Number of bytes to read from the file and add to the checksum at a time.
Returns:
str : Checksum as a hexadecimal string, with length decided by the algorithm.
|
def calculate_checksum_on_stream(
    f,
    algorithm=d1_common.const.DEFAULT_CHECKSUM_ALGORITHM,
    chunk_size=DEFAULT_CHUNK_SIZE,
):
    """Calculate the checksum of a stream.

    Args:
        f: file-like object
            Only requirement is a ``read()`` method that returns ``bytes``.
        algorithm: str
            Checksum algorithm, ``MD5`` or ``SHA1`` / ``SHA-1``.
        chunk_size : int
            Number of bytes to read from the file and add to the checksum at
            a time.

    Returns:
        str : Checksum as a hexadecimal string, with length decided by the
        algorithm.
    """
    hasher = get_checksum_calculator_by_dataone_designator(algorithm)
    chunk = f.read(chunk_size)
    while chunk:
        hasher.update(chunk)
        chunk = f.read(chunk_size)
    return hasher.hexdigest()
| 693,393
|
Calculate the checksum of an iterator.
Args:
itr: iterable
Object which supports the iterator protocol.
algorithm: str
Checksum algorithm, ``MD5`` or ``SHA1`` / ``SHA-1``.
Returns:
str : Checksum as a hexadecimal string, with length decided by the algorithm.
|
def calculate_checksum_on_iterator(
    itr, algorithm=d1_common.const.DEFAULT_CHECKSUM_ALGORITHM
):
    """Calculate the checksum of an iterator.

    Args:
        itr: iterable
            Object which supports the iterator protocol.
        algorithm: str
            Checksum algorithm, ``MD5`` or ``SHA1`` / ``SHA-1``.

    Returns:
        str : Checksum as a hexadecimal string, with length decided by the
        algorithm.
    """
    hasher = get_checksum_calculator_by_dataone_designator(algorithm)
    for block in itr:
        hasher.update(block)
    return hasher.hexdigest()
| 693,394
|
Calculate the checksum of ``bytes``.
Warning: This method requires the entire object to be buffered in (virtual) memory,
which should normally be avoided in production code.
Args:
b: bytes
Raw bytes
algorithm: str
Checksum algorithm, ``MD5`` or ``SHA1`` / ``SHA-1``.
Returns:
str : Checksum as a hexadecimal string, with length decided by the algorithm.
|
def calculate_checksum_on_bytes(
    b, algorithm=d1_common.const.DEFAULT_CHECKSUM_ALGORITHM
):
    """Calculate the checksum of ``bytes``.

    Warning: This method requires the entire object to be buffered in (virtual)
    memory, which should normally be avoided in production code.

    Args:
        b: bytes
            Raw bytes
        algorithm: str
            Checksum algorithm, ``MD5`` or ``SHA1`` / ``SHA-1``.

    Returns:
        str : Checksum as a hexadecimal string, with length decided by the
        algorithm.
    """
    hasher = get_checksum_calculator_by_dataone_designator(algorithm)
    hasher.update(b)
    return hasher.hexdigest()
| 693,395
|
Create string representation of a PyXB Checksum object.
Args:
PyXB Checksum object
Returns:
str : Combined hexadecimal value and algorithm name.
|
def format_checksum(checksum_pyxb):
    """Create string representation of a PyXB Checksum object.

    Args:
        checksum_pyxb: PyXB Checksum object

    Returns:
        str : Algorithm name (upper case, dashes removed) and lower case
        hexadecimal value, joined by a "/".
    """
    algorithm_str = checksum_pyxb.algorithm.upper().replace('-', '')
    value_str = checksum_pyxb.value().lower()
    return '{}/{}'.format(algorithm_str, value_str)
| 693,397
|
MNRead.get()
Retrieve the SciObj bytes and write them to a file or other stream.
Args:
file_stream: Open file-like object
Stream to which the SciObj bytes will be written.
pid: str
vendor_specific: dict
Custom HTTP headers to include in the request
See also:
MNRead.get().
|
async def get(self, file_stream, pid, vendor_specific=None):
    """MNRead.get().

    Retrieve the SciObj bytes and write them to a file or other stream.

    Args:
        file_stream: Open file-like object
            Stream to which the SciObj bytes will be written.
        pid: str
        vendor_specific: dict
            Custom HTTP headers to include in the request.

    See also:
        MNRead.get().
    """
    async with await self._retry_request(
        "get", ["object", pid], vendor_specific=vendor_specific
    ) as response:
        self._assert_valid_response(response)
        # Stream the response body chunk by chunk instead of buffering the
        # entire object in memory.
        async for chunk_str, _ in response.content.iter_chunks():
            file_stream.write(chunk_str)
| 693,421
|
Create a stream containing a BagIt zip archive.
Args:
dir_name : str
The name of the root directory in the zip file, under which all the files
are placed (avoids "zip bombs").
payload_info_list: list
List of payload_info_dict, each dict describing a file.
- keys: pid, filename, iter, checksum, checksum_algorithm
- If the filename is None, the pid is used for the filename.
|
def create_bagit_stream(dir_name, payload_info_list):
    """Create a stream containing a BagIt zip archive.

    Args:
        dir_name : str
            The name of the root directory in the zip file, under which all the
            files are placed (avoids "zip bombs").
        payload_info_list: list
            List of payload_info_dict, each dict describing a file.
            - keys: pid, filename, iter, checksum, checksum_algorithm
            - If the filename is None, the pid is used for the filename.

    Returns:
        The zip file object; the archive is generated as the object is streamed.
    """
    zip_file = zipstream.ZipFile(mode='w', compression=zipstream.ZIP_DEFLATED)
    # NOTE(review): unlike the helpers below, _add_path() is not passed
    # zip_file -- confirm this is intentional.
    _add_path(dir_name, payload_info_list)
    payload_byte_count, payload_file_count = _add_payload_files(
        zip_file, payload_info_list
    )
    tag_info_list = _add_tag_files(
        zip_file, dir_name, payload_info_list, payload_byte_count, payload_file_count
    )
    _add_manifest_files(zip_file, dir_name, payload_info_list, tag_info_list)
    _add_tag_manifest_file(zip_file, dir_name, tag_info_list)
    return zip_file
| 693,458
|
Parse a multipart Requests.Response into a tuple of BodyPart objects.
Args:
response: Requests.Response
encoding:
The parser will assume that any text in the HTML body is encoded with this
encoding when decoding it for use in the ``text`` attribute.
Returns:
tuple of BodyPart
Members: headers (CaseInsensitiveDict), content (bytes), text (Unicode),
encoding (str).
|
def parse_response(response, encoding='utf-8'):
    """Parse a multipart Requests.Response into a tuple of BodyPart objects.

    Args:
        response: Requests.Response
        encoding:
            The parser will assume that any text in the HTML body is encoded
            with this encoding when decoding it for use in the ``text``
            attribute.

    Returns:
        tuple of BodyPart
            Members: headers (CaseInsensitiveDict), content (bytes),
            text (Unicode), encoding (str).
    """
    decoder = requests_toolbelt.multipart.decoder.MultipartDecoder.from_response(
        response, encoding
    )
    return decoder.parts
| 693,569
|
Extract the MIME type value from a content type string.
Removes any subtype and parameter values that may be present in the string.
Args:
content_type: str
String with content type and optional subtype and parameter fields.
Returns:
str: String with only content type
Example:
::
Input: multipart/form-data; boundary=aBoundaryString
Returns: multipart/form-data
|
def get_content_type(content_type):
    """Extract the MIME type value from a content type string.

    Removes any subtype and parameter values that may be present in the string.

    Args:
        content_type: str
            String with content type and optional subtype and parameter fields.

    Returns:
        str: String with only content type

    Example:
        ::

            Input: multipart/form-data; boundary=aBoundaryString
            Returns: multipart/form-data
    """
    # Delegate parameter stripping and normalization to the stdlib parser.
    msg = email.message.Message()
    msg['Content-Type'] = content_type
    return msg.get_content_type()
| 693,792
|
Serialize a native object to normalized, pretty printed JSON.
The JSON string is normalized by sorting any dictionary keys.
Args:
py_obj: object
Any object that can be represented in JSON. Some types, such as datetimes are
automatically converted to strings.
Returns:
str: normalized, pretty printed JSON string.
|
def serialize_to_normalized_pretty_json(py_obj):
    """Serialize a native object to normalized, pretty printed JSON.

    The JSON string is normalized by sorting any dictionary keys.

    Args:
        py_obj: object
            Any object that can be represented in JSON. Some types, such as
            datetimes, are automatically converted to strings.

    Returns:
        str: normalized, pretty printed JSON string.
    """
    return json.dumps(
        py_obj, sort_keys=True, indent=2, cls=ToJsonCompatibleTypes
    )
| 693,796
|
Serialize a native object to normalized, compact JSON.
The JSON string is normalized by sorting any dictionary keys. It will be on a single
line without whitespace between elements.
Args:
py_obj: object
Any object that can be represented in JSON. Some types, such as datetimes are
automatically converted to strings.
Returns:
str: normalized, compact JSON string.
|
def serialize_to_normalized_compact_json(py_obj):
    """Serialize a native object to normalized, compact JSON.

    The JSON string is normalized by sorting any dictionary keys. It will be
    on a single line without whitespace between elements.

    Args:
        py_obj: object
            Any object that can be represented in JSON. Some types, such as
            datetimes, are automatically converted to strings.

    Returns:
        str: normalized, compact JSON string.
    """
    return json.dumps(
        py_obj,
        sort_keys=True,
        separators=(',', ':'),
        cls=ToJsonCompatibleTypes,
    )
| 693,797
|
Format seconds to days, hours, minutes.
Args:
sec: float or int
Number of seconds in a period of time
Returns:
Period of time represented as a string on the form ``0d:00h:00m``.
|
def format_sec_to_dhm(sec):
    """Format seconds as days, hours and minutes.

    Args:
        sec: float or int
            Number of seconds in a period of time. Fractions of a minute are
            truncated.

    Returns:
        str: Period of time represented on the form ``{d}d{hh}h{mm}m``.
    """
    total_minutes = int(sec) // 60
    total_hours, minutes = divmod(total_minutes, 60)
    days, hours = divmod(total_hours, 24)
    return '{}d{:02d}h{:02d}m'.format(days, hours, minutes)
| 693,798
|
Count an event.
Args:
event_str:
The name of an event to count. Used as a key in the event dict. The same
name will also be used in the summary.
inc_int: int
Optional argument to increase the count for the event by more than 1.
|
def count(self, event_str, inc_int=1):
    """Count an event.

    Args:
        event_str:
            The name of an event to count. Used as a key in the event dict.
            The same name will also be used in the summary.
        inc_int: int
            Optional argument to increase the count for the event by more
            than 1.
    """
    self._event_dict[event_str] = self._event_dict.get(event_str, 0) + inc_int
| 693,799
|
MNStorage.updateSystemMetadata(session, pid, sysmeta) β boolean
http://jenkins-1.dataone.org/documentation/unstable/API-Documentation-
development/apis/MN_APIs.html#MNStorage.updateSystemMetadata.
Args:
pid:
sysmeta_pyxb:
vendorSpecific:
Returns:
|
def updateSystemMetadataResponse(self, pid, sysmeta_pyxb, vendorSpecific=None):
    """MNStorage.updateSystemMetadata. PUT to the meta endpoint.

    Args:
        pid: str
        sysmeta_pyxb: SystemMetadata PyXB object, serialized into the body.
        vendorSpecific: Custom HTTP headers to include in the request.

    Returns:
        Raw HTTP response.
    """
    fields = {
        'pid': pid.encode('utf-8'),
        'sysmeta': ('sysmeta.xml', sysmeta_pyxb.toxml('utf-8')),
    }
    return self.PUT('meta', fields=fields, headers=vendorSpecific)
| 693,903
|
Format datetime to HTTP Full Date format.
Args:
dt : datetime
- tz-aware: Used in the formatted string.
- tz-naive: Assumed to be in UTC.
Returns:
str
The returned format is a fixed-length subset of that defined by RFC 1123 and is
the preferred format for use in the HTTP Date header. E.g.:
``Sat, 02 Jan 1999 03:04:05 GMT``
See Also:
- http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.3.1
|
def http_datetime_str_from_dt(dt):
    """Format datetime to HTTP Full Date format.

    Args:
        dt : datetime
            - tz-aware: Used in the formatted string.
            - tz-naive: Assumed to be in UTC.

    Returns:
        str: Fixed-length RFC 1123 subset date, suitable for the HTTP Date
        header. E.g.: ``Sat, 02 Jan 1999 03:04:05 GMT``

    See Also:
        - http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.3.1
    """
    return email.utils.formatdate(ts_from_dt(dt), localtime=False, usegmt=True)
| 693,970
|
Round datetime up or down to nearest divisor.
Round datetime up or down to nearest number of seconds that divides evenly by
the divisor.
Any timezone is preserved but ignored in the rounding.
Args:
dt: datetime
n_round_sec : int or float
Divisor for rounding
Examples:
- ``n_round_sec`` = 0.1: nearest 10th of a second.
- ``n_round_sec`` = 1: nearest second.
- ``n_round_sec`` = 30: nearest half minute.
|
def round_to_nearest(dt, n_round_sec=1.0):
    """Round datetime up or down to the nearest divisor.

    Round datetime to the nearest number of seconds that divides evenly by
    the divisor. Any timezone is preserved but ignored in the rounding.

    Args:
        dt: datetime
        n_round_sec : int or float
            Divisor for rounding

    Examples:
        - ``n_round_sec`` = 0.1: nearest 10th of a second.
        - ``n_round_sec`` = 1: nearest second.
        - ``n_round_sec`` = 30: nearest half minute.
    """
    # Shift by half the divisor so truncation becomes round-to-nearest.
    shifted_ts = ts_from_dt(strip_timezone(dt)) + n_round_sec / 2.0
    rounded_dt = dt_from_ts(shifted_ts - shifted_ts % n_round_sec)
    return rounded_dt.replace(tzinfo=dt.tzinfo)
| 693,974
|
Serialize ResourceMap to UTF-8 encoded XML document.
Args:
doc_format: str
One of: ``xml``, ``n3``, ``turtle``, ``nt``, ``pretty-xml``, ``trix``,
``trig`` and ``nquads``.
args and kwargs:
Optional arguments forwarded to rdflib.ConjunctiveGraph.serialize().
Returns:
bytes: UTF-8 encoded XML doc.
Note:
Only the default, "xml", is automatically indexed by DataONE.
|
def serialize_to_transport(self, doc_format="xml", *args, **kwargs):
    """Serialize ResourceMap to UTF-8 encoded XML document.

    Args:
        doc_format: str
            One of: ``xml``, ``n3``, ``turtle``, ``nt``, ``pretty-xml``,
            ``trix``, ``trig`` and ``nquads``.

        args and kwargs:
            Optional arguments forwarded to rdflib.ConjunctiveGraph.serialize().

    Returns:
        bytes: UTF-8 encoded XML doc.

    Note:
        Only the default, "xml", is automatically indexed by DataONE.
    """
    return super(ResourceMap, self).serialize(
        format=doc_format, encoding="utf-8", *args, **kwargs
    )
| 694,034
|
Serialize ResourceMap to an XML doc that is pretty printed for display.
Args:
doc_format: str
One of: ``xml``, ``n3``, ``turtle``, ``nt``, ``pretty-xml``, ``trix``,
``trig`` and ``nquads``.
args and kwargs:
Optional arguments forwarded to rdflib.ConjunctiveGraph.serialize().
Returns:
str: Pretty printed Resource Map XML doc
Note:
Only the default, "xml", is automatically indexed by DataONE.
|
def serialize_to_display(self, doc_format="pretty-xml", *args, **kwargs):
    """Serialize ResourceMap to an XML doc that is pretty printed for display.

    Args:
        doc_format: str
            One of: ``xml``, ``n3``, ``turtle``, ``nt``, ``pretty-xml``,
            ``trix``, ``trig`` and ``nquads``.

        args and kwargs:
            Optional arguments forwarded to rdflib.ConjunctiveGraph.serialize().

    Returns:
        str: Pretty printed Resource Map XML doc

    Note:
        Only the default, "xml", is automatically indexed by DataONE.
    """
    return (
        super(ResourceMap, self)
        .serialize(format=doc_format, encoding=None, *args, **kwargs)
        .decode("utf-8")
    )
| 694,035
|
Add a resource to the Resource Map.
Args:
pid : str
|
def addResource(self, pid):
    """Add a resource to the Resource Map.

    Args:
        pid : str
    """
    self._check_initialized()
    # If the pid is already aggregated there is nothing to do.
    try:
        self.getObjectByPid(pid)
    except IndexError:
        pass
    else:
        return
    # Entry not present; add it to the graph.
    obj = rdflib.URIRef(self._pid_to_id(pid))
    aggregation = self.getAggregation()
    self.add((aggregation, ORE.aggregates, obj))
    self.add((obj, ORE.isAggregatedBy, aggregation))
    self.add((obj, DCTERMS.identifier, rdflib.term.Literal(pid)))
| 694,039
|
Add a CiTO, the Citation Typing Ontology, triple asserting that
``documenting_pid`` documents ``documented_pid``.
Adds assertion: ``documenting_pid cito:documents documented_pid``
Args:
documenting_pid: str
PID of a Science Object that documents ``documented_pid``.
documented_pid: str
PID of a Science Object that is documented by ``documenting_pid``.
|
def setDocuments(self, documenting_pid, documented_pid):
    """Add a CiTO, the Citation Typing Ontology, triple asserting that
    ``documenting_pid`` documents ``documented_pid``.

    Adds assertion: ``documenting_pid cito:documents documented_pid``

    Args:
        documenting_pid: str
            PID of a Science Object that documents ``documented_pid``.
        documented_pid: str
            PID of a Science Object that is documented by ``documenting_pid``.
    """
    self._check_initialized()
    subject_id = self.getObjectByPid(documenting_pid)
    object_id = self.getObjectByPid(documented_pid)
    self.add((subject_id, CITO.documents, object_id))
| 694,040
|
Add a CiTO, the Citation Typing Ontology, triple asserting that
``documented_pid`` isDocumentedBy ``documenting_pid``.
Adds assertion: ``documented_pid cito:isDocumentedBy documenting_pid``
Args:
documented_pid: str
PID of a Science Object that is documented by ``documenting_pid``.
documenting_pid: str
PID of a Science Object that documents ``documented_pid``.
|
def setDocumentedBy(self, documented_pid, documenting_pid):
    """Add a CiTO, the Citation Typing Ontology, triple asserting that
    ``documented_pid`` isDocumentedBy ``documenting_pid``.

    Adds assertion: ``documented_pid cito:isDocumentedBy documenting_pid``

    Args:
        documented_pid: str
            PID of a Science Object that is documented by ``documenting_pid``.
        documenting_pid: str
            PID of a Science Object that documents ``documented_pid``.
    """
    self._check_initialized()
    subject_id = self.getObjectByPid(documented_pid)
    object_id = self.getObjectByPid(documenting_pid)
    self.add((subject_id, CITO.isDocumentedBy, object_id))
| 694,041
|
Add Science Data object(s)
Args:
scidata_pid_list : list of str
List of one or more PIDs of Science Data objects
scimeta_pid: str
PID of a Science Metadata object that documents the Science Data objects.
|
def addDataDocuments(self, scidata_pid_list, scimeta_pid=None):
    """Add Science Data object(s).

    Args:
        scidata_pid_list : list of str
            List of one or more PIDs of Science Data objects

        scimeta_pid: str
            PID of a Science Metadata object that documents the Science Data
            objects. If omitted, the single Science Metadata object already
            aggregated in the map is used.

    Raises:
        ValueError: If ``scimeta_pid`` is not given and the map does not
            aggregate exactly one Science Metadata object.
    """
    mpids = self.getAggregatedScienceMetadataPids()
    if scimeta_pid is None:
        # Fix: previously an empty map raised an opaque IndexError on
        # mpids[0]; raise a descriptive ValueError instead, consistent with
        # the ambiguous-choice case below.
        if not mpids:
            raise ValueError("No metadata PID specified and none available.")
        if len(mpids) > 1:
            raise ValueError(
                "No metadata PID specified and more than one choice available."
            )
        scimeta_pid = mpids[0]
    elif scimeta_pid not in mpids:
        self.addMetadataDocument(scimeta_pid)
    for dpid in scidata_pid_list:
        self.addResource(dpid)
        self.setDocumentedBy(dpid, scimeta_pid)
        self.setDocuments(scimeta_pid, dpid)
| 694,042
|
Extract subject from the JWT without validating the JWT.
- The extracted subject cannot be trusted for authn or authz.
Args:
jwt_bu64: bytes
JWT, encoded using a URL safe flavor of Base64.
Returns:
str: The subject contained in the JWT.
|
def get_subject_without_validation(jwt_bu64):
    """Extract subject from the JWT without validating the JWT.

    - The extracted subject cannot be trusted for authn or authz.

    Args:
        jwt_bu64: bytes
            JWT, encoded using a URL safe flavor of Base64.

    Returns:
        str: The subject contained in the JWT, or None if the JWT cannot be
        decoded or has no "sub" key (the problem is logged).
    """
    try:
        jwt_dict = get_jwt_dict(jwt_bu64)
    except JwtException as e:
        return log_jwt_bu64_info(logging.error, str(e), jwt_bu64)
    if 'sub' not in jwt_dict:
        log_jwt_dict_info(logging.error, 'Missing "sub" key', jwt_dict)
        return None
    return jwt_dict['sub']
| 694,147
|
Parse Base64 encoded JWT and return as a dict.
- JWTs contain a set of values serialized to a JSON dict. This decodes the JWT and
returns it as a dict containing Unicode strings.
- In addition, a SHA1 hash is added to the dict for convenience.
Args:
jwt_bu64: bytes
JWT, encoded using a URL safe flavor of Base64.
Returns:
dict: Values embedded in and derived from the JWT.
|
def get_jwt_dict(jwt_bu64):
    """Parse Base64 encoded JWT and return as a dict.

    - JWTs contain a set of values serialized to a JSON dict. This decodes
      the JWT and returns it as a dict containing Unicode strings.
    - In addition, a SHA1 hash of the signature is added to the dict for
      convenience.

    Args:
        jwt_bu64: bytes
            JWT, encoded using a URL safe flavor of Base64.

    Raises:
        JwtException: If the JWT cannot be decoded.

    Returns:
        dict: Values embedded in and derived from the JWT.
    """
    parts = get_jwt_tup(jwt_bu64)
    try:
        # Merge the header and payload JSON dicts, then record the signature.
        jwt_dict = json.loads(parts[0].decode('utf-8'))
        jwt_dict.update(json.loads(parts[1].decode('utf-8')))
        jwt_dict['_sig_sha1'] = hashlib.sha1(parts[2]).hexdigest()
    except TypeError as e:
        raise JwtException('Decode failed. error="{}"'.format(e))
    return jwt_dict
| 694,148
|
Validate the JWT and return as a dict.
- JWTs contain a set of values serialized to a JSON dict. This decodes the JWT and
returns it as a dict.
Args:
jwt_bu64: bytes
The JWT encoded using a URL safe flavor of Base64.
cert_obj: cryptography.Certificate
Public certificate used for signing the JWT (typically the CN cert).
Raises:
JwtException: If validation fails.
Returns:
dict: Values embedded in the JWT.
|
def validate_and_decode(jwt_bu64, cert_obj):
    """Validate the JWT and return as a dict.

    - JWTs contain a set of values serialized to a JSON dict. This decodes the
      JWT and returns it as a dict.

    Args:
        jwt_bu64: bytes
            The JWT encoded using a URL safe flavor of Base64.

        cert_obj: cryptography.Certificate
            Public certificate used for signing the JWT (typically the CN cert).

    Raises:
        JwtException: If validation fails.

    Returns:
        dict: Values embedded in the JWT.
    """
    try:
        return jwt.decode(
            jwt_bu64.strip(), cert_obj.public_key(), algorithms=['RS256'], verify=True
        )
    except jwt.InvalidTokenError as e:
        raise JwtException('Signature is invalid. error="{}"'.format(str(e)))
| 694,149
|
Dump JWT to log.
Args:
log: Logger
Logger to which to write the message.
msg_str: str
A message to write to the log before the JWT values.
jwt_dict: dict
JWT containing values to log.
Returns:
None
|
def log_jwt_dict_info(log, msg_str, jwt_dict):
    """Dump JWT values to the log.

    Args:
        log: Logging function, e.g., ``logging.error``.
            Called once per line to be logged.
        msg_str: str
            A message to write to the log before the JWT values.
        jwt_dict: dict
            JWT containing values to log.

    Returns:
        None
    """
    d = ts_to_str(jwt_dict)
    # Log known items in specific order, then the rest just sorted
    log_list = [(b, d.pop(a)) for a, b, c in CLAIM_LIST if a in d] + [
        (k, d[k]) for k in sorted(d)
    ]
    # Idiom fix: plain loop instead of list(map(...)) used only for its
    # side effects.
    log('{}:'.format(msg_str))
    for k, v in log_list:
        log('  {}: {}'.format(k, v))
| 694,150
|
Convert timestamps in JWT to human readable dates.
Args:
jwt_dict: dict
JWT with some keys containing timestamps.
Returns:
dict: Copy of input dict where timestamps have been replaced with human readable
dates.
|
def ts_to_str(jwt_dict):
    """Convert timestamps in JWT to human readable dates.

    Args:
        jwt_dict: dict
            JWT with some keys containing timestamps.

    Returns:
        dict: Copy of input dict where timestamps have been replaced with
        human readable dates.
    """
    return {
        k: v.isoformat().replace('T', ' ')
        if isinstance(v, datetime.datetime)
        else v
        for k, v in ts_to_dt(jwt_dict).items()
    }
| 694,151
|
Convert timestamps in JWT to datetime objects.
Args:
jwt_dict: dict
JWT with some keys containing timestamps.
Returns:
dict: Copy of input dict where timestamps have been replaced with
datetime.datetime() objects.
|
def ts_to_dt(jwt_dict):
    """Convert timestamps in JWT to datetime objects.

    Args:
        jwt_dict: dict
            JWT with some keys containing timestamps.

    Returns:
        dict: Copy of input dict where timestamps have been replaced with
        datetime.datetime() objects.
    """
    d = jwt_dict.copy()
    # The third element of each CLAIM_LIST entry flags timestamp claims.
    ts_key_list = [claim[0] for claim in CLAIM_LIST if claim[2]]
    for key in ts_key_list:
        if key in d:
            d[key] = d1_common.date_time.dt_from_ts(d[key])
    return d
| 694,152
|
Encode bytes to a URL safe flavor of Base64 used by JWTs.
- Reverse of decode_bu64().
Args:
b: bytes
Bytes to Base64 encode.
Returns:
bytes: URL safe Base64 encoded version of input.
|
def encode_bu64(b):
    """Encode bytes to a URL safe flavor of Base64 used by JWTs.

    - Reverse of decode_bu64().

    Args:
        b: bytes
            Bytes to Base64 encode.

    Returns:
        bytes: URL safe Base64 encoded version of input, with "=" padding
        stripped.
    """
    # Bug fix: base64.standard_b64encode() returns bytes, so the str
    # patterns '=', '+' and '/' raised TypeError on every call. Use bytes
    # patterns instead.
    s = base64.standard_b64encode(b)
    s = s.rstrip(b'=')
    s = s.replace(b'+', b'-')
    s = s.replace(b'/', b'_')
    return s
| 694,153
|
Decode bytes from a URL safe flavor of Base64 used by JWTs.
- Reverse of encode_bu64().
Args:
b: bytes
URL safe Base64 encoded bytes to decode.
Returns:
bytes: Decoded bytes.
|
def decode_bu64(b):
    """Decode bytes from a URL safe flavor of Base64 used by JWTs.

    - Reverse of encode_bu64().

    Args:
        b: bytes
            URL safe Base64 encoded bytes to decode.

    Raises:
        ValueError: If the input length is not valid for Base64url.

    Returns:
        bytes: Decoded bytes.
    """
    s = b.replace(b'-', b'+').replace(b'_', b'/')
    # Restore the "=" padding that encode_bu64() strips.
    pad_len = -len(s) % 4
    if pad_len == 3:
        # A length of 1 (mod 4) can never occur in valid Base64.
        raise ValueError('Illegal Base64url string')
    return base64.standard_b64decode(s + b'=' * pad_len)
| 694,154
|
Deserialize SubjectInfo XML doc to native object.
Args:
subject_info_xml: str
SubjectInfo XML doc
Returns:
SubjectInfo PyXB object
|
def deserialize_subject_info(subject_info_xml):
    """Deserialize SubjectInfo XML doc to native object.

    Args:
        subject_info_xml: str
            SubjectInfo XML doc

    Returns:
        SubjectInfo PyXB object

    Raises:
        d1_common.types.exceptions.InvalidToken: If the doc cannot be
            deserialized.
    """
    try:
        return d1_common.xml.deserialize(subject_info_xml)
    except ValueError as e:
        raise d1_common.types.exceptions.InvalidToken(
            0,
            'Could not deserialize SubjectInfo. subject_info="{}", error="{}"'.format(
                subject_info_xml, str(e)
            ),
        )
| 694,178
|
Get path from root to this node.
Args:
sep: str
One or more characters to insert between each element in the path.
Defaults to "/" on Unix and "\" on Windows.
type_str:
SUBJECT_NODE_TAG, TYPE_NODE_TAG or None. If set, only include
information from nodes of that type.
Returns:
str: String describing the path from the root to this node.
|
def get_path_str(self, sep=os.path.sep, type_str=None):
    """Get path from root to this node.

    Args:
        sep: str
            One or more characters to insert between each element in the
            path. Defaults to "/" on Unix and "\\" on Windows.
        type_str:
            SUBJECT_NODE_TAG, TYPE_NODE_TAG or None. If set, only include
            information from nodes of that type.

    Returns:
        str: String describing the path from the root to this node.
    """
    # parent_gen yields leaf-to-root, so reverse for a root-to-leaf path.
    label_list = [
        node.label_str
        for node in self.parent_gen
        if type_str in (None, node.type_str)
    ]
    return sep.join(reversed(label_list))
| 694,190
|
Get list of the labels of the nodes leading up to this node from the root.
Args:
type_str:
SUBJECT_NODE_TAG, TYPE_NODE_TAG or None. If set, only include
information from nodes of that type.
Returns:
list of str: The labels of the nodes leading up to this node from the root.
|
def get_path_list(self, type_str=None):
    """Get list of the labels of the nodes leading up to this node from the
    root.

    Args:
        type_str:
            SUBJECT_NODE_TAG, TYPE_NODE_TAG or None. If set, only include
            information from nodes of that type.

    Returns:
        list of str: The labels of the nodes leading up to this node from
        the root.
    """
    label_list = [
        node.label_str
        for node in self.parent_gen
        if type_str in (None, node.type_str)
    ]
    # parent_gen yields leaf-to-root; return root-to-leaf order.
    label_list.reverse()
    return label_list
| 694,192
|
Get a set of label_str for the tree rooted at this node.
Args:
type_str:
SUBJECT_NODE_TAG, TYPE_NODE_TAG or None. If set, only include
information from nodes of that type.
Returns:
set: The labels of the nodes leading up to this node from the root.
|
def get_label_set(self, type_str=None):
    """Get a set of label_str for the tree rooted at this node.

    Args:
        type_str:
            SUBJECT_NODE_TAG, TYPE_NODE_TAG or None. If set, only include
            information from nodes of that type.

    Returns:
        set: The labels of the nodes in the tree rooted at this node.
    """
    label_set = set()
    for node in self.node_gen:
        if type_str is None or type_str == node.type_str:
            label_set.add(node.label_str)
    return label_set
| 694,193
|
r"""Solve the WET RGE for a specific sector.
Parameters:
- sector: sector of interest
- C_in: dictionary of Wilson coefficients
- eta_s: ratio of $\alpha_s$ at input and output scale
- f: number of active quark flavours
- p_in: running parameters at the input scale
- p_out: running parameters at the output scale
|
def run_sector(sector, C_in, eta_s, f, p_in, p_out, qed_order=1, qcd_order=1):
    r"""Solve the WET RGE for a specific sector.

    Parameters:

    - sector: sector of interest
    - C_in: dictionary of Wilson coefficients
    - eta_s: ratio of $\alpha_s$ at input and output scale
    - f: number of active quark flavours
    - p_in: running parameters at the input scale
    - p_out: running parameters at the output scale
    - qed_order: 0 to omit the QED evolution, 1 (default) to include it
    - qcd_order: 0 to omit the QCD evolution, 1 (default) to include it
    """
    # Bug fix: a stray bare `r` statement (remnant of the raw docstring
    # prefix) previously raised NameError on every call; restored it as part
    # of the docstring above.
    Cdictout = OrderedDict()
    classname = sectors[sector]
    keylist = coeffs[sector]
    # The 'dF=0' sector is keyed as 'dF0' in the permissible-WC lookup.
    if sector == 'dF=0':
        perm_keys = get_permissible_wcs('dF0', f)
    else:
        perm_keys = get_permissible_wcs(sector, f)
    if perm_keys != 'all':
        # remove disallowed keys if necessary
        keylist = np.asarray(keylist)[perm_keys]
    C_input = np.array([C_in.get(key, 0) for key in keylist])
    if np.count_nonzero(C_input) == 0 or classname == 'inv':
        # nothing to do for SM-like WCs or RG invariant operators
        C_result = C_input
    else:
        C_scaled = np.asarray(
            [C_input[i] * scale_C(key, p_in) for i, key in enumerate(keylist)]
        )
        if qcd_order == 0:
            Us = np.eye(len(C_scaled))
        elif qcd_order == 1:
            Us = getUs(classname, eta_s, f, **p_in)
        if qed_order == 0:
            Ue = np.zeros(C_scaled.shape)
        elif qed_order == 1:
            # Without QCD running, evaluate the QED evolution at eta_s = 1.
            if qcd_order == 0:
                Ue = getUe(classname, 1, f, **p_in)
            else:
                Ue = getUe(classname, eta_s, f, **p_in)
        C_out = (Us + Ue) @ C_scaled
        C_result = [C_out[i] / scale_C(key, p_out) for i, key in enumerate(keylist)]
    for j in range(len(C_result)):
        Cdictout[keylist[j]] = C_result[j]
    return Cdictout
| 694,212
|
Generate a Certificate Signing Request (CSR).
Args:
private_key_bytes: bytes
Private key with which the CSR will be signed.
subject_name: str
Certificate Subject Name
fqdn_list:
List of Fully Qualified Domain Names (FQDN) and/or IP addresses for which
this certificate will provide authentication.
E.g.: ['my.membernode.org', '1.2.3.4']
|
def generate_csr(private_key_bytes, subject_name, fqdn_list):
    """Generate a Certificate Signing Request (CSR).

    Args:
        private_key_bytes: bytes
            Private key with which the CSR will be signed.
        subject_name: str
            Certificate Subject Name
        fqdn_list:
            List of Fully Qualified Domain Names (FQDN) and/or IP addresses for
            which this certificate will provide authentication.
            E.g.: ['my.membernode.org', '1.2.3.4']
    """
    san_extension = cryptography.x509.SubjectAlternativeName(
        [cryptography.x509.DNSName(fqdn) for fqdn in fqdn_list]
    )
    builder = cryptography.x509.CertificateSigningRequestBuilder()
    builder = builder.subject_name(subject_name)
    builder = builder.add_extension(extension=san_extension, critical=False)
    return builder.sign(
        private_key=private_key_bytes,
        algorithm=cryptography.hazmat.primitives.hashes.SHA256(),
        backend=cryptography.hazmat.backends.default_backend(),
    )
| 694,236
|
Deserialize PEM (Base64) encoded X.509 v3 certificate.
Args:
cert_pem: str or bytes
PEM (Base64) encoded X.509 v3 certificate
Returns:
cert_obj: cryptography.Certificate
|
def deserialize_pem(cert_pem):
    """Deserialize PEM (Base64) encoded X.509 v3 certificate.

    Args:
        cert_pem: str or bytes
            PEM (Base64) encoded X.509 v3 certificate

    Returns:
        cert_obj: cryptography.Certificate
    """
    pem_bytes = cert_pem.encode("utf-8") if isinstance(cert_pem, str) else cert_pem
    return cryptography.x509.load_pem_x509_certificate(
        data=pem_bytes, backend=cryptography.hazmat.backends.default_backend()
    )
| 694,237
|
Serialize certificate to PEM.
The certificate can be also be a Certificate Signing Request (CSR).
Args:
cert_obj: cryptography.Certificate
Returns:
bytes: PEM encoded certificate
|
def serialize_cert_to_pem(cert_obj):
    """Serialize certificate to PEM.

    The certificate can be also be a Certificate Signing Request (CSR).

    Args:
        cert_obj: cryptography.Certificate

    Returns:
        bytes: PEM encoded certificate
    """
    pem_encoding = cryptography.hazmat.primitives.serialization.Encoding.PEM
    return cert_obj.public_bytes(encoding=pem_encoding)
| 694,238
|
Extract DataONE SubjectInfo XML doc from certificate.
Certificates issued by DataONE may include an embedded XML doc containing
additional information about the subject specified in the certificate DN. If
present, the doc is stored as an extension with an OID specified by DataONE and
formatted as specified in the DataONE SubjectInfo schema definition.
Args:
cert_obj: cryptography.Certificate
Returns:
str : SubjectInfo XML doc if present, else None
|
def extract_subject_info_extension(cert_obj):
    """Extract DataONE SubjectInfo XML doc from certificate.

    Certificates issued by DataONE may include an embedded XML doc containing
    additional information about the subject specified in the certificate DN.
    If present, the doc is stored as an extension with an OID specified by
    DataONE and formatted as specified in the DataONE SubjectInfo schema
    definition.

    Args:
        cert_obj: cryptography.Certificate

    Returns:
        str : SubjectInfo XML doc if present, else None
    """
    try:
        subject_info_der = cert_obj.extensions.get_extension_for_oid(
            cryptography.x509.oid.ObjectIdentifier(DATAONE_SUBJECT_INFO_OID)
        ).value.value
        return str(pyasn1.codec.der.decoder.decode(subject_info_der)[0])
    except Exception as e:
        # Deliberate best-effort: a missing extension or malformed DER is
        # logged and None is returned implicitly.
        logging.debug('SubjectInfo not extracted. reason="{}"'.format(e))
| 694,239
|
Download public certificate from a TLS/SSL web server as PEM encoded string.
Also see download_as_der().
Args:
base_url : str
A full URL to a DataONE service endpoint or a server hostname
timeout_sec : int or float
Timeout for the SSL socket operations
Returns:
str: The certificate as a PEM encoded string.
|
def download_as_pem(
    base_url=d1_common.const.URL_DATAONE_ROOT,
    timeout_sec=d1_common.const.DEFAULT_HTTP_TIMEOUT,
):
    """Download public certificate from a TLS/SSL web server as PEM encoded
    string.

    Also see download_as_der().

    Args:
        base_url : str
            A full URL to a DataONE service endpoint or a server hostname
        timeout_sec : int or float
            Timeout for the SSL socket operations

    Returns:
        str: The certificate as a PEM encoded string.
    """
    der_cert = download_as_der(base_url, timeout_sec)
    return ssl.DER_cert_to_PEM_cert(der_cert)
| 694,241
|
Download public certificate from a TLS/SSL web server as Certificate object.
Also see download_as_der().
Args:
base_url : str
A full URL to a DataONE service endpoint or a server hostname
timeout_sec : int or float
Timeout for the SSL socket operations
Returns:
cryptography.Certificate
|
def download_as_obj(
    base_url=d1_common.const.URL_DATAONE_ROOT,
    timeout_sec=d1_common.const.DEFAULT_HTTP_TIMEOUT,
):
    """Download public certificate from a TLS/SSL web server as Certificate
    object.

    Also see download_as_der().

    Args:
        base_url : str
            A full URL to a DataONE service endpoint or a server hostname
        timeout_sec : int or float
            Timeout for the SSL socket operations

    Returns:
        cryptography.Certificate
    """
    der_cert = download_as_der(base_url, timeout_sec)
    return decode_der(der_cert)
| 694,242
|
Decode cert DER string to Certificate object.
Args:
cert_der : Certificate as a DER encoded string
Returns:
cryptography.Certificate()
|
def decode_der(cert_der):
    """Decode cert DER string to Certificate object.

    Args:
        cert_der : Certificate as a DER encoded string

    Returns:
        cryptography.Certificate()
    """
    der_backend = cryptography.hazmat.backends.default_backend()
    return cryptography.x509.load_der_x509_certificate(
        data=cert_der, backend=der_backend
    )
| 694,243
|
Serialize private key to PEM.
Args:
private_key:
passphrase_bytes:
Returns:
bytes: PEM encoded private key
|
def serialize_private_key_to_pem(private_key, passphrase_bytes=None):
    """Serialize private key to PEM.

    Args:
        private_key: Private key object.
        passphrase_bytes: bytes or None
            If given, the serialized key is encrypted with this passphrase.

    Returns:
        bytes: PEM encoded private key
    """
    serialization = cryptography.hazmat.primitives.serialization
    if passphrase_bytes is None:
        encryption_algorithm = serialization.NoEncryption()
    else:
        encryption_algorithm = serialization.BestAvailableEncryption(
            passphrase_bytes
        )
    return private_key.private_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PrivateFormat.TraditionalOpenSSL,
        encryption_algorithm=encryption_algorithm,
    )
| 694,246
|
Extract public key from certificate as PEM encoded PKCS#1.
Args:
cert_obj: cryptography.Certificate
Returns:
bytes: PEM encoded PKCS#1 public key.
|
def get_public_key_pem(cert_obj):
    """Extract public key from certificate as PEM encoded PKCS#1.

    Args:
        cert_obj: cryptography.Certificate

    Returns:
        bytes: PEM encoded PKCS#1 public key.
    """
    serialization = cryptography.hazmat.primitives.serialization
    public_key = cert_obj.public_key()
    return public_key.public_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PublicFormat.PKCS1,
    )
| 694,248
|
Serialize certificate to DER.
Args:
cert_obj: cryptography.Certificate
Returns:
bytes: DER encoded certificate
|
def serialize_cert_to_der(cert_obj):
    """Serialize a certificate to DER.

    Args:
        cert_obj: cryptography.Certificate

    Returns:
        bytes: DER encoded certificate.
    """
    der_encoding = cryptography.hazmat.primitives.serialization.Encoding.DER
    return cert_obj.public_bytes(der_encoding)
| 694,252
|
Dump basic certificate values to the log.
Args:
logger: Logger
Logger to which to write the certificate values.
msg_str: str
A message to write to the log before the certificate values.
cert_obj: cryptography.Certificate
Certificate containing values to log.
Returns:
None
|
def log_cert_info(logger, msg_str, cert_obj):
    """Dump basic certificate values to the log.

    Args:
        logger: Callable invoked once per output line, e.g. ``logging.Logger.debug``.
        msg_str: str
            A message written before the certificate values.
        cert_obj: cryptography.Certificate
            Certificate containing values to log.

    Returns:
        None
    """
    value_list = [
        "Subject: {}".format(
            _get_val_str(cert_obj, ["subject", "value"], reverse=True)
        ),
        "Issuer: {}".format(
            _get_val_str(cert_obj, ["issuer", "value"], reverse=True)
        ),
        "Not Valid Before: {}".format(cert_obj.not_valid_before.isoformat()),
        "Not Valid After: {}".format(cert_obj.not_valid_after.isoformat()),
        "Subject Alt Names: {}".format(
            _get_ext_val_str(
                cert_obj, "SUBJECT_ALTERNATIVE_NAME", ["value", "value"]
            )
        ),
        "CRL Distribution Points: {}".format(
            _get_ext_val_str(
                cert_obj,
                "CRL_DISTRIBUTION_POINTS",
                ["value", "full_name", "value", "value"],
            )
        ),
        "Authority Access Location: {}".format(
            extract_issuer_ca_cert_url(cert_obj) or "<not found>"
        ),
    ]
    # One logger call per line, header first, values indented below it.
    logger("{}:".format(msg_str))
    for value_str in value_list:
        logger("  {}".format(value_str))
| 694,255
|
Get a standard certificate extension by attribute name.
Args:
cert_obj: cryptography.Certificate
Certificate containing a standard extension.
extension_name : str
Extension name. E.g., 'SUBJECT_DIRECTORY_ATTRIBUTES'.
Returns:
Cryptography.Extension
|
def get_extension_by_name(cert_obj, extension_name):
    """Get a standard certificate extension by attribute name.

    Args:
        cert_obj: cryptography.Certificate
            Certificate containing a standard extension.
        extension_name : str
            Extension name. E.g., 'SUBJECT_DIRECTORY_ATTRIBUTES'.

    Returns:
        cryptography.Extension, or None if the certificate does not have the
        extension.
    """
    extension_oid = getattr(cryptography.x509.oid.ExtensionOID, extension_name)
    try:
        return cert_obj.extensions.get_extension_for_oid(extension_oid)
    except cryptography.x509.ExtensionNotFound:
        return None
| 694,256
|
Extract values from nested objects by attribute names.
Objects contain attributes which are named references to objects. This will descend
down a tree of nested objects, starting at the given object, following the given
path.
Args:
obj: object
Any type of object
path_list: list
Attribute names
reverse: bool
Reverse the list of values before concatenation.
Returns:
list of objects
|
def _get_val_list(obj, path_list, reverse=False):
try:
y = getattr(obj, path_list[0])
except AttributeError:
return []
if len(path_list) == 1:
return [y]
else:
val_list = [x for a in y for x in _get_val_list(a, path_list[1:], reverse)]
if reverse:
val_list.reverse()
return val_list
| 694,257
|
Extract values from nested objects by attribute names and concatenate their
string representations.
Args:
obj: object
Any type of object
path_list: list
Attribute names
reverse: bool
Reverse the list of values before concatenation.
Returns:
str: Concatenated extracted values.
|
def _get_val_str(obj, path_list=None, reverse=False):
    """Extract values from nested objects by attribute names and concatenate their
    string representations with " / ".

    Args:
        obj: object
            Any type of object. If None, "<not found>" is returned.
        path_list: list
            Attribute names.
        reverse: bool
            Reverse the list of values before concatenation.

    Returns:
        str: Concatenated extracted values.
    """
    # Extraction is attempted first, matching the original call order, before the
    # None check short-circuits the result.
    val_list = _get_val_list(obj, path_list or [], reverse)
    if obj is None:
        return "<not found>"
    return " / ".join(str(val) for val in val_list)
| 694,258
|
Deserialize DataONE XML types to PyXB.
Args:
doc_xml: UTF-8 encoded ``bytes``
pyxb_binding: PyXB binding object. If not specified, the correct one should be
selected automatically.
Returns:
PyXB object
See Also:
``deserialize_d1_exception()`` for deserializing DataONE Exception types.
|
def deserialize(doc_xml, pyxb_binding=None):
    """Deserialize DataONE XML types to PyXB.

    Args:
        doc_xml: UTF-8 encoded ``bytes``.
        pyxb_binding: PyXB binding object. If not specified, the correct one should
            be selected automatically.

    Returns:
        PyXB object.

    Raises:
        ValueError: On any parse or validation failure.

    See Also:
        ``deserialize_d1_exception()`` for deserializing DataONE Exception types.
    """
    binding = pyxb_binding or d1_common.types.dataoneTypes
    try:
        return binding.CreateFromDocument(doc_xml)
    except pyxb.ValidationError as e:
        # Validation errors carry structured details; include them in the message.
        raise ValueError(
            'Unable to deserialize XML to PyXB. error="{}" xml="{}"'.format(
                e.details(), doc_xml
            )
        )
    except (pyxb.PyXBException, xml.sax.SAXParseException, Exception) as e:
        raise ValueError(
            'Unable to deserialize XML to PyXB. error="{}" xml="{}"'.format(
                str(e), doc_xml
            )
        )
| 694,329
|
Pretty print XML doc.
Args:
doc_xml : str
Well formed XML doc
Returns:
str: Pretty printed XML doc
|
def reformat_to_pretty_xml(doc_xml):
    """Pretty print an XML doc.

    Args:
        doc_xml : str
            Well formed XML doc.

    Returns:
        str: Pretty printed XML doc.
    """
    assert isinstance(doc_xml, str)
    pretty_str = xml.dom.minidom.parseString(doc_xml).toprettyxml(indent=' ')
    # toprettyxml() has a bug that leaves whitespace-only lines; strip them out.
    return re.sub(r'^\s*$\n', r'', pretty_str, flags=re.MULTILINE)
| 694,333
|
Normalize and compare XML documents for equality. The document may or may not be
a DataONE type.
Args:
a_xml: str
b_xml: str
XML documents to compare for equality.
Returns:
bool: ``True`` if the XML documents are semantically equivalent.
|
def are_equal_xml(a_xml, b_xml):
    """Normalize and compare XML documents for equality.

    The documents may or may not be DataONE types.

    Args:
        a_xml: str
        b_xml: str
            XML documents to compare for equality.

    Returns:
        bool: ``True`` if the XML documents are semantically equivalent.
    """
    a_root_el = xml.dom.minidom.parseString(a_xml).documentElement
    b_root_el = xml.dom.minidom.parseString(b_xml).documentElement
    return are_equal_elements(a_root_el, b_root_el)
| 694,343
|
Normalize and compare ElementTrees for equality.
Args:
a_el: ElementTree
b_el: ElementTree
ElementTrees to compare for equality.
Returns:
bool: ``True`` if the ElementTrees are semantically equivalent.
|
def are_equal_elements(a_el, b_el):
    """Normalize and compare DOM elements for equality.

    Tag names, attributes (in any order), and child nodes (in document order) must
    all match. Element children are compared recursively, text children by value.

    Args:
        a_el: Element
        b_el: Element
            DOM elements to compare for equality.

    Returns:
        bool: ``True`` if the elements are semantically equivalent.
    """
    if a_el.tagName != b_el.tagName:
        return False
    # Attribute order is not significant; compare sorted.
    if sorted(a_el.attributes.items()) != sorted(b_el.attributes.items()):
        return False
    if len(a_el.childNodes) != len(b_el.childNodes):
        return False
    for a_child, b_child in zip(a_el.childNodes, b_el.childNodes):
        if a_child.nodeType != b_child.nodeType:
            return False
        if a_child.nodeType == a_child.TEXT_NODE:
            if a_child.data != b_child.data:
                return False
        if a_child.nodeType == a_child.ELEMENT_NODE:
            if not are_equal_elements(a_child, b_child):
                return False
    return True
| 694,344
|
In-place sort simple or complex elements in a PyXB object by values they contain
in child elements.
Args:
obj_pyxb: PyXB object
child_name_list: list of str
List of element names that are direct children of the PyXB object.
|
def sort_elements_by_child_values(obj_pyxb, child_name_list):
    """In-place sort simple or complex elements in a PyXB object by values they
    contain in child elements.

    Args:
        obj_pyxb: PyXB object.
        child_name_list: list of str
            Element names that are direct children of the PyXB object.
    """
    def element_sort_key(el_pyxb):
        return [get_auto(getattr(el_pyxb, child_name)) for child_name in child_name_list]

    obj_pyxb.sort(key=element_sort_key)
| 694,345
|
Create a diff between two PyXB objects.
Args:
a_pyxb: PyXB object
b_pyxb: PyXB object
Returns:
str : `Differ`-style delta
|
def format_diff_pyxb(a_pyxb, b_pyxb):
    """Create a diff between two PyXB objects.

    Args:
        a_pyxb: PyXB object.
        b_pyxb: PyXB object.

    Returns:
        str : `Differ`-style delta.
    """
    a_line_list = serialize_to_xml_str(a_pyxb).splitlines()
    b_line_list = serialize_to_xml_str(b_pyxb).splitlines()
    return '\n'.join(difflib.ndiff(a_line_list, b_line_list))
| 694,346
|
Create a diff between two XML documents.
Args:
a_xml: str
b_xml: str
Returns:
str : `Differ`-style delta
|
def format_diff_xml(a_xml, b_xml):
    """Create a diff between two XML documents.

    Args:
        a_xml: str
        b_xml: str

    Returns:
        str : `Differ`-style delta.
    """
    a_line_list = reformat_to_pretty_xml(a_xml).splitlines()
    b_line_list = reformat_to_pretty_xml(b_xml).splitlines()
    return '\n'.join(difflib.ndiff(a_line_list, b_line_list))
| 694,347
|
See Also: queryResponse()
Args:
queryEngine:
query_str:
vendorSpecific:
do_post:
**kwargs:
Returns:
|
def query(
    self, queryEngine, query_str, vendorSpecific=None, do_post=False, **kwargs
):
    """Issue a query and return the parsed result.

    Returns the parsed JSON body when the response is JSON, otherwise the raw
    response stream.

    See Also: queryResponse().
    """
    response = self.queryResponse(
        queryEngine, query_str, vendorSpecific, do_post, **kwargs
    )
    if not self._content_type_is_json(response):
        return self._read_stream_response(response)
    return self._read_json_response(response)
| 694,593
|
See Also: getQueryEngineDescriptionResponse()
Args:
queryEngine:
**kwargs:
Returns:
|
def getQueryEngineDescription(self, queryEngine, **kwargs):
    """Return the response deserialized to a QueryEngineDescription DataONE type.

    See Also: getQueryEngineDescriptionResponse().
    """
    return self._read_dataone_type_response(
        self.getQueryEngineDescriptionResponse(queryEngine, **kwargs),
        'QueryEngineDescription',
    )
| 694,595
|
Initialize the SMEFT instance.
Parameters:
- `wc`: the Wilson coefficients as `wcxf.WC` instance.
|
def __init__(self, wc, get_smpar=True):
    """Initialize the SMEFT instance.

    Parameters:
    - `wc`: the Wilson coefficients as `wcxf.WC` instance (may be None).
    """
    self.wc = wc
    self.scale_in = None
    self.C_in = None
    if wc is None:
        return
    self._set_initial_wcxf(wc, get_smpar=get_smpar)
| 694,631
|
Return the Wilson coefficients (as wcxf.WC instance) evolved to the
scale `scale`.
Parameters:
- `scale`: scale in GeV
- accuracy: whether to use the numerical solution to the RGE
('integrate', the default, slow but precise) or the leading logarithmic
approximation ('leadinglog', approximate but much faster).
|
def run(self, scale, accuracy='integrate', **kwargs):
    """Return the Wilson coefficients (as wcxf.WC instance) evolved to the scale
    `scale`.

    Parameters:
    - `scale`: scale in GeV.
    - `accuracy`: whether to use the numerical solution to the RGE
      ('integrate', the default, slow but precise) or the leading logarithmic
      approximation ('leadinglog', approximate but much faster).
    """
    if accuracy == 'leadinglog':
        C_out = self._rgevolve_leadinglog(scale)
    elif accuracy == 'integrate':
        C_out = self._rgevolve(scale, **kwargs)
    else:
        raise ValueError(
            "'{}' is not a valid value of 'accuracy' "
            "(must be either 'integrate' or 'leadinglog').".format(accuracy)
        )
    return self._to_wcxf(C_out, scale)
| 694,640
|
Extract a DataONE API version tag from a MN or CN service endpoint URL.
Args:
url : str
Service endpoint URL. E.g.: ``https://mn.example.org/path/v2/object/pid``.
Returns:
str : Valid version tags are currently ``v1`` or ``v2``.
|
def extract_version_tag_from_url(url):
    """Extract a DataONE API version tag from a MN or CN service endpoint URL.

    Args:
        url : str
            Service endpoint URL. E.g.:
            ``https://mn.example.org/path/v2/object/pid``.

    Returns:
        str : Valid version tags are currently ``v1`` or ``v2``. ``None`` if the
        URL does not contain a version tag.
    """
    # re.search(), not re.match(): the version tag normally sits in the middle of a
    # full URL (as in the docstring example), which an anchored match would miss.
    m = re.search(r'(/|^)(v\d)(/|$)', url)
    if not m:
        return None
    return m.group(2)
| 694,705
|
Convert a API v2 XML doc to v1 XML doc.
Removes elements that are only valid for v2 and changes namespace to v1.
If doc is already v1, it is returned unchanged.
Args:
xml_str : str
API v2 XML doc. E.g.: ``SystemMetadata v2``.
Returns:
str : API v1 XML doc. E.g.: ``SystemMetadata v1``.
|
def str_to_v1_str(xml_str):
    """Convert an API v2 XML doc to a v1 XML doc.

    Removes elements that are only valid for v2 and changes the namespace to v1.
    If the doc is already v1, it is returned unchanged.

    Args:
        xml_str : str
            API v2 XML doc. E.g.: ``SystemMetadata v2``.

    Returns:
        str : API v1 XML doc. E.g.: ``SystemMetadata v1``.
    """
    if not str_is_v1(xml_str):
        etree_obj = str_to_etree(xml_str)
        strip_v2_elements(etree_obj)
        etree_replace_namespace(etree_obj, d1_common.types.dataoneTypes_v1.Namespace)
        xml_str = etree_to_str(etree_obj)
    return xml_str
| 694,706
|
Convert a API v1 XML doc to v2 XML doc.
All v1 elements are valid for v2, so only changes namespace.
Args:
xml_str : str
API v1 XML doc. E.g.: ``SystemMetadata v1``.
Returns:
str : API v2 XML doc. E.g.: ``SystemMetadata v2``.
|
def str_to_v2_str(xml_str):
    """Convert an API v1 XML doc to a v2 XML doc.

    All v1 elements are valid for v2, so only the namespace is changed.

    Args:
        xml_str : str
            API v1 XML doc. E.g.: ``SystemMetadata v1``.

    Returns:
        str : API v2 XML doc. E.g.: ``SystemMetadata v2``.
    """
    if not str_is_v2(xml_str):
        etree_obj = str_to_etree(xml_str)
        etree_replace_namespace(etree_obj, d1_common.types.dataoneTypes_v2_0.Namespace)
        xml_str = etree_to_str(etree_obj)
    return xml_str
| 694,707
|
Deserialize API XML doc to an ElementTree.
Args:
xml_str: bytes
DataONE API XML doc
encoding: str
Decoder to use when converting the XML doc ``bytes`` to a Unicode str.
Returns:
ElementTree: Matching the API version of the XML doc.
|
def str_to_etree(xml_str, encoding='utf-8'):
    """Deserialize an API XML doc to an ElementTree.

    Args:
        xml_str: bytes
            DataONE API XML doc.
        encoding: str
            Decoder to use when converting the XML doc ``bytes`` to a Unicode str.

    Returns:
        Element: Root element of the parsed doc.
    """
    etree_parser = xml.etree.ElementTree.XMLParser(encoding=encoding)
    root_el = xml.etree.ElementTree.fromstring(xml_str, parser=etree_parser)
    return root_el
| 694,711
|
Convert XML tag names with namespace on the form ``{namespace}tag`` to form
``prefix:tag``.
Args:
tag_str: str
Tag name with namespace. E.g.:
``{http://www.openarchives.org/ore/terms/}ResourceMap``.
ns_reverse_dict : dict
A dictionary of namespace to prefix to use for the conversion. If not supplied, a
default dict with the namespaces used in DataONE XML types is used.
Returns:
str: Tag name with prefix. E.g.: ``ore:ResourceMap``.
|
def replace_namespace_with_prefix(tag_str, ns_reverse_dict=None):
    """Convert XML tag names with namespace on the form ``{namespace}tag`` to form
    ``prefix:tag``.

    Args:
        tag_str: str
            Tag name with namespace. E.g.:
            ``{http://www.openarchives.org/ore/terms/}ResourceMap``.
        ns_reverse_dict : dict
            A dictionary of namespace to prefix to use for the conversion. If not
            supplied, a default dict with the namespaces used in DataONE XML types
            is used.

    Returns:
        str: Tag name with prefix. E.g.: ``ore:ResourceMap``.
    """
    if ns_reverse_dict is None:
        ns_reverse_dict = NS_REVERSE_DICT
    for ns_str, prefix_str in ns_reverse_dict.items():
        tag_str = tag_str.replace('{{{}}}'.format(ns_str), prefix_str + ':')
    return tag_str
| 694,712
|
In-place change the namespace of elements in an ElementTree.
Args:
etree_obj: ElementTree
ns_str : str
The namespace to set. E.g.: ``http://ns.dataone.org/service/types/v1``.
|
def etree_replace_namespace(etree_obj, ns_str):
    """In-place change the namespace of elements in an ElementTree.

    Also strips surrounding whitespace from element text and tail; text/tail that
    was originally empty or None is set to None.

    Args:
        etree_obj: Element (root of the tree to modify).
        ns_str : str
            The namespace to set. E.g.: ``http://ns.dataone.org/service/types/v1``.
    """
    new_ns_fmt = '{{{}}}'.format(ns_str)
    # iter() yields the element itself, then descendants, matching the original
    # pre-order recursion.
    for el in etree_obj.iter():
        el.tag = re.sub(r'{.*\}', new_ns_fmt, el.tag)
        el.text = el.text.strip() if el.text else None
        el.tail = el.tail.strip() if el.tail else None
| 694,713
|
Join a base and a relative path and return an absolute path to the resulting
location.
Args:
base_path: str
Relative or absolute path to prepend to ``rel_path``.
rel_path: str
Path relative to the location of the module file from which this function is called.
Returns:
str : Absolute path to the location specified by ``rel_path``.
|
def abs_path_from_base(base_path, rel_path):
    """Join a base and a relative path and return an absolute path to the resulting
    location.

    Args:
        base_path: str
            Relative or absolute path to prepend to ``rel_path``.
        rel_path: str
            Path relative to the location of the module file from which this
            function is called.

    Returns:
        str : Absolute path to the location specified by ``rel_path``.
    """
    # sys._getframe(1) must be called directly in this function so that frame 1 is
    # the caller's frame.
    # noinspection PyProtectedMember
    caller_dir = os.path.dirname(sys._getframe(1).f_code.co_filename)
    return os.path.abspath(os.path.join(caller_dir, base_path, rel_path))
| 694,817
|
Convert a path that is relative to the module from which this function is called,
to an absolute path.
Args:
rel_path: str
Path relative to the location of the module file from which this function is called.
Returns:
str : Absolute path to the location specified by ``rel_path``.
|
def abs_path(rel_path):
    """Convert a path that is relative to the module from which this function is
    called, to an absolute path.

    Args:
        rel_path: str
            Path relative to the location of the module file from which this
            function is called.

    Returns:
        str : Absolute path to the location specified by ``rel_path``.
    """
    # sys._getframe(1) must be called directly in this function so that frame 1 is
    # the caller's frame.
    # noinspection PyProtectedMember
    caller_dir = os.path.dirname(sys._getframe(1).f_code.co_filename)
    return os.path.abspath(os.path.join(caller_dir, rel_path))
| 694,818
|
Return the value of the selected attribute in the selected element.
Args:
attr_key : str
Name of attribute for which to search
el_idx : int
Index of element to use in the event that there are multiple sibling
elements with the same name.
Returns:
str : Value of the selected attribute in the selected element.
|
def get_attr_value(self, attr_key, el_idx=0):
    """Return the value of the selected attribute in the selected element.

    Args:
        attr_key : str
            Name of attribute for which to search.
        el_idx : int
            Index of element to use in the event that there are multiple sibling
            elements with the same name.

    Returns:
        str : Value of the selected attribute in the selected element.
    """
    el = self.get_element_by_attr_key(attr_key, el_idx)
    return el.attrib[attr_key]
| 694,827
|
Set the value of the selected attribute of the selected element.
Args:
attr_key : str
Name of attribute for which to search
attr_val : str
Text to set for the attribute.
el_idx : int
Index of element to use in the event that there are multiple sibling
elements with the same name.
|
def set_attr_text(self, attr_key, attr_val, el_idx=0):
    """Set the value of the selected attribute of the selected element.

    Args:
        attr_key : str
            Name of attribute for which to search.
        attr_val : str
            Text to set for the attribute.
        el_idx : int
            Index of element to use in the event that there are multiple sibling
            elements with the same name.
    """
    el = self.get_element_by_attr_key(attr_key, el_idx)
    el.attrib[attr_key] = attr_val
| 694,828
|
Replace element.
Select element that has the same name as ``root_el``, then replace the selected
element with ``root_el``
``root_el`` can be a single element or the root of an element tree.
Args:
root_el : element
New element that will replace the existing element.
|
def replace_by_etree(self, root_el, el_idx=0):
    """Replace element.

    Select the element that has the same name as ``root_el``, then replace its
    children and attributes with those of ``root_el``. ``root_el`` can be a single
    element or the root of an element tree.

    Args:
        root_el : element
            New element that will replace the existing element.
    """
    target_el = self.get_element_by_name(root_el.tag, el_idx)
    target_el[:] = list(root_el)
    target_el.attrib = root_el.attrib
| 694,831
|
Replace element.
Select element that has the same name as the root element in ``xml_str``, then
replace the selected element with that root element
- ``xml_str`` must have a single element in the root.
- The root element in ``xml_str`` can have an arbitrary number of children.
Args:
xml_str : str
New element that will replace the existing element.
|
def replace_by_xml(self, xml_str, el_idx=0):
    """Replace element.

    Select the element that has the same name as the root element in ``xml_str``,
    then replace the selected element with that root element.

    - ``xml_str`` must have a single element in the root.
    - The root element in ``xml_str`` can have an arbitrary number of children.

    Args:
        xml_str : str
            New element that will replace the existing element.
    """
    self.replace_by_etree(self.parse_xml(xml_str), el_idx)
| 694,832
|
Return the closest zoom level for a given resolution
Parameters:
resolution -- max. resolution
unit -- unit for output (default='meters')
|
def getClosestZoom(self, resolution, unit='meters'):
    """Return the closest zoom level for a given resolution.

    Parameters:
    resolution -- max. resolution
    unit -- unit for output (default='meters')
    """
    lo, hi = self._getZoomLevelRange(resolution, unit)
    if lo == 0:
        return 0
    if hi == len(self.RESOLUTIONS):
        return hi - 1
    # Pick whichever of the two neighboring resolutions is nearer to the request.
    dist_at_lo = abs(self.RESOLUTIONS[lo] - resolution)
    dist_before = abs(self.RESOLUTIONS[lo - 1] - resolution)
    return lo if dist_at_lo < dist_before else lo - 1
| 694,950
|
Return the maximized zoom level for a given resolution
Parameters:
resolution -- max. resolution
unit -- unit for output (default='meters')
|
def getCeilingZoom(self, resolution, unit='meters'):
    """Return the maximized zoom level for a given resolution.

    Parameters:
    resolution -- max. resolution
    unit -- unit for output (default='meters')
    """
    # Exact match: delegate to the direct lookup.
    if resolution in self.RESOLUTIONS:
        return self.getZoom(resolution)
    lo, hi = self._getZoomLevelRange(resolution, unit)
    if lo in (0, hi):
        return lo
    if hi == len(self.RESOLUTIONS):
        return hi - 1
    return lo + 1
| 694,951
|
Return the parent tile(s) for an irregular (not following quadindex)
and regular tiling scheme
Parameters:
zoom -- the zoom level a the child tile
row -- the row of the child tile
col -- the col of the child tile
zoomParent -- the target zoom of the parent tile
|
def getParentTiles(self, zoom, col, row, zoomParent):
    """Return the parent tile(s) for an irregular (not following quadindex) and
    regular tiling scheme.

    Parameters:
    zoom -- the zoom level of the child tile
    row -- the row of the child tile
    col -- the col of the child tile
    zoomParent -- the target zoom of the parent tile
    """
    assert zoomParent <= zoom
    if zoomParent == zoom:
        return [[zoom, col, row]]
    extent = self.tileBounds(zoom, col, row)
    minRow, minCol, maxRow, maxCol = self.getExtentAddress(
        zoomParent, extent=extent, contained=True)
    return [
        [zoomParent, c, r]
        for c in range(minCol, maxCol + 1)
        for r in range(minRow, maxRow + 1)
    ]
| 694,954
|
init an EventsManager
Args:
xcli (XCLIClient): xcli client to send the event
product_name (string): the sending product's name
product_version (string): the sending product's version
Raises:
ValueError: if missing product_name or product_version
|
def __init__(self, xcli, product_name, product_version):
    """Init an EventsManager.

    Args:
        xcli (XCLIClient): xcli client used to send the events.
        product_name (string): the sending product's name.
        product_version (string): the sending product's version.

    Raises:
        ValueError: if product_name or product_version is missing.
    """
    self.xcli = xcli
    self.product_name = product_name
    self.product_version = product_version
    self.server_name = getfqdn()
    self.platform = get_platform_details()
    # Validation happens after the attributes are populated, matching the
    # original initialization order.
    if not product_name:
        raise ValueError('product_name is empty')
    if not product_version:
        raise ValueError('product_version is empty')
| 695,180
|
send css_event and if fails send custom_event instead
Args:
action (ACTIONS): the action causing the event
properties (dict): the action additional properties
event_severity (string): the event severity
Raises:
XCLIError: if the xcli.cmd.custom_event failed
KeyError: if action wasn't predefined
TypeError: if properties is not None or dict
|
def send_event(self, action, properties, event_severity=EVENT_SEVERITY):
    """Send a css_product_event; if that fails, send a custom_event instead.

    Args:
        action (ACTIONS): the action causing the event
        properties (dict or None): additional properties for the action
        event_severity (string): the event severity

    Raises:
        XCLIError: if the xcli.cmd.custom_event fallback fails
        TypeError: if properties is neither None nor a dict
    """
    # verify properties
    event_properties = dict() if (properties is None) else properties
    if type(event_properties) is not dict:
        raise TypeError('properties is not dict')
    # prepare event
    event_bunch = Bunch(
        Product=self.product_name,
        Version=self.product_version,
        Server=self.server_name,
        Platform=self.platform,
        Action=action,
        Properties=event_properties)
    event_description = self._get_description_prefix() + \
        json.dumps(event_bunch)
    use_custom_event = True
    # Probe the xcli command namespace: css_product_event is only available on
    # systems that support it.
    if CSS_PRODUCT_EVENT in dir(self.xcli.cmd):
        try:
            # send css product event
            log.debug("sending css_product_event "
                      "description=%s severity=%s",
                      event_description, event_severity)
            self.xcli.cmd.css_product_event(severity=event_severity,
                                            product=self.product_name,
                                            version=self.product_version,
                                            server=self.server_name,
                                            platform=self.platform,
                                            action=action,
                                            properties=event_properties)
            use_custom_event = False
        except (UnrecognizedCommandError,
                OperationForbiddenForUserCategoryError):
            # Not fatal: fall through to the custom_event path below.
            log.warning("failed css_product_event "
                        "description=%s severity=%s",
                        event_description, event_severity)
    if use_custom_event:
        # send custom event
        log.debug("sending custom_event description=%s severity=%s",
                  event_description, event_severity)
        self.xcli.cmd.custom_event(
            description=event_description, severity=event_severity)
| 695,181
|
Convert an HTML-style color string to RGB values.
Args:
str_color: Color in HTML format ('#RRGGBB')
Returns:
list of three RGB color components
|
def htmlcolor_to_rgb(str_color):
    """Convert an HTML-style color string to RGB values.

    Args:
        str_color: Color in HTML format ('#RRGGBB').

    Returns:
        list of three RGB color components as floats in [0.0, 1.0].

    Raises:
        ValueError: if the string is not on the '#RRGGBB' form.
    """
    if not str_color.startswith('#') or len(str_color) != 7:
        raise ValueError("Bad html color format. Expected: '#RRGGBB' ")
    hex_pairs = (str_color[1:3], str_color[3:5], str_color[5:])
    return [int(pair, 16) / 255.0 for pair in hex_pairs]
| 696,160
|
Downloads multiple accounts from Gitkit server.
Args:
next_page_token: string, pagination token.
max_results: pagination size.
Returns:
An array of accounts.
|
def DownloadAccount(self, next_page_token=None, max_results=None):
    """Download multiple accounts from the Gitkit server.

    Args:
        next_page_token: string, pagination token.
        max_results: pagination size.

    Returns:
        Tuple of (next page token or None, array of accounts).
    """
    request_params = {}
    if next_page_token:
        request_params['nextPageToken'] = next_page_token
    if max_results:
        request_params['maxResults'] = max_results
    response = self._InvokeGitkitApi('downloadAccount', request_params)
    # pylint does not recognize the return type of simplejson.loads
    # pylint: disable=maybe-no-member
    next_token = response.get('nextPageToken', None)
    accounts = response.get('users', {})
    return next_token, accounts
| 696,233
|
Uploads multiple accounts to Gitkit server.
Args:
hash_algorithm: string, algorithm to hash password.
hash_key: string, base64-encoded key of the algorithm.
accounts: array of accounts to be uploaded.
Returns:
Response of the API.
|
def UploadAccount(self, hash_algorithm, hash_key, accounts):
    """Upload multiple accounts to the Gitkit server.

    Args:
        hash_algorithm: string, algorithm used to hash the passwords.
        hash_key: string, base64-encoded key of the algorithm.
        accounts: array of accounts to be uploaded.

    Returns:
        Response of the API.
    """
    request_params = {
        'hashAlgorithm': hash_algorithm,
        'signerKey': hash_key,
        'users': accounts,
    }
    # pylint does not recognize the return type of simplejson.loads
    # pylint: disable=maybe-no-member
    return self._InvokeGitkitApi('uploadAccount', request_params)
| 696,234
|
Invokes Gitkit API, with optional access token for service account.
Args:
method: string, the api method name.
params: dict of optional parameters for the API.
need_service_account: false if service account is not needed.
Raises:
GitkitClientError: if the request is bad.
GitkitServerError: if Gitkit can not handle the request.
Returns:
API response as dict.
|
def _InvokeGitkitApi(self, method, params=None, need_service_account=True):
    """Invoke a Gitkit API method, with optional access token for service account.

    Args:
        method: string, the api method name.
        params: dict of optional parameters for the API.
        need_service_account: false if service account auth is not needed.

    Raises:
        errors.GitkitClientError: if the request is bad.
        GitkitServerError: if Gitkit can not handle the request
            (presumably raised by _CheckGitkitError -- confirm).

    Returns:
        API response as dict.
    """
    body = simplejson.dumps(params) if params else None
    req = urllib_request.Request(self.google_api_url + method)
    req.add_header('Content-type', 'application/json')
    if need_service_account:
        # Prefer an explicit credentials object; fall back to signing a token
        # from the service account email + key.
        if self.credentials:
            access_token = self.credentials.get_access_token().access_token
        elif self.service_account_email and self.service_account_key:
            access_token = self._GetAccessToken()
        else:
            raise errors.GitkitClientError('Missing service account credentials')
        req.add_header('Authorization', 'Bearer ' + access_token)
    try:
        binary_body = body.encode('utf-8') if body else None
        raw_response = urllib_request.urlopen(req, binary_body).read()
    except urllib_request.HTTPError as err:
        if err.code == 400:
            # HTTP 400 responses carry a Gitkit error payload; pass it on to
            # _CheckGitkitError below instead of raising here.
            raw_response = err.read()
        else:
            raise
    return self._CheckGitkitError(raw_response)
| 696,236
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.