_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3
values | text stringlengths 75 19.8k | language stringclasses 1
value | meta_information dict |
|---|---|---|---|---|---|
def exchange_token(self, code):
    """Trade an OAuth authorization code for an access token.

    :param code: authorization code returned by the OAuth redirect.
    :return: the access token string from the provider's JSON response.
    :raises MixcloudOauthError: when the HTTP response is not OK.
    """
    url = OAUTH_ROOT + '/access_token'
    payload = {
        'client_id': self.client_id,
        'client_secret': self.client_secret,
        'redirect_uri': self.redirect_uri,
        'code': code,
    }
    response = requests.get(url, params=payload)
    if not response.ok:
        raise MixcloudOauthError("Could not get access token.")
    return response.json()['access_token']
def acquire(self, *args, **kwargs):
    """Wraps Lock.acquire, tracking the waiter count and locked state.

    The waiting counter is bumped before blocking on the wrapped lock
    and decremented (with the locked flag set) once it is held.
    """
    with self._stat_lock:
        self._waiting += 1
    # Block on the real lock outside the stats lock so other threads
    # can still update the counters while we wait.
    self._lock.acquire(*args, **kwargs)
    with self._stat_lock:
        self._locked = True
        self._waiting -= 1
def release(self):
    """Wraps Lock.release and records when the lock was last freed."""
    self._lock.release()
    # Update the bookkeeping under the stats lock after releasing.
    with self._stat_lock:
        self._locked = False
        self._last_released = datetime.now()
def default_decoder(self, obj):
    """Reconstruct a custom-type instance from a wrapped state dict.

    Dicts that ``unwrap_callback`` does not recognise as wrapped states
    are returned untouched.

    :raises LookupError: when no unmarshaller is registered for the
        wrapped type name.
    """
    typename, state = self.unwrap_callback(obj)
    if typename is None:
        # Not a wrapped state -- pass the dict through unchanged.
        return obj

    try:
        cls, unmarshaller = self.serializer.unmarshallers[typename]
    except KeyError:
        raise LookupError('no unmarshaller found for type "{}"'.format(typename)) from None

    if cls is None:
        # Callable-style unmarshaller: it builds the object itself.
        return unmarshaller(state)

    # Two-step style: allocate without running __init__, then restore.
    instance = cls.__new__(cls)
    unmarshaller(instance, state)
    return instance
def wrap_state_dict(self, typename: str, state) -> Dict[str, Any]:
    """
    Package marshalled state into a two-key wrapper dict.

    The key named by the ``type_key`` option holds the registered type
    name and the key named by ``state_key`` holds the marshalled state.

    :param typename: registered name of the custom type
    :param state: the marshalled state of the object
    :return: an object serializable by the serializer
    """
    wrapped = {self.type_key: typename}
    wrapped[self.state_key] = state
    return wrapped
def publish(quiet, dataset_uri):
    """Enable HTTP access to a dataset.

    Only datasets in certain storage systems (for example AWS S3 object
    storage or Microsoft Azure Storage) can be published. A published
    dataset is world readable over HTTP.
    """
    access_uri = http_publish(dataset_uri)
    if quiet:
        return
    click.secho("Dataset accessible at ", nl=False, fg="green")
    click.secho(access_uri)
def _prompt_for_values(d):
    """Interactively update descriptive metadata in place.

    Recurses into nested ``CommentedMap`` and ``list`` values; scalar
    leaves are replaced by values typed at a prompt. Mutation happens in
    place because dicts and lists are passed by reference.
    """
    for key in d:
        value = d[key]
        if isinstance(value, CommentedMap):
            _prompt_for_values(value)
        elif isinstance(value, list):
            for element in value:
                _prompt_for_values(element)
        else:
            # ruamel.yaml ScalarFloat would confuse click; prompt as float.
            value_type = float if isinstance(value, ScalarFloat) else type(value)
            d[key] = click.prompt(key, type=value_type, default=value)
    return d
def create(quiet, name, base_uri, symlink_path):
    """Create a proto dataset.

    :param quiet: when truthy, print only the new dataset URI
    :param name: name for the new dataset (validated by ``_validate_name``)
    :param base_uri: base URI in which to create the dataset
    :param symlink_path: data directory to point at (symlink scheme only)
    """
    _validate_name(name)
    admin_metadata = dtoolcore.generate_admin_metadata(name)
    parsed_base_uri = dtoolcore.utils.generous_parse_uri(base_uri)

    if parsed_base_uri.scheme == "symlink":
        if symlink_path is None:
            raise click.UsageError("Need to specify symlink path using the -s/--symlink-path option")  # NOQA

    if symlink_path:
        # Rebuild the base URI with an explicit symlink scheme.
        base_uri = dtoolcore.utils.sanitise_uri(
            "symlink:" + parsed_base_uri.path
        )
        parsed_base_uri = dtoolcore.utils.generous_parse_uri(base_uri)

    # Create the dataset.
    proto_dataset = dtoolcore.generate_proto_dataset(
        admin_metadata=admin_metadata,
        base_uri=dtoolcore.utils.urlunparse(parsed_base_uri),
        config_path=CONFIG_PATH)

    # If we are creating a symlink dataset we need to set the symlink_path
    # attribute on the storage broker.
    if symlink_path:
        symlink_abspath = os.path.abspath(symlink_path)
        proto_dataset._storage_broker.symlink_path = symlink_abspath
    try:
        proto_dataset.create()
    except dtoolcore.storagebroker.StorageBrokerOSError as err:
        # Surface storage errors as CLI usage errors.
        raise click.UsageError(str(err))

    # Start with an empty readme; metadata is added in a later step.
    proto_dataset.put_readme("")

    if quiet:
        click.secho(proto_dataset.uri)
    else:
        # Give the user some feedback and hints on what to do next.
        click.secho("Created proto dataset ", nl=False, fg="green")
        click.secho(proto_dataset.uri)
        click.secho("Next steps: ")

        step = 1

        if parsed_base_uri.scheme != "symlink":
            click.secho("{}. Add raw data, eg:".format(step))
            click.secho(
                " dtool add item my_file.txt {}".format(proto_dataset.uri),
                fg="cyan")

            if parsed_base_uri.scheme == "file":
                # Find the abspath of the data directory for user feedback.
                data_path = proto_dataset._storage_broker._data_abspath
                click.secho(" Or use your system commands, e.g: ")
                click.secho(
                    " mv my_data_directory {}/".format(data_path),
                    fg="cyan"
                )

            step = step + 1

        click.secho("{}. Add descriptive metadata, e.g: ".format(step))
        click.secho(
            " dtool readme interactive {}".format(proto_dataset.uri),
            fg="cyan")
        step = step + 1

        click.secho(
            "{}. Convert the proto dataset into a dataset: ".format(step)
        )
        click.secho(" dtool freeze {}".format(proto_dataset.uri), fg="cyan")
def interactive(proto_dataset_uri):
    """Populate the readme by prompting the user for each template value."""
    proto_dataset = dtoolcore.ProtoDataSet.from_uri(
        uri=proto_dataset_uri,
        config_path=CONFIG_PATH)

    # Load the readme template into a round-trippable YAML structure.
    readme_template = _get_readme_template()
    yaml = YAML()
    yaml.explicit_start = True
    yaml.indent(mapping=2, sequence=4, offset=2)
    descriptive_metadata = yaml.load(readme_template)

    # Let the user override each templated value interactively.
    descriptive_metadata = _prompt_for_values(descriptive_metadata)

    # Serialise the result and store it as the dataset readme.
    stream = StringIO()
    yaml.dump(descriptive_metadata, stream)
    proto_dataset.put_readme(stream.getvalue())

    click.secho("Updated readme ", fg="green")
    click.secho("To edit the readme using your default editor:")
    click.secho(
        "dtool readme edit {}".format(proto_dataset_uri),
        fg="cyan")
def edit(dataset_uri):
    """Update the readme content via the user's default editor."""
    try:
        dataset = dtoolcore.ProtoDataSet.from_uri(
            uri=dataset_uri,
            config_path=CONFIG_PATH
        )
    except dtoolcore.DtoolCoreTypeError:
        # Not a proto dataset; fall back to a frozen dataset.
        dataset = dtoolcore.DataSet.from_uri(
            uri=dataset_uri,
            config_path=CONFIG_PATH
        )
    readme_content = dataset.get_readme_content()
    try:
        # Python2 compatibility: decode bytes to unicode text.
        readme_content = unicode(readme_content, "utf-8")
    except NameError:
        # Python3: ``unicode`` does not exist; content is already str.
        pass
    edited_content = click.edit(readme_content)
    if edited_content is None:
        # Editor was closed without saving.
        click.secho("Did not update readme ", nl=False, fg="red")
    else:
        _validate_and_put_readme(dataset, edited_content)
        click.secho("Updated readme ", nl=False, fg="green")
    click.secho(dataset_uri)
def show(dataset_uri):
    """Print the descriptive metadata held in the readme."""
    try:
        dataset = dtoolcore.ProtoDataSet.from_uri(
            uri=dataset_uri,
            config_path=CONFIG_PATH
        )
    except dtoolcore.DtoolCoreTypeError:
        # Not a proto dataset; fall back to a frozen dataset.
        dataset = dtoolcore.DataSet.from_uri(
            uri=dataset_uri,
            config_path=CONFIG_PATH
        )
    click.secho(dataset.get_readme_content())
def write(proto_dataset_uri, input):
    """Use YAML from a file or stdin to populate the readme.

    To stream content from stdin use "-", e.g.

        echo "desc: my data" | dtool readme write <DS_URI> -
    """
    proto_dataset = dtoolcore.ProtoDataSet.from_uri(uri=proto_dataset_uri)
    _validate_and_put_readme(proto_dataset, input.read())
def item(proto_dataset_uri, input_file, relpath_in_dataset):
    """Add a file to the proto dataset."""
    proto_dataset = dtoolcore.ProtoDataSet.from_uri(
        proto_dataset_uri,
        config_path=CONFIG_PATH)
    # An empty relpath defaults to the file's own basename.
    if relpath_in_dataset == "":
        relpath_in_dataset = os.path.basename(input_file)
    proto_dataset.put_item(input_file, relpath_in_dataset)
def metadata(proto_dataset_uri, relpath_in_dataset, key, value):
    """Add metadata to a file in the proto dataset."""
    dataset = dtoolcore.ProtoDataSet.from_uri(
        uri=proto_dataset_uri,
        config_path=CONFIG_PATH)
    dataset.add_item_metadata(
        handle=relpath_in_dataset,
        key=key,
        value=value)
def freeze(proto_dataset_uri):
    """Convert a proto dataset into a dataset.

    This step is carried out after all files have been added to the
    dataset. Freezing a dataset finalizes it with a stamp marking it as
    frozen.

    Exits with status 2 when the item count exceeds the
    DTOOL_MAX_FILES_LIMIT, 3 on an invalid item name, and 4 when the
    storage broker rejects the freeze.
    """
    proto_dataset = dtoolcore.ProtoDataSet.from_uri(
        uri=proto_dataset_uri,
        config_path=CONFIG_PATH
    )
    num_items = len(list(proto_dataset._identifiers()))
    # Configurable safety valve against accidentally huge datasets.
    max_files_limit = int(dtoolcore.utils.get_config_value(
        "DTOOL_MAX_FILES_LIMIT",
        CONFIG_PATH,
        10000
    ))
    assert isinstance(max_files_limit, int)
    if num_items > max_files_limit:
        click.secho(
            "Too many items ({} > {}) in proto dataset".format(
                num_items,
                max_files_limit
            ),
            fg="red"
        )
        click.secho("1. Consider splitting the dataset into smaller datasets")
        click.secho("2. Consider packaging small files using tar")
        click.secho("3. Increase the limit using the DTOOL_MAX_FILES_LIMIT")
        click.secho("   environment variable")
        sys.exit(2)

    # Validate every item handle before starting the expensive freeze.
    handles = [h for h in proto_dataset._storage_broker.iter_item_handles()]
    for h in handles:
        if not valid_handle(h):
            click.secho(
                "Invalid item name: {}".format(h),
                fg="red"
            )
            click.secho("1. Consider renaming the item")
            click.secho("2. Consider removing the item")
            sys.exit(3)

    with click.progressbar(length=len(list(proto_dataset._identifiers())),
                           label="Generating manifest") as progressbar:
        try:
            proto_dataset.freeze(progressbar=progressbar)
        except dtoolcore.storagebroker.DiskStorageBrokerValidationWarning as e:
            click.secho("")
            click.secho(str(e), fg="red", nl=False)
            sys.exit(4)

    click.secho("Dataset frozen ", nl=False, fg="green")
    click.secho(proto_dataset_uri)
def cp(resume, quiet, dataset_uri, dest_base_uri):
    """Copy a dataset to a different location.

    Thin CLI wrapper: all the work happens in the shared ``_copy`` helper.
    """
    _copy(resume, quiet, dataset_uri, dest_base_uri)
def compress(obj, level=6, return_type="bytes"):
    """Compress anything to bytes or a base64 string.

    :param obj: bytes, text, or any picklable object.
    :param level: zlib compression level.
    :param return_type: if "bytes", then return bytes; if "str", then
        return base64.b64encode bytes in utf-8 string.
    :raises ValueError: if ``return_type`` is not "bytes" or "str".
    """
    # Fail fast on a bad return_type before doing any (potentially
    # expensive) compression work.
    if return_type not in ("bytes", "str"):
        raise ValueError("'return_type' has to be one of 'bytes', 'str'!")
    if isinstance(obj, binary_type):
        b = zlib.compress(obj, level)
    elif isinstance(obj, string_types):
        b = zlib.compress(obj.encode("utf-8"), level)
    else:
        # Protocol 2 keeps the payload loadable from Python 2 as well.
        b = zlib.compress(pickle.dumps(obj, protocol=2), level)
    if return_type == "bytes":
        return b
    return base64.b64encode(b).decode("utf-8")
def find_probable_year_index(self, tokens):
    """
    Attempt to deduce if a pre-100 year was lost due to padded zeros
    being taken off.

    Returns the index of the only token with a more-than-2-digit
    potential year, or None when no such unique token exists.
    """
    for index, token in enumerate(self):
        candidates = _ymd.find_potential_year_tokens(
            token, tokens)
        if len(candidates) == 1 and len(candidates[0]) > 2:
            return index
def tzname_in_python2(namefunc):
    """Change unicode output into bytestrings in Python 2.

    tzname() API changed in Python 3. It used to return bytes, but was
    changed to unicode strings.
    """
    def adjust_encoding(*args, **kwargs):
        name = namefunc(*args, **kwargs)
        if name is None or PY3:
            return name
        # Python 2 only: encode the unicode name to a bytestring.
        return name.encode()

    return adjust_encoding
q263019 | _validate_fromutc_inputs | validation | def _validate_fromutc_inputs(f):
"""
The CPython version of ``fromutc`` checks that the input is a ``datetime``
object and that ``self`` is attached as its ``tzinfo``.
"""
@wraps(f)
def fromutc(self, dt):
if not isinstance(dt, datetime):
raise TypeError("fromutc() requires a datetime argument")
if dt.tzinfo is not self:
raise ValueError("dt.tzinfo is not self")
return f(self, dt)
return fromutc | python | {
"resource": ""
} |
def fromutc(self, dt):
    """ Given a datetime in UTC, return local time """
    if not isinstance(dt, datetime):
        raise TypeError("fromutc() requires a datetime argument")

    if dt.tzinfo is not self:
        raise ValueError("dt.tzinfo is not self")

    # Get transitions - if there are none, fixed offset
    transitions = self.transitions(dt.year)

    if transitions is None:
        return dt + self.utcoffset(dt)

    # Get the transition times in UTC (the stored transitions are wall
    # times, so subtract the standard offset).
    dston, dstoff = transitions

    dston -= self._std_offset
    dstoff -= self._std_offset

    utc_transitions = (dston, dstoff)
    dt_utc = dt.replace(tzinfo=None)

    # Decide DST by comparing the naive UTC time against the
    # UTC-expressed transition window.
    isdst = self._naive_isdst(dt_utc, utc_transitions)

    if isdst:
        dt_wall = dt + self._dst_offset
    else:
        dt_wall = dt + self._std_offset

    # An ambiguous wall time (the repeated hour after DST ends) gets
    # fold=1 so it maps back to the second occurrence.
    _fold = int(not isdst and self.is_ambiguous(dt_wall))

    return enfold(dt_wall, fold=_fold)
def strip_comment_line_with_symbol(line, start):
    """Remove a trailing comment introduced by ``start`` from one line.

    Occurrences of ``start`` inside double-quoted strings are kept; the
    line is cut at the first occurrence outside any string literal.
    """
    parts = line.split(start)
    # Count unescaped double quotes in each fragment; an even running
    # total means the split point lies outside a string literal.
    counts = [len(findall(r'(?:^|[^"\\]|(?:\\\\|\\")+)(")', part))
              for part in parts]
    total = 0
    for nr, count in enumerate(counts):
        total += count
        if total % 2 == 0:
            return start.join(parts[:nr + 1]).rstrip()
    return line.rstrip()  # pragma: no cover
def strip_comments(string, comment_symbols=frozenset(('#', '//'))):
    """Strip comments from json string.

    :param string: A string containing json with comments started by
        comment_symbols.
    :param comment_symbols: Iterable of symbols that start a line
        comment (default # or //).
    :return: The string with the comments removed.
    """
    stripped = []
    for line in string.splitlines():
        # Apply each comment symbol in turn to the (already stripped) line.
        for symbol in comment_symbols:
            line = strip_comment_line_with_symbol(line, start=symbol)
        stripped.append(line)
    return '\n'.join(stripped)
def picknthweekday(year, month, dayofweek, hour, minute, whichweek):
    """ dayofweek == 0 means Sunday, whichweek 5 means last instance """
    first = datetime.datetime(year, month, 1, hour, minute)

    # This works whether dayofweek is ISO (1-7) or Microsoft-style (0-6),
    # because 7 % 7 == 0.
    weekdayone = first.replace(day=((dayofweek - first.isoweekday()) % 7) + 1)
    candidate = weekdayone + (whichweek - 1) * ONEWEEK

    # A "fifth" occurrence may spill into the next month; step back a
    # week to get the last instance within the requested month.
    if candidate.month != month:
        candidate -= ONEWEEK

    return candidate
def valuestodict(key):
    """Convert a registry key's values to a dictionary."""
    result = {}
    num_values = winreg.QueryInfoKey(key)[1]
    tz_res = None

    for index in range(num_values):
        value_name, value, dtype = winreg.EnumValue(key, index)
        if dtype in (winreg.REG_DWORD, winreg.REG_DWORD_LITTLE_ENDIAN):
            # DWORDs are read as unsigned 32-bit values; reinterpret
            # them as signed integers.
            if value & (1 << 31):
                value = value - (1 << 32)
        elif dtype == winreg.REG_SZ:
            # '@tzres...' strings are references into the tzres DLL;
            # resolve them to the actual localized string.
            if value.startswith('@tzres'):
                tz_res = tz_res or tzres()
                value = tz_res.name_from_string(value)

            value = value.rstrip('\x00')  # Remove trailing nulls

        result[value_name] = value

    return result
def name_from_string(self, tzname_str):
    """
    Parse strings as returned from the Windows registry into the time zone
    name as defined in the registry.

    >>> from dateutil.tzwin import tzres
    >>> tzr = tzres()
    >>> print(tzr.name_from_string('@tzres.dll,-251'))
    'Dateline Daylight Time'
    >>> print(tzr.name_from_string('Eastern Standard Time'))
    'Eastern Standard Time'

    :param tzname_str:
        A timezone name string as returned from a Windows registry key.

    :return:
        Returns the localized timezone string from tzres.dll if the string
        is of the form `@tzres.dll,-offset`, else returns the input string.

    :raises ValueError:
        If an '@'-prefixed string has a missing or non-numeric offset.
    """
    if not tzname_str.startswith('@'):
        return tzname_str

    name_splt = tzname_str.split(',-')
    try:
        offset = int(name_splt[1])
    except (IndexError, ValueError):
        # Previously a bare "except:", which also swallowed
        # KeyboardInterrupt/SystemExit; only a missing or non-numeric
        # offset makes the string malformed.
        raise ValueError("Malformed timezone string.")

    return self.load_name(offset)
def gettz(name):
    """
    Retrieve a time zone from the zoneinfo tarball packaged with dateutil.

    :param name:
        An IANA-style time zone name, as found in the zoneinfo file.

    :return:
        Returns a :class:`dateutil.tz.tzfile` time zone object.

    .. warning::
        Generally inadvisable; provided only for API compatibility with
        earlier versions. This is *not* equivalent to
        ``dateutil.tz.gettz()``, which selects an appropriate time zone
        based on the inputs, favoring system zoneinfo. This is ONLY for
        accessing the dateutil-specific zoneinfo (which may be out of
        date compared to the system zoneinfo).

    .. deprecated:: 2.6
        If you need to use a specific zoneinfofile over the system
        zoneinfo, instantiate a :class:`dateutil.zoneinfo.ZoneInfoFile`
        object and call :func:`dateutil.zoneinfo.ZoneInfoFile.get(name)`
        instead. Use :func:`get_zonefile_instance` to retrieve an
        instance of the dateutil-provided zoneinfo.
    """
    warnings.warn("zoneinfo.gettz() will be removed in future versions, "
                  "to use the dateutil-provided zoneinfo files, instantiate a "
                  "ZoneInfoFile object and use ZoneInfoFile.zones.get() "
                  "instead. See the documentation for details.",
                  DeprecationWarning)

    # Lazily build the shared ZoneInfoFile instance on first use.
    if not _CLASS_ZONE_INSTANCE:
        _CLASS_ZONE_INSTANCE.append(ZoneInfoFile(getzoneinfofile_stream()))
    return _CLASS_ZONE_INSTANCE[0].zones.get(name)
def gettz_db_metadata():
    """ Get the zonefile metadata

    See `zonefile_metadata`_

    :returns:
        A dictionary with the database metadata

    .. deprecated:: 2.6
        See deprecation warning in :func:`zoneinfo.gettz`. To get
        metadata, query the attribute ``zoneinfo.ZoneInfoFile.metadata``.
    """
    warnings.warn("zoneinfo.gettz_db_metadata() will be removed in future "
                  "versions, to use the dateutil-provided zoneinfo files, "
                  "ZoneInfoFile object and query the 'metadata' attribute "
                  "instead. See the documentation for details.",
                  DeprecationWarning)

    # Lazily build the shared ZoneInfoFile instance on first use.
    if not _CLASS_ZONE_INSTANCE:
        _CLASS_ZONE_INSTANCE.append(ZoneInfoFile(getzoneinfofile_stream()))
    return _CLASS_ZONE_INSTANCE[0].metadata
def get_config(jid):
    """Get the configuration for the given JID based on XMPP_HTTP_UPLOAD_ACCESS.

    ``False`` is returned when the JID does not match any rule.
    """
    acls = getattr(settings, 'XMPP_HTTP_UPLOAD_ACCESS', (('.*', False), ))
    for regex, config in acls:
        # A rule may give a single pattern string or a list of them.
        patterns = [regex] if isinstance(regex, six.string_types) else regex
        for pattern in patterns:
            if re.search(pattern, jid):
                return config
    return False
def datetime_exists(dt, tz=None):
    """
    Given a datetime and a time zone, determine whether or not a given
    datetime would fall in a gap.

    :param dt:
        A :class:`datetime.datetime` (whose time zone will be ignored if
        ``tz`` is provided.)
    :param tz:
        A :class:`datetime.tzinfo` with support for the ``fold``
        attribute. If ``None`` or not provided, the datetime's own time
        zone will be used.
    :return:
        Returns a boolean value whether or not the "wall time" exists in
        ``tz``.
    """
    if tz is None:
        if dt.tzinfo is None:
            raise ValueError('Datetime is naive and no time zone provided.')
        tz = dt.tzinfo

    # Round-trip the wall time through UTC; a nonexistent (gap) wall
    # time will not survive the trip unchanged.
    naive = dt.replace(tzinfo=None)
    roundtripped = naive.replace(tzinfo=tz).astimezone(tzutc()).astimezone(tz)
    return naive == roundtripped.replace(tzinfo=None)
def _set_tzdata(self, tzobj):
    """ Set the time zone data of this object from a _tzfile object """
    # Copy the relevant attributes over as private attributes; the
    # attribute names to mirror are enumerated by _tzfile.attrs.
    for attr in _tzfile.attrs:
        setattr(self, '_' + attr, getattr(tzobj, attr))
def normalized(self):
    """
    Return a version of this object represented entirely using integer
    values for the relative attributes.

    >>> relativedelta(days=1.5, hours=2).normalized()
    relativedelta(days=1, hours=14)

    :return:
        Returns a :class:`dateutil.relativedelta.relativedelta` object.
    """
    # Cascade remainders down (rounding each to roughly nearest
    # microsecond). The decreasing round() precisions (11, 10, 8 digits)
    # keep each unit's fractional carry stable down to microseconds.
    days = int(self.days)

    hours_f = round(self.hours + 24 * (self.days - days), 11)
    hours = int(hours_f)

    minutes_f = round(self.minutes + 60 * (hours_f - hours), 10)
    minutes = int(minutes_f)

    seconds_f = round(self.seconds + 60 * (minutes_f - minutes), 8)
    seconds = int(seconds_f)

    microseconds = round(self.microseconds + 1e6 * (seconds_f - seconds))

    # Constructor carries overflow back up with call to _fix()
    return self.__class__(years=self.years, months=self.months,
                          days=days, hours=hours, minutes=minutes,
                          seconds=seconds, microseconds=microseconds,
                          leapdays=self.leapdays, year=self.year,
                          month=self.month, day=self.day,
                          weekday=self.weekday, hour=self.hour,
                          minute=self.minute, second=self.second,
                          microsecond=self.microsecond)
def _hash(secret: bytes, data: bytes, alg: str) -> bytes:
    """
    Create a new HMAC hash.

    :param secret: The secret used when hashing data.
    :type secret: bytes
    :param data: The data to hash.
    :type data: bytes
    :param alg: The algorithm to use when hashing `data`.
    :type alg: str
    :return: New HMAC hash.
    :rtype: bytes
    """
    digestmod = get_algorithm(alg)
    return hmac.new(secret, msg=data, digestmod=digestmod).digest()
def decode(secret: Union[str, bytes], token: Union[str, bytes],
           alg: str = default_alg) -> Tuple[dict, dict]:
    """
    Decodes the given token's header and payload and validates the signature.

    :param secret: The secret used to decode the token. Must match the
        secret used when creating the token.
    :type secret: Union[str, bytes]
    :param token: The token to decode.
    :type token: Union[str, bytes]
    :param alg: The algorithm used to decode the token. Must match the
        algorithm used when creating the token.
    :type alg: str
    :return: The decoded header and payload.
    :rtype: Tuple[dict, dict]
    :raises InvalidHeaderError: header segment is not base64 JSON or not
        a JSON object.
    :raises InvalidPayloadError: payload segment is not base64 JSON or
        not a JSON object.
    :raises InvalidSignatureError: signature does not match the one
        recomputed over the first two segments.
    """
    secret = util.to_bytes(secret)
    token = util.to_bytes(token)
    # Split "<header>.<payload>.<signature>"; the signature covers
    # everything before the final dot.
    pre_signature, signature_segment = token.rsplit(b'.', 1)
    header_b64, payload_b64 = pre_signature.split(b'.')
    try:
        header_json = util.b64_decode(header_b64)
        header = json.loads(util.from_bytes(header_json))
    except (json.decoder.JSONDecodeError, UnicodeDecodeError, ValueError):
        raise InvalidHeaderError('Invalid header')
    try:
        payload_json = util.b64_decode(payload_b64)
        payload = json.loads(util.from_bytes(payload_json))
    except (json.decoder.JSONDecodeError, UnicodeDecodeError, ValueError):
        raise InvalidPayloadError('Invalid payload')
    # Both decoded segments must be JSON objects, not bare values.
    if not isinstance(header, dict):
        raise InvalidHeaderError('Invalid header: {}'.format(header))
    if not isinstance(payload, dict):
        raise InvalidPayloadError('Invalid payload: {}'.format(payload))
    signature = util.b64_decode(signature_segment)
    # Recompute the signature and compare via compare_signature (which
    # uses hmac.compare_digest under the hood).
    calculated_signature = _hash(secret, pre_signature, alg)
    if not compare_signature(signature, calculated_signature):
        raise InvalidSignatureError('Invalid signature')
    return header, payload
def compare_signature(expected: Union[str, bytes],
                      actual: Union[str, bytes]) -> bool:
    """
    Compares the given signatures.

    Uses ``hmac.compare_digest`` so the comparison takes constant time.

    :param expected: The expected signature.
    :type expected: Union[str, bytes]
    :param actual: The actual signature.
    :type actual: Union[str, bytes]
    :return: Do the signatures match?
    :rtype: bool
    """
    return hmac.compare_digest(util.to_bytes(expected),
                               util.to_bytes(actual))
def compare_token(expected: Union[str, bytes],
                  actual: Union[str, bytes]) -> bool:
    """
    Compares the given tokens by their signature segments.

    :param expected: The expected token.
    :type expected: Union[str, bytes]
    :param actual: The actual token.
    :type actual: Union[str, bytes]
    :return: Do the tokens match?
    :rtype: bool
    """
    # Only the final (signature) segment of each token is compared.
    _, expected_sig_seg = util.to_bytes(expected).rsplit(b'.', 1)
    _, actual_sig_seg = util.to_bytes(actual).rsplit(b'.', 1)
    return compare_signature(util.b64_decode(expected_sig_seg),
                             util.b64_decode(actual_sig_seg))
def valid(self, time: int = None) -> bool:
    """
    Is the token valid? Only the timestamps within the token are
    checked, against ``time`` or, when omitted, the current UTC time.

    :param time: The timestamp to validate against
    :type time: Union[int, None]
    :return: The validity of the token.
    :rtype: bool
    """
    if time is None:
        # Seconds since the Unix epoch, derived from UTC "now".
        epoch = datetime(1970, 1, 1, 0, 0, 0)
        time = int((datetime.utcnow() - epoch).total_seconds())
    not_yet_active = isinstance(self.valid_from, int) and time < self.valid_from
    expired = isinstance(self.valid_to, int) and time > self.valid_to
    return not (not_yet_active or expired)
def _pop_claims_from_payload(self):
    """
    Move any registered claims found in the payload into
    ``registered_claims``, overwriting extant entries.
    """
    # Snapshot the matching keys first so we never mutate the payload
    # while iterating over it.
    found = [k for k in self.payload.keys() if k in registered_claims.values()]
    for name in found:
        self.registered_claims[name] = self.payload.pop(name)
def encode(self) -> str:
    """
    Create a token based on the data held in the class.

    :return: A new token
    :rtype: str
    """
    # Payload entries take precedence over registered claims on key clash.
    merged = dict(self.registered_claims)
    merged.update(self.payload)
    return encode(self.secret, merged, self.alg, self.header)
def decode(secret: Union[str, bytes], token: Union[str, bytes],
           alg: str = default_alg) -> 'Jwt':
    """
    Decodes the given token into an instance of `Jwt`.

    :param secret: The secret used to decode the token. Must match the
        secret used when creating the token.
    :type secret: Union[str, bytes]
    :param token: The token to decode.
    :type token: Union[str, bytes]
    :param alg: The algorithm used to decode the token. Must match the
        algorithm used when creating the token.
    :type alg: str
    :return: The decoded token.
    :rtype: `Jwt`
    """
    # Delegate to the module-level decode(), then wrap the result.
    decoded_header, decoded_payload = decode(secret, token, alg)
    return Jwt(secret, decoded_payload, alg, decoded_header)
def compare(self, jwt: 'Jwt', compare_dates: bool = False) -> bool:
    """
    Compare against another `Jwt`.

    :param jwt: The token to compare against.
    :type jwt: Jwt
    :param compare_dates: Should the comparision take dates into account?
    :type compare_dates: bool
    :return: Are the two Jwt's the same?
    :rtype: bool
    """
    if self.secret != jwt.secret:
        return False
    if self.payload != jwt.payload:
        return False
    if self.alg != jwt.alg:
        return False
    if self.header != jwt.header:
        return False

    expected_claims = self.registered_claims
    actual_claims = jwt.registered_claims
    if not compare_dates:
        # Mask the date claims so they do not affect the comparison.
        # (The previous implementation wrote ``{v if ... else None}`` --
        # a one-element *set* per value -- which compared sets instead
        # of values and crashed on unhashable claim values.)
        strip = ('exp', 'nbf', 'iat')
        expected_claims = {k: (v if k not in strip else None)
                           for k, v in expected_claims.items()}
        actual_claims = {k: (v if k not in strip else None)
                         for k, v in actual_claims.items()}

    return expected_claims == actual_claims
def get(self, request, hash, filename):
    """Download a file.

    Returns 403 when the module-level ``_ws_download`` flag is set
    (plain HTTP downloads disabled); otherwise serves the stored upload
    matching ``hash`` and ``filename`` with its recorded content type.
    """
    if _ws_download is True:
        return HttpResponseForbidden()
    upload = Upload.objects.uploaded().get(hash=hash, name=filename)
    return FileResponse(upload.file, content_type=upload.type)
def is_compressed_json_file(abspath):
    """Test a file is a valid json file and report whether it is compressed.

    - ``*.json``: uncompressed, utf-8 encoded json file
    - ``*.js``: uncompressed, utf-8 encoded json file
    - ``*.gz``: compressed, utf-8 encoded json file

    :param abspath: file path; the extension check is case-insensitive.
    :return: True for ``.gz``, False for ``.json``/``.js``.
    :raises ValueError: for any other extension.
    """
    abspath = abspath.lower()
    # The stem is irrelevant; only the extension matters.
    _, ext = os.path.splitext(abspath)
    if ext in (".json", ".js"):
        return False
    if ext == ".gz":
        return True
    raise ValueError(
        "'%s' is not a valid json file. "
        "extension has to be '.json' or '.js' for uncompressed, '.gz' "
        "for compressed." % abspath)
def dump_set(self, obj, class_name=set_class_name):
    """
    ``set`` dumper.
    """
    converted = [self._json_convert(member) for member in obj]
    return {"$" + class_name: converted}
def dump_deque(self, obj, class_name="collections.deque"):
    """
    ``collections.deque`` dumper.
    """
    converted = [self._json_convert(member) for member in obj]
    return {"$" + class_name: converted}
def dump_OrderedDict(self, obj, class_name="collections.OrderedDict"):
    """
    ``collections.OrderedDict`` dumper.
    """
    # Preserve insertion order by dumping as a list of (key, value) pairs.
    pairs = [(key, self._json_convert(value)) for key, value in iteritems(obj)]
    return {"$" + class_name: pairs}
def dump_nparray(self, obj, class_name=numpy_ndarray_class_name):
    """
    ``numpy.ndarray`` dumper.
    """
    # tolist() converts the array (and its dtypes) to plain Python values.
    as_list = obj.tolist()
    return {"$" + class_name: self._json_convert(as_list)}
q263047 | _invalidates_cache | validation | def _invalidates_cache(f):
"""
Decorator for rruleset methods which may invalidate the
cached length.
"""
def inner_func(self, *args, **kwargs):
rv = f(self, *args, **kwargs)
self._invalidate_cache()
return rv
return inner_func | python | {
"resource": ""
} |
def before(self, dt, inc=False):
    """Return the last recurrence before the given datetime instance.

    The inc keyword defines what happens if dt is an occurrence. With
    inc=True, if dt itself is an occurrence, it will be returned.
    """
    # Use the completed cache when available, else iterate self directly.
    source = self._cache if self._cache_complete else self
    last = None
    if inc:
        for occurrence in source:
            if occurrence > dt:
                break
            last = occurrence
    else:
        for occurrence in source:
            if occurrence >= dt:
                break
            last = occurrence
    return last
def after(self, dt, inc=False):
    """Return the first recurrence after the given datetime instance.

    The inc keyword defines what happens if dt is an occurrence. With
    inc=True, if dt itself is an occurrence, it will be returned.
    """
    # Use the completed cache when available, else iterate self directly.
    source = self._cache if self._cache_complete else self
    if inc:
        for occurrence in source:
            if occurrence >= dt:
                return occurrence
    else:
        for occurrence in source:
            if occurrence > dt:
                return occurrence
    return None
def xafter(self, dt, count=None, inc=False):
    """
    Generator which yields up to `count` recurrences after the given
    datetime instance, equivalent to `after`.

    :param dt:
        The datetime at which to start generating recurrences.
    :param count:
        The maximum number of recurrences to generate. If `None`
        (default), dates are generated until the recurrence rule is
        exhausted.
    :param inc:
        If `dt` is an instance of the rule and `inc` is `True`, it is
        included in the output.
    :yields: Yields a sequence of `datetime` objects.
    """
    # Use the completed cache when available, else iterate self directly.
    source = self._cache if self._cache_complete else self

    if inc:
        matches = lambda candidate: candidate >= dt
    else:
        matches = lambda candidate: candidate > dt

    emitted = 0
    for candidate in source:
        if not matches(candidate):
            continue
        if count is not None:
            emitted += 1
            if emitted > count:
                break
        yield candidate
def replace(self, **kwargs):
    """Return a new rrule equal to this one except for the attributes
    overridden by the given keyword arguments."""
    # Start from this rule's core settings, layer the originally supplied
    # rule options on top, then apply the caller's overrides last.
    merged = {
        "interval": self._interval,
        "count": self._count,
        "dtstart": self._dtstart,
        "freq": self._freq,
        "until": self._until,
        "wkst": self._wkst,
        "cache": self._cache is not None,
    }
    merged.update(self._original_rule)
    merged.update(kwargs)
    return rrule(**merged)
"resource": ""
} |
def run_excel_to_html():
    """
    Run the excel_to_html function from the
    command-line.

    Args:
        -p path to file
        -s name of the sheet to convert
        -css classes to apply
        -m attempt to combine merged cells
        -c caption for accessibility
        -d summary and details for accessibility, two strings
           separated by a | character
        -r whether the table has row headers

    Example use:
        excel_to_html -p myfile.xlsx -s SheetName -css diablo-python -m true
    """
    # Capture commandline arguments. prog='' argument must
    # match the command name in setup.py entry_points
    parser = argparse.ArgumentParser(prog='excel_to_html')
    parser.add_argument('-p', nargs='?', help='Path to an excel file for conversion.')
    parser.add_argument(
        '-s',
        nargs='?',
        help='The name of a sheet in our excel file. Defaults to "Sheet1".',
    )
    parser.add_argument(
        '-css', nargs='?', help='Space separated css classes to append to the table.'
    )
    parser.add_argument(
        '-m', action='store_true', help='Merge, attempt to combine merged cells.'
    )
    parser.add_argument(
        '-c', nargs='?', help='Caption for creating an accessible table.'
    )
    parser.add_argument(
        '-d',
        nargs='?',
        help='Two strings separated by a | character. The first string \
is for the html "summary" attribute and the second string is for the html "details" attribute. \
both values must be provided and nothing more.',
    )
    parser.add_argument(
        '-r', action='store_true', help='Row headers. Does the table have row headers?'
    )
    args = parser.parse_args()
    # Collect parsed values so defaults can be applied uniformly below.
    inputs = {
        'p': args.p,
        's': args.s,
        'css': args.css,
        'm': args.m,
        'c': args.c,
        'd': args.d,
        'r': args.r,
    }

    # Apply fallbacks for any option the user did not supply.
    p = inputs['p']
    s = inputs['s'] if inputs['s'] else 'Sheet1'
    css = inputs['css'] if inputs['css'] else ''
    m = inputs['m'] if inputs['m'] else False
    c = inputs['c'] if inputs['c'] else ''
    # "summary|details" becomes a two-element list; empty list when absent.
    d = inputs['d'].split('|') if inputs['d'] else []
    r = inputs['r'] if inputs['r'] else False

    html = fp.excel_to_html(
        p, sheetname=s, css_classes=css, caption=c, details=d, row_headers=r, merge=m
    )

    print(html)
"resource": ""
} |
def get_inner_template(self, language, template_type, indentation, key, val):
    """Render one key/value entry for the requested language.

    Args:
        language: string, target language ('php', 'javascript' or 'ocaml').
        template_type: string, 'iterable' when the value still needs
            unpacking (list/tuple), 'singular' for a scalar value.
        indentation: string of spaces used as the leading indent.
        key: the already-formatted array key.
        val: the already-formatted array value.

    Returns:
        string, the rendered entry for the target language.
    """
    # Per-language entry templates; {i}=indentation, {k}=key, {v}=value.
    templates = {
        'php': {
            'iterable': '{i}{k} => array \n{i}( \n{v}{i}),\n',
            'singular': '{i}{k} => {v}, \n',
        },
        'javascript': {
            'iterable': '{i}{k} : {{\n{v}\n{i}}},\n',
            'singular': '{i}{k}: {v},\n',
        },
        'ocaml': {
            'iterable': '{i}[| ({k}, (\n{v}\n{i}))|] ;;\n',
            'singular': '{i}({k}, {v});\n',
        },
    }
    return templates[language][template_type].format(i=indentation, k=key, v=val)
"resource": ""
} |
def translate_array(self, string, language, level=3, retdata=False):
    """Unserializes a serialized php array and prints it to
    the console as a data structure in the specified language.
    Used to translate or convert a php array into a data structure
    in another language. Currently supports, PHP, Python, Javascript,
    and JSON.

    Args:
        string: a string of serialized php
        language: a string representing the desired output
            format for the array.
        level: integer, indentation level in spaces.
            Defaults to 3.
        retdata: boolean, the method will return the string
            in addition to printing it if set to True. Defaults
            to false.

    Returns:
        None but prints a string to the console if retdata is
        False, otherwise returns a string.
    """
    language = language.lower()
    assert self.is_built_in(language) or language in self.outer_templates, \
        "Sorry, " + language + " is not a supported language."

    # Serialized data converted to a python data structure (list of tuples)
    data = phpserialize.loads(bytes(string, 'utf-8'), array_hook=list, decode_strings=True)

    # If language conversion is supported by python avoid recursion entirely
    # and use a built in library
    if self.is_built_in(language):
        self.get_built_in(language, level, data)
        print(self)
        return self.data_structure if retdata else None

    # The language is not supported. Use recursion to build a data structure.
    def loop_print(iterable, level=3):
        """
        Loops over a python representation of a php array
        (list of tuples) and constructs a representation in another language.
        Translates a php array into another structure.

        Args:
            iterable: list or tuple to unpack.
            level: integer, number of spaces to use for indentation
        """
        retval = ''
        indentation = ' ' * level

        # Base case - variable is not an iterable
        if not self.is_iterable(iterable) or isinstance(iterable, str):
            non_iterable = str(iterable)
            return str(non_iterable)

        # Recursive case
        for item in iterable:
            # If item is a tuple it should be a key, value pair
            if isinstance(item, tuple) and len(item) == 2:
                # Get the key value pair
                key = item[0]
                val = loop_print(item[1], level=level+3)

                # Translate special values (e.g. None/True/False spellings)
                val = self.translate_val(language, val) if language in self.lang_specific_values \
                    and val in self.lang_specific_values[language] else val

                # Convert keys to their properly formatted strings
                # Integers are not quoted as array keys
                key = str(key) if isinstance(key, int) else '\'' + str(key) + '\''

                # The first item is a key and the second item is an iterable, boolean
                needs_unpacking = hasattr(item[0],'__iter__') == False \
                    and hasattr(item[1],'__iter__') == True

                # The second item is an iterable
                if needs_unpacking:
                    retval += self.get_inner_template(language, 'iterable', indentation, key, val)
                # The second item is not an iterable
                else:
                    # Convert values to their properly formatted strings
                    # Integers and booleans are not quoted as array values
                    val = str(val) if val.isdigit() or val in self.lang_specific_values[language].values() else '\'' + str(val) + '\''
                    retval += self.get_inner_template(language, 'singular', indentation, key, val)

        return retval

    # Execute the recursive call in language specific wrapper template
    self.data_structure = self.outer_templates[language] % (loop_print(data))
    print(self)
    return self.data_structure if retdata else None
"resource": ""
} |
def get():
    """Load, validate and post-process the configuration.

    Returns:
        dict: the loaded, validated configuration with the projects path
        expanded and completed.

    Raises:
        ConfigError: on file creation failure, parse errors, or any
        validation problem (missing/invalid keys or values).
    """
    try:
        config = _load_config()
    except IOError:
        # First run: write the default config, then load it again.
        try:
            _create_default_config()
            config = _load_config()
        except IOError as exc:
            raise ConfigError(_FILE_CREATION_ERROR.format(exc.args[0]))
        except SyntaxError as exc:
            raise ConfigError(_JSON_SYNTAX_ERROR.format(exc.args[0]))
        except Exception:
            raise ConfigError(_JSON_SYNTAX_ERROR.format('Yaml syntax error..'))

    # Map each validation failure type to its user-facing message.
    try:
        _validate(config)
    except KeyError as exc:
        raise ConfigError(_MANDATORY_KEY_ERROR.format(exc.args[0]))
    except SyntaxError as exc:
        raise ConfigError(_INVALID_KEY_ERROR.format(exc.args[0]))
    except ValueError as exc:
        raise ConfigError(_INVALID_VALUE_ERROR.format(exc.args[0]))

    config['projects-path'] = os.path.expanduser(config['projects-path'])
    _complete_config(config)
    return config
"resource": ""
} |
def reusable(func):
    """Create a reusable class from a generator function

    Parameters
    ----------
    func: GeneratorCallable[T_yield, T_send, T_return]
        the function to wrap

    Note
    ----
    * the callable must have an inspectable signature
    * If bound to a class, the new reusable generator is callable as a method.
      To opt out of this, add a :func:`staticmethod` decorator above
      this decorator.
    """
    sig = signature(func)
    # Unwrap decorator layers to reach the original function's metadata.
    origin = func
    while hasattr(origin, '__wrapped__'):
        origin = origin.__wrapped__
    return type(
        origin.__name__,
        (ReusableGenerator, ),
        dict([
            ('__doc__', origin.__doc__),
            ('__module__', origin.__module__),
            ('__signature__', sig),
            # staticmethod so the wrapped callable is not re-bound on access.
            ('__wrapped__', staticmethod(func)),
        ] + [
            # Expose every call argument as a read-only property that looks
            # itself up in the stored bound arguments.
            (name, property(compose(itemgetter(name),
                                    attrgetter('_bound_args.arguments'))))
            for name in sig.parameters
        ] + ([
            # __qualname__ only exists on Python 3.
            ('__qualname__', origin.__qualname__),
        ] if sys.version_info > (3, ) else [])))
"resource": ""
} |
def sendreturn(gen, value):
    """Send an item into a generator expecting a final return value

    Parameters
    ----------
    gen: ~typing.Generator[T_yield, T_send, T_return]
        the generator to send the value to
    value: T_send
        the value to send

    Raises
    ------
    RuntimeError
        if the generator did not return as expected

    Returns
    -------
    T_return
        the generator's return value
    """
    try:
        gen.send(value)
    except StopIteration as stop:
        # The generator finished, as expected: extract its return value.
        return stopiter_value(stop)
    # The generator yielded again instead of returning.
    raise RuntimeError('generator did not return as expected')
"resource": ""
} |
def imap_send(func, gen):
    """Apply a function to all ``send`` values of a generator

    Parameters
    ----------
    func: ~typing.Callable[[T_send], T_mapped]
        the function to apply
    gen: Generable[T_yield, T_mapped, T_return]
        the generator iterable.

    Returns
    -------
    ~typing.Generator[T_yield, T_send, T_return]
        the mapped generator
    """
    gen = iter(gen)
    # The target generator must be freshly created: otherwise the first
    # mapped send would be lost.
    assert _is_just_started(gen)
    yielder = yield_from(gen)
    for item in yielder:
        with yielder:
            # Re-yield the item unchanged; apply ``func`` to whatever the
            # caller sends back before forwarding it into ``gen``.
            yielder.send(func((yield item)))
    return_(yielder.result)
"resource": ""
} |
def bug_info(exc_type, exc_value, exc_trace):
    """Print the traceback and invoke the ipython debugger on any exception

    Only invokes ipydb if you are outside ipython or python interactive session.
    So scripts must be called from OS shell in order for exceptions to ipy-shell-out.

    Dependencies:
        Needs `pip install ipdb`

    Arguments:
        exc_type (type): The exception type/class (e.g. RuntimeError)
        exc_value (Exception): The exception instance (e.g. the error message passed to the Exception constructor)
        exc_trace (Traceback): The traceback instance

    References:
        http://stackoverflow.com/a/242531/623735
    """
    if hasattr(sys, 'ps1') or not sys.stderr.isatty():
        # We are in interactive mode or don't have a tty-like device, so we call the default hook
        sys.__excepthook__(exc_type, exc_value, exc_trace)
    else:
        # Need to import non-built-ins here, so if dependencies haven't been installed, both tracebacks will print
        # (e.g. the ImportError and the Exception that got you here)
        import ipdb
        # We are NOT in interactive mode, print the exception
        traceback.print_exception(exc_type, exc_value, exc_trace)
        # Bug fix: a bare ``print`` is a no-op expression in Python 3 (it was
        # a statement in Python 2); call it to emit the intended blank line.
        print()
        # Start the debugger in post-mortem mode.
        ipdb.post_mortem(exc_trace)
"resource": ""
} |
def copy_web_file_to_local(file_path, target_path):
    """Copies a file from its location on the web to a designated
    place on the local machine.

    Args:
        file_path: Complete url of the file to copy, string
            (e.g. http://fool.com/input.css).
        target_path: Path and name of file on the local machine, string.
            (e.g. /directory/output.css)

    Returns:
        None.
    """
    # Bug fix: urlopen() yields bytes on Python 3, so writing to a
    # text-mode ('w') file raised TypeError; open the target in binary
    # mode. Context managers also guarantee both handles are closed.
    with urllib.request.urlopen(file_path) as response:
        with open(target_path, 'wb') as f:
            # Stream in chunks so large downloads are not held in memory.
            shutil.copyfileobj(response, f)
"resource": ""
} |
def get_line_count(fname):
    """Counts the number of lines in a file.

    Args:
        fname: string, name of the file.

    Returns:
        integer, the number of lines in the file (0 for an empty file).
    """
    # Bug fix: the enumerate-based original returned 1 for an empty file
    # because ``i`` was pre-initialized to 0 and then incremented.
    # Summing over the file iterator counts lines without loading the
    # whole file and naturally yields 0 when there are none.
    with open(fname) as f:
        return sum(1 for _ in f)
"resource": ""
} |
def indent_css(f, output):
    """Indents css that has not been indented and saves it to a new file.
    A new file is created if the output destination does not already exist.

    Args:
        f: string, path to input file.
        output: string, path/name of the output file (e.g. /directory/output.css).

    Returns:
        None.
    """
    # Read everything first so in-place use (f == output, as done by
    # reformat_css) is safe even though the output is truncated below.
    with open(f) as source:
        lines = source.readlines()
    # Bug fix: the output was opened with 'r+', which raises
    # FileNotFoundError for a missing file, contradicting the docstring;
    # 'w' creates the file and truncates stale content.
    with open(output, 'w') as out:
        for raw_line in lines:
            line = raw_line.rstrip()
            if len(line) > 0:
                # Property declarations (ending in ';') get indented.
                if line[-1] == ';':
                    out.write(' ' + line + '\n')
                else:
                    out.write(line + '\n')
"resource": ""
} |
def add_newlines(f, output, char):
    """Adds line breaks after every occurrence of a given character in a file.

    Args:
        f: string, path to input file.
        output: string, path to output file.
        char: string, the character after which a newline is inserted.

    Returns:
        None.
    """
    # Read first, then write: safe for in-place use (f == output), and
    # fixes the original's leaked handles (the output file was never
    # closed, so buffered data could be lost).
    with open(f) as source:
        text = source.read()
    # re.escape so regex metacharacters ('{', '*', ...) in ``char`` are
    # treated literally, matching the docstring's "character" contract.
    result = re.sub(re.escape(char), char + '\n', text)
    with open(output, 'w') as out:
        out.write(result)
"resource": ""
} |
def reformat_css(input_file, output_file):
    """Reformats poorly written css. This function does not validate or fix
    errors in the code. It only gives code the proper indentation.

    Args:
        input_file: string, path to the input file.
        output_file: string, path to where the reformatted css should be
            saved. If the target file doesn't exist, a new file is created.

    Returns:
        None.
    """
    # Read everything up front so the function is safe even when
    # input_file == output_file.
    with open(input_file) as source:
        lines = source.readlines()
    with open(output_file, 'w') as output:
        # Loop over every line in the file.
        for raw_line in lines:
            # Eliminate whitespace at the beginning and end of lines.
            string = raw_line.strip()
            # New lines after {
            string = re.sub('\{', '{\n', string)
            # New lines after ;
            string = re.sub('; ', ';', string)
            string = re.sub(';', ';\n', string)
            # Eliminate whitespace before comments.
            # Bug fix: the original used re.sub('} /*', ...), where '*'
            # quantifies '/', so '} ' followed by ANY text was mangled
            # into '}/*...'. A literal replace does what was intended.
            string = string.replace('} /*', '}/*')
            # New lines after }
            string = re.sub('\}', '}\n', string)
            # New lines at the end of comments
            string = re.sub('\*/', '*/\n', string)
            # Write to the output file.
            output.write(string)
    # Indent the css.
    indent_css(output_file, output_file)
    # Make sure there's a space before every {
    add_whitespace_before("{", output_file, output_file)
"resource": ""
} |
def clean_strings(iterable):
    """
    Strip surrounding whitespace from every string-like item in a list.

    Items without a ``strip`` method are passed through untouched.

    Args:
        iterable: mixed list

    Returns:
        mixed list
    """
    cleaned = []
    for item in iterable:
        stripper = getattr(item, 'strip', None)
        cleaned.append(item if stripper is None else stripper())
    return cleaned
"resource": ""
} |
def future_value(present_value, annual_rate, periods_per_year, years):
    """
    Calculates the future value of money invested at an annual interest
    rate, compounded a given number of times per year, for a number of years.

    Args:
        present_value: int or float, the current value of the money (principal).
        annual_rate: float 0 to 1 (e.g., .5 = 50%), the interest rate paid out.
        periods_per_year: int, the number of times money is invested per year.
        years: int, the number of years invested.

    Returns:
        Float, the future value of the money invested with compound interest.
    """
    # Nominal interest rate for a single compounding period.
    period_rate = annual_rate / float(periods_per_year)
    # Total number of compounding periods over the whole horizon.
    total_periods = periods_per_year * years
    return present_value * (1 + period_rate) ** total_periods
"resource": ""
} |
def triangle_area(point1, point2, point3):
    """
    Uses Heron's formula to find the area of a triangle
    based on the coordinates of three points.

    Args:
        point1: list or tuple, the x y coordinate of point one.
        point2: list or tuple, the x y coordinate of point two.
        point3: list or tuple, the x y coordinate of point three.

    Returns:
        The area of a triangle as a floating point number.

    Requires:
        The math module, point_distance().
    """
    # The three side lengths of the triangle.
    side_a = point_distance(point1, point2)
    side_b = point_distance(point1, point3)
    side_c = point_distance(point2, point3)
    # Semiperimeter, then Heron's formula.
    semi = (side_a + side_b + side_c) / 2.0
    return math.sqrt(semi * (semi - side_a) * (semi - side_b) * (semi - side_c))
"resource": ""
} |
def median(data):
    """
    Calculates the median of a list of integers or floating point numbers.

    Args:
        data: A list of integers or floating point numbers

    Returns:
        The middle value after sorting when the list length is odd, or the
        mean of the two middle values when the length is even.
    """
    ordered = sorted(data)
    count = len(ordered)
    middle = count // 2
    if count % 2:
        # Odd number of items: a single middle element.
        return ordered[middle]
    # Even number of items: average the two central elements.
    return (ordered[middle - 1] + ordered[middle]) / 2.0
"resource": ""
} |
def average(numbers, numtype='float'):
    """
    Calculates the average or mean of a list of numbers

    Args:
        numbers: a list of integers or floating point numbers.
        numtype: string, 'decimal' or 'float'; the type of number to return.

    Returns:
        The average (mean) of the numbers as a floating point number
        or a Decimal object.
    """
    # Bug fix: the original tested ``type == 'decimal'`` — comparing the
    # *builtin* ``type`` to a string — so the Decimal branch was
    # unreachable; the parameter is named ``numtype``.
    if numtype == 'decimal':
        return Decimal(sum(numbers)) / len(numbers)
    return float(sum(numbers)) / len(numbers)
"resource": ""
} |
def variance(numbers, type='population'):
    """
    Calculates the population or sample variance of a list of numbers.
    A large number means the results are all over the place, while a
    small number means the results are comparatively close to the average.

    Args:
        numbers: a list of integers or floating point numbers to compare.
        type: string, 'population' or 'sample', the kind of variance to be
            computed. (Kept as-is for API compatibility even though it
            shadows the builtin ``type``.)

    Returns:
        The computed population or sample variance.
        Defaults to population variance.

    Requires:
        average()
    """
    mean = average(numbers)
    total_squared_deviation = sum((mean - value) ** 2 for value in numbers)
    if type == 'population':
        return total_squared_deviation / len(numbers)
    return total_squared_deviation / (len(numbers) - 1)
"resource": ""
} |
def get_percentage(a, b, i=False, r=False):
    """
    Finds the percentage of one number over another.

    Args:
        a: The number that is a percent, int or float.
        b: The base number that a is a percent of, int or float.
        i: Optional boolean. True to return the result as a whole number.
            Assumes False.
        r: Optional boolean. True to round the result (two decimal places
            for floats). Assumes False.

    Returns:
        The argument a as a percentage of b. Warns if integer is requested
        without rounding, since the integer conversion rounds anyway.
    """
    ratio = float(a) / b
    if i is False and r is True:
        # Float result rounded to two decimal places.
        pct = round(100.0 * ratio, 2)
    elif (i is True and r is True) or (i is True and r is False):
        # Whole-number result; int() requires rounding regardless of r.
        pct = int(round(100 * ratio))
        if r is False:
            warnings.warn(
                "If integer is set to True and Round is set to False, you will still get a rounded number if you pass floating point numbers as arguments."
            )
    else:
        # Exact, unrounded value.
        pct = 100.0 * ratio
    return pct
"resource": ""
} |
def get_datetime_string(datetime_obj):
    '''
    Get datetime string from datetime object

    :param datetime datetime_obj: datetime object
    :return: formatted datetime string, or None when the input is not a
        datetime instance
    :rtype: str
    '''
    if not isinstance(datetime_obj, datetime):
        return None
    fmt = DTFormat()
    return datetime_obj.strftime(fmt.datetime_format)
"resource": ""
} |
def attr(prev, attr_name):
    """attr pipe extracts one attribute value from each incoming object.

    Objects lacking the attribute are silently skipped.

    :param prev: The previous iterator of pipe.
    :type prev: Pipe
    :param attr_name: The name of attribute
    :type attr_name: str
    :returns: generator
    """
    missing = object()  # sentinel: distinguishes "absent" from any real value
    for obj in prev:
        value = getattr(obj, attr_name, missing)
        if value is not missing:
            yield value
"resource": ""
} |
def attrs(prev, attr_names):
    """attrs pipe extracts several attribute values from each object.

    Names missing on a given object are skipped for that object, so the
    yielded list may be shorter than ``attr_names``.

    :param prev: The previous iterator of pipe.
    :type prev: Pipe
    :param attr_names: The list of attribute names
    :type attr_names: str of list
    :returns: generator
    """
    for obj in prev:
        yield [getattr(obj, name) for name in attr_names if hasattr(obj, name)]
"resource": ""
} |
def attrdict(prev, attr_names):
    """attrdict pipe extracts attribute values of each object into a dict.

    ``attr_names`` may be a list (missing attributes are skipped) or a
    dict (the dict's value is used as the fallback default for missing
    attributes).

    :param prev: The previous iterator of pipe.
    :type prev: Pipe
    :param attr_names: The list or dict of attribute names
    :type attr_names: str of list or dict
    :returns: generator
    """
    if isinstance(attr_names, dict):
        for obj in prev:
            # Dict form: the mapped value acts as a per-name default.
            yield {name: getattr(obj, name, fallback)
                   for name, fallback in attr_names.items()}
    else:
        for obj in prev:
            # List form: names the object lacks are simply omitted.
            yield {name: getattr(obj, name)
                   for name in attr_names if hasattr(obj, name)}
"resource": ""
} |
def flatten(prev, depth=sys.maxsize):
    """flatten pipe extracts nested items from the previous pipe.

    Strings and bytes are treated as atomic leaves: recursing into them
    character by character never terminates (every one-character string
    iterates to itself), which made the original crash with
    RecursionError on any string input.

    :param prev: The previous iterator of pipe.
    :type prev: Pipe
    :param depth: The deepest nested level to be extracted. 0 means no extraction.
    :type depth: integer
    :returns: generator
    """
    def _is_nested(value):
        # Iterable, but not a string/bytes leaf.
        return hasattr(value, '__iter__') and not isinstance(value, (str, bytes))

    def _walk(iterable, level, max_levels):
        # Depth-first extraction up to max_levels of nesting.
        for element in iterable:
            if _is_nested(element) and level < max_levels:
                for sub in _walk(element, level + 1, max_levels):
                    yield sub
            else:
                yield element

    for data in prev:
        if _is_nested(data) and depth > 0:
            for item in _walk(data, 1, depth):
                yield item
        else:
            yield data
"resource": ""
} |
def values(prev, *keys, **kw):
    """values pipe extracts selected values from each item of the previous pipe.

    If the incoming items are dicts, ``keys`` are dictionary keys; missing
    keys are skipped. Otherwise the items are treated as sequences and
    ``keys`` are integer indexes; out-of-range indexes are skipped.

    :param prev: The previous iterator of pipe.
    :type prev: Pipe
    :returns: generator
    """
    try:
        first = next(prev)
    except StopIteration:
        # Bug fix: a bare next() on an empty upstream raised StopIteration
        # inside this generator, which PEP 479 (Python 3.7+) converts into
        # a RuntimeError; an empty upstream now simply yields nothing.
        return
    # The first item decides dict-key vs. index mode for the whole stream.
    if isinstance(first, dict):
        yield [first[k] for k in keys if k in first]
        for d in prev:
            yield [d[k] for k in keys if k in d]
    else:
        yield [first[i] for i in keys if 0 <= i < len(first)]
        for d in prev:
            yield [d[i] for i in keys if 0 <= i < len(d)]
"resource": ""
} |
def pack(prev, n, rest=False, **kw):
    """pack pipe takes n elements from the previous generator and yields
    them to the next pipe as one list.

    :param prev: The previous iterator of pipe.
    :type prev: Pipe
    :param rest: Set True to allow to output the rest part of last elements.
    :type rest: boolean
    :param padding: Specify the padding element for the rest part of last
        elements. Supplying ``padding`` implies that the final (padded)
        group is yielded, as the examples below show.

    :returns: generator

    :Example:
    >>> result([1,2,3,4,5,6,7] | pack(3))
    [[1, 2, 3], [4, 5, 6]]
    >>> result([1,2,3,4,5,6,7] | pack(3, rest=True))
    [[1, 2, 3], [4, 5, 6], [7,]]
    >>> result([1,2,3,4,5,6,7] | pack(3, padding=None))
    [[1, 2, 3], [4, 5, 6], [7, None, None]]
    """
    use_padding = 'padding' in kw
    padding = kw['padding'] if use_padding else None

    items = []
    for i, data in enumerate(prev, 1):
        items.append(data)
        if (i % n) == 0:
            yield items
            items = []
    # Bug fix: the original required rest=True even when padding was given,
    # so the documented ``pack(3, padding=None)`` example yielded nothing
    # for the remainder; padding now implies emitting the final group.
    if items and (rest or use_padding):
        if use_padding:
            items.extend([padding] * (n - (i % n)))
        yield items
"resource": ""
} |
def grep(prev, pattern, *args, **kw):
    """Filter the incoming data with a regular expression.

    :param prev: The previous iterator of pipe.
    :type prev: Pipe
    :param pattern: The pattern which used to filter out data.
    :type pattern: str|unicode|re pattern object
    :param inv: If true, invert the match condition.
    :type inv: boolean
    :returns: generator
    """
    invert = kw.pop('inv', False)
    regex = re.compile(pattern, *args, **kw)
    for data in prev:
        # XOR flips the pass condition when inv is truthy.
        if bool(invert) ^ bool(regex.match(data)):
            yield data
"resource": ""
} |
def match(prev, pattern, *args, **kw):
    """Filter the incoming data with a regular expression and yield the
    match result.

    By default MatchObjects are yielded. The ``to`` keyword selects the
    shape of the result instead:
    dict -> MatchObject.groupdict(), tuple -> MatchObject.groups(),
    list -> list(MatchObject.groups()).

    :param prev: The previous iterator of pipe.
    :type prev: Pipe
    :param pattern: The pattern which used to filter data.
    :type pattern: str|unicode
    :param to: What data type the result should be stored. dict|tuple|list
    :type to: type
    :returns: generator
    """
    to = 'to' in kw and kw.pop('to')
    regex = re.compile(pattern, *args, **kw)

    # Choose the result converter once instead of branching per item.
    if to is dict:
        def convert(m): return m.groupdict()
    elif to is tuple:
        def convert(m): return m.groups()
    elif to is list:
        def convert(m): return list(m.groups())
    else:
        def convert(m): return m

    for data in prev:
        found = regex.match(data)
        if found is not None:
            yield convert(found)
"resource": ""
} |
def resplit(prev, pattern, *args, **kw):
    """Split each incoming string with the regular expression *pattern*.

    Use the 'maxsplit' keyword argument to limit the number of splits
    performed per string.

    :param prev: The previous iterator of pipe.
    :type prev: Pipe
    :param pattern: The pattern which used to split string.
    :type pattern: str|unicode
    """
    limit = kw.pop('maxsplit', 0)
    regex = re.compile(pattern, *args, **kw)
    for text in prev:
        yield regex.split(text, maxsplit=limit)
"resource": ""
} |
def sub(prev, pattern, repl, *args, **kw):
    """Apply ``re.sub(pattern, repl, ...)`` to every incoming string.

    :param prev: The previous iterator of pipe.
    :type prev: Pipe
    :param pattern: The pattern string.
    :type pattern: str|unicode
    :param repl: Check repl argument in re.sub method.
    :type repl: str|unicode|callable
    """
    limit = kw.pop('count', 0)
    regex = re.compile(pattern, *args, **kw)
    for text in prev:
        yield regex.sub(repl, text, count=limit)
"resource": ""
} |
def wildcard(prev, pattern, *args, **kw):
    """Filter the incoming data with a shell-style wildcard pattern.

    The wildcard is translated to a regular expression via fnmatch.

    :param prev: The previous iterator of pipe.
    :type prev: Pipe
    :param pattern: The wildcard string which used to filter data.
    :type pattern: str|unicode|re pattern object
    :param inv: If true, invert the match condition.
    :type inv: boolean
    :returns: generator
    """
    import fnmatch

    invert = 'inv' in kw and kw.pop('inv')
    regex = re.compile(fnmatch.translate(pattern), *args, **kw)
    for data in prev:
        # Single loop with XOR replaces the original's duplicated branches.
        if bool(invert) ^ bool(regex.match(data)):
            yield data
"resource": ""
} |
def stdout(prev, endl='\n', thru=False):
    """Write every incoming item to sys.stdout.

    :param prev: The previous iterator of pipe.
    :type prev: Pipe
    :param endl: The end-of-line symbol for each output.
    :type endl: str
    :param thru: If true, data will passed to next generator. If false,
        data will be dropped.
    :type thru: bool
    :returns: generator
    """
    write = sys.stdout.write
    for data in prev:
        write(str(data) + endl)
        if thru:
            yield data
"resource": ""
} |
def readline(prev, filename=None, mode='r', trim=str.rstrip, start=1, end=sys.maxsize):
    """This pipe get filenames or file object from previous pipe and read the
    content of file. Then, send the content of file line by line to next pipe.

    The start and end parameters are used to limit the range of reading from file.

    :param prev: The previous iterator of pipe.
    :type prev: Pipe
    :param filename: The files to be read. If None, use previous pipe input as filenames.
    :type filename: None|str|unicode|list|tuple
    :param mode: The mode to open file. default is 'r'
    :type mode: str
    :param trim: The function to trim the line before send to next pipe.
    :type trim: function object.
    :param start: if star is specified, only line number larger or equal to start will be sent.
    :type start: integer
    :param end: The last line number to read.
    :type end: integer
    :returns: generator
    """
    # Decide where the file names come from: explicit argument or upstream.
    if prev is None:
        if filename is None:
            raise Exception('No input available for readline.')
        elif is_str_type(filename):
            file_list = [filename, ]
        else:
            file_list = filename
    else:
        file_list = prev

    for fn in file_list:
        # Already-open file objects are used as-is and not closed here.
        if isinstance(fn, file_type):
            fd = fn
        else:
            fd = open(fn, mode)
        try:
            if start <= 1 and end == sys.maxsize:
                # Fast path: no range limiting needed.
                for line in fd:
                    yield trim(line)
            else:
                # Line numbers are 1-based; skip until start, stop at end.
                for line_no, line in enumerate(fd, 1):
                    if line_no < start:
                        continue
                    yield trim(line)
                    if line_no >= end:
                        break
        finally:
            # Only close files this function opened itself.
            if fd != fn:
                fd.close()
"resource": ""
} |
def sh(prev, *args, **kw):
    """sh pipe execute shell command specified by args. If previous pipe exists,
    read data from it and write it to stdin of shell process. The stdout of
    shell process will be passed to next pipe object line by line.

    A optional keyword argument 'trim' can pass a function into sh pipe. It is
    used to trim the output from shell process. The default trim function is
    str.rstrip. Therefore, any space characters in tail of
    shell process output line will be removed.

    For example:

    py_files = result(sh('ls') | strip | wildcard('*.py'))

    :param prev: The previous iterator of pipe.
    :type prev: Pipe
    :param args: The command line arguments. It will be joined by space character.
    :type args: list of string.
    :param kw: arguments for subprocess.Popen.
    :type kw: dictionary of options.
    :returns: generator
    """
    # Pop pipe-specific options before kw is forwarded to Popen.
    endl = '\n' if 'endl' not in kw else kw.pop('endl')
    trim = None if 'trim' not in kw else kw.pop('trim')
    if trim is None:
        # Process output is bytes on Python 3, str on Python 2.
        trim = bytes.rstrip if is_py3 else str.rstrip

    cmdline = ' '.join(args)
    if not cmdline:
        # No command given: act as a pass-through (or yield None forever).
        # NOTE(review): the pass-through branch falls through to Popen('')
        # below after the upstream is exhausted — confirm this is intended.
        if prev is not None:
            for i in prev:
                yield i
        else:
            while True:
                yield None

    process = subprocess.Popen(cmdline, shell=True,
                               stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                               **kw)
    if prev is not None:
        # Buffer all upstream data, then feed it to the process's stdin
        # in one write and close it so the process sees EOF.
        stdin_buffer = StringIO()
        for i in prev:
            stdin_buffer.write(i)
            if endl:
                stdin_buffer.write(endl)
        if is_py3:
            process.stdin.write(stdin_buffer.getvalue().encode('utf-8'))
        else:
            process.stdin.write(stdin_buffer.getvalue())
        process.stdin.flush()
        process.stdin.close()
        stdin_buffer.close()

    for line in process.stdout:
        yield trim(line)

    process.wait()
"resource": ""
} |
def walk(prev, inital_path, *args, **kw):
    """Wrap os.walk and yield the absolute path of every file found
    under *inital_path*, one by one.

    :param prev: The previous iterator of pipe (unused).
    :type prev: Pipe
    :returns: generator
    """
    for current_dir, _subdirs, file_names in os.walk(inital_path):
        for name in file_names:
            yield os.path.join(current_dir, name)
"resource": ""
} |
def join(prev, sep, *args, **kw):
    '''alias of str.join: concatenate all upstream items with *sep* and
    yield the single joined string.'''
    joined = sep.join(prev, *args, **kw)
    yield joined
"resource": ""
} |
def substitute(prev, *args, **kw):
    '''alias of string.Template.substitute: render each incoming mapping
    through the template built from *args*/*kw*.'''
    template = string.Template(*args, **kw)
    for mapping in prev:
        yield template.substitute(mapping)
"resource": ""
} |
def safe_substitute(prev, *args, **kw):
    """Render each item from the previous pipe through
    string.Template.safe_substitute (missing placeholders are left as-is)."""
    # The template is built once; each incoming mapping is rendered with it.
    template = string.Template(*args, **kw)
    for rendered in map(template.safe_substitute, prev):
        yield rendered
"resource": ""
} |
def to_str(prev, encoding=None):
    """Convert data from previous pipe with specified encoding.

    The first item decides the direction: str items pass through unchanged
    (or are encoded to bytes when *encoding* is given); non-str (bytes)
    items are decoded using *encoding*, falling back to sys.stdout.encoding
    or 'utf-8'.
    """
    head = next(prev)
    if isinstance(head, str):
        # str input: identity when no encoding requested, else encode.
        convert = None if encoding is None else (lambda s: s.encode(encoding))
    else:
        # bytes input: always decode; pick a codec lazily here.
        codec = encoding if encoding is not None else (sys.stdout.encoding or 'utf-8')
        convert = lambda s: s.decode(codec)
    if convert is None:
        yield head
        for item in prev:
            yield item
    else:
        yield convert(head)
        for item in prev:
            yield convert(item)
"resource": ""
} |
def register_default_types():
    """Register all default type-to-pipe convertors (table-driven)."""
    # Each entry maps a Python type to the pipe wrapper it should get.
    default_mappings = [
        (type, pipe.map),
        (types.FunctionType, pipe.map),
        (types.MethodType, pipe.map),
        (tuple, seq),
        (list, seq),
        (types.GeneratorType, seq),
        (string_type, sh),
        (unicode_type, sh),
        (file_type, fileobj),
    ]
    if is_py3:
        # range and map objects only exist as lazy iterables on Python 3.
        default_mappings.append((range, seq))
        default_mappings.append((map, seq))
    for data_type, convertor in default_mappings:
        register_type(data_type, convertor)
"resource": ""
} |
def get_dict(self):
    '''
    Convert Paginator instance to dict
    :return: Paging data (current page, totals, page size and items)
    :rtype: dict
    '''
    return {
        'current_page': self.current_page,
        'total_page_count': self.total_page_count,
        'items': self.items,
        'total_item_count': self.total_item_count,
        'page_size': self.page_size,
    }
"resource": ""
} |
def check_pidfile(pidfile, debug):
    """Check that a process is not running more than once, using PIDFILE.

    :param pidfile: path of the file used to record the running PID
    :param debug: verbosity level, passed through to check_pid
    :returns: True if another instance is already running; otherwise the
        current PID is written to *pidfile* and False is returned
    """
    # Check PID file exists and see if the PID recorded in it is running
    if os.path.isfile(pidfile):
        pid = None
        try:
            # 'with' guarantees the handle is closed even if int() raises
            # (the original leaked the handle on a corrupt PID file).
            with open(pidfile, 'r') as pidfile_handle:
                pid = int(pidfile_handle.read().strip())
        except (IOError, OSError, ValueError):
            # Unreadable or corrupt PID file: treat it as stale.
            pass
        if pid is not None and check_pid(pid, debug):
            return True
        # PID is not active, remove the stale PID file
        os.unlink(pidfile)
    # Create a PID file, to ensure this script is only run once (at a time)
    with open(pidfile, 'w') as pidfile_handle:
        pidfile_handle.write(str(os.getpid()))
    return False
"resource": ""
} |
def check_pid(pid, debug):
    """Return True when *pid* belongs to a currently running process."""
    try:
        # Signal 0 does not kill anything; it only probes for existence.
        os.kill(pid, 0)
    except OSError:
        if debug > 1:
            print("Script does not appear to be running")
        return False
    if debug > 1:
        print("Script has a PIDFILE where the process is still running")
    return True
"resource": ""
} |
def disown(debug):
    """Detach this process from the "ardexa.service" cgroups so the Ardexa
    service can be restarted without this script being killed with it.

    Reads /proc/<pid>/cgroup and, for every controller line that mentions
    "ardexa.service", creates an "ardexa.disown" sub-cgroup under
    /sys/fs/cgroup and moves the current PID into it via run_program.

    :param debug: verbosity level; >= 1 prints directory/command details
    :returns: False if the cgroup file could not be opened or the final
        grep check fires; True otherwise
    """
    # Get the current PID
    pid = os.getpid()
    cgroup_file = "/proc/" + str(pid) + "/cgroup"
    try:
        infile = open(cgroup_file, "r")
    except IOError:
        print("Could not open cgroup file: ", cgroup_file)
        return False
    # Read each line
    for line in infile:
        # Check if the line contains "ardexa.service"
        if line.find("ardexa.service") == -1:
            continue
        # if the lines contains "name=", replace it with nothing
        line = line.replace("name=", "")
        # Split the line by commas
        # (format is "<id>:<controllers>:<path>"; field 1 is the controllers)
        items_list = line.split(':')
        accounts = items_list[1]
        dir_str = accounts + "/ardexa.disown"
        # If accounts is empty, continue
        if not accounts:
            continue
        # Create the dir and all subdirs
        full_dir = "/sys/fs/cgroup/" + dir_str
        if not os.path.exists(full_dir):
            os.makedirs(full_dir)
            if debug >= 1:
                print("Making directory: ", full_dir)
        else:
            if debug >= 1:
                print("Directory already exists: ", full_dir)
        # Add the PID to the file
        # (run with shell=True because of the ">" redirection)
        full_path = full_dir + "/cgroup.procs"
        prog_list = ["echo", str(pid), ">", full_path]
        run_program(prog_list, debug, True)
        # If this item contains a comma, then separate it, and reverse
        # some OSes will need cpuacct,cpu reversed to actually work
        if accounts.find(",") != -1:
            acct_list = accounts.split(',')
            accounts = acct_list[1] + "," + acct_list[0]
            dir_str = accounts + "/ardexa.disown"
            # Create the dir and all subdirs. But it may not work. So use a TRY
            # (best-effort: the reversed controller name may not exist)
            full_dir = "/sys/fs/cgroup/" + dir_str
            try:
                if not os.path.exists(full_dir):
                    os.makedirs(full_dir)
            except:
                continue
            # Add the PID to the file
            full_path = full_dir + "/cgroup.procs"
            prog_list = ["echo", str(pid), ">", full_path]
            run_program(prog_list, debug, True)
    infile.close()
    # For debug purposes only
    if debug >= 1:
        prog_list = ["cat", cgroup_file]
        run_program(prog_list, debug, False)
    # If there are any "ardexa.service" in the proc file. If so, exit with error
    # NOTE(review): run_program returns bool(returncode), and grep -q exits 0
    # on a match — so this branch fires when grep finds NO match. Verify the
    # intended semantics against callers before relying on the return value.
    prog_list = ["grep", "-q", "ardexa.service", cgroup_file]
    if run_program(prog_list, debug, False):
        # There are entries still left in the file
        return False
    return True
"resource": ""
} |
def run_program(prog_list, debug, shell):
    """Run a program and check program return code. Note that some commands
    don't work well with Popen. So if this function is specifically called
    with 'shell=True', then it will run the old 'os.system'. In which case,
    there is no program output.

    :param prog_list: command and arguments as a list of strings
    :param debug: verbosity level; >= 1 prints command, return code and output
    :param shell: when True, join the list and run it via os.system instead
    :returns: for shell=False, ``bool(returncode)`` — i.e. True when the
        command exited NON-zero (kept for backward compatibility with
        existing callers); for shell=True, always True; False if launching
        the program raised an exception
    """
    try:
        if not shell:
            process = Popen(prog_list, stdout=PIPE, stderr=PIPE)
            stdout, stderr = process.communicate()
            retcode = process.returncode
            if debug >= 1:
                print("Program : ", " ".join(prog_list))
                print("Return Code: ", retcode)
                print("Stdout: ", stdout)
                print("Stderr: ", stderr)
            return bool(retcode)
        else:
            command = " ".join(prog_list)
            os.system(command)
            return True
    # 'except Exception' (not a bare 'except:') so KeyboardInterrupt and
    # SystemExit are not silently swallowed.
    except Exception:
        return False
"resource": ""
} |
def parse_address_list(addrs):
    """Yield each integer from a complex range string like "1-9,12,15-20,23"
    >>> list(parse_address_list('1-9,12,15-20,23'))
    [1, 2, 3, 4, 5, 6, 7, 8, 9, 12, 15, 16, 17, 18, 19, 20, 23]
    >>> list(parse_address_list('1-9,12,15-20,2-3-4'))
    Traceback (most recent call last):
    ...
    ValueError: format error in 2-3-4
    """
    for token in addrs.split(','):
        parts = token.split('-')
        if len(parts) == 1:
            # a single number
            yield int(parts[0])
        elif len(parts) == 2:
            # an inclusive range "start-end"
            start, end = int(parts[0]), int(parts[1])
            for value in range(start, end + 1):
                yield value
        else:
            # more than one hyphen is malformed
            raise ValueError('format error in %s' % token)
"resource": ""
} |
def _encode_ids(*args):
    """
    URL-quote each resource id and join them with ';'.

    Note: Python-2 code path — relies on basestring/unicode and urllib.quote.
    """
    encoded = []
    for value in args:
        if isinstance(value, basestring):
            # unicode ids are UTF-8 encoded first; plain str passes through
            raw = value.encode('utf-8') if isinstance(value, unicode) else value
        else:
            # non-string ids (e.g. ints) are stringified before quoting
            raw = str(value)
        encoded.append(urllib.quote(raw))
    return ';'.join(encoded)
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.