text stringlengths 81 112k |
|---|
Return the literal representation of a numeric tag.
def serialize_numeric(self, tag):
    """Serialize a numeric tag to its snbt literal form.

    Uses the builtin int/float __str__ directly so tag subclasses cannot
    override the textual form, then appends the tag's type suffix.
    """
    if isinstance(tag, int):
        literal = int.__str__(tag)
    else:
        literal = float.__str__(tag)
    return literal + tag.suffix
Return the literal representation of an array tag.
def serialize_array(self, tag):
    """Serialize an array tag to its snbt literal form.

    Each element gets the array's item suffix; the whole body is wrapped
    in brackets with the array's type prefix (e.g. ``[B; 1b, 2b]``).
    """
    suffixed = [f'{element}{tag.item_suffix}' for element in tag]
    body = self.comma.join(suffixed)
    return f'[{tag.array_prefix}{self.semicolon}{body}]'
Return the literal representation of a list tag.
def serialize_list(self, tag):
    """Serialize a list tag to its snbt literal form.

    Enters a nesting level; when the serializer decides the tag should be
    expanded, the separator/format pair is swapped for the multi-line
    variants before the elements are joined.
    """
    separator = self.comma
    fmt = '[{}]'
    with self.depth():
        if self.should_expand(tag):
            separator, fmt = self.expand(separator, fmt)
        parts = [self.serialize(element) for element in tag]
        return fmt.format(separator.join(parts))
Return the literal representation of a compound tag.
def serialize_compound(self, tag):
    """Serialize a compound tag to its snbt literal form.

    Keys are passed through stringify_compound_key (quoting when needed)
    and joined with the colon to their serialized values; the expand
    machinery switches to multi-line output for large/nested compounds.
    """
    separator = self.comma
    fmt = '{{{}}}'
    with self.depth():
        if self.should_expand(tag):
            separator, fmt = self.expand(separator, fmt)
        entries = (
            f'{self.stringify_compound_key(key)}{self.colon}{self.serialize(value)}'
            for key, value in tag.items()
        )
        return fmt.format(separator.join(entries))
Return the _column_map without unused optional fields
def populated_column_map(self):
'''Return the _column_map without unused optional fields'''
column_map = []
cls = self.model
for csv_name, field_pattern in cls._column_map:
# Separate the local field name from foreign columns
if '__' in field_pattern:
field_name = field_pattern.split('__', 1)[0]
else:
field_name = field_pattern
# Handle point fields
point_match = re_point.match(field_name)
if point_match:
field = None
else:
field = cls._meta.get_field(field_name)
# Only add optional columns if they are used in the records
if field and field.blank and not field.has_default():
kwargs = {field_name: get_blank_value(field)}
if self.exclude(**kwargs).exists():
column_map.append((csv_name, field_pattern))
else:
column_map.append((csv_name, field_pattern))
return column_map |
Return the objects in the target feed
def in_feed(self, feed):
    '''Return a queryset of the objects that belong to *feed*.

    The model declares which of its fields links to the feed via
    _rel_to_feed; filter on that field.
    '''
    return self.filter(**{self.model._rel_to_feed: feed})
Import from the GTFS text file
def import_txt(cls, txt_file, feed, filter_func=None):
'''Import from the GTFS text file'''
# Setup the conversion from GTFS to Django Format
# Conversion functions
def no_convert(value): return value
def date_convert(value): return datetime.strptime(value, '%Y%m%d')
def bool_convert(value): return (value == '1')
def char_convert(value): return (value or '')
def null_convert(value): return (value or None)
def point_convert(value):
"""Convert latitude / longitude, strip leading +."""
if value.startswith('+'):
return value[1:]
else:
return (value or 0.0)
cache = {}
def default_convert(field):
def get_value_or_default(value):
if value == '' or value is None:
return field.get_default()
else:
return value
return get_value_or_default
def instance_convert(field, feed, rel_name):
def get_instance(value):
if value.strip():
related = field.related_model
key1 = "{}:{}".format(related.__name__, rel_name)
key2 = text_type(value)
# Load existing objects
if key1 not in cache:
pairs = related.objects.filter(
**{related._rel_to_feed: feed}).values_list(
rel_name, 'id')
cache[key1] = dict((text_type(x), i) for x, i in pairs)
# Create new?
if key2 not in cache[key1]:
kwargs = {
related._rel_to_feed: feed,
rel_name: value}
cache[key1][key2] = related.objects.create(
**kwargs).id
return cache[key1][key2]
else:
return None
return get_instance
# Check unique fields
column_names = [c for c, _ in cls._column_map]
for unique_field in cls._unique_fields:
assert unique_field in column_names, \
'{} not in {}'.format(unique_field, column_names)
# Map of field_name to converters from GTFS to Django format
val_map = dict()
name_map = dict()
point_map = dict()
for csv_name, field_pattern in cls._column_map:
# Separate the local field name from foreign columns
if '__' in field_pattern:
field_base, rel_name = field_pattern.split('__', 1)
field_name = field_base + '_id'
else:
field_name = field_base = field_pattern
# Use the field name in the name mapping
name_map[csv_name] = field_name
# Is it a point field?
point_match = re_point.match(field_name)
if point_match:
field = None
else:
field = cls._meta.get_field(field_base)
# Pick a conversion function for the field
if point_match:
converter = point_convert
elif isinstance(field, models.DateField):
converter = date_convert
elif isinstance(field, models.BooleanField):
converter = bool_convert
elif isinstance(field, models.CharField):
converter = char_convert
elif field.is_relation:
converter = instance_convert(field, feed, rel_name)
assert not isinstance(field, models.ManyToManyField)
elif field.null:
converter = null_convert
elif field.has_default():
converter = default_convert(field)
else:
converter = no_convert
if point_match:
index = int(point_match.group('index'))
point_map[csv_name] = (index, converter)
else:
val_map[csv_name] = converter
# Read and convert the source txt
csv_reader = reader(txt_file, skipinitialspace=True)
unique_line = dict()
count = 0
first = True
extra_counts = defaultdict(int)
new_objects = []
for row in csv_reader:
if first:
# Read the columns
columns = row
if columns[0].startswith(CSV_BOM):
columns[0] = columns[0][len(CSV_BOM):]
first = False
continue
if filter_func and not filter_func(zip(columns, row)):
continue
if not row:
continue
# Read a data row
fields = dict()
point_coords = [None, None]
ukey_values = {}
if cls._rel_to_feed == 'feed':
fields['feed'] = feed
for column_name, value in zip(columns, row):
if column_name not in name_map:
val = null_convert(value)
if val is not None:
fields.setdefault('extra_data', {})[column_name] = val
extra_counts[column_name] += 1
elif column_name in val_map:
fields[name_map[column_name]] = val_map[column_name](value)
else:
assert column_name in point_map
pos, converter = point_map[column_name]
point_coords[pos] = converter(value)
# Is it part of the unique key?
if column_name in cls._unique_fields:
ukey_values[column_name] = value
# Join the lat/long into a point
if point_map:
assert point_coords[0] and point_coords[1]
fields['point'] = "POINT(%s)" % (' '.join(point_coords))
# Is the item unique?
ukey = tuple(ukey_values.get(u) for u in cls._unique_fields)
if ukey in unique_line:
logger.warning(
'%s line %d is a duplicate of line %d, not imported.',
cls._filename, csv_reader.line_num, unique_line[ukey])
continue
else:
unique_line[ukey] = csv_reader.line_num
# Create after accumulating a batch
new_objects.append(cls(**fields))
if len(new_objects) % batch_size == 0: # pragma: no cover
cls.objects.bulk_create(new_objects)
count += len(new_objects)
logger.info(
"Imported %d %s",
count, cls._meta.verbose_name_plural)
new_objects = []
# Create remaining objects
if new_objects:
cls.objects.bulk_create(new_objects)
# Take note of extra fields
if extra_counts:
extra_columns = feed.meta.setdefault(
'extra_columns', {}).setdefault(cls.__name__, [])
for column in columns:
if column in extra_counts and column not in extra_columns:
extra_columns.append(column)
feed.save()
return len(unique_line) |
Export records as a GTFS comma-separated file
def export_txt(cls, feed):
'''Export records as a GTFS comma-separated file'''
objects = cls.objects.in_feed(feed)
# If no records, return None
if not objects.exists():
return
# Get the columns used in the dataset
column_map = objects.populated_column_map()
columns, fields = zip(*column_map)
extra_columns = feed.meta.get(
'extra_columns', {}).get(cls.__name__, [])
# Get sort order
if hasattr(cls, '_sort_order'):
sort_fields = cls._sort_order
else:
sort_fields = []
for field in fields:
base_field = field.split('__', 1)[0]
point_match = re_point.match(base_field)
if point_match:
continue
field_type = cls._meta.get_field(base_field)
assert not isinstance(field_type, ManyToManyField)
sort_fields.append(field)
# Create CSV writer
out = StringIO()
csv_writer = writer(out, lineterminator='\n')
# Write header row
header_row = [text_type(c) for c in columns]
header_row.extend(extra_columns)
write_text_rows(csv_writer, [header_row])
# Report the work to be done
total = objects.count()
logger.info(
'%d %s to export...',
total, cls._meta.verbose_name_plural)
# Populate related items cache
model_to_field_name = {}
cache = {}
for field_name in fields:
if '__' in field_name:
local_field_name, subfield_name = field_name.split('__', 1)
field = cls._meta.get_field(local_field_name)
field_type = field.related_model
model_name = field_type.__name__
if model_name in model_to_field_name:
# Already loaded this model under a different field name
cache[field_name] = cache[model_to_field_name[model_name]]
else:
# Load all feed data for this model
pairs = field_type.objects.in_feed(
feed).values_list('id', subfield_name)
cache[field_name] = dict(
(i, text_type(x)) for i, x in pairs)
cache[field_name][None] = u''
model_to_field_name[model_name] = field_name
# Assemble the rows, writing when we hit batch size
count = 0
rows = []
for item in objects.order_by(*sort_fields).iterator():
row = []
for csv_name, field_name in column_map:
obj = item
point_match = re_point.match(field_name)
if '__' in field_name:
# Return relations from cache
local_field_name = field_name.split('__', 1)[0]
field_id = getattr(obj, local_field_name + '_id')
row.append(cache[field_name][field_id])
elif point_match:
# Get the lat or long from the point
name, index = point_match.groups()
field = getattr(obj, name)
row.append(field.coords[int(index)])
else:
# Handle other field types
field = getattr(obj, field_name) if obj else ''
if isinstance(field, date):
formatted = field.strftime(u'%Y%m%d')
row.append(text_type(formatted))
elif isinstance(field, bool):
row.append(1 if field else 0)
elif field is None:
row.append(u'')
else:
row.append(text_type(field))
for col in extra_columns:
row.append(obj.extra_data.get(col, u''))
rows.append(row)
if len(rows) % batch_size == 0: # pragma: no cover
write_text_rows(csv_writer, rows)
count += len(rows)
logger.info(
"Exported %d %s",
count, cls._meta.verbose_name_plural)
rows = []
# Write rows smaller than batch size
write_text_rows(csv_writer, rows)
return out.getvalue() |
Turn an AndroidApi's method into a function that builds the request,
sends it, then passes the response to the actual method. Should be used
as a decorator.
def make_android_api_method(req_method, secure=True, version=0):
"""Turn an AndroidApi's method into a function that builds the request,
sends it, then passes the response to the actual method. Should be used
as a decorator.
"""
def outer_func(func):
def inner_func(self, **kwargs):
req_url = self._build_request_url(secure, func.__name__, version)
req_func = self._build_request(req_method, req_url, params=kwargs)
response = req_func()
func(self, response)
return response
return inner_func
return outer_func |
Get the params that will be included with every request
def _get_base_params(self):
"""Get the params that will be included with every request
"""
base_params = {
'locale': self._get_locale(),
'device_id': ANDROID.DEVICE_ID,
'device_type': ANDROID.APP_PACKAGE,
'access_token': ANDROID.ACCESS_TOKEN,
'version': ANDROID.APP_CODE,
}
base_params.update(dict((k, v) \
for k, v in iteritems(self._state_params) \
if v is not None))
return base_params |
Build a URL for a API method request
def _build_request_url(self, secure, api_method, version):
"""Build a URL for a API method request
"""
if secure:
proto = ANDROID.PROTOCOL_SECURE
else:
proto = ANDROID.PROTOCOL_INSECURE
req_url = ANDROID.API_URL.format(
protocol=proto,
api_method=api_method,
version=version
)
return req_url |
Get if the session is premium for a given media type
@param str media_type Should be one of ANDROID.MEDIA_TYPE_*
@return bool
def is_premium(self, media_type):
"""Get if the session is premium for a given media type
@param str media_type Should be one of ANDROID.MEDIA_TYPE_*
@return bool
"""
if self.logged_in:
if media_type in self._user_data['premium']:
return True
return False |
Login using email/username and password, used to get the auth token
@param str account
@param str password
@param int duration (optional)
def login(self, response):
"""
Login using email/username and password, used to get the auth token
@param str account
@param str password
@param int duration (optional)
"""
self._state_params['auth'] = response['auth']
self._user_data = response['user']
if not self.logged_in:
raise ApiLoginFailure(response) |
Read a numeric value from a file-like object.
def read_numeric(fmt, buff, byteorder='big'):
"""Read a numeric value from a file-like object."""
try:
fmt = fmt[byteorder]
return fmt.unpack(buff.read(fmt.size))[0]
except StructError:
return 0
except KeyError as exc:
raise ValueError('Invalid byte order') from exc |
Write a numeric value to a file-like object.
def write_numeric(fmt, value, buff, byteorder='big'):
"""Write a numeric value to a file-like object."""
try:
buff.write(fmt[byteorder].pack(value))
except KeyError as exc:
raise ValueError('Invalid byte order') from exc |
Read a string from a file-like object.
def read_string(buff, byteorder='big'):
"""Read a string from a file-like object."""
length = read_numeric(USHORT, buff, byteorder)
return buff.read(length).decode('utf-8') |
Write a string to a file-like object.
def write_string(value, buff, byteorder='big'):
"""Write a string to a file-like object."""
data = value.encode('utf-8')
write_numeric(USHORT, len(data), buff, byteorder)
buff.write(data) |
Infer a list subtype from a collection of items.
def infer_list_subtype(items):
"""Infer a list subtype from a collection of items."""
subtype = End
for item in items:
item_type = type(item)
if not issubclass(item_type, Base):
continue
if subtype is End:
subtype = item_type
if not issubclass(subtype, List):
return subtype
elif subtype is not item_type:
stype, itype = subtype, item_type
generic = List
while issubclass(stype, List) and issubclass(itype, List):
stype, itype = stype.subtype, itype.subtype
generic = List[generic]
if stype is End:
subtype = item_type
elif itype is not End:
return generic.subtype
return subtype |
Cast list item to the appropriate tag type.
def cast_item(cls, item):
"""Cast list item to the appropriate tag type."""
if not isinstance(item, cls.subtype):
incompatible = isinstance(item, Base) and not any(
issubclass(cls.subtype, tag_type) and isinstance(item, tag_type)
for tag_type in cls.all_tags.values()
)
if incompatible:
raise IncompatibleItemType(item, cls.subtype)
try:
return cls.subtype(item)
except EndInstantiation:
raise ValueError('List tags without an explicit subtype must '
'either be empty or instantiated with '
'elements from which a subtype can be '
'inferred') from None
except (IncompatibleItemType, CastError):
raise
except Exception as exc:
raise CastError(item, cls.subtype) from exc
return item |
Recursively merge tags from another compound.
def merge(self, other):
"""Recursively merge tags from another compound."""
for key, value in other.items():
if key in self and (isinstance(self[key], Compound)
and isinstance(value, dict)):
self[key].merge(value)
else:
self[key] = value |
Decrypt encrypted subtitle data in high level model object
@param crunchyroll.models.Subtitle subtitle
@return str
def decrypt_subtitle(self, subtitle):
"""Decrypt encrypted subtitle data in high level model object
@param crunchyroll.models.Subtitle subtitle
@return str
"""
return self.decrypt(self._build_encryption_key(int(subtitle.id)),
subtitle['iv'][0].text.decode('base64'),
subtitle['data'][0].text.decode('base64')) |
Decrypt encrypted subtitle data
@param int subtitle_id
@param str iv
@param str encrypted_data
@return str
def decrypt(self, encryption_key, iv, encrypted_data):
"""Decrypt encrypted subtitle data
@param int subtitle_id
@param str iv
@param str encrypted_data
@return str
"""
logger.info('Decrypting subtitles with length (%d bytes), key=%r',
len(encrypted_data), encryption_key)
return zlib.decompress(aes_decrypt(encryption_key, iv, encrypted_data)) |
Generate the encryption key for a given media item
Encryption key is basically just
sha1(<magic value based on subtitle_id> + '"#$&).6CXzPHw=2N_+isZK') then
padded with 0s to 32 chars
@param int subtitle_id
@param int key_size
@return str
def _build_encryption_key(self, subtitle_id, key_size=ENCRYPTION_KEY_SIZE):
"""Generate the encryption key for a given media item
Encryption key is basically just
sha1(<magic value based on subtitle_id> + '"#$&).6CXzPHw=2N_+isZK') then
padded with 0s to 32 chars
@param int subtitle_id
@param int key_size
@return str
"""
# generate a 160-bit SHA1 hash
sha1_hash = hashlib.new('sha1', self._build_hash_secret((1, 2)) +
self._build_hash_magic(subtitle_id)).digest()
# pad to 256-bit hash for 32 byte key
sha1_hash += '\x00' * max(key_size - len(sha1_hash), 0)
return sha1_hash[:key_size] |
Build the other half of the encryption key hash
I have no idea what is going on here
@param int subtitle_id
@return str
def _build_hash_magic(self, subtitle_id):
"""Build the other half of the encryption key hash
I have no idea what is going on here
@param int subtitle_id
@return str
"""
media_magic = self.HASH_MAGIC_CONST ^ subtitle_id
hash_magic = media_magic ^ media_magic >> 3 ^ media_magic * 32
return str(hash_magic) |
Build a seed for the hash based on the Fibonacci sequence
Take first `seq_len` + len(`seq_seed`) characters of Fibonacci
sequence, starting with `seq_seed`, and applying e % `mod_value` +
`HASH_SECRET_CHAR_OFFSET` to the resulting sequence, then return as
a string
@param tuple|list seq_seed
@param int seq_len
@param int mod_value
@return str
def _build_hash_secret(self, seq_seed, seq_len=HASH_SECRET_LENGTH,
mod_value=HASH_SECRET_MOD_CONST):
"""Build a seed for the hash based on the Fibonacci sequence
Take first `seq_len` + len(`seq_seed`) characters of Fibonacci
sequence, starting with `seq_seed`, and applying e % `mod_value` +
`HASH_SECRET_CHAR_OFFSET` to the resulting sequence, then return as
a string
@param tuple|list seq_seed
@param int seq_len
@param int mod_value
@return str
"""
# make sure we use a list, tuples are immutable
fbn_seq = list(seq_seed)
for i in range(seq_len):
fbn_seq.append(fbn_seq[-1] + fbn_seq[-2])
hash_secret = list(map(
lambda c: chr(c % mod_value + self.HASH_SECRET_CHAR_OFFSET),
fbn_seq[2:]))
return ''.join(hash_secret) |
Turn a string containing the subs xml document into the formatted
subtitle string
@param str|crunchyroll.models.StyledSubtitle sub_xml_text
@return str
def format(self, subtitles):
"""Turn a string containing the subs xml document into the formatted
subtitle string
@param str|crunchyroll.models.StyledSubtitle sub_xml_text
@return str
"""
logger.debug('Formatting subtitles (id=%s) with %s',
subtitles.id, self.__class__.__name__)
return self._format(subtitles).encode('utf-8') |
Check if API sessions are started and start them if not
def require_session_started(func):
"""Check if API sessions are started and start them if not
"""
@functools.wraps(func)
def inner_func(self, *pargs, **kwargs):
if not self.session_started:
logger.info('Starting session for required meta method')
self.start_session()
return func(self, *pargs, **kwargs)
return inner_func |
Check if the android API is logged in and log in if not; implies
`require_session_started`
def require_android_logged_in(func):
"""Check if andoid API is logged in and login if not, implies
`require_session_started`
"""
@functools.wraps(func)
@require_session_started
def inner_func(self, *pargs, **kwargs):
if not self._android_api.logged_in:
logger.info('Logging into android API for required meta method')
if not self.has_credentials:
raise ApiLoginFailure(
'Login is required but no credentials were provided')
self._android_api.login(account=self._state['username'],
password=self._state['password'])
return func(self, *pargs, **kwargs)
return inner_func |
Check if the android manga API is logged in and log in if credentials were
provided; implies `require_session_started`
def optional_manga_logged_in(func):
"""Check if andoid manga API is logged in and login if credentials were provided,
implies `require_session_started`
"""
@functools.wraps(func)
@require_session_started
def inner_func(self, *pargs, **kwargs):
if not self._manga_api.logged_in and self.has_credentials:
logger.info('Logging into android manga API for optional meta method')
self._manga_api.cr_login(account=self._state['username'],
password=self._state['password'])
return func(self, *pargs, **kwargs)
return inner_func |
Check if ajax API is logged in and login if not
def require_ajax_logged_in(func):
"""Check if ajax API is logged in and login if not
"""
@functools.wraps(func)
def inner_func(self, *pargs, **kwargs):
if not self._ajax_api.logged_in:
logger.info('Logging into AJAX API for required meta method')
if not self.has_credentials:
raise ApiLoginFailure(
'Login is required but no credentials were provided')
self._ajax_api.User_Login(name=self._state['username'],
password=self._state['password'])
return func(self, *pargs, **kwargs)
return inner_func |
Start the underlying APIs sessions
Calling this is not required, it will be called automatically if
a method that needs a session is called
@return bool
def start_session(self):
"""Start the underlying APIs sessions
Calling this is not required, it will be called automatically if
a method that needs a session is called
@return bool
"""
self._android_api.start_session()
self._manga_api.cr_start_session()
return self.session_started |
Login with the given username/email and password
Calling this method is not required if credentials were provided in
the constructor, but it could be used to switch users or something maybe
@return bool
def login(self, username, password):
"""Login with the given username/email and password
Calling this method is not required if credentials were provided in
the constructor, but it could be used to switch users or something maybe
@return bool
"""
# we could get stuck in an inconsistent state if got an exception while
# trying to login with different credentials than what is stored so
# we rollback the state to prevent that
state_snapshot = self._state.copy()
try:
self._ajax_api.User_Login(name=username, password=password)
self._android_api.login(account=username, password=password)
self._manga_api.cr_login(account=username, password=password)
except Exception as err:
# something went wrong, rollback
self._state = state_snapshot
raise err
self._state['username'] = username
self._state['password'] = password
return self.logged_in |
Get a list of anime series
@param str sort pick how results should be sorted, should be one
of META.SORT_*
@param int limit limit number of series to return, there doesn't
seem to be an upper bound
@param int offset list series starting from this offset, for pagination
@return list<crunchyroll.models.Series>
def list_anime_series(self, sort=META.SORT_ALPHA, limit=META.MAX_SERIES, offset=0):
"""Get a list of anime series
@param str sort pick how results should be sorted, should be one
of META.SORT_*
@param int limit limit number of series to return, there doesn't
seem to be an upper bound
@param int offset list series starting from this offset, for pagination
@return list<crunchyroll.models.Series>
"""
result = self._android_api.list_series(
media_type=ANDROID.MEDIA_TYPE_ANIME,
filter=sort,
limit=limit,
offset=offset)
return result |
Get a list of drama series
@param str sort pick how results should be sorted, should be one
of META.SORT_*
@param int limit limit number of series to return, there doesn't
seem to be an upper bound
@param int offset list series starting from this offset, for pagination
@return list<crunchyroll.models.Series>
def list_drama_series(self, sort=META.SORT_ALPHA, limit=META.MAX_SERIES, offset=0):
"""Get a list of drama series
@param str sort pick how results should be sorted, should be one
of META.SORT_*
@param int limit limit number of series to return, there doesn't
seem to be an upper bound
@param int offset list series starting from this offset, for pagination
@return list<crunchyroll.models.Series>
"""
result = self._android_api.list_series(
media_type=ANDROID.MEDIA_TYPE_DRAMA,
filter=sort,
limit=limit,
offset=offset)
return result |
Get a list of manga series
def list_manga_series(self, filter=None, content_type='jp_manga'):
"""Get a list of manga series
"""
result = self._manga_api.list_series(filter, content_type)
return result |
Search anime series list by series name, case-sensitive
@param str query_string string to search for, note that the search
is very simplistic and only matches against
the start of the series name, ex) search
for "space" matches "Space Brothers" but
wouldn't match "Brothers Space"
@return list<crunchyroll.models.Series>
def search_anime_series(self, query_string):
"""Search anime series list by series name, case-sensitive
@param str query_string string to search for, note that the search
is very simplistic and only matches against
the start of the series name, ex) search
for "space" matches "Space Brothers" but
wouldn't match "Brothers Space"
@return list<crunchyroll.models.Series>
"""
result = self._android_api.list_series(
media_type=ANDROID.MEDIA_TYPE_ANIME,
filter=ANDROID.FILTER_PREFIX + query_string)
return result |
Search drama series list by series name, case-sensitive
@param str query_string string to search for, note that the search
is very simplistic and only matches against
the start of the series name, ex) search
for "space" matches "Space Brothers" but
wouldn't match "Brothers Space"
@return list<crunchyroll.models.Series>
def search_drama_series(self, query_string):
"""Search drama series list by series name, case-sensitive
@param str query_string string to search for, note that the search
is very simplistic and only matches against
the start of the series name, ex) search
for "space" matches "Space Brothers" but
wouldn't match "Brothers Space"
@return list<crunchyroll.models.Series>
"""
result = self._android_api.list_series(
media_type=ANDROID.MEDIA_TYPE_DRAMA,
filter=ANDROID.FILTER_PREFIX + query_string)
return result |
Search the manga series list by name, case-insensitive
@param str query_string
@return list<crunchyroll.models.Series>
def search_manga_series(self, query_string):
"""Search the manga series list by name, case-insensitive
@param str query_string
@return list<crunchyroll.models.Series>
"""
result = self._manga_api.list_series()
return [series for series in result \
if series['locale']['enUS']['name'].lower().startswith(
query_string.lower())] |
List media for a given series or collection
@param crunchyroll.models.Series series the series to search for
@param str sort choose the ordering of the
results, only META.SORT_DESC
is known to work
@param int limit limit size of results
@param int offset start results from this index,
for pagination
@return list<crunchyroll.models.Media>
def list_media(self, series, sort=META.SORT_DESC, limit=META.MAX_MEDIA, offset=0):
"""List media for a given series or collection
@param crunchyroll.models.Series series the series to search for
@param str sort choose the ordering of the
results, only META.SORT_DESC
is known to work
@param int limit limit size of results
@param int offset start results from this index,
for pagination
@return list<crunchyroll.models.Media>
"""
params = {
'sort': sort,
'offset': offset,
'limit': limit,
}
params.update(self._get_series_query_dict(series))
result = self._android_api.list_media(**params)
return result |
Search for media from a series starting with query_string, case-sensitive
@param crunchyroll.models.Series series the series to search in
@param str query_string the search query, same restrictions
as `search_anime_series`
@return list<crunchyroll.models.Media>
def search_media(self, series, query_string):
"""Search for media from a series starting with query_string, case-sensitive
@param crunchyroll.models.Series series the series to search in
@param str query_string the search query, same restrictions
as `search_anime_series`
@return list<crunchyroll.models.Media>
"""
params = {
'sort': ANDROID.FILTER_PREFIX + query_string,
}
params.update(self._get_series_query_dict(series))
result = self._android_api.list_media(**params)
return result |
Get the stream data for a given media item
@param crunchyroll.models.Media media_item
@param int format
@param int quality
@return crunchyroll.models.MediaStream
def get_media_stream(self, media_item, format, quality):
"""Get the stream data for a given media item
@param crunchyroll.models.Media media_item
@param int format
@param int quality
@return crunchyroll.models.MediaStream
"""
result = self._ajax_api.VideoPlayer_GetStandardConfig(
media_id=media_item.media_id,
video_format=format,
video_quality=quality)
return MediaStream(result) |
Turn a SubtitleStub into a full Subtitle object
@param crunchyroll.models.SubtitleStub subtitle_stub
@return crunchyroll.models.Subtitle
def unfold_subtitle_stub(self, subtitle_stub):
"""Turn a SubtitleStub into a full Subtitle object
@param crunchyroll.models.SubtitleStub subtitle_stub
@return crunchyroll.models.Subtitle
"""
return Subtitle(self._ajax_api.Subtitle_GetXml(
subtitle_script_id=int(subtitle_stub.id))) |
Get the available media formats for a given media item
@param crunchyroll.models.Media
@return dict
def get_stream_formats(self, media_item):
"""Get the available media formats for a given media item
@param crunchyroll.models.Media
@return dict
"""
scraper = ScraperApi(self._ajax_api._connector)
formats = scraper.get_media_formats(media_item.media_id)
return formats |
List the series in the queue, optionally filtering by type of media
@param list<str> media_types a list of media types to filter the queue
with, should be of META.TYPE_*
@return list<crunchyroll.models.Series>
def list_queue(self, media_types=[META.TYPE_ANIME, META.TYPE_DRAMA]):
"""List the series in the queue, optionally filtering by type of media
@param list<str> media_types a list of media types to filter the queue
with, should be of META.TYPE_*
@return list<crunchyroll.models.Series>
"""
result = self._android_api.queue(media_types='|'.join(media_types))
return [queue_item['series'] for queue_item in result] |
Add a series to the queue
@param crunchyroll.models.Series series
@return bool
def add_to_queue(self, series):
"""Add a series to the queue
@param crunchyroll.models.Series series
@return bool
"""
result = self._android_api.add_to_queue(series_id=series.series_id)
return result |
Remove a series from the queue
@param crunchyroll.models.Series series
@return bool
def remove_from_queue(self, series):
"""Remove a series from the queue
@param crunchyroll.models.Series series
@return bool
"""
result = self._android_api.remove_from_queue(series_id=series.series_id)
return result |
Create a compound tag schema.
This function is a short convenience function that makes it easy to
subclass the base `CompoundSchema` class.
The `name` argument is the name of the class and `dct` should be a
dictionary containing the actual schema. The schema should map keys
to tag types or other compound schemas.
If the `strict` keyword only argument is set to True, interacting
with keys that are not defined in the schema will raise a
`TypeError`.
def schema(name, dct, *, strict=False):
"""Create a compound tag schema.
This function is a short convenience function that makes it easy to
subclass the base `CompoundSchema` class.
The `name` argument is the name of the class and `dct` should be a
dictionnary containing the actual schema. The schema should map keys
to tag types or other compound schemas.
If the `strict` keyword only argument is set to True, interacting
with keys that are not defined in the schema will raise a
`TypeError`.
"""
return type(name, (CompoundSchema,), {'__slots__': (), 'schema': dct,
'strict': strict}) |
Cast schema item to the appropriate tag type.
def cast_item(cls, key, value):
    """Cast a schema item to its declared tag type.

    Values whose key is not in the schema pass through unchanged unless
    the schema is strict, in which case a `TypeError` is raised.
    """
    expected_type = cls.schema.get(key)
    if expected_type is None:
        if cls.strict:
            raise TypeError(f'Invalid key {key!r}')
    elif not isinstance(value, expected_type):
        try:
            return expected_type(value)
        except CastError:
            # Already the right error type; let it propagate untouched.
            raise
        except Exception as exc:
            raise CastError(value, expected_type) from exc
    return value
Simple obsolete/removed method decorator
Parameters
----------
oldname : str
The name of the old obsolete name
newfunc : FunctionType
Replacement unbound member function.
def obsolete_rename(oldname, newfunc):
    """
    Simple obsolete/removed method decorator

    Parameters
    ----------
    oldname : str
        The name of the old obsolete name
    newfunc : FunctionType
        Replacement unbound member function.
    """
    replacement = newfunc.__name__

    def _deprecated(*args, **kwargs):
        warnings.warn(
            "{oldname} is obsolete and is removed in PyQt5. "
            "Use {newname} instead.".format(oldname=oldname, newname=replacement),
            DeprecationWarning,
            stacklevel=2
        )
        return newfunc(*args, **kwargs)

    _deprecated.__name__ = oldname
    return _deprecated
Runs a bash command safely, with shell=false, catches any non-zero
return codes. Raises slightly modified CalledProcessError exceptions
on failures.
Note: command is a string and cannot include pipes.
def call(command, silent=False):
    """ Runs a bash command safely, with shell=False, catches any non-zero
    return codes. Raises slightly modified CalledProcessError exceptions
    on failures.

    Note: command is a string and cannot include pipes.

    silent: when True, stdout of the command is discarded.
    """
    try:
        if silent:
            with open(os.devnull, 'w') as FNULL:
                return subprocess.check_call(command_to_array(command), stdout=FNULL)
        # Using the defaults, shell=False, no i/o redirection.
        return subprocess.check_call(command_to_array(command))
    except subprocess.CalledProcessError as e:
        # We are modifying the error itself for 2 reasons. 1) it WILL contain
        # login credentials when run_mongodump is run, 2) CalledProcessError is
        # slightly not-to-spec (the message variable is blank), which means
        # cronutils.ErrorHandler would report unlabeled stack traces.
        e.message = "%s failed with error code %s" % (e.cmd[0], e.returncode)
        e.cmd = e.cmd[0] + " [arguments stripped for security]"
        # Bare `raise` re-raises the active exception and preserves the
        # original traceback (unlike `raise e`).
        raise
Tars and bzips a directory, preserving as much metadata as possible.
Adds '.tbz' to the provided output file name.
def tarbz(source_directory_path, output_file_full_path, silent=False):
    """ Tars and bzips a directory, preserving as much metadata as possible.
    Adds '.tbz' to the provided output file name. """
    create_folders(output_file_full_path.rsplit("/", 1)[0])
    # Note: default compression for bzip is supposed to be -9, highest compression.
    full_tar_file_path = output_file_full_path + ".tbz"
    if path.exists(full_tar_file_path):
        raise Exception("%s already exists, aborting." % (full_tar_file_path))
    # preserve permissions, create file, use files (not tape devices), preserve
    # access time. tar is the only program in the universe to use (dstn, src).
    tar_command = ("tar jpcfvC %s %s %s" %
                   (full_tar_file_path, source_directory_path, "./"))
    call(tar_command, silent=silent)
    return full_tar_file_path
Restores your mongo database backup from a .tbz created using this library.
This function will ensure that a directory is created at the file path
if one does not exist already.
If used in conjunction with this library's mongodump operation, the backup
data will be extracted directly into the provided directory path.
This command will fail if the output directory is not empty as existing files
with identical names are not overwritten by tar.
def untarbz(source_file_path, output_directory_path, silent=False):
    """ Restores your mongo database backup from a .tbz created using this library.

    This function will ensure that a directory is created at the file path
    if one does not exist already.

    If used in conjunction with this library's mongodump operation, the backup
    data will be extracted directly into the provided directory path.

    This command will fail if the output directory is not empty as existing
    files with identical names are not overwritten by tar. """
    if not path.exists(source_file_path):
        raise Exception("the provided tar file %s does not exist." % (source_file_path))
    # BUGFIX: the original compared output_directory_path[0:1] (a single
    # character) to "./", which could never be True, so relative "./" paths
    # were never converted and were rejected by the check below.
    if output_directory_path.startswith("./"):
        output_directory_path = path.abspath(output_directory_path)
    # startswith() also handles an empty string safely (indexing would raise).
    if not output_directory_path.startswith("/"):
        raise Exception("your output directory path must start with '/' or './'; you used: %s"
                        % (output_directory_path))
    create_folders(output_directory_path)
    if listdir(output_directory_path):
        raise Exception("Your output directory isn't empty. Aborting as "
                        + "existing files are not overwritten by tar.")
    untar_command = ("tar jxfvkCp %s %s --atime-preserve " %
                     (source_file_path, output_directory_path))
    call(untar_command, silent=silent)
Determine if any of the items in the value list for the given
attribute contain value.
def value_contains(self, value, attribute):
    """
    Determine if any of the items in the value list for the given
    attribute contain value.
    """
    return any(value in item for item in self[attribute])
Clear all search defaults specified by the list of parameter names
given as ``args``. If ``args`` is not given, then clear all existing
search defaults.
Examples::
conn.set_search_defaults(scope=ldap.SCOPE_BASE, attrs=['cn'])
conn.clear_search_defaults(['scope'])
conn.clear_search_defaults()
def clear_search_defaults(self, args=None):
    """
    Clear all search defaults specified by the list of parameter names
    given as ``args``. If ``args`` is not given, then clear all existing
    search defaults.

    Examples::
        conn.set_search_defaults(scope=ldap.SCOPE_BASE, attrs=['cn'])
        conn.clear_search_defaults(['scope'])
        conn.clear_search_defaults()
    """
    if args is None:
        self._search_defaults.clear()
        return
    for name in args:
        # pop() with a default silently ignores names never set.
        self._search_defaults.pop(name, None)
Search the directory.
def search(self, filter, base_dn=None, attrs=None, scope=None,
           timeout=None, limit=None):
    """
    Search the directory.

    Any argument left as None falls back to the connection's stored
    search defaults.  (`filter` shadows the builtin but is kept for
    keyword-argument compatibility with existing callers.)
    """
    defaults = self._search_defaults
    if base_dn is None:
        base_dn = defaults.get('base_dn', '')
    if attrs is None:
        attrs = defaults.get('attrs', None)
    if scope is None:
        scope = defaults.get('scope', ldap.SCOPE_SUBTREE)
    if timeout is None:
        timeout = defaults.get('timeout', -1)
    if limit is None:
        limit = defaults.get('limit', 0)
    raw_results = self.connection.search_ext_s(
        base_dn, scope, filter, attrs, timeout=timeout, sizelimit=limit)
    return self.to_items(raw_results)
Get a single object.
This is a convenience wrapper for the search method that checks that
only one object was returned, and returns that single object instead
of a list. This method takes the exact same arguments as search.
def get(self, *args, **kwargs):
    """
    Get a single object.

    A convenience wrapper for the search method that checks that exactly
    one object was returned, and returns that single object instead of a
    list. Takes the exact same arguments as search.
    """
    matches = self.search(*args, **kwargs)
    if len(matches) > 1:
        raise MultipleObjectsFound()
    if not matches:
        raise ObjectNotFound()
    return matches[0]
Attempt to authenticate given dn and password using a bind operation.
Return True if the bind is successful, and return False there was an
exception raised that is contained in
self.failed_authentication_exceptions.
def authenticate(self, dn='', password=''):
    """
    Attempt to authenticate given dn and password using a bind operation.

    Return True if the bind is successful, and False if an exception
    listed in self.failed_authentication_exceptions was raised.
    """
    try:
        self.connection.simple_bind_s(dn, password)
        return True
    except tuple(self.failed_authentication_exceptions):
        return False
Compare the ``attr`` of the entry ``dn`` with given ``value``.
This is a convenience wrapper for the ldap library's ``compare``
function that returns a boolean value instead of 1 or 0.
def compare(self, dn, attr, value):
    """
    Compare the ``attr`` of the entry ``dn`` with given ``value``.

    A convenience wrapper for the ldap library's ``compare`` function
    that returns a boolean instead of 1 or 0.
    """
    result = self.connection.compare_s(dn, attr, value)
    return result == 1
Get the accessor function for an instance to look for `key`.
Look for it as an attribute, and if that does not work, look to see if it
is a tag.
def get_property_func(key):
    """
    Get the accessor function for an instance to look for `key`.

    Look for it as an attribute first; if that fails, fall back to the
    instance's tag collection.
    """
    def accessor(instance):
        try:
            return getattr(instance, key)
        except AttributeError:
            return instance.tags.get(key)
    return accessor
List available billing metrics
def list_billing(region, filter_by_kwargs):
    """List available billing metrics"""
    conn = boto.ec2.cloudwatch.connect_to_region(region)
    metrics = conn.list_metrics(metric_name='EstimatedCharges')
    # Filtering is based on metric Dimensions. Only really valuable one is
    # ServiceName.
    if filter_by_kwargs:
        # next(iter(...items())) works on both Python 2 and 3;
        # dict.keys()[0] / dict.values()[0] raise TypeError on Python 3
        # where keys()/values() return views. items() also guarantees the
        # key and value come from the same entry.
        filter_key, filter_value = next(iter(filter_by_kwargs.items()))
        if filter_value:
            filtered_metrics = [x for x in metrics
                                if x.dimensions.get(filter_key)
                                and x.dimensions.get(filter_key)[0] == filter_value]
        else:
            # ServiceName=''
            filtered_metrics = [x for x in metrics if not x.dimensions.get(filter_key)]
    else:
        filtered_metrics = metrics
    return filtered_metrics
List CloudFront distributions.
def list_cloudfront(region, filter_by_kwargs):
    """List all CloudFront distributions.

    Note: ``region`` is accepted for signature consistency with the other
    ``list_*`` helpers but is unused here — ``connect_cloudfront()`` takes
    no region argument.
    """
    conn = boto.connect_cloudfront()
    instances = conn.get_all_distributions()
    return lookup(instances, filter_by=filter_by_kwargs)
List running ec2 instances.
def list_ec2(region, filter_by_kwargs):
    """List running ec2 instances."""
    ec2_conn = boto.ec2.connect_to_region(region)
    return lookup(ec2_conn.get_only_instances(), filter_by=filter_by_kwargs)
List running ebs volumes.
def list_ebs(region, filter_by_kwargs):
    """List running ebs volumes."""
    ec2_conn = boto.ec2.connect_to_region(region)
    return lookup(ec2_conn.get_all_volumes(), filter_by=filter_by_kwargs)
List all load balancers.
def list_elb(region, filter_by_kwargs):
    """List all load balancers."""
    elb_conn = boto.ec2.elb.connect_to_region(region)
    return lookup(elb_conn.get_all_load_balancers(), filter_by=filter_by_kwargs)
List all RDS database instances.
def list_rds(region, filter_by_kwargs):
    """List all RDS database instances."""
    rds_conn = boto.rds.connect_to_region(region)
    return lookup(rds_conn.get_all_dbinstances(), filter_by=filter_by_kwargs)
List all ElastiCache Clusters.
def list_elasticache(region, filter_by_kwargs):
    """List all ElastiCache Clusters."""
    conn = boto.elasticache.connect_to_region(region)
    req = conn.describe_cache_clusters()
    data = req["DescribeCacheClustersResponse"]["DescribeCacheClustersResult"]["CacheClusters"]
    if filter_by_kwargs:
        # next(iter(...items())) is Python 2/3 compatible; keys()[0] and
        # values()[0] raise TypeError on Python 3 dict views.
        filter_key, filter_value = next(iter(filter_by_kwargs.items()))
        clusters = [x['CacheClusterId'] for x in data if x[filter_key] == filter_value]
    else:
        clusters = [x['CacheClusterId'] for x in data]
    return clusters
List all Auto Scaling Groups.
def list_autoscaling_group(region, filter_by_kwargs):
    """List all Auto Scaling Groups."""
    autoscale_conn = boto.ec2.autoscale.connect_to_region(region)
    return lookup(autoscale_conn.get_all_groups(), filter_by=filter_by_kwargs)
List all SQS Queues.
def list_sqs(region, filter_by_kwargs):
    """List all SQS Queues."""
    sqs_conn = boto.sqs.connect_to_region(region)
    return lookup(sqs_conn.get_all_queues(), filter_by=filter_by_kwargs)
List all the kinesis applications along with the shards for each stream
def list_kinesis_applications(region, filter_by_kwargs):
    """List all the kinesis applications along with the shards for each stream."""
    conn = boto.kinesis.connect_to_region(region)
    kinesis_streams = {}
    for stream_name in conn.list_streams()['StreamNames']:
        description = conn.describe_stream(stream_name)['StreamDescription']
        kinesis_streams[stream_name] = [
            shard['ShardId'] for shard in description['Shards']
        ]
    return kinesis_streams
List all DynamoDB tables.
def list_dynamodb(region, filter_by_kwargs):
    """List all DynamoDB tables."""
    ddb_conn = boto.dynamodb.connect_to_region(region)
    return lookup(ddb_conn.list_tables(), filter_by=filter_by_kwargs)
Register a new handler for a specific :class:`slack.actions.Action` `callback_id`.
Optional routing based on the action name too.
The name argument is useful for actions of type `interactive_message` to provide
a different handler for each individual action.
Args:
callback_id: Callback_id the handler is interested in
handler: Callback
name: Name of the action (optional).
def register(self, callback_id: str, handler: Any, name: str = "*") -> None:
    """
    Register a new handler for a specific :class:`slack.actions.Action` `callback_id`.

    Optional routing based on the action name too. The name argument is
    useful for actions of type `interactive_message` to provide a
    different handler for each individual action.

    Args:
        callback_id: Callback_id the handler is interested in
        handler: Callback
        name: Name of the action (optional).
    """
    LOG.info("Registering %s, %s to %s", callback_id, name, handler)
    # setdefault creates the handler list on first registration.
    self._routes[callback_id].setdefault(name, []).append(handler)
Yields handlers matching the incoming :class:`slack.actions.Action` `callback_id`.
Args:
action: :class:`slack.actions.Action`
Yields:
handler
def dispatch(self, action: Action) -> Any:
    """
    Yields handlers matching the incoming :class:`slack.actions.Action` `callback_id`.

    Args:
        action: :class:`slack.actions.Action`

    Yields:
        handler
    """
    action_type = action["type"]
    LOG.debug("Dispatching action %s, %s", action_type, action["callback_id"])
    if action_type == "interactive_message":
        yield from self._dispatch_interactive_message(action)
    elif action_type in ("dialog_submission", "message_action"):
        yield from self._dispatch_action(action)
    else:
        raise UnknownActionType(action)
Commit to the use of specified Qt api.
Raise an error if another Qt api is already loaded in sys.modules
def comittoapi(api):
    """
    Commit to the use of specified Qt api.

    Raise an error if another Qt api is already loaded in sys.modules
    """
    global USED_API
    assert USED_API is None, "committoapi called again!"
    assert api in [QT_API_PYQT5, QT_API_PYQT4, QT_API_PYSIDE, QT_API_PYSIDE2]
    # Committing is only safe if no *other* Qt binding was already imported.
    for modname in ["PyQt4", "PyQt5", "PySide", "PySide2"]:
        if modname.lower() != api and modname in sys.modules:
            raise RuntimeError(
                "{} was already imported. Cannot commit to {}!"
                .format(modname, api)
            )
    api = _intern(api)
    USED_API = api
    AnyQt.__SELECTED_API = api
    AnyQt.USED_API = api
Return dictionary of metadata for given dist
@param dist: distribution
@type dist: pkg_resources Distribution object
@returns: dict of metadata or None
def get_metadata(dist):
    """
    Return dictionary of metadata for given dist

    @param dist: distribution
    @type dist: pkg_resources Distribution object

    @returns: dict of metadata or None
    """
    if not dist.has_metadata('PKG-INFO'):
        return None
    msg = email.message_from_string(dist.get_metadata('PKG-INFO'))
    # Use the public Message.items() API instead of reaching into the
    # private _headers attribute (which the original also copied through a
    # pointless list comprehension); the resulting header/value pairs are
    # the same, with later duplicates winning in both versions.
    return dict(msg.items())
Add command-line options for this plugin.
The base plugin class adds --with-$name by default, used to enable the
plugin.
def add_options(self, parser):
    """Add command-line options for this plugin.

    The base plugin class adds --with-$name by default, used to enable the
    plugin.
    """
    option_name = "--with-%s" % self.name
    description = "Enable plugin %s: %s" % (self.__class__.__name__, self.help())
    parser.add_option(option_name,
                      action="store_true",
                      dest=self.enable_opt,
                      help=description)
Configure the plugin and system, based on selected options.
The base plugin class sets the plugin to enabled if the enable option
for the plugin (self.enable_opt) is true.
def configure(self, options, conf):
    """Configure the plugin and system, based on selected options.

    The base plugin class sets the plugin to enabled if the enable option
    for the plugin (self.enable_opt) is true.
    """
    self.conf = conf
    # Sentinel-based getattr is equivalent to hasattr + getattr, and
    # still honours an attribute explicitly set to None.
    _missing = object()
    enabled = getattr(options, self.enable_opt, _missing)
    if enabled is not _missing:
        self.enabled = enabled
Return help for this plugin. This will be output as the help
section of the --with-$name option that enables the plugin.
def help(self):
    """Return help for this plugin. This will be output as the help
    section of the --with-$name option that enables the plugin.
    """
    doc = self.__class__.__doc__
    if not doc:
        return "(no help available)"
    # doc sections are often indented; compress the spaces
    return textwrap.dedent(doc)
Check request response status
Args:
status: Response status
headers: Response headers
data: Response data
Raises:
:class:`slack.exceptions.RateLimited`: For 429 status code
:class:`slack.exceptions:HTTPException`:
def raise_for_status(
    status: int, headers: MutableMapping, data: MutableMapping
) -> None:
    """
    Check request response status

    Args:
        status: Response status
        headers: Response headers
        data: Response data

    Raises:
        :class:`slack.exceptions.RateLimited`: For 429 status code
        :class:`slack.exceptions:HTTPException`:
    """
    if status == 200:
        return
    if status == 429:
        error = data if isinstance(data, str) else data.get("error", "ratelimited")
        try:
            retry_after = int(headers.get("Retry-After", 1))
        except ValueError:
            # Non-numeric Retry-After header: fall back to 1 second.
            retry_after = 1
        raise exceptions.RateLimited(retry_after, error, status, headers, data)
    raise exceptions.HTTPException(status, headers, data)
Check request response for Slack API error
Args:
headers: Response headers
data: Response data
Raises:
:class:`slack.exceptions.SlackAPIError`
def raise_for_api_error(headers: MutableMapping, data: MutableMapping) -> None:
    """
    Check request response for Slack API error

    Args:
        headers: Response headers
        data: Response data

    Raises:
        :class:`slack.exceptions.SlackAPIError`
    """
    if not data["ok"]:
        error_code = data.get("error", "unknow_error")
        raise exceptions.SlackAPIError(error_code, headers, data)
    if "warning" in data:
        LOG.warning("Slack API WARNING: %s", data["warning"])
Decode the response body
For 'application/json' content-type load the body as a dictionary
Args:
headers: Response headers
body: Response body
Returns:
decoded body
def decode_body(headers: MutableMapping, body: bytes) -> dict:
    """
    Decode the response body

    For 'application/json' content-type load the body as a dictionary

    Args:
        headers: Response headers
        body: Response body

    Returns:
        decoded body
    """
    content_type, encoding = parse_content_type(headers)
    text = body.decode(encoding)
    if content_type == "application/json":
        return json.loads(text)
    # There is one api that just returns `ok` instead of json. In order to
    # have a consistent API we decided to modify the returned payload into
    # a dict.
    if text == "ok":
        return {"ok": True}
    return {"ok": False, "data": text}
Find content-type and encoding of the response
Args:
headers: Response headers
Returns:
:py:class:`tuple` (content-type, encoding)
def parse_content_type(headers: MutableMapping) -> Tuple[Optional[str], str]:
    """
    Find content-type and encoding of the response

    Args:
        headers: Response headers

    Returns:
        :py:class:`tuple` (content-type, encoding)
    """
    content_type = headers.get("content-type")
    if not content_type:
        # No content-type header at all: unknown type, assume UTF-8.
        return None, "utf-8"
    else:
        # NOTE(review): cgi.parse_header was deprecated by PEP 594 and the
        # cgi module is removed in Python 3.13; migrating to
        # email.message.Message.get_param would need a behavior check
        # (case normalization, defaults) — confirm target Python version.
        type_, parameters = cgi.parse_header(content_type)
        # charset parameter is optional; default to UTF-8 when absent.
        encoding = parameters.get("charset", "utf-8")
        return type_, encoding
Prepare outgoing request
Create url, headers, add token to the body and if needed json encode it
Args:
url: :class:`slack.methods` item or string of url
data: Outgoing data
headers: Custom headers
global_headers: Global headers
token: Slack API token
as_json: Post JSON to the slack API
Returns:
:py:class:`tuple` (url, body, headers)
def prepare_request(
    url: Union[str, methods],
    data: Optional[MutableMapping],
    headers: Optional[MutableMapping],
    global_headers: MutableMapping,
    token: str,
    as_json: Optional[bool] = None,
) -> Tuple[str, Union[str, MutableMapping], MutableMapping]:
    """
    Prepare outgoing request

    Create url, headers, add token to the body and if needed json encode it

    Args:
        url: :class:`slack.methods` item or string of url
        data: Outgoing data
        headers: Custom headers
        global_headers: Global headers
        token: Slack API token
        as_json: Post JSON to the slack API

    Returns:
        :py:class:`tuple` (url, body, headers)
    """
    if isinstance(url, methods):
        real_url = url.value[0]
        as_json = as_json or url.value[3]
    else:
        real_url = url
        as_json = False
    # Custom headers layer on top of the global ones.
    merged_headers = {**global_headers, **(headers or {})}
    payload: Optional[Union[str, MutableMapping]] = None
    if real_url.startswith(HOOK_URL) or (real_url.startswith(ROOT_URL) and as_json):
        payload, merged_headers = _prepare_json_request(data, token, merged_headers)
    elif real_url.startswith(ROOT_URL):
        payload = _prepare_form_encoded_request(data, token)
    else:
        # Bare method path: anchor it to the API root.
        real_url = ROOT_URL + real_url
        payload = _prepare_form_encoded_request(data, token)
    return real_url, payload, merged_headers
Decode incoming response
Args:
status: Response status
headers: Response headers
body: Response body
Returns:
Response data
def decode_response(status: int, headers: MutableMapping, body: bytes) -> dict:
    """
    Decode incoming response

    Args:
        status: Response status
        headers: Response headers
        body: Response body

    Returns:
        Response data
    """
    payload = decode_body(headers, body)
    # Both checks raise on failure; reaching the return means success.
    raise_for_status(status, headers, payload)
    raise_for_api_error(headers, payload)
    return payload
Find iteration mode and iteration key for a given :class:`slack.methods`
Args:
url: :class:`slack.methods` or string url
itermode: Custom iteration mode
iterkey: Custom iteration key
Returns:
:py:class:`tuple` (itermode, iterkey)
def find_iteration(
    url: Union[methods, str],
    itermode: Optional[str] = None,
    iterkey: Optional[str] = None,
) -> Tuple[str, str]:
    """
    Find iteration mode and iteration key for a given :class:`slack.methods`

    Args:
        url: :class:`slack.methods` or string url
        itermode: Custom iteration mode
        iterkey: Custom iteration key

    Returns:
        :py:class:`tuple` (itermode, iterkey)
    """
    if isinstance(url, methods):
        # Explicit arguments win over the enum's own defaults.
        itermode = itermode or url.value[1]
        iterkey = iterkey or url.value[2]
    if not iterkey or not itermode:
        raise ValueError("Iteration not supported for: {}".format(url))
    if itermode not in ITERMODE:
        raise ValueError("Iteration not supported for: {}".format(itermode))
    return itermode, iterkey
Prepare outgoing iteration request
Args:
url: :class:`slack.methods` item or string of url
data: Outgoing data
limit: Maximum number of results to return per call.
iterkey: Key in response data to iterate over (required for url string).
itermode: Iteration mode (required for url string) (one of `cursor`, `page` or `timeline`)
itervalue: Value for current iteration (cursor hash, page or timestamp depending on the itermode)
Returns:
:py:class:`tuple` (data, iterkey, itermode)
def prepare_iter_request(
    url: Union[methods, str],
    data: MutableMapping,
    *,
    iterkey: Optional[str] = None,
    itermode: Optional[str] = None,
    limit: int = 200,
    itervalue: Optional[Union[str, int]] = None,
) -> Tuple[MutableMapping, str, str]:
    """
    Prepare outgoing iteration request

    Args:
        url: :class:`slack.methods` item or string of url
        data: Outgoing data
        limit: Maximum number of results to return per call.
        iterkey: Key in response data to iterate over (required for url string).
        itermode: Iteration mode (required for url string) (one of `cursor`, `page` or `timeline`)
        itervalue: Value for current iteration (cursor hash, page or timestamp depending on the itermode)

    Returns:
        :py:class:`tuple` (data, iterkey, itermode)
    """
    itermode, iterkey = find_iteration(url, itermode, iterkey)
    # Each mode uses its own pair of (per-page limit, resume point) keys.
    mode_params = {
        "cursor": ("limit", "cursor"),
        "page": ("count", "page"),
        "timeline": ("count", "latest"),
    }
    params = mode_params.get(itermode)
    if params is not None:
        limit_key, resume_key = params
        data[limit_key] = limit
        if itervalue:
            data[resume_key] = itervalue
    return data, iterkey, itermode
Decode incoming response from an iteration request
Args:
data: Response data
Returns:
Next itervalue
def decode_iter_request(data: dict) -> Optional[Union[str, int]]:
    """
    Decode incoming response from an iteration request

    Args:
        data: Response data

    Returns:
        Next itervalue, or None when iteration is exhausted
    """
    if "response_metadata" in data:
        # Cursor-mode pagination.
        return data["response_metadata"].get("next_cursor")
    if "paging" in data:
        # Page-mode pagination.
        paging = data["paging"]
        current_page = int(paging.get("page", 1))
        last_page = int(paging.get("pages", 1))
        return current_page + 1 if current_page < last_page else None
    if data.get("has_more") and "latest" in data:
        # Timeline-mode pagination: resume from the oldest returned message.
        return data["messages"][-1]["ts"]
    return None
Check if the incoming event needs to be discarded
Args:
event: Incoming :class:`slack.events.Event`
bot_id: Id of connected bot
Returns:
boolean
def discard_event(event: events.Event, bot_id: str = None) -> bool:
    """
    Check if the incoming event needs to be discarded

    Args:
        event: Incoming :class:`slack.events.Event`
        bot_id: Id of connected bot

    Returns:
        boolean
    """
    if event["type"] in SKIP_EVENTS:
        return True
    if bot_id and isinstance(event, events.Message):
        # Drop messages authored by the connected bot itself, whether the
        # bot_id is top-level or nested in a "message" sub-payload.
        if event.get("bot_id") == bot_id:
            LOG.debug("Ignoring event: %s", event)
            return True
        if "message" in event and event["message"].get("bot_id") == bot_id:
            LOG.debug("Ignoring event: %s", event)
            return True
    return False
Validate incoming request signature using the application signing secret.
Contrary to the ``team_id`` and ``verification_token`` verification this method is not called by ``slack-sansio`` when creating object from incoming HTTP request. Because the body of the request needs to be provided as text and not decoded as json beforehand.
Args:
body: Raw request body
headers: Request headers
signing_secret: Application signing_secret
Raise:
:class:`slack.exceptions.InvalidSlackSignature`: when provided and calculated signature do not match
:class:`slack.exceptions.InvalidTimestamp`: when incoming request timestamp is more than 5 minutes old
def validate_request_signature(
    body: str, headers: MutableMapping, signing_secret: str
) -> None:
    """
    Validate incoming request signature using the application signing secret.

    Contrary to the ``team_id`` and ``verification_token`` verification this
    method is not called by ``slack-sansio`` when creating object from
    incoming HTTP request, because the body of the request needs to be
    provided as text and not decoded as json beforehand.

    Args:
        body: Raw request body
        headers: Request headers
        signing_secret: Application signing_secret

    Raise:
        :class:`slack.exceptions.InvalidSlackSignature`: when provided and calculated signature do not match
        :class:`slack.exceptions.InvalidTimestamp`: when incoming request timestamp is more than 5 minutes old
    """
    raw_timestamp = headers["X-Slack-Request-Timestamp"]
    # Reject replays: anything older than five minutes is refused outright.
    if (int(time.time()) - int(raw_timestamp)) > (60 * 5):
        raise exceptions.InvalidTimestamp(timestamp=int(raw_timestamp))
    provided_signature = headers["X-Slack-Signature"]
    basestring = f"v0:{raw_timestamp}:{body}".encode("utf-8")
    computed_signature = "v0=" + hmac.new(
        signing_secret.encode("utf-8"), basestring, digestmod=hashlib.sha256
    ).hexdigest()
    # compare_digest avoids timing side channels on the comparison.
    if not hmac.compare_digest(provided_signature, computed_signature):
        raise exceptions.InvalidSlackSignature(provided_signature, computed_signature)
Runs a backup operation to At Least a local directory.
You must provide mongodb credentials along with a a directory for a dump
operation and a directory to contain your compressed backup.
custom_prefix: optionally provide a prefix to be prepended to your backups;
by default the prefix is "backup".
database: optionally provide the name of one specific database to back up
(instead of backing up all databases on the MongoDB server)
attached_directory_path: makes a second copy of the backup to a different
directory. This directory is checked before other operations and
will raise an error if it cannot be found.
s3_bucket: if you have an Amazon Web Services S3 account you can
automatically upload the backup to an S3 Bucket you provide;
requires s3_access_key_id and s3_secret key to be passed as well
s3_access_key_id, s3_secret_key: credentials for your AWS account.
purge_local: An integer value, the number of days of backups to purge
from local_backup_directory_path after operations have completed.
purge_attached: An integer value, the number of days of backups to purge
from attached_directory_path after operations have completed.
cleanup: set to False to leave the mongo_backup_directory_path after operations
have completed.
def backup(mongo_username, mongo_password, local_backup_directory_path, database=None,
           attached_directory_path=None, custom_prefix="backup",
           mongo_backup_directory_path="/tmp/mongo_dump",
           s3_bucket=None, s3_access_key_id=None, s3_secret_key=None,
           purge_local=None, purge_attached=None, cleanup=True, silent=False):
    """
    Runs a backup operation to At Least a local directory.

    You must provide mongodb credentials along with a a directory for a dump
    operation and a directory to contain your compressed backup.

    custom_prefix: optionally provide a prefix to be prepended to your backups,
        by default the prefix is "backup".
    database: optionally provide the name of one specific database to back up
        (instead of backing up all databases on the MongoDB server)
    attached_directory_path: makes a second copy of the backup to a different
        directory. This directory is checked before other operations and
        will raise an error if it cannot be found.
    s3_bucket: if you have an Amazon Web Services S3 account you can
        automatically upload the backup to an S3 Bucket you provide;
        requires s3_access_key_id and s3_secret key to be passed as well
    s3_access_key_id, s3_secret_key: credentials for your AWS account.
    purge_local: An integer value, the number of days of backups to purge
        from local_backup_directory_path after operations have completed.
    purge_attached: An integer value, the number of days of backups to purge
        from attached_directory_path after operations have completed.
    cleanup: set to False to leave the mongo_backup_directory_path after operations
        have completed.
    """
    # Fail fast: attached storage must already exist before any dump work.
    if attached_directory_path:
        if not path.exists(attached_directory_path):
            raise Exception("ERROR. Would have to create %s for your attached storage, make sure that file paths already exist and re-run"
                            % (attached_directory_path))
    # Dump mongo, tarbz, copy to attached storage, upload to s3, purge, clean.
    # NOTE(review): plain string concatenation — assumes
    # local_backup_directory_path ends with "/"; verify callers.
    full_file_name_path = local_backup_directory_path + custom_prefix + time_string()
    mongodump(mongo_username, mongo_password, mongo_backup_directory_path, database, silent=silent)
    local_backup_file = tarbz(mongo_backup_directory_path, full_file_name_path, silent=silent)
    if attached_directory_path:
        copy(local_backup_file, attached_directory_path + local_backup_file.split("/")[-1])
    if s3_bucket:
        s3_upload(local_backup_file, s3_bucket, s3_access_key_id, s3_secret_key)
    # Purging keeps only the most recent `purge_*` days worth of backups.
    if purge_local:
        purge_date = (datetime.utcnow().replace(second=0, microsecond=0) -
                      timedelta(days=purge_local))
        purge_old_files(purge_date, local_backup_directory_path, custom_prefix=custom_prefix)
    if purge_attached and attached_directory_path:
        purge_date = (datetime.utcnow().replace(second=0, microsecond=0) -
                      timedelta(days=purge_attached))
        purge_old_files(purge_date, attached_directory_path, custom_prefix=custom_prefix)
    if cleanup:
        # Remove the raw (uncompressed) dump directory.
        rmtree(mongo_backup_directory_path)
Runs mongorestore with source data from the provided .tbz backup, using
the provided username and password.
The contents of the .tbz will be dumped into the provided backup directory,
and that folder will be deleted after a successful mongodb restore unless
cleanup is set to False.
Note: the skip_system_and_user_files is intended for use with the changes
in user architecture introduced in mongodb version 2.6.
Warning: Setting drop_database to True will drop the ENTIRE
CURRENTLY RUNNING DATABASE before restoring.
Mongorestore requires a running mongod process, in addition the provided
user must have restore permissions for the database. A mongolia superuser
will have more than adequate permissions, but a regular user may not.
By default this function will clean up the output of the untar operation.
def restore(mongo_user, mongo_password, backup_tbz_path,
            backup_directory_output_path="/tmp/mongo_dump",
            drop_database=False, cleanup=True, silent=False,
            skip_system_and_user_files=False):
    """
    Restore a mongodb database from a .tbz backup produced by this module,
    using the provided username and password.

    The archive at backup_tbz_path is extracted into
    backup_directory_output_path, mongorestore is run against that
    directory, and the extracted directory is removed afterwards unless
    cleanup is set to False.

    Note: skip_system_and_user_files drops the extracted "admin" folder
    before restoring; intended for the user architecture changes
    introduced in mongodb version 2.6.

    Warning: Setting drop_database to True will drop the ENTIRE
    CURRENTLY RUNNING DATABASE before restoring.

    Mongorestore requires a running mongod process, and the provided user
    must have restore permissions for the database. A mongolia superuser
    will have more than adequate permissions, but a regular user may not.
    """
    if not path.exists(backup_tbz_path):
        raise Exception("the provided tar file %s does not exist." % (backup_tbz_path))
    untarbz(backup_tbz_path, backup_directory_output_path, silent=silent)

    if skip_system_and_user_files:
        # The admin folder holds system/user collections; drop it pre-restore.
        admin_dump_path = "%s/admin" % backup_directory_output_path
        if path.exists(admin_dump_path):
            rmtree(admin_dump_path)

    mongorestore(mongo_user, mongo_password, backup_directory_output_path,
                 drop_database=drop_database, silent=silent)
    if cleanup:
        rmtree(backup_directory_output_path)
Runs mongodump using the provided credentials on the running mongod
process.
WARNING: This function will delete the contents of the provided
directory before it runs.
def mongodump(mongo_user, mongo_password, mongo_dump_directory_path, database=None, silent=False):
    """ Runs mongodump against the running mongod process with the given
    credentials, writing the dump into mongo_dump_directory_path.

    WARNING: any existing contents of the target directory are deleted
    before the dump runs. """
    # Remove a previous dump so stale collections don't linger in the output.
    if path.exists(mongo_dump_directory_path):
        rmtree(mongo_dump_directory_path)

    base_command = "mongodump --quiet" if silent else "mongodump"
    dump_command = "%s -u %s -p %s -o %s" % (
        base_command, mongo_user, mongo_password, mongo_dump_directory_path)
    if database:
        # Restrict the dump to a single database instead of all of them.
        dump_command += " --db %s" % database
    call(dump_command, silent=silent)
Warning: Setting drop_database to True will drop the ENTIRE
CURRENTLY RUNNING DATABASE before restoring.
Mongorestore requires a running mongod process, in addition the provided
user must have restore permissions for the database. A mongolia superuser
will have more than adequate permissions, but a regular user may not.
def mongorestore(mongo_user, mongo_password, backup_directory_path, drop_database=False, silent=False):
    """ Runs mongorestore on backup_directory_path with the given credentials.

    Warning: Setting drop_database to True will drop the ENTIRE
    CURRENTLY RUNNING DATABASE before restoring.

    Mongorestore requires a running mongod process, and the provided user
    must have restore permissions for the database. A mongolia superuser
    will have more than adequate permissions, but a regular user may not.
    """
    if not path.exists(backup_directory_path):
        raise Exception("the provided tar directory %s does not exist."
                        % (backup_directory_path))
    # --quiet when silent, verbose (-v) otherwise.
    verbosity_flag = "--quiet" if silent else "-v"
    mongorestore_command = "mongorestore %s -u %s -p %s %s" % (
        verbosity_flag, mongo_user, mongo_password, backup_directory_path)
    if drop_database:
        mongorestore_command += " --drop"
    call(mongorestore_command, silent=silent)
Returns a datetime object computed from a file name string, with
formatting based on DATETIME_FORMAT.
def get_backup_file_time_tag(file_name, custom_prefix="backup"):
    """ Parse and return the datetime encoded in a backup file name.

    The name is expected to be custom_prefix followed by a timestamp in
    DATETIME_FORMAT, optionally followed by a dotted extension. """
    # Strip the prefix, then keep everything before the first dot
    # (the file extension, e.g. ".tbz").
    stripped = file_name[len(custom_prefix):]
    time_tag, _, _ = stripped.partition(".")
    return datetime.strptime(time_tag, DATETIME_FORMAT)
Takes a datetime object and a directory path, runs through files in the
directory and deletes those tagged with a date from before the provided
datetime.
If your backups have a custom_prefix that is not the default ("backup"),
provide it with the "custom_prefix" kwarg.
def purge_old_files(date_time, directory_path, custom_prefix="backup"):
    """ Takes a datetime object and a directory path, runs through files in the
    directory and deletes those tagged with a date from before the provided
    datetime.
    Files whose names do not match the backup naming convention are skipped
    with a printed warning.
    If your backups have a custom_prefix that is not the default ("backup"),
    provide it with the "custom_prefix" kwarg. """
    for file_name in listdir(directory_path):
        try:
            file_date_time = get_backup_file_time_tag(file_name, custom_prefix=custom_prefix)
        except ValueError as e:
            # FIX: ValueError has no .message attribute on Python 3; inspect
            # str(e) instead of crashing with an AttributeError here.
            if "does not match format" in str(e):
                print("WARNING. file(s) in %s do not match naming convention."
                      % (directory_path))
                continue
            raise
        if file_date_time < date_time:
            # FIX: use path.join so a directory_path without a trailing slash
            # still resolves to the correct file (plain concatenation would
            # silently target a wrong path).
            remove(path.join(directory_path, file_name))
Use setuptools to search for a package's URI
@returns: URI string
def get_download_uri(package_name, version, source, index_url=None):
    """
    Use setuptools to search for a package's URI
    @returns: URI string
    """
    if not index_url:
        index_url = 'http://cheeseshop.python.org/pypi'
    pkg_spec = "%s==%s" % (package_name, version) if version else package_name
    requirement = pkg_resources.Requirement.parse(pkg_spec)
    pkg_index = MyPackageIndex(index_url)
    try:
        # Positional args: tmpdir=None, force_scan=True, source, develop_ok=False
        pkg_index.fetch_distribution(requirement, None, True, source, False)
    except DownloadURI as url:
        # Remove #egg=pkg-dev fragment
        clean_url = url.value.split("#")[0]
        # If setuptools is asked for an egg and there isn't one, it will
        # return source if available, which we don't want.
        if not source and not clean_url.endswith((".egg", ".EGG")):
            return
        return clean_url
Return list of all installed packages
Note: It returns one project name per pkg no matter how many versions
of a particular package is installed
@returns: list of project name strings for every installed pkg
def get_pkglist():
    """
    Return list of all installed packages
    Note: It returns one project name per pkg no matter how many versions
    of a particular package is installed
    @returns: list of project name strings for every installed pkg
    """
    project_names = []
    for dist, _active in Distributions().get_distributions("all"):
        name = dist.project_name
        # Deduplicate while preserving first-seen order.
        if name not in project_names:
            project_names.append(name)
    return project_names
Register a new handler for a specific slash command
Args:
command: Slash command
handler: Callback
def register(self, command: str, handler: Any):
    """
    Register a new handler for a specific slash command

    Args:
        command: Slash command
        handler: Callback
    """
    # Normalize so stored route keys always carry the leading slash.
    if not command.startswith("/"):
        command = "/" + command
    LOG.info("Registering %s to %s", command, handler)
    self._routes[command].append(handler)
Yields handlers matching the incoming :class:`slack.actions.Command`.
Args:
command: :class:`slack.actions.Command`
Yields:
handler
def dispatch(self, command: Command) -> Iterator[Any]:
    """
    Yields handlers matching the incoming :class:`slack.actions.Command`.

    Args:
        command: :class:`slack.actions.Command`

    Yields:
        handler
    """
    name = command["command"]
    LOG.debug("Dispatching command %s", name)
    # Yield each registered callback for this command, in registration order.
    yield from self._routes[name]
Set the preferred Qt API.
Will raise a RuntimeError if a Qt API was already selected.
Note that QT_API environment variable (if set) will take precedence.
def setpreferredapi(api):
    """
    Set the preferred Qt API.

    Will raise a RuntimeError if a Qt API was already selected.

    Note that QT_API environment variable (if set) will take precedence.

    :param api: one of "pyqt4", "pyqt5", "pyside" or "pyside2"
        (case-insensitive).
    :raises RuntimeError: if a Qt API has already been selected.
    :raises ValueError: if api is not a recognized Qt binding name.
    """
    global __PREFERRED_API
    if __SELECTED_API is not None:
        # FIX: capitalization made consistent with selectapi()'s
        # "A Qt API {} was already selected" message.
        raise RuntimeError("A Qt API {} was already selected"
                           .format(__SELECTED_API))
    if api.lower() not in {"pyqt4", "pyqt5", "pyside", "pyside2"}:
        raise ValueError(api)
    __PREFERRED_API = api.lower()
Select an Qt API to use.
This can only be set once and before any of the Qt modules are explicitly
imported.
def selectapi(api):
    """
    Select an Qt API to use.

    This can only be set once and before any of the Qt modules are explicitly
    imported.
    """
    global __SELECTED_API, USED_API
    normalized = api.lower()
    if normalized not in {"pyqt4", "pyqt5", "pyside", "pyside2"}:
        raise ValueError(api)
    if __SELECTED_API is not None and __SELECTED_API.lower() != normalized:
        # A different API was already chosen; selection is one-shot.
        raise RuntimeError("A Qt API {} was already selected"
                           .format(__SELECTED_API))
    elif __SELECTED_API is None:
        __SELECTED_API = normalized
    from . import _api
    USED_API = _api.USED_API
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.