_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3
values | text stringlengths 75 19.8k | language stringclasses 1
value | meta_information dict |
|---|---|---|---|---|---|
def _validate_roles(model):
    """Validate the attributed role metadata on ``model``.

    Required role fields must be present and non-empty, and every role
    entry must be a 'cnx-id' typed reference.

    Raises ``exceptions.MissingRequiredMetadata`` for an absent or empty
    required role and ``exceptions.InvalidRole`` for a malformed entry.
    """
    required_roles = (ATTRIBUTED_ROLE_KEYS[0], ATTRIBUTED_ROLE_KEYS[4],)
    for role_key in ATTRIBUTED_ROLE_KEYS:
        is_required = role_key in required_roles
        if role_key not in model.metadata:
            if is_required:
                raise exceptions.MissingRequiredMetadata(role_key)
            continue
        roles = model.metadata[role_key]
        if is_required and not roles:
            raise exceptions.MissingRequiredMetadata(role_key)
        for role in roles:
            # Every attributed person must be referenced by cnx id.
            if role.get('type') != 'cnx-id':
                raise exceptions.InvalidRole(role_key, role)
"resource": ""
} |
def _validate_subjects(cursor, model):
    """Check the model's 'subjects' metadata against the controlled
    subject vocabulary stored in the database.

    Raises ``exceptions.InvalidMetadata`` listing every unknown subject.
    """
    vocabulary = set(term[0] for term in acquire_subject_vocabulary(cursor))
    unknown = [subj for subj in model.metadata.get('subjects', [])
               if subj not in vocabulary]
    if unknown:
        raise exceptions.InvalidMetadata('subjects', unknown)
"resource": ""
} |
def validate_model(cursor, model):
    """Run the full validation suite over ``model``."""
    # License must be one valid for publication.
    _validate_license(model)
    _validate_roles(model)
    # Title and summary are unconditionally required.
    for metadata_key in ('title', 'summary',):
        if model.metadata.get(metadata_key) in (None, '', []):
            raise exceptions.MissingRequiredMetadata(metadata_key)
    # Derived-from must be None or point at a live archive record.
    _validate_derived_from(cursor, model)
    # FIXME Valid language code?
    # Subjects must come from the controlled vocabulary.
    _validate_subjects(cursor, model)
"resource": ""
} |
def lookup_document_pointer(ident_hash, cursor):
    """Find a module's title by its ident-hash and wrap it in a
    ``cnxepub.DocumentPointer``.

    Raises ``DocumentLookupError`` when no matching record exists.
    """
    id, version = split_ident_hash(ident_hash, split_version=True)
    stmt = "SELECT name FROM modules WHERE uuid = %s"
    args = [id]
    if version and version[0] is not None:
        # SQL "is" when minor is None (psycopg2 renders it as NULL),
        # "=" for a concrete minor version.
        operator = 'is' if version[1] is None else '='
        stmt += " AND (major_version = %s AND minor_version {} %s)" \
            .format(operator)
        args.extend(version)
    cursor.execute(stmt, args)
    row = cursor.fetchone()
    if row is None:
        raise DocumentLookupError()
    metadata = {'title': row[0]}
    return cnxepub.DocumentPointer(ident_hash, metadata)
"resource": ""
} |
def _node_to_model(tree_or_item, metadata=None, parent=None,
                   lucent_id=cnxepub.TRANSLUCENT_BINDER_ID):
    """Recursively convert a tree structure into cnx-epub models."""
    if 'contents' not in tree_or_item:
        # Leaf node: a pointer at a document.
        result = cnxepub.DocumentPointer(tree_or_item['id'],
                                         metadata=tree_or_item)
    else:
        # Branch node: build a translucent binder and recurse.
        result = cnxepub.TranslucentBinder(metadata=tree_or_item)
        for child in tree_or_item['contents']:
            node = _node_to_model(child, parent=result,
                                  lucent_id=lucent_id)
            if node.metadata['title'] != child['title']:
                result.set_title_for_node(node, child['title'])
    if parent is not None:
        parent.append(result)
    return result
"resource": ""
} |
def _reassemble_binder(id, tree, metadata):
    """Rebuild a cnx-epub ``Binder`` from its database tree form."""
    binder = cnxepub.Binder(id, metadata=metadata)
    for child in tree['contents']:
        node = _node_to_model(child, parent=binder)
        if node.metadata['title'] != child['title']:
            binder.set_title_for_node(node, child['title'])
    return binder
"resource": ""
} |
def get_moderation(request):
    """Return publications currently waiting for moderation, each as a
    JSON-ready dict including its pending documents."""
    with db_connect() as db_conn:
        with db_conn.cursor() as cursor:
            cursor.execute("""\
SELECT row_to_json(combined_rows) FROM (
SELECT id, created, publisher, publication_message,
(select array_agg(row_to_json(pd))
from pending_documents as pd
where pd.publication_id = p.id) AS models
FROM publications AS p
WHERE state = 'Waiting for moderation') AS combined_rows""")
            rows = cursor.fetchall()
    return [row[0] for row in rows]
"resource": ""
} |
def includeme(config):
    """Wire the signed-cookie session factory into the Pyramid config."""
    secret = config.registry.settings['session_key']
    config.set_session_factory(SignedCookieSessionFactory(secret))
"resource": ""
} |
q261008 | admin_print_styles | validation | def admin_print_styles(request):
"""
Returns a dictionary of all unique print_styles, and their latest tag,
revision, and recipe_type.
"""
# Returns {'styles': [...]} where each entry carries: print_style,
# title, type, revised, tag, commit_id, number (books baked with it),
# bad (non-current uses) and a link to the per-style admin page.
styles = []
# This fetches all recipes that have been used to successfully bake a
# current book plus all default recipes that have not yet been used
# as well as "bad" books that are not "current" state, but would otherwise
# be the latest/current for that book
with db_connect(cursor_factory=DictCursor) as db_conn:
with db_conn.cursor() as cursor:
# CTE "latest": per (print_style, recipe) usage counts over the
# newest version of each Collection; CTE "defaults": default
# recipes never used by "latest". The UNION merges both views.
cursor.execute("""\
WITH latest AS (SELECT print_style, recipe,
count(*), count(nullif(stateid, 1)) as bad
FROM modules m
WHERE portal_type = 'Collection'
AND recipe IS NOT NULL
AND (
baked IS NOT NULL OR (
baked IS NULL AND stateid not in (1,8)
)
)
AND ARRAY [major_version, minor_version] = (
SELECT max(ARRAY[major_version,minor_version]) FROM
modules where m.uuid= uuid)
GROUP BY print_style, recipe
),
defaults AS (SELECT print_style, fileid AS recipe
FROM default_print_style_recipes d
WHERE not exists (SELECT 1
FROM latest WHERE latest.recipe = d.fileid)
)
SELECT coalesce(ps.print_style, '(custom)') as print_style,
ps.title, coalesce(ps.recipe_type, 'web') as type,
ps.revised, ps.tag, ps.commit_id, la.count, la.bad
FROM latest la LEFT JOIN print_style_recipes ps ON
la.print_style = ps.print_style AND
la.recipe = ps.fileid
UNION ALL
SELECT ps.print_style, ps.title, ps.recipe_type,
ps.revised, ps.tag, ps.commit_id, 0 AS count, 0 AS bad
FROM defaults de JOIN print_style_recipes ps ON
de.print_style = ps.print_style AND
de.recipe = ps.fileid
ORDER BY revised desc NULLS LAST, print_style
""")
# DictCursor rows allow the name-based access below.
for row in cursor.fetchall():
styles.append({
'print_style': row['print_style'],
'title': row['title'],
'type': row['type'],
'revised': row['revised'],
'tag': row['tag'],
'commit_id': row['commit_id'],
'number': row['count'],
'bad': row['bad'],
'link': request.route_path('admin-print-style-single',
style=row['print_style'])
})
return {'styles': styles} | python | {
"resource": ""
} |
def get_api_keys(request):
    """Fetch every API key record as a list of JSON-ready dicts."""
    with db_connect() as db_conn:
        with db_conn.cursor() as cursor:
            cursor.execute("""\
SELECT row_to_json(combined_rows) FROM (
SELECT id, key, name, groups FROM api_keys
) AS combined_rows""")
            rows = cursor.fetchall()
    return [row[0] for row in rows]
"resource": ""
} |
q261010 | admin_content_status_single | validation | def admin_content_status_single(request):
"""
Returns a dictionary with all the past baking statuses of a single book.
"""
# Reject malformed uuids before touching the database.
uuid = request.matchdict['uuid']
try:
UUID(uuid)
except ValueError:
raise httpexceptions.HTTPBadRequest(
'{} is not a valid uuid'.format(uuid))
statement, sql_args = get_baking_statuses_sql({'uuid': uuid})
with db_connect(cursor_factory=DictCursor) as db_conn:
with db_conn.cursor() as cursor:
cursor.execute(statement, sql_args)
modules = cursor.fetchall()
if len(modules) == 0:
raise httpexceptions.HTTPBadRequest(
'{} is not a book'.format(uuid))
# NOTE(review): assumes rows come back newest-first so modules[0]
# and states[0] describe the current bake -- confirm the ORDER BY
# in get_baking_statuses_sql.
states = []
collection_info = modules[0]
for row in modules:
message = ''
state = row['state'] or 'PENDING'
if state == 'FAILURE': # pragma: no cover
if row['traceback'] is not None:
message = row['traceback']
# Mark bakes that used an older recipe than the latest known one.
latest_recipe = row['latest_recipe_id']
current_recipe = row['recipe_id']
if (latest_recipe is not None and
current_recipe != latest_recipe):
state += ' stale_recipe'
states.append({
'version': row['current_version'],
'recipe': row['recipe'],
'created': str(row['created']),
'state': state,
'state_message': message,
})
# NOTE(review): 'name' appears to be bytes here (decoded below) --
# confirm against the column type.
return {'uuid': str(collection_info['uuid']),
'title': collection_info['name'].decode('utf-8'),
'authors': format_authors(collection_info['authors']),
'print_style': collection_info['print_style'],
'current_recipe': collection_info['recipe_id'],
'current_ident': collection_info['module_ident'],
'current_state': states[0]['state'],
'states': states} | python | {
"resource": ""
} |
q261011 | _insert_metadata | validation | def _insert_metadata(cursor, model, publisher, message):
"""Insert a module with the given ``metadata``.

Builds the insertion parameters from ``model.metadata`` (adding
publisher, publication message, portal type and a rendered summary),
flattens person structs to id arrays, honours a pre-assigned
ident-hash when the model has one, and returns the new
``(module_ident, ident_hash)`` pair produced by the insert.
"""
params = model.metadata.copy()
params['publisher'] = publisher
params['publication_message'] = message
params['_portal_type'] = _model_to_portaltype(model)
params['summary'] = str(cnxepub.DocumentSummaryFormatter(model))
# Transform person structs to id lists for database array entry.
for person_field in ATTRIBUTED_ROLE_KEYS:
params[person_field] = [parse_user_uri(x['id'])
for x in params.get(person_field, [])]
params['parent_ident_hash'] = parse_parent_ident_hash(model)
# Assign the id and version if one is known.
if model.ident_hash is not None:
uuid, version = split_ident_hash(model.ident_hash,
split_version=True)
params['_uuid'] = uuid
params['_major_version'], params['_minor_version'] = version
# Lookup legacy ``moduleid``.
cursor.execute("SELECT moduleid FROM latest_modules WHERE uuid = %s",
(uuid,))
# There is the chance that a uuid and version have been set,
# but a previous publication does not exist. Therefore the
# moduleid will not be found. This happens on a pre-publication.
# (fetchone() returns None then, and None[0] raises TypeError.)
try:
moduleid = cursor.fetchone()[0]
except TypeError: # NoneType
moduleid = None
params['_moduleid'] = moduleid
# Verify that uuid is reserved in document_controls. If not, add it.
cursor.execute("SELECT * from document_controls where uuid = %s",
(uuid,))
try:
cursor.fetchone()[0]
except TypeError: # NoneType
cursor.execute("INSERT INTO document_controls (uuid) VALUES (%s)",
(uuid,))
created = model.metadata.get('created', None)
# Format the statement to accept the identifiers.
stmt = MODULE_INSERTION_TEMPLATE.format(**{
'__uuid__': "%(_uuid)s::uuid",
'__major_version__': "%(_major_version)s",
'__minor_version__': "%(_minor_version)s",
'__moduleid__': moduleid is None and "DEFAULT" or "%(_moduleid)s",
'__created__': created is None and "DEFAULT" or "%(created)s",
})
else:
created = model.metadata.get('created', None)
# Format the statement for defaults.
stmt = MODULE_INSERTION_TEMPLATE.format(**{
'__uuid__': "DEFAULT",
'__major_version__': "DEFAULT",
'__minor_version__': "DEFAULT",
'__moduleid__': "DEFAULT",
'__created__': created is None and "DEFAULT" or "%(created)s",
})
# Insert the metadata
cursor.execute(stmt, params)
module_ident, ident_hash = cursor.fetchone()
# Insert optional roles
_insert_optional_roles(cursor, model, module_ident)
return module_ident, ident_hash | python | {
"resource": ""
} |
q261012 | _get_file_sha1 | validation | def _get_file_sha1(file):
"""Return the SHA1 hash of the given a file-like object as ``file``.
This will seek the file back to 0 when it's finished.
"""
bits = file.read()
file.seek(0)
h = hashlib.new('sha1', bits).hexdigest()
return h | python | {
"resource": ""
} |
def _insert_file(cursor, file, media_type):
    """Upsert ``file``/``media_type`` into the files table.

    Returns the ``fileid`` and ``sha1`` of the (possibly pre-existing)
    file row.
    """
    sha1 = _get_file_sha1(file)
    cursor.execute("SELECT fileid FROM files WHERE sha1 = %s",
                   (sha1,))
    row = cursor.fetchone()
    if row:
        fileid = row[0]
    else:
        # No identical file yet; insert the raw bytes.
        cursor.execute("INSERT INTO files (file, media_type) "
                       "VALUES (%s, %s)"
                       "RETURNING fileid",
                       (psycopg2.Binary(file.read()), media_type,))
        fileid = cursor.fetchone()[0]
    return fileid, sha1
"resource": ""
} |
def publish(request):
    """Handle an EPUB publication request posted under form value 'epub'."""
    if 'epub' not in request.POST:
        raise httpexceptions.HTTPBadRequest("Missing EPUB in POST body.")
    is_pre_publication = asbool(request.POST.get('pre-publication'))
    epub_upload = request.POST['epub'].file
    try:
        epub = cnxepub.EPUB.from_file(epub_upload)
    except:  # noqa: E722
        raise httpexceptions.HTTPBadRequest('Format not recognized.')
    # Record the publication (and one entry per piece of contained
    # content) so its progress can be polled later.
    with db_connect() as db_conn:
        with db_conn.cursor() as cursor:
            epub_upload.seek(0)
            publication_id, publications = add_publication(
                cursor, epub, epub_upload, is_pre_publication)
    # Nudge the publication workflow and report its current state.
    state, messages = poke_publication_state(publication_id)
    return {
        'publication': publication_id,
        'mapping': publications,
        'state': state,
        'messages': messages,
    }
"resource": ""
} |
def get_publication(request):
    """Report the current state of a publication looked up by its id."""
    publication_id = request.matchdict['id']
    state, messages = check_publication_state(publication_id)
    return {
        'publication': publication_id,
        'state': state,
        'messages': messages,
    }
"resource": ""
} |
def includeme(config):
    """Configure the module-global beaker cache manager from settings."""
    global cache_manager
    options = parse_cache_config_options(config.registry.settings)
    cache_manager = CacheManager(**options)
"resource": ""
} |
def get(self, key, default=_sentinel):
    """
    Return the value stored under case-insensitive ``key``.
    Falls back to ``default`` when given, otherwise to None.
    :param key: The key
    :param default: The default value
    :return: The value
    """
    # Entries are (original_key, value) tuples keyed by lowercase key.
    entry = self._data.get(key.lower())
    if entry is not None:
        return entry[1]
    return default if default is not _sentinel else None
"resource": ""
} |
def pop(self, key, default=_sentinel):
    """
    Remove case-insensitive ``key`` and return its value.
    When the key is absent, ``default`` is returned if supplied,
    otherwise KeyError is raised.
    :param key: The key
    :param default: The default value
    :return: The value
    """
    if default is _sentinel:
        # No fallback given: let a missing key raise KeyError.
        entry = self._data.pop(key.lower())
    else:
        entry = self._data.pop(key.lower(), default)
    # Entries are (original_key, value) tuples; the sentinel/default
    # identity check distinguishes "found" from "fell back".
    return default if entry is default else entry[1]
"resource": ""
} |
def reversals(series, left=False, right=False):
    """Iterate reversal points in the series.
    A reversal point is a point in the series at which the first derivative
    changes sign. Reversal is undefined at the first (last) point because the
    derivative before (after) this point is undefined. The first and the last
    points may be treated as reversals by setting the optional parameters
    `left` and `right` to True.
    Parameters
    ----------
    series : iterable sequence of numbers
    left: bool, optional
        If True, yield the first point in the series (treat it as a reversal).
    right: bool, optional
        If True, yield the last point in the series (treat it as a reversal).
    Yields
    ------
    float
        Reversal points.
    """
    series = iter(series)
    # A series shorter than two points has no derivative anywhere;
    # yield nothing instead of leaking StopIteration, which becomes a
    # RuntimeError inside a generator (PEP 479, Python 3.7+).
    try:
        x_last, x = next(series), next(series)
    except StopIteration:
        return
    d_last = (x - x_last)
    if left:
        yield x_last
    # Track the most recent point so the `right` branch works even when
    # the loop below never runs (a two-point series previously raised
    # NameError on x_next).
    x_next = x
    for x_next in series:
        if x_next == x:
            continue
        d_next = x_next - x
        if d_last * d_next < 0:
            yield x
        x_last, x = x, x_next
        d_last = d_next
    if right:
        yield x_next
"resource": ""
} |
q261020 | _sort_lows_and_highs | validation | def _sort_lows_and_highs(func):
"Decorator for extract_cycles"
@functools.wraps(func)
def wrapper(*args, **kwargs):
for low, high, mult in func(*args, **kwargs):
if low < high:
yield low, high, mult
else:
yield high, low, mult
return wrapper | python | {
"resource": ""
} |
q261021 | extract_cycles | validation | def extract_cycles(series, left=False, right=False):
"""Iterate cycles in the series.
Parameters
----------
series : iterable sequence of numbers
left: bool, optional
If True, treat the first point in the series as a reversal.
right: bool, optional
If True, treat the last point in the series as a reversal.
Yields
------
cycle : tuple
Each tuple contains three floats (low, high, mult), where low and high
define cycle amplitude and mult equals to 1.0 for full cycles and 0.5
for half cycles.
"""
# Three-point rainflow counting over the stream of reversal points.
points = deque()
for x in reversals(series, left=left, right=right):
points.append(x)
while len(points) >= 3:
# Form ranges X and Y from the three most recent points
X = abs(points[-2] - points[-1])
Y = abs(points[-3] - points[-2])
if X < Y:
# Read the next point
break
elif len(points) == 3:
# Y contains the starting point
# Count Y as one-half cycle and discard the first point
yield points[0], points[1], 0.5
points.popleft()
else:
# Count Y as one cycle and discard the peak and the valley of Y
yield points[-3], points[-2], 1.0
last = points.pop()
points.pop()
points.pop()
points.append(last)
# for-else: runs once the reversal stream is exhausted.
else:
# Count the remaining ranges as one-half cycles
while len(points) > 1:
yield points[0], points[1], 0.5
points.popleft() | python | {
"resource": ""
} |
def count_cycles(series, ndigits=None, left=False, right=False):
    """Tally rainflow cycles in the series by magnitude.
    Parameters
    ----------
    series : iterable sequence of numbers
    ndigits : int, optional
        Round cycle magnitudes to this many digits before tallying.
    left: bool, optional
        Treat the first point in the series as a reversal.
    right: bool, optional
        Treat the last point in the series as a reversal.
    Returns
    -------
    Sorted list of (magnitude, count) pairs. Half cycles contribute
    0.5, so counts need not be whole numbers.
    """
    round_ = _get_round_function(ndigits)
    totals = defaultdict(float)
    for low, high, mult in extract_cycles(series, left=left, right=right):
        totals[round_(abs(high - low))] += mult
    return sorted(totals.items())
"resource": ""
} |
def render(node, strict=False):
    """Return the rendering recipe for any FST node.
    The FST mixes three node kinds: strings (leaves), dicts (branch
    nodes) and lists (ordered children). Rendering a string means
    outputting it; a list is rendered element by element; a dict must
    follow its entry in the nodes_rendering_order dictionary and its
    dependents constraints.
    Rather than exposing that algorithmic complexity, this function
    returns a structured recipe whatever the node type: a sequence of
    steps, one per child, each a 3-uple of
    - `key_type`: a string describing the child in the `item` field:
      'constant' (a string), 'node' (a dict), 'key' (an element of a
      dict), 'list' (a list) or 'formatting' (a list specialized in
      formatting);
    - `item`: the child itself, either a string, a dict or a list;
    - `render_key`: the key used to access this child from the parent
      node -- a string for a dict, a number for a list.
    "bool" `key_types` are never rendered, so they never appear here.
    For comfortable traversal of rendered FSTs, subclass RenderWalker.
    """
    if isinstance(node, list):
        return render_list(node)
    if isinstance(node, dict):
        return render_node(node, strict=strict)
    raise NotImplementedError(
        "You tried to render a %s. Only list and dicts can be rendered."
        % node.__class__.__name__)
"resource": ""
} |
def path_to_node(tree, path):
    """Return the FST node reached by walking ``path`` from ``tree``."""
    if path is None:
        return None
    current = tree
    for step in path:
        current = child_by_key(current, step)
    return current
"resource": ""
} |
q261025 | PositionFinder.before_constant | validation | def before_constant(self, constant, key):
"""Determine if we're on the targetted node.
If the targetted column is reached, `stop` and `path_found` are
set. If the targetted line is passed, only `stop` is set. This
prevents unnecessary tree travelling when the targetted column
is out of bounds.
"""
# Walk the constant piece by piece, advancing the tracked
# line/column position as we go.
newlines_split = split_on_newlines(constant)
for c in newlines_split:
if is_newline(c):
self.current.advance_line()
# if target line is passed
if self.current.line > self.target.line:
return self.STOP
else:
advance_by = len(c)
if self.is_on_targetted_node(advance_by):
# Snapshot the path; deepcopy so later mutation of
# current_path cannot corrupt the recorded result.
self.found_path = deepcopy(self.current_path)
return self.STOP
self.current.advance_columns(advance_by)
"resource": ""
} |
def get_prefix(multicodec):
    """
    Return the varint-encoded prefix for a given multicodec.
    :param str multicodec: multicodec codec name
    :return: the prefix for the given multicodec
    :rtype: byte
    :raises ValueError: if an invalid multicodec name is provided
    """
    if multicodec not in NAME_TABLE:
        raise ValueError('{} multicodec is not supported.'.format(multicodec))
    return varint.encode(NAME_TABLE[multicodec])
"resource": ""
} |
def add_prefix(multicodec, bytes_):
    """
    Prepend the multicodec prefix to the given bytes input.
    :param str multicodec: multicodec to use for prefixing
    :param bytes bytes_: data to prefix
    :return: prefixed byte data
    :rtype: bytes
    """
    return get_prefix(multicodec) + bytes_
"resource": ""
} |
def remove_prefix(bytes_):
    """
    Strip the multicodec prefix from prefixed data.
    :param bytes bytes_: multicodec prefixed data bytes
    :return: prefix removed data bytes
    :rtype: bytes
    """
    # Re-encode the decoded prefix to learn how many bytes to drop.
    prefix_length = len(varint.encode(extract_prefix(bytes_)))
    return bytes_[prefix_length:]
"resource": ""
} |
def get_codec(bytes_):
    """
    Name the multicodec used to prefix the given data.
    :param bytes bytes_: multicodec prefixed data bytes
    :return: name of the multicodec used to prefix
    :rtype: str
    """
    prefix = extract_prefix(bytes_)
    if prefix not in CODE_TABLE:
        raise ValueError('Prefix {} not present in the lookup table'.format(prefix))
    return CODE_TABLE[prefix]
"resource": ""
} |
q261030 | capture | validation | def capture(
target_url,
user_agent="archiveis (https://github.com/pastpages/archiveis)",
proxies={}
):
"""
Archives the provided URL using archive.is
Returns the URL where the capture is stored.
"""
# NOTE(review): mutable default ``proxies={}`` is shared across calls;
# it is only read here, but ``proxies=None`` would be safer.
# NOTE(review): the code talks to archive.vn while the docs say
# archive.is -- presumably mirrors of the same service; confirm.
# Put together the URL that will save our request
domain = "http://archive.vn"
save_url = urljoin(domain, "/submit/")
# Configure the request headers
headers = {
'User-Agent': user_agent,
"host": "archive.vn",
}
# Request a unique identifier for our activity
logger.debug("Requesting {}".format(domain + "/"))
get_kwargs = dict(
timeout=120,
allow_redirects=True,
headers=headers,
)
if proxies:
get_kwargs['proxies'] = proxies
response = requests.get(domain + "/", **get_kwargs)
response.raise_for_status()
# It will need to be parsed from the homepage response headers
html = str(response.content)
try:
unique_id = html.split('name="submitid', 1)[1].split('value="', 1)[1].split('"', 1)[0]
logger.debug("Unique identifier: {}".format(unique_id))
except IndexError:
# NOTE(review): logger.warn is deprecated (removed in Python 3.13);
# prefer logger.warning.
logger.warn("Unable to extract unique identifier from archive.is. Submitting without it.")
unique_id = None
# Send the capture request to archive.is with the unique id included
data = {
"url": target_url,
"anyway": 1,
}
if unique_id:
data.update({"submitid": unique_id})
post_kwargs = dict(
timeout=120,
allow_redirects=True,
headers=headers,
data=data
)
if proxies:
post_kwargs['proxies'] = proxies
logger.debug("Requesting {}".format(save_url))
response = requests.post(save_url, **post_kwargs)
response.raise_for_status()
# There are a couple ways the header can come back
if 'Refresh' in response.headers:
memento = str(response.headers['Refresh']).split(';url=')[1]
logger.debug("Memento from Refresh header: {}".format(memento))
return memento
if 'Location' in response.headers:
memento = response.headers['Location']
logger.debug("Memento from Location header: {}".format(memento))
return memento
logger.debug("Memento not found in response headers. Inspecting history.")
for i, r in enumerate(response.history):
logger.debug("Inspecting history request #{}".format(i))
logger.debug(r.headers)
if 'Location' in r.headers:
memento = r.headers['Location']
logger.debug("Memento from the Location header of {} history response: {}".format(i+1, memento))
return memento
# If there's nothing at this point, throw an error
logger.error("No memento returned by archive.is")
logger.error("Status code: {}".format(response.status_code))
logger.error(response.headers)
logger.error(response.text)
raise Exception("No memento returned by archive.is") | python | {
"resource": ""
} |
def cli(url, user_agent):
    """
    Archives the provided URL using archive.is.
    """
    # Only forward user_agent when the caller supplied one, so the
    # capture() default applies otherwise.
    kwargs = {'user_agent': user_agent} if user_agent else {}
    click.echo(capture(url, **kwargs))
"resource": ""
} |
q261032 | LiveboxPlayTv.get_channel_image | validation | def get_channel_image(self, channel, img_size=300, skip_cache=False):
"""Get the logo for a channel"""
# Scrapes the channel's French Wikipedia article for its infobox
# image and returns an https URL resized to ``img_size`` pixels,
# or None when no image is found.
from bs4 import BeautifulSoup
from wikipedia.exceptions import PageError
import re
import wikipedia
wikipedia.set_lang('fr')
if not channel:
_LOGGER.error('Channel is not set. Could not retrieve image.')
return
# Check if the image is in cache
if channel in self._cache_channel_img and not skip_cache:
img = self._cache_channel_img[channel]
_LOGGER.debug('Cache hit: %s -> %s', channel, img)
return img
channel_info = self.get_channel_info(channel)
query = channel_info['wiki_page']
if not query:
_LOGGER.debug('Wiki page is not set for channel %s', channel)
return
_LOGGER.debug('Query: %s', query)
# If there is a max image size defined use it.
if 'max_img_size' in channel_info:
if img_size > channel_info['max_img_size']:
_LOGGER.info(
'Requested image size is bigger than the max, '
'setting it to %s', channel_info['max_img_size']
)
img_size = channel_info['max_img_size']
try:
page = wikipedia.page(query)
_LOGGER.debug('Wikipedia article title: %s', page.title)
soup = BeautifulSoup(page.html(), 'html.parser')
images = soup.find_all('img')
img_src = None
# The LAST matching image wins (no break below).
# NOTE(review): <img> tags without an 'alt' attribute would raise
# KeyError here -- confirm against real article markup.
for i in images:
if i['alt'].startswith('Image illustrative'):
img_src = re.sub(r'\d+px', '{}px'.format(img_size),
i['src'])
img = 'https:{}'.format(img_src) if img_src else None
# Cache result
self._cache_channel_img[channel] = img
return img
except PageError:
# Returns None implicitly; the failure is only cached on success.
_LOGGER.error('Could not fetch channel image for %s', channel)
"resource": ""
} |
def t_t_eopen(self, t):
    r'~"|~\''
    # The raw string above is the PLY token regex -- it is functional,
    # not documentation. Enter the matching escaped-string state.
    opener = t.value[1]
    if opener == '"':
        t.lexer.push_state('escapequotes')
    elif opener == "'":
        t.lexer.push_state('escapeapostrophe')
    return t
"resource": ""
} |
def t_t_isopen(self, t):
    r'"|\''
    # The raw string above is the PLY token regex -- it is functional,
    # not documentation. Enter the matching interpolated-string state.
    opener = t.value[0]
    if opener == '"':
        t.lexer.push_state('istringquotes')
    elif opener == "'":
        t.lexer.push_state('istringapostrophe')
    return t
"resource": ""
} |
q261035 | LessLexer.t_istringapostrophe_css_string | validation | def t_istringapostrophe_css_string(self, t):
r'[^\'@]+'
# The raw string above is the PLY token regex for this rule.
# Keep the lexer's line counter accurate across multi-line chunks.
t.lexer.lineno += t.value.count('\n')
return t | python | {
"resource": ""
} |
def file(self, filename):
    """
    Feed the lexer with the contents of ``filename``; returns self.
    """
    with open(filename) as source:
        self.lexer.input(source.read())
    return self
"resource": ""
} |
def input(self, file):
    """
    Feed the lexer from `file`, which may be a path name or a
    file-like object.
    """
    if not isinstance(file, string_types):
        self.lexer.input(file.read())
    else:
        with open(file) as f:
            self.lexer.input(f.read())
"resource": ""
} |
q261038 | Scope._smixins | validation | def _smixins(self, name):
"""Inner wrapper to search for mixins by name.
"""
return (self._mixins[name] if name in self._mixins else False) | python | {
"resource": ""
} |
def _blocks(self, name):
    """Search scope levels, innermost first, for a block named ``name``.

    Falls back to a prefix match resolved through
    ``utility.blocksearch``; returns False when nothing matches.

    The original ``while i >= 0: i -= 1`` loop let ``i`` reach -1, so
    ``self[-1]`` re-scanned the last scope a second time; iterating the
    index range directly removes that redundant pass.
    """
    for i in reversed(range(len(self))):
        if name in self[i]['__names__']:
            # Exact match: the block's raw name equals the query.
            for b in self[i]['__blocks__']:
                r = b.raw()
                if r and r == name:
                    return b
        else:
            # Prefix match: descend into the candidate block.
            for b in self[i]['__blocks__']:
                r = b.raw()
                if r and name.startswith(r):
                    b = utility.blocksearch(b, name)
                    if b:
                        return b
    return False
"resource": ""
} |
q261040 | Mixin.parse_args | validation | def parse_args(self, args, scope):
"""Parse arguments to mixin. Add them to scope
as variables. Sets up special variable @arguments
as well.
args:
args (list): arguments
scope (Scope): current scope
raises:
SyntaxError
"""
arguments = list(zip(args,
[' '] * len(args))) if args and args[0] else None
# Pick the py2/py3 spelling of zip_longest; it pads the shorter of
# (call args, declared params) with None.
zl = itertools.zip_longest if sys.version_info[
0] == 3 else itertools.izip_longest
if self.args:
# NOTE(review): 'v if hasattr(v, "parse") else v' yields v either
# way -- the conditional looks vestigial; copy.copy() is what
# matters here.
parsed = [
v if hasattr(v, 'parse') else v for v in copy.copy(self.args)
]
args = args if isinstance(args, list) else [args]
vars = [
self._parse_arg(var, arg, scope)
for arg, var in zl([a for a in args], parsed)
]
for var in vars:
if var:
var.parse(scope)
if not arguments:
arguments = [v.value for v in vars if v]
if not arguments:
arguments = ''
# Expose the full argument list to the mixin body as @arguments.
Variable(['@arguments', None, arguments]).parse(scope) | python | {
"resource": ""
} |
q261041 | Color.mix | validation | def mix(self, color1, color2, weight=50, *args):
"""This algorithm factors in both the user-provided weight
and the difference between the alpha values of the two colors
to decide how to perform the weighted average of the two RGB values.
It works by first normalizing both parameters to be within [-1, 1],
where 1 indicates "only use color1", -1 indicates "only use color 0",
and all values in between indicated a proportionately weighted average.
Once we have the normalized variables w and a,
we apply the formula (w + a)/(1 + w*a)
to get the combined weight (in [-1, 1]) of color1.
This formula has two especially nice properties:
* When either w or a are -1 or 1, the combined weight is also that number
(cases where w * a == -1 are undefined, and handled as a special case).
* When a is 0, the combined weight is w, and vice versa
Finally, the weight of color1 is renormalized to be within [0, 1]
and the weight of color2 is given by 1 minus the weight of color1.
Copyright (c) 2006-2009 Hampton Catlin, Nathan Weizenbaum, and Chris Eppstein
http://sass-lang.com
args:
color1 (str): first color
color2 (str): second color
weight (int/str): weight
raises:
ValueError
returns:
str
"""
if color1 and color2:
# Accept percentage strings like '25%'.
if isinstance(weight, string_types):
weight = float(weight.strip('%'))
# Normalize weight from [0, 100] to [-1, 1].
weight = ((weight / 100.0) * 2) - 1
rgb1 = self._hextorgb(color1)
rgb2 = self._hextorgb(color2)
# NOTE(review): alpha is fixed at 0 here, so the alpha-difference
# weighting described in the docstring is currently inert and the
# combined weight reduces to (weight + 1) / 2.
alpha = 0
w1 = (((weight if weight * alpha == -1 else weight + alpha) /
(1 + weight * alpha)) + 1)
w1 = w1 / 2.0
w2 = 1 - w1
rgb = [
rgb1[0] * w1 + rgb2[0] * w2,
rgb1[1] * w1 + rgb2[1] * w2,
rgb1[2] * w1 + rgb2[2] * w2,
]
return self._rgbatohex(rgb)
raise ValueError('Illegal color values')
"resource": ""
} |
def reverse_guard(lst):
    """Negate a guard expression by flipping its comparison operators.
    not (@a > 5) -> (@a =< 5)
    Args:
        lst (list): Expression tokens
    returns:
        list
    """
    flipped = {'<': '>=', '>': '=<', '>=': '<', '=<': '>'}
    return [flipped.get(token, token) for token in lst]
"resource": ""
} |
def away_from_zero_round(value, ndigits=0):
    """Round half away from zero, matching Python 2's round() method.
    """
    if sys.version_info[0] < 3:
        return round(value, ndigits)
    # Shift, nudge half a unit toward the value's sign, floor, unshift.
    scale = 10 ** ndigits
    shifted = (value * scale) + math.copysign(0.5, value)
    return float(math.floor(shifted)) / scale
"resource": ""
} |
def convergent_round(value, ndigits=0):
    """Convergent rounding.

    Round to nearest even, similar to Python 3's round() method.

    :param float value: number to round
    :param int ndigits: decimal digits to keep
    :return: rounded number
    """
    if sys.version_info[0] < 3:
        if value < 0.0:
            # Bug fix: ndigits was previously dropped in the recursive
            # call, so negative values were always rounded to 0 digits.
            return -convergent_round(-value, ndigits)
        epsilon = 0.0000001
        integral_part, _ = divmod(value, 1)
        # Exact .5 fraction (within epsilon): round to the even neighbor.
        if abs(value - (integral_part + 0.5)) < epsilon:
            if integral_part % 2.0 < epsilon:
                return integral_part
            else:
                nearest_even = integral_part + 0.5
                return math.ceil(nearest_even)
    # Python 3's builtin round() is already convergent (banker's rounding).
    return round(value, ndigits)
"resource": ""
} |
def permutations_with_replacement(iterable, r=None):
    """Yield successive r-length permutations of the elements.

    Similar to itertools.permutations, but without filtering repeated
    values; each position may reuse any element.
    """
    pool = tuple(iterable)
    if r is None:
        r = len(pool)
    # product over the pool itself yields the same order as indexing
    # product(range(n)) did; each result is materialized as a list.
    for combo in itertools.product(pool, repeat=r):
        yield list(combo)
"resource": ""
} |
def post_parse(self):
    """Run the deferred post-parse cycle.

    The nodejs version of less allows calls to mixins not yet defined
    or known to the parser, so all mixin calls are deferred until after
    the first cycle, when every name is known.
    """
    if not self.result:
        return
    parsed_units = []
    for unit in self.result:
        try:
            parsed_units.append(unit.parse(self.scope))
        except SyntaxError as exc:
            self.handle_error(exc, 0)
    self.result = list(utility.flatten(parsed_units))
"resource": ""
} |
def NextPage(gh):
    """
    Check whether a GitHub call returned multiple pages of data.

    :param gh: GitHub() instance
    :rtype: int
    :return: number of next page or 0 if no next page
    """
    headers = dict(gh.getheaders())
    link = headers.get('Link')
    if link is None:
        return 0
    # The Link header looks like:  <url?page=2>; rel="next", <url?page=5>; rel="last"
    for part in link.split(','):
        subparts = part.split(';')
        sub = subparts[1].split('=')
        if sub[0].strip() == 'rel' and sub[1] == '"next"':
            match = re.match(
                r'.*page=(\d+).*', subparts[0],
                re.IGNORECASE | re.DOTALL | re.UNICODE
            )
            return int(match.groups()[0])
    return 0
"resource": ""
} |
def get_all_tags(self):
    """
    Fetch all tags for the repository from GitHub, page by page.

    Exits the whole program if the repository has no tags at all.

    :rtype: list
    :return: tags in repository
    """
    verbose = self.options.verbose
    gh = self.github
    user = self.options.user
    repo = self.options.project
    if verbose:
        print("Fetching tags...")
    tags = []
    page = 1
    # GitHub paginates results; NextPage() returns 0 once the
    # Link header reports no further pages.
    while page > 0:
        if verbose > 2:
            print(".", end="")
        rc, data = gh.repos[user][repo].tags.get(
            page=page, per_page=PER_PAGE_NUMBER)
        if rc == 200:
            tags.extend(data)
        else:
            self.raise_GitHubError(rc, data, gh.getheaders())
        page = NextPage(gh)
    if verbose > 2:
        print(".")
    if len(tags) == 0:
        # A changelog cannot be generated without tags, so bail out.
        if not self.options.quiet:
            print("Warning: Can't find any tags in repo. Make sure, that "
                  "you push tags to remote repo via 'git push --tags'")
        exit()
    if verbose > 1:
        print("Found {} tag(s)".format(len(tags)))
    return tags
"resource": ""
} |
def fetch_closed_pull_requests(self):
    """
    Fetch all closed pull requests, page by page.

    They are needed to detect the "merged_at" parameter.

    :rtype: list
    :return: all closed pull requests
    """
    pull_requests = []
    verbose = self.options.verbose
    gh = self.github
    user = self.options.user
    repo = self.options.project
    if verbose:
        print("Fetching closed pull requests...")
    page = 1
    # NextPage() returns 0 when GitHub reports no further pages.
    while page > 0:
        if verbose > 2:
            print(".", end="")
        if self.options.release_branch:
            # Restrict PRs to those whose base is the release branch.
            rc, data = gh.repos[user][repo].pulls.get(
                page=page, per_page=PER_PAGE_NUMBER, state='closed',
                base=self.options.release_branch
            )
        else:
            rc, data = gh.repos[user][repo].pulls.get(
                page=page, per_page=PER_PAGE_NUMBER, state='closed',
            )
        if rc == 200:
            pull_requests.extend(data)
        else:
            self.raise_GitHubError(rc, data, gh.getheaders())
        page = NextPage(gh)
    if verbose > 2:
        print(".")
    if verbose > 1:
        print("\tfetched {} closed pull requests.".format(
            len(pull_requests))
        )
    return pull_requests
"resource": ""
} |
def fetch_repo_creation_date(self):
    """
    Get the creation date of the repository from GitHub.

    :rtype: str, str
    :return: special tag name, creation date as ISO date string
    """
    user = self.options.user
    repo = self.options.project
    rc, data = self.github.repos[user][repo].get()
    if rc != 200:
        self.raise_GitHubError(rc, data, self.github.getheaders())
        return None, None
    return REPO_CREATED_TAG_NAME, data["created_at"]
"resource": ""
} |
def fetch_events_async(self, issues, tag_name):
    """
    Fetch events for all issues concurrently and attach them in place.

    Each issue dict gets an 'events' list; self.events_cnt accumulates
    the total number of events fetched.

    :param list issues: all issues
    :param str tag_name: name of the tag to fetch events for
    :returns: Nothing
    """
    if not issues:
        return issues
    max_simultaneous_requests = self.options.max_simultaneous_requests
    verbose = self.options.verbose
    gh = self.github
    user = self.options.user
    repo = self.options.project
    self.events_cnt = 0
    if verbose:
        print("fetching events for {} {}... ".format(
            len(issues), tag_name)
        )

    def worker(issue):
        # Collect every event page for a single issue into issue['events'].
        page = 1
        issue['events'] = []
        while page > 0:
            rc, data = gh.repos[user][repo].issues[
                issue['number']].events.get(
                page=page, per_page=PER_PAGE_NUMBER)
            if rc == 200:
                issue['events'].extend(data)
                # NOTE(review): += on events_cnt is not atomic across
                # threads; races are unlikely under CPython's GIL but
                # the counter should be treated as approximate.
                self.events_cnt += len(data)
            else:
                self.raise_GitHubError(rc, data, gh.getheaders())
            page = NextPage(gh)

    # Start workers in batches of max_simultaneous_requests threads;
    # all threads are joined together at the end.
    threads = []
    cnt = len(issues)
    for i in range(0, (cnt // max_simultaneous_requests) + 1):
        for j in range(max_simultaneous_requests):
            idx = i * max_simultaneous_requests + j
            if idx == cnt:
                break
            t = threading.Thread(target=worker, args=(issues[idx],))
            threads.append(t)
            t.start()
            if verbose > 2:
                print(".", end="")
                if not idx % PER_PAGE_NUMBER:
                    print("")
    for t in threads:
        t.join()
    if verbose > 2:
        print(".")
"resource": ""
} |
def fetch_date_of_tag(self, tag):
    """
    Fetch the commit time for a tag from the repository.

    :param dict tag: dictionary with tag information
    :rtype: str
    :return: time of specified tag as ISO date string
    """
    if self.options.verbose > 1:
        print("\tFetching date for tag {}".format(tag["name"]))
    sha = tag["commit"]["sha"]
    rc, data = self.github.repos[self.options.user][
        self.options.project].git.commits[sha].get()
    if rc == 200:
        return data["committer"]["date"]
    self.raise_GitHubError(rc, data, self.github.getheaders())
"resource": ""
} |
def fetch_commit(self, event):
    """
    Fetch commit data for the commit referenced by an event.

    :param dict event: dictionary with event information
    :rtype: dict
    :return: dictionary with commit data
    """
    rc, data = self.github.repos[self.options.user][
        self.options.project].git.commits[event["commit_id"]].get()
    if rc == 200:
        return data
    self.raise_GitHubError(rc, data, self.github.getheaders())
"resource": ""
} |
def run(self):
    """
    The entry point of this script to generate the change log.

    'ChangelogGeneratorError' is thrown when one of the specified tags
    was not found in the list of tags.
    """
    # Both a user and a project are mandatory to query GitHub.
    if not self.options.project or not self.options.user:
        print("Project and/or user missing. "
              "For help run:\n pygcgen --help")
        return
    if not self.options.quiet:
        print("Generating changelog...")
    log = None
    try:
        log = self.generator.compound_changelog()
    except ChangelogGeneratorError as err:
        # Print the message in bold red, then exit with failure status.
        print("\n\033[91m\033[1m{}\x1b[0m".format(err.args[0]))
        exit(1)
    if not log:
        if not self.options.quiet:
            print("Empty changelog generated. {} not written.".format(
                self.options.output)
            )
        return
    if self.options.no_overwrite:
        # checkname() presumably returns a non-clobbering variant of the
        # output filename — verify against its definition.
        out = checkname(self.options.output)
    else:
        out = self.options.output
    # Write as UTF-8 regardless of the platform default encoding.
    with codecs.open(out, "w", "utf-8") as fh:
        fh.write(log)
    if not self.options.quiet:
        print("Done!")
        print("Generated changelog written to {}".format(out))
"resource": ""
} |
def parse(data):
    """
    Parse the given ChangeLog data into a list of dicts.

    @param [String] data File data from the ChangeLog.md
    @return [Array<Hash>] Parsed data, e.g.
        [{'version' => ..., 'url' => ..., 'date' => ..., 'content' => ...}, ...]
    """
    # Split on "## ..." headings; the chunk before the first heading is
    # preamble and is discarded.
    sections = re.compile("^## .+$", re.MULTILINE).split(data)[1:]
    headings = re.findall("^## .+?$", data, re.MULTILINE)
    parsed = []
    for heading, body in zip(headings, sections):
        entry = parse_heading(heading)
        entry["content"] = body
        parsed.append(entry)
    return parsed
"resource": ""
} |
q261056 | DaemonContext._signal_handler_map | validation | def _signal_handler_map(self):
""" Create the signal handler map
create a dictionary with signal:handler mapping based on
self.signal_map
:return: dict
"""
result = {}
for signum, handler in self.signal_map.items():
result[signum] = self._get_signal_handler(handler)
return result | python | {
"resource": ""
} |
def open(self):
    """ Daemonize this process

    Do everything that is needed to become a Unix daemon.

    :return: None
    :raise: DaemonError
    """
    # Re-opening an already daemonized context is a no-op.
    if self.is_open:
        return
    try:
        os.chdir(self.working_directory)
        if self.chroot_directory:
            os.chroot(self.chroot_directory)
        # Drop the group before the user: after setuid() the process may
        # no longer be privileged enough to change its group.
        os.setgid(self.gid)
        os.setuid(self.uid)
        os.umask(self.umask)
    except OSError as err:
        raise DaemonError('Setting up Environment failed: {0}'
                          .format(err))

    if self.prevent_core:
        # Disable core dumps (RLIMIT_CORE = 0) so no process state can
        # be written to disk on a crash.
        try:
            resource.setrlimit(resource.RLIMIT_CORE, (0, 0))
        except Exception as err:
            raise DaemonError('Could not disable core files: {0}'
                              .format(err))

    if self.detach_process:
        # Classic double fork with setsid() in between: the first fork
        # lets the parent exit, setsid() detaches from the controlling
        # terminal, and the second fork prevents reacquiring one.
        try:
            if os.fork() > 0:
                os._exit(0)
        except OSError as err:
            raise DaemonError('First fork failed: {0}'.format(err))
        os.setsid()
        try:
            if os.fork() > 0:
                os._exit(0)
        except OSError as err:
            raise DaemonError('Second fork failed: {0}'.format(err))

    # Install the configured signal handlers.
    for (signal_number, handler) in self._signal_handler_map.items():
        signal.signal(signal_number, handler)

    # Close all file descriptors except the preserved ones, then point
    # the standard streams at their configured replacements.
    close_filenos(self._files_preserve)
    redirect_stream(sys.stdin, self.stdin)
    redirect_stream(sys.stdout, self.stdout)
    redirect_stream(sys.stderr, self.stderr)
    if self.pidfile:
        self.pidfile.acquire()
    self._is_open = True
"resource": ""
} |
def user_and_project_from_git(self, options, arg0=None, arg1=None):
    """ Detect user and project from git remote configuration.

    :param options: parsed command-line options
    :param arg0: optional explicit user/remote argument
    :param arg1: optional explicit project argument
    :rtype: str, str
    :return: (user, project) or (None, None) when undetectable
    """
    user, project = self.user_project_from_option(options, arg0, arg1)
    if user and project:
        return user, project
    try:
        remote = subprocess.check_output(
            [
                'git', 'config', '--get',
                'remote.{0}.url'.format(options.git_remote)
            ]
        )
    except subprocess.CalledProcessError:
        # git ran but the remote is not configured.
        return None, None
    except OSError:
        # Bug fix: this previously caught `WindowsError`, which does not
        # exist on non-Windows platforms and raised a NameError there.
        # OSError covers "git binary not found" on every platform.
        print("git binary not found.")
        exit(1)
    else:
        return self.user_project_from_remote(remote)
"resource": ""
} |
def timestring_to_datetime(timestring):
    """
    Convert an ISO formatted date and time string to a datetime object.

    :param str timestring: String with date and time in ISO format.
    :rtype: datetime
    :return: datetime object
    """
    # dateutil may emit UnicodeWarning for some inputs; silence it
    # locally instead of globally.
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", category=UnicodeWarning)
        return dateutil_parser(timestring)
"resource": ""
} |
def fetch_events_for_issues_and_pr(self):
    """
    Fetch events for issues and pull requests.

    @return [Array] array of fetched issues
    """
    # Fetch events asynchronously for both collections.
    for collection, label in ((self.issues, "issues"),
                              (self.pull_requests, "pull requests")):
        self.fetcher.fetch_events_async(collection, label)
"resource": ""
} |
def fetch_tags_dates(self):
    """Fetch the dates of all filtered tags concurrently.

    Spawns worker threads in batches; each worker resolves one tag's
    date via self.get_time_of_tag().
    """
    if self.options.verbose:
        print(
            "Fetching dates for {} tags...".format(len(self.filtered_tags))
        )

    def worker(tag):
        # get_time_of_tag caches the result (tag_times_dict) as a side
        # effect; the return value is not needed here.
        self.get_time_of_tag(tag)

    threads = []
    max_threads = 50
    cnt = len(self.filtered_tags)
    for i in range(0, (cnt // max_threads) + 1):
        for j in range(max_threads):
            # Consistency fix: the index previously hard-coded 50
            # instead of max_threads; the values happened to match, but
            # changing the batch size would have silently broken it.
            idx = i * max_threads + j
            if idx == cnt:
                break
            t = threading.Thread(target=worker,
                                 args=(self.filtered_tags[idx],))
            threads.append(t)
            t.start()
            if self.options.verbose > 2:
                print(".", end="")
    for t in threads:
        t.join()
    if self.options.verbose > 2:
        print(".")
    if self.options.verbose > 1:
        print("Fetched dates for {} tags.".format(
            len(self.tag_times_dict))
        )
"resource": ""
} |
def detect_actual_closed_dates(self, issues, kind):
    """
    Find correct closed dates for issues that were closed by commits.

    :param list issues: issues to check
    :param str kind: either "issues" or "pull requests"
    :rtype: list
    :return: deep copies of the issues that got an 'actual_date'
    """
    if self.options.verbose:
        print("Fetching closed dates for {} {}...".format(
            len(issues), kind)
        )
    kept = []
    # Bug fix: the original iterated a list while calling .remove() on
    # it, which skips the element after every removal; build a fresh
    # list instead. enumerate() also replaces the O(n^2) list.index().
    for index, source_issue in enumerate(issues):
        if self.options.verbose > 2:
            print(".", end="")
            if not index % 30:
                print("")
        issue = copy.deepcopy(source_issue)
        self.find_closed_date_by_commit(issue)
        if issue.get('actual_date', False):
            kept.append(issue)
        elif issue.get('closed_at', False):
            print("Skipping closed non-merged issue: #{0} {1}".format(
                issue["number"], issue["title"]))
    if self.options.verbose > 2:
        print(".")
    return kept
"resource": ""
} |
def find_closed_date_by_commit(self, issue):
    """
    Fill the 'actual_date' field of an issue from the event that closed
    (or, for pull requests, merged) it.

    :param dict issue: issue to edit
    """
    events = issue.get('events')
    if not events:
        return
    # PRs are dated by their "merged" event, plain issues by "closed".
    wanted = "merged" if 'merged_at' in issue else "closed"
    # Walk newest-first so a reopened-and-reclosed issue gets the
    # latest closing event (events arrive in date order).
    events.reverse()
    for event in events:
        if event["event"] == wanted:
            self.set_date_from_event(event, issue)
            return
    # TODO: assert issues, that remain without
    # 'actual_date' hash for some reason.
    print("\nWARNING: Issue without 'actual_date':"
          " #{0} {1}".format(issue["number"], issue["title"]))
"resource": ""
} |
def set_date_from_event(self, event, issue):
    """
    Set the issue's 'actual_date' from a closing/merging event.

    Falls back to the issue's own 'closed_at' time when the event has
    no commit or the commit cannot be fetched.

    :param dict event: event data
    :param dict issue: issue data
    """
    commit_id = event.get('commit_id', None)
    if not commit_id:
        issue['actual_date'] = timestring_to_datetime(issue['closed_at'])
        return
    try:
        commit = self.fetcher.fetch_commit(event)
        issue['actual_date'] = timestring_to_datetime(
            commit['author']['date']
        )
    except ValueError:
        print("WARNING: Can't fetch commit {0}. "
              "It is probably referenced from another repo.".
              format(commit_id))
        issue['actual_date'] = timestring_to_datetime(issue['closed_at'])
"resource": ""
} |
def encapsulate_string(raw_string):
    """
    Escape markdown special characters so the text renders literally.

    :param str raw_string: string to encapsulate
    :rtype: str
    :return: encapsulated input string
    """
    # Bug fix: str.replace() returns a new string; the original call
    # discarded the result, so backslashes were never escaped.
    raw_string = raw_string.replace('\\', '\\\\')
    return re.sub(r"([<>*_()\[\]#])", r"\\\1", raw_string)
"resource": ""
} |
def compound_changelog(self):
    """
    Main function to start change log generation.

    :rtype: str
    :return: Generated change log file
    """
    # Collect tags (sorted newest-first) and all issues/PRs up front.
    self.fetch_and_filter_tags()
    tags_sorted = self.sort_tags_by_date(self.filtered_tags)
    self.filtered_tags = tags_sorted
    self.fetch_and_filter_issues_and_pr()

    # Optional front matter, then the configured header line.
    log = str(self.options.frontmatter) \
        if self.options.frontmatter else u""
    log += u"{0}\n\n".format(self.options.header)
    if self.options.unreleased_only:
        log += self.generate_unreleased_section()
    else:
        log += self.generate_log_for_all_tags()
    # Append an existing base changelog, when configured and readable.
    try:
        with open(self.options.base) as fh:
            log += fh.read()
    except (TypeError, IOError):
        # TypeError: options.base is None; IOError: file not readable.
        pass
    return log
"resource": ""
} |
def generate_sub_section(self, issues, prefix):
    """
    Generate a formatted list of issues for the changelog.

    :param list issues: Issues to put in sub-section.
    :param str prefix: Title of sub-section.
    :rtype: str
    :return: Generated ready-to-add sub-section ("" when no issues).
    """
    if not issues:
        return ""
    lines = []
    # The heading is suppressed when the simple-list option is set.
    if not self.options.simple_list:
        lines.append(u"{0}\n".format(prefix))
    for issue in issues:
        lines.append(u"- {0}".format(self.get_string_for_issue(issue)))
    return u"\n".join(lines) + "\n\n"
"resource": ""
} |
def generate_header(self, newer_tag_name, newer_tag_link,
                    newer_tag_time,
                    older_tag_link, project_url):
    """
    Generate a header for a tag section with specific parameters.

    :param str newer_tag_name: Name (title) of newer tag.
    :param str newer_tag_link: Tag name of newer tag, used for links.
        Could be same as **newer_tag_name** or a special value
        like `HEAD`.
    :param datetime newer_tag_time: Date and time the newer tag was created.
    :param str older_tag_link: Tag name of older tag, used for links.
    :param str project_url: URL for current project.
    :rtype: str
    :return: Generated ready-to-add tag section header.
    """
    time_string = newer_tag_time.strftime(self.options.date_format)

    # Resolve the release link: explicit template or tree URL.
    if self.options.release_url:
        release_url = self.options.release_url.format(newer_tag_link)
    else:
        release_url = u"{project_url}/tree/{newer_tag_link}".format(
            project_url=project_url, newer_tag_link=newer_tag_link)

    is_dateless_unreleased = (
        not self.options.unreleased_with_date
        and newer_tag_name == self.options.unreleased_label
    )
    if is_dateless_unreleased:
        log = u"## [{newer_tag_name}]({release_url})\n\n".format(
            newer_tag_name=newer_tag_name, release_url=release_url)
    else:
        log = u"## [{newer_tag_name}]({release_url}) " \
              u"({time_string})\n".format(
                  newer_tag_name=newer_tag_name,
                  release_url=release_url,
                  time_string=time_string
              )

    # Compare link only makes sense between two real tags.
    if self.options.compare_link \
            and older_tag_link != REPO_CREATED_TAG_NAME:
        log += u"[Full Changelog]"
        log += u"({project_url}/compare/{older_tag_link}".format(
            project_url=project_url,
            older_tag_link=older_tag_link,
        )
        log += u"...{newer_tag_link})\n\n".format(
            newer_tag_link=newer_tag_link
        )
    return log
"resource": ""
} |
def generate_log_between_tags(self, older_tag, newer_tag):
    """
    Generate the changelog section between two tags.

    :param dict older_tag: All issues before this tag's date are
        excluded. May be a special value when **newer_tag** is the
        first tag (repo creation time).
    :param dict newer_tag: All issues after this tag's date are
        excluded. May be the title of the unreleased section.
    :rtype: str
    :return: Generated ready-to-add tag section for the newer tag.
    """
    issues, pull_requests = \
        self.filter_issues_for_tags(newer_tag, older_tag)

    older_tag_name = older_tag["name"] if older_tag \
        else self.detect_since_tag()

    # Do not generate an unreleased section if it would be empty.
    if not issues and not pull_requests:
        return ""
    return self.generate_log_for_tag(
        pull_requests, issues, newer_tag, older_tag_name)
"resource": ""
} |
def filter_issues_for_tags(self, newer_tag, older_tag):
    """
    Apply time and milestone filters to issues and pull requests.

    :param dict older_tag: lower bound of the tag range (exclusive).
    :param dict newer_tag: upper bound of the tag range (inclusive);
        may be the title of the unreleased section.
    :rtype: list(dict), list(dict)
    :return: Filtered issues and pull requests.
    """
    prs = self.delete_by_time(self.pull_requests, older_tag, newer_tag)
    issues = self.delete_by_time(self.issues, older_tag, newer_tag)
    if self.options.filter_issues_by_milestone:
        tag_name = newer_tag["name"] if newer_tag else None
        # Delete excess irrelevant issues (according to milestones).
        # Issue #22.
        issues = self.filter_by_milestone(
            issues, tag_name, self.issues
        )
        prs = self.filter_by_milestone(
            prs, tag_name, self.pull_requests
        )
    return issues, prs
"resource": ""
} |
def generate_log_for_all_tags(self):
    """
    The full cycle of generation for the whole project.

    :rtype: str
    :return: The complete change log for released tags.
    """
    if self.options.verbose:
        print("Generating log...")
    # Keep a deep copy of the issues; presumably later steps consume
    # entries from it — verify against the part1/part2 helpers.
    self.issues2 = copy.deepcopy(self.issues)

    log1 = ""
    if self.options.with_unreleased:
        log1 = self.generate_unreleased_section()

    log = ""
    # NOTE(review): part1/part2 receive the accumulated `log` and their
    # results are appended to it; whether they actually use the passed-in
    # value cannot be seen from here — check their definitions.
    for index in range(len(self.filtered_tags) - 1):
        log += self.do_generate_log_for_all_tags_part1(log, index)
    # The configured tag separator goes between the unreleased section
    # and the released-tag sections.
    if self.options.tag_separator and log1:
        log = log1 + self.options.tag_separator + log
    else:
        log = log1 + log
    if len(self.filtered_tags) != 0:
        log += self.do_generate_log_for_all_tags_part2(log)
    return log
"resource": ""
} |
def generate_unreleased_section(self):
    """
    Generate the log section for unreleased closed issues.

    :rtype: str
    :return: Generated ready-to-add unreleased section.
    """
    if not self.filtered_tags:
        return ""
    # Treat "now" (UTC) as the timestamp of a pseudo-tag that covers
    # everything newer than the latest real tag.
    utc_now = datetime.datetime.utcnow().replace(
        tzinfo=dateutil.tz.tzutc())
    head_tag = {"name": self.options.unreleased_label}
    self.tag_times_dict[head_tag["name"]] = utc_now
    return self.generate_log_between_tags(self.filtered_tags[0], head_tag)
"resource": ""
} |
def get_string_for_issue(self, issue):
    """
    Format a single issue as one markdown changelog line.

    Example output:
    - Add coveralls integration [\\#223](https://github.com/skywinder/github-changelog-generator/pull/223) (@skywinder)

    :param dict issue: Fetched issue from GitHub.
    :rtype: str
    :return: Markdown-formatted single issue.
    """
    escaped_title = self.encapsulate_string(issue['title'])
    try:
        line = u"{0} [\\#{1}]({2})".format(
            escaped_title, issue["number"], issue["html_url"]
        )
    except UnicodeEncodeError:
        # TODO: why did i add this? Is it needed?
        line = "ERROR ERROR ERROR: #{0} {1}".format(
            issue["number"], issue['title']
        )
        print(line, '\n', issue["html_url"])
    return self.issue_line_with_user(line, issue)
"resource": ""
} |
def issue_line_with_user(self, line, issue):
    """
    Append the author's profile link to a pull-request line when the
    author option is enabled.

    :param str line: String containing a markdown-formatted single issue.
    :param dict issue: Fetched issue from GitHub.
    :rtype: str
    :return: Issue line with added author link.
    """
    # Only PRs get an author suffix, and only when requested.
    if not self.options.author or not issue.get("pull_request"):
        return line
    user = issue.get("user")
    if not user:
        return line + u" (Null user)"
    if self.options.username_as_tag:
        return line + u" (@{0})".format(user["login"])
    return line + u" ([{0}]({1}))".format(
        user["login"], user["html_url"])
"resource": ""
} |
def generate_log_for_tag(self,
                         pull_requests,
                         issues,
                         newer_tag,
                         older_tag_name):
    """
    Generate the log for one tag section: header plus body.

    :param list(dict) pull_requests: List of PR's in this tag section.
    :param list(dict) issues: List of issues in this tag section.
    :param dict newer_tag: Github data of tag for this section.
    :param str older_tag_name: Older tag, used for the links. May be a
        special value when **newer_tag** is the first tag.
    :rtype: str
    :return: Ready-to-add and parsed tag section.
    """
    newer_tag_link, newer_tag_name, \
        newer_tag_time = self.detect_link_tag_time(newer_tag)
    # Bug fix: this previously read
    # `"https://github.com" or self.options.github_endpoint`, which
    # always evaluates to the literal and silently ignored a configured
    # GitHub endpoint; the fallback order is now correct.
    github_site = self.options.github_endpoint or "https://github.com"
    project_url = "{0}/{1}/{2}".format(
        github_site, self.options.user, self.options.project)

    log = self.generate_header(newer_tag_name, newer_tag_link,
                               newer_tag_time, older_tag_name, project_url)
    if self.options.issues:
        # Generate issues:
        log += self.issues_to_log(issues, pull_requests)
    if self.options.include_pull_request:
        # Generate pull requests:
        log += self.generate_sub_section(
            pull_requests, self.options.merge_prefix
        )
    return log
"resource": ""
} |
def issues_to_log(self, issues, pull_requests):
    """
    Generate a ready-to-paste log from issues and pull requests.

    :param list(dict) issues: List of issues in this tag section.
    :param list(dict) pull_requests: List of PR's in this tag section.
    :rtype: str
    :return: Generated log for issues and pull requests.
    """
    sections, leftover_issues = self.parse_by_sections(
        issues, pull_requests)
    parts = [self.generate_sub_section(items, heading)
             for heading, items in sections.items()]
    # Issues that matched no section go under the generic prefix.
    parts.append(self.generate_sub_section(
        leftover_issues, self.options.issue_prefix))
    return "".join(parts)
"resource": ""
} |
def exclude_issues_by_labels(self, issues):
    """
    Delete all issues carrying a label from the exclude-labels option.

    :param list(dict) issues: All issues for tag.
    :rtype: list(dict)
    :return: Filtered issues (a deep copy when no filter is set).
    """
    excluded = self.options.exclude_labels
    if not excluded:
        return copy.deepcopy(issues)
    # Collect the numbers of issues with at least one excluded label,
    # then keep everything else.
    flagged = {
        issue["number"]
        for issue in issues
        if any(label["name"] in excluded for label in issue["labels"])
    }
    return [issue for issue in issues if issue["number"] not in flagged]
"resource": ""
} |
def find_issues_to_add(all_issues, tag_name):
    """
    Select all issues that belong to a tag according to their milestone.

    :param list(dict) all_issues: All issues.
    :param str tag_name: Name (title) of tag.
    :rtype: List[dict]
    :return: Deep copies of the issues filtered by milestone.
    """
    return [
        copy.deepcopy(issue)
        for issue in all_issues
        if issue.get("milestone")
        and issue["milestone"]["title"] == tag_name
    ]
"resource": ""
} |
def delete_by_time(self, issues, older_tag, newer_tag):
    """
    Keep only issues whose actual close date lies inside the tag range.

    :param list(dict) issues: Issues to filter.
    :param dict older_tag: lower bound (exclusive); may be a special
        value when **newer_tag** is the first tag.
    :param dict newer_tag: upper bound (inclusive); may be the title of
        the unreleased section.
    :rtype: list(dict)
    :return: Deep copies of the issues in range.
    """
    if not older_tag and not newer_tag:
        # No tags specified -> return the array unchanged (as a copy).
        return copy.deepcopy(issues)
    upper = self.get_time_of_tag(newer_tag)
    lower = self.get_time_of_tag(older_tag)
    return [
        copy.deepcopy(issue)
        for issue in issues
        if issue.get('actual_date')
        and lower < issue['actual_date'] <= upper
    ]
"resource": ""
} |
def include_issues_by_labels(self, all_issues):
    """
    Keep issues selected by include-labels plus label-less issues.

    :param list(dict) all_issues: All issues.
    :rtype: list(dict)
    :return: Filtered issues, preserving the original order.
    """
    by_labels = self.filter_by_include_labels(all_issues)
    without_labels = self.filter_wo_labels(all_issues)
    wanted = {issue["number"] for issue in by_labels}
    wanted.update(issue["number"] for issue in without_labels)
    return [issue for issue in all_issues if issue["number"] in wanted]
"resource": ""
} |
def filter_wo_labels(self, all_issues):
    """
    Collect all issues that carry no label.

    :rtype: list(dict)
    :return: Issues without labels; empty when the
        add_issues_wo_labels option is set.
    """
    if self.options.add_issues_wo_labels:
        return []
    return [issue for issue in all_issues if not issue['labels']]
"resource": ""
} |
def filter_by_include_labels(self, issues):
    """
    Keep only issues carrying at least one label from include_labels.

    :param list(dict) issues: Pre-filtered issues.
    :rtype: list(dict)
    :return: Filtered issues (a deep copy when no filter is set).
    """
    wanted = set(self.options.include_labels or [])
    if not wanted:
        return copy.deepcopy(issues)
    return [
        issue for issue in issues
        if wanted.intersection(
            label["name"] for label in issue["labels"])
    ]
"resource": ""
} |
def get_filtered_pull_requests(self, pull_requests):
    """
    Fetch missing PR parameters and apply the configured filters.

    Includes PRs with labels from options.include_labels and excludes
    those matching options.exclude_labels, then keeps only merged PRs.

    :param list(dict) pull_requests: All pull requests.
    :rtype: list(dict)
    :return: Filtered pull requests.
    """
    filtered = self.filter_by_labels(pull_requests, "pull requests")
    filtered = self.filter_merged_pull_requests(filtered)
    if self.options.verbose > 1:
        print("\tremaining pull requests: {}".format(len(filtered)))
    return filtered
"resource": ""
} |
q261084 | Generator.filter_merged_pull_requests | validation | def filter_merged_pull_requests(self, pull_requests):
"""
This method filter only merged PR and fetch missing required
attributes for pull requests. Using merged date is more correct
than closed date.
:param list(dict) pull_requests: Pre-filtered pull requests.
:rtype: list(dict)
:return:
"""
if self.options.verbose:
print("Fetching merge date for pull requests...")
closed_pull_requests = self.fetcher.fetch_closed_pull_requests()
if not pull_requests:
return []
pulls = copy.deepcopy(pull_requests)
for pr in pulls:
fetched_pr = None
for fpr in closed_pull_requests:
if fpr['number'] == pr['number']:
fetched_pr = fpr
if fetched_pr:
pr['merged_at'] = fetched_pr['merged_at']
closed_pull_requests.remove(fetched_pr)
for pr in pulls:
if not pr.get('merged_at'):
pulls.remove(pr)
return pulls | python | {
"resource": ""
} |
q261085 | Generator.fetch_and_filter_tags | validation | def fetch_and_filter_tags(self):
"""
Fetch and filter tags, fetch dates and sort them in time order.
"""
self.all_tags = self.fetcher.get_all_tags()
self.filtered_tags = self.get_filtered_tags(self.all_tags)
self.fetch_tags_dates() | python | {
"resource": ""
} |
q261086 | Generator.sort_tags_by_date | validation | def sort_tags_by_date(self, tags):
"""
Sort all tags by date.
:param list(dict) tags: All tags.
:rtype: list(dict)
:return: Sorted list of tags.
"""
if self.options.verbose:
print("Sorting tags...")
tags.sort(key=lambda x: self.get_time_of_tag(x))
tags.reverse()
return tags | python | {
"resource": ""
} |
q261087 | Generator.get_time_of_tag | validation | def get_time_of_tag(self, tag):
"""
Get date and time for tag, fetching it if not already cached.
:param dict tag: Tag to get the datetime for.
:rtype: datetime
:return: datetime for specified tag.
"""
if not tag:
raise ChangelogGeneratorError("tag is nil")
name_of_tag = tag["name"]
time_for_name = self.tag_times_dict.get(name_of_tag, None)
if time_for_name:
return time_for_name
else:
time_string = self.fetcher.fetch_date_of_tag(tag)
try:
self.tag_times_dict[name_of_tag] = \
timestring_to_datetime(time_string)
except UnicodeWarning:
print("ERROR ERROR:", tag)
self.tag_times_dict[name_of_tag] = \
timestring_to_datetime(time_string)
return self.tag_times_dict[name_of_tag] | python | {
"resource": ""
} |
q261088 | Generator.detect_link_tag_time | validation | def detect_link_tag_time(self, tag):
"""
Detect link, name and time for specified tag.
:param dict tag: Tag data.
:rtype: str, str, datetime
:return: Link, name and time of the tag.
"""
# if tag is nil - set current time
newer_tag_time = self.get_time_of_tag(tag) if tag \
else datetime.datetime.now()
# if it's future release tag - set this value
if tag["name"] == self.options.unreleased_label \
and self.options.future_release:
newer_tag_name = self.options.future_release
newer_tag_link = self.options.future_release
elif tag["name"] is not self.options.unreleased_label :
# put unreleased label if there is no name for the tag
newer_tag_name = tag["name"]
newer_tag_link = newer_tag_name
else:
newer_tag_name = self.options.unreleased_label
newer_tag_link = "HEAD"
return [newer_tag_link, newer_tag_name, newer_tag_time] | python | {
"resource": ""
} |
q261089 | Generator.version_of_first_item | validation | def version_of_first_item(self):
"""
Try to detect the newest tag from self.options.base, otherwise
return a special value indicating the creation of the repo.
:rtype: str
:return: Tag name to use as 'oldest' tag. May be special value,
indicating the creation of the repo.
"""
try:
sections = read_changelog(self.options)
return sections[0]["version"]
except(IOError, TypeError):
return self.get_temp_tag_for_repo_creation() | python | {
"resource": ""
} |
q261090 | Generator.get_temp_tag_for_repo_creation | validation | def get_temp_tag_for_repo_creation(self):
"""
If not already cached, fetch the creation date of the repo, cache it
and return the special value indicating the creation of the repo.
:rtype: str
:return: value indicating the creation
"""
tag_date = self.tag_times_dict.get(REPO_CREATED_TAG_NAME, None)
if not tag_date:
tag_name, tag_date = self.fetcher.fetch_repo_creation_date()
self.tag_times_dict[tag_name] = timestring_to_datetime(tag_date)
return REPO_CREATED_TAG_NAME | python | {
"resource": ""
} |
q261091 | Generator.filter_since_tag | validation | def filter_since_tag(self, all_tags):
"""
Filter tags according since_tag option.
:param list(dict) all_tags: All tags.
:rtype: list(dict)
:return: Filtered tags.
"""
tag = self.detect_since_tag()
if not tag or tag == REPO_CREATED_TAG_NAME:
return copy.deepcopy(all_tags)
filtered_tags = []
tag_names = [t["name"] for t in all_tags]
try:
idx = tag_names.index(tag)
except ValueError:
self.warn_if_tag_not_found(tag, "since-tag")
return copy.deepcopy(all_tags)
since_tag = all_tags[idx]
since_date = self.get_time_of_tag(since_tag)
for t in all_tags:
tag_date = self.get_time_of_tag(t)
if since_date <= tag_date:
filtered_tags.append(t)
return filtered_tags | python | {
"resource": ""
} |
q261092 | Generator.filter_due_tag | validation | def filter_due_tag(self, all_tags):
"""
Filter tags according due_tag option.
:param list(dict) all_tags: Pre-filtered tags.
:rtype: list(dict)
:return: Filtered tags.
"""
filtered_tags = []
tag = self.options.due_tag
tag_names = [t["name"] for t in all_tags]
try:
idx = tag_names.index(tag)
except ValueError:
self.warn_if_tag_not_found(tag, "due-tag")
return copy.deepcopy(all_tags)
due_tag = all_tags[idx]
due_date = self.get_time_of_tag(due_tag)
for t in all_tags:
tag_date = self.get_time_of_tag(t)
if tag_date <= due_date:
filtered_tags.append(t)
return filtered_tags | python | {
"resource": ""
} |
q261093 | Generator.filter_between_tags | validation | def filter_between_tags(self, all_tags):
"""
Filter tags according between_tags option.
:param list(dict) all_tags: Pre-filtered tags.
:rtype: list(dict)
:return: Filtered tags.
"""
tag_names = [t["name"] for t in all_tags]
between_tags = []
for tag in self.options.between_tags:
try:
idx = tag_names.index(tag)
except ValueError:
raise ChangelogGeneratorError(
"ERROR: can't find tag {0}, specified with "
"--between-tags option.".format(tag))
between_tags.append(all_tags[idx])
between_tags = self.sort_tags_by_date(between_tags)
if len(between_tags) == 1:
# if option --between-tags was only 1 tag given, duplicate it
# to generate the changelog only for that one tag.
between_tags.append(between_tags[0])
older = self.get_time_of_tag(between_tags[1])
newer = self.get_time_of_tag(between_tags[0])
for tag in all_tags:
if older < self.get_time_of_tag(tag) < newer:
between_tags.append(tag)
if older == newer:
between_tags.pop(0)
return between_tags | python | {
"resource": ""
} |
q261094 | Generator.filter_excluded_tags | validation | def filter_excluded_tags(self, all_tags):
"""
Filter tags according exclude_tags and exclude_tags_regex option.
:param list(dict) all_tags: Pre-filtered tags.
:rtype: list(dict)
:return: Filtered tags.
"""
filtered_tags = copy.deepcopy(all_tags)
if self.options.exclude_tags:
filtered_tags = self.apply_exclude_tags(filtered_tags)
if self.options.exclude_tags_regex:
filtered_tags = self.apply_exclude_tags_regex(filtered_tags)
return filtered_tags | python | {
"resource": ""
} |
q261095 | Generator.apply_exclude_tags_regex | validation | def apply_exclude_tags_regex(self, all_tags):
"""
Filter tags according exclude_tags_regex option.
:param list(dict) all_tags: Pre-filtered tags.
:rtype: list(dict)
:return: Filtered tags.
"""
filtered = []
for tag in all_tags:
if not re.match(self.options.exclude_tags_regex, tag["name"]):
filtered.append(tag)
if len(all_tags) == len(filtered):
self.warn_if_nonmatching_regex()
return filtered | python | {
"resource": ""
} |
q261096 | Generator.apply_exclude_tags | validation | def apply_exclude_tags(self, all_tags):
"""
Filter tags according exclude_tags option.
:param list(dict) all_tags: Pre-filtered tags.
:rtype: list(dict)
:return: Filtered tags.
"""
filtered = copy.deepcopy(all_tags)
for tag in all_tags:
if tag["name"] not in self.options.exclude_tags:
self.warn_if_tag_not_found(tag, "exclude-tags")
else:
filtered.remove(tag)
return filtered | python | {
"resource": ""
} |
q261097 | parse | validation | def parse(packet):
"""
Parses an APRS packet and returns a dict with decoded data
- All attributes are in metric units
"""
if not isinstance(packet, string_type_parse):
raise TypeError("Expected packet to be str/unicode/bytes, got %s", type(packet))
if len(packet) == 0:
raise ParseError("packet is empty", packet)
# attempt to detect encoding
if isinstance(packet, bytes):
packet = _unicode_packet(packet)
packet = packet.rstrip("\r\n")
logger.debug("Parsing: %s", packet)
# split into head and body
try:
(head, body) = packet.split(':', 1)
except:
raise ParseError("packet has no body", packet)
if len(body) == 0:
raise ParseError("packet body is empty", packet)
parsed = {
'raw': packet,
}
# parse head
try:
parsed.update(parse_header(head))
except ParseError as msg:
raise ParseError(str(msg), packet)
# parse body
packet_type = body[0]
body = body[1:]
if len(body) == 0 and packet_type != '>':
raise ParseError("packet body is empty after packet type character", packet)
# attempt to parse the body
try:
_try_toparse_body(packet_type, body, parsed)
# capture ParseErrors and attach the packet
except (UnknownFormat, ParseError) as exp:
exp.packet = packet
raise
# if we fail all attempts to parse, try beacon packet
if 'format' not in parsed:
if not re.match(r"^(AIR.*|ALL.*|AP.*|BEACON|CQ.*|GPS.*|DF.*|DGPS.*|"
"DRILL.*|DX.*|ID.*|JAVA.*|MAIL.*|MICE.*|QST.*|QTH.*|"
"RTCM.*|SKY.*|SPACE.*|SPC.*|SYM.*|TEL.*|TEST.*|TLM.*|"
"WX.*|ZIP.*|UIDIGI)$", parsed['to']):
raise UnknownFormat("format is not supported", packet)
parsed.update({
'format': 'beacon',
'text': packet_type + body,
})
logger.debug("Parsed ok.")
return parsed | python | {
"resource": ""
} |
q261098 | to_decimal | validation | def to_decimal(text):
"""
Takes a base91 char string and returns decimal
"""
if not isinstance(text, string_type):
raise TypeError("expected str or unicode, %s given" % type(text))
if findall(r"[\x00-\x20\x7c-\xff]", text):
raise ValueError("invalid character in sequence")
text = text.lstrip('!')
decimal = 0
length = len(text) - 1
for i, char in enumerate(text):
decimal += (ord(char) - 33) * (91 ** (length - i))
return decimal if text != '' else 0 | python | {
"resource": ""
} |
q261099 | from_decimal | validation | def from_decimal(number, width=1):
"""
Takes a decimal and returns base91 char string.
With optional parameter for fix with output
"""
text = []
if not isinstance(number, int_type):
raise TypeError("Expected number to be int, got %s", type(number))
elif not isinstance(width, int_type):
raise TypeError("Expected width to be int, got %s", type(number))
elif number < 0:
raise ValueError("Expected number to be positive integer")
elif number > 0:
max_n = ceil(log(number) / log(91))
for n in _range(int(max_n), -1, -1):
quotient, number = divmod(number, 91**n)
text.append(chr(33 + quotient))
return "".join(text).lstrip('!').rjust(max(1, width), '!') | python | {
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.