_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3
values | text stringlengths 31 13.1k | language stringclasses 1
value | meta_information dict |
|---|---|---|---|---|---|
q261000 | _validate_roles | validation | def _validate_roles(model):
"""Given the model, check that all the metadata role values
have valid information in them and any required metadata fields
contain values.
"""
required_roles = (ATTRIBUTED_ROLE_KEYS[0], ATTRIBUTED_ROLE_KEYS[4],)
for role_key in ATTRIBUTED_ROLE_KEYS:
try:
roles = model.metadata[role_key]
except KeyError:
if role_key in required_roles:
raise exceptions.MissingRequiredMetadata(role_key)
else:
| python | {
"resource": ""
} |
q261001 | _validate_subjects | validation | def _validate_subjects(cursor, model):
"""Given a database cursor and model, check the subjects against
the subject vocabulary.
"""
subject_vocab = [term[0] for term in acquire_subject_vocabulary(cursor)]
subjects = model.metadata.get('subjects', [])
invalid_subjects = [s | python | {
"resource": ""
} |
q261002 | validate_model | validation | def validate_model(cursor, model):
"""Validates the model using a series of checks on bits of the data."""
# Check the license is one valid for publication.
_validate_license(model)
_validate_roles(model)
# Other required metadata includes: title, summary
required_metadata = ('title', 'summary',)
for metadata_key in required_metadata:
if model.metadata.get(metadata_key) in [None, '', []]:
| python | {
"resource": ""
} |
q261003 | lookup_document_pointer | validation | def lookup_document_pointer(ident_hash, cursor):
"""Lookup a document by id and version."""
id, version = split_ident_hash(ident_hash, split_version=True)
stmt = "SELECT name FROM modules WHERE uuid = %s"
args = [id]
if version and version[0] is not None:
operator = version[1] is None and 'is' or '='
stmt += " AND (major_version = %s AND minor_version {} %s)" \
| python | {
"resource": ""
} |
q261004 | _node_to_model | validation | def _node_to_model(tree_or_item, metadata=None, parent=None,
lucent_id=cnxepub.TRANSLUCENT_BINDER_ID):
"""Given a tree, parse to a set of models"""
if 'contents' in tree_or_item:
# It is a binder.
tree = tree_or_item
binder = cnxepub.TranslucentBinder(metadata=tree)
for item in tree['contents']:
node = _node_to_model(item, parent=binder,
lucent_id=lucent_id)
if node.metadata['title'] != item['title']:
| python | {
"resource": ""
} |
q261005 | _reassemble_binder | validation | def _reassemble_binder(id, tree, metadata):
"""Reassemble a Binder object coming out of the database."""
binder = cnxepub.Binder(id, metadata=metadata)
for item in tree['contents']:
node = | python | {
"resource": ""
} |
q261006 | get_moderation | validation | def get_moderation(request):
"""Return the list of publications that need moderation."""
with db_connect() as db_conn:
with db_conn.cursor() as cursor:
cursor.execute("""\
SELECT row_to_json(combined_rows) FROM (
SELECT id, created, publisher, publication_message,
(select array_agg(row_to_json(pd))
from pending_documents as pd
| python | {
"resource": ""
} |
q261007 | includeme | validation | def includeme(config):
"""Configures the session manager"""
settings = config.registry.settings
| python | {
"resource": ""
} |
q261008 | admin_print_styles | validation | def admin_print_styles(request):
"""
Returns a dictionary of all unique print_styles, and their latest tag,
revision, and recipe_type.
"""
styles = []
# This fetches all recipes that have been used to successfully bake a
# current book plus all default recipes that have not yet been used
# as well as "bad" books that are not "current" state, but would otherwise
# be the latest/current for that book
with db_connect(cursor_factory=DictCursor) as db_conn:
with db_conn.cursor() as cursor:
cursor.execute("""\
WITH latest AS (SELECT print_style, recipe,
count(*), count(nullif(stateid, 1)) as bad
FROM modules m
WHERE portal_type = 'Collection'
AND recipe IS NOT NULL
AND (
baked IS NOT NULL OR (
baked IS NULL AND stateid not in (1,8)
)
)
AND ARRAY [major_version, minor_version] = (
SELECT max(ARRAY[major_version,minor_version]) FROM
modules where m.uuid= uuid)
GROUP BY print_style, recipe
),
defaults AS (SELECT print_style, fileid AS recipe
FROM default_print_style_recipes d
WHERE not exists (SELECT 1
FROM latest WHERE latest.recipe = d.fileid)
)
SELECT coalesce(ps.print_style, '(custom)') as print_style,
ps.title, coalesce(ps.recipe_type, 'web') as type,
ps.revised, ps.tag, ps.commit_id, la.count, la.bad
FROM latest la LEFT JOIN print_style_recipes ps ON
| python | {
"resource": ""
} |
q261009 | get_api_keys | validation | def get_api_keys(request):
"""Return the list of API keys."""
with db_connect() as db_conn:
with db_conn.cursor() as cursor:
cursor.execute("""\
SELECT row_to_json(combined_rows) FROM (
SELECT id, | python | {
"resource": ""
} |
q261010 | admin_content_status_single | validation | def admin_content_status_single(request):
"""
Returns a dictionary with all the past baking statuses of a single book.
"""
uuid = request.matchdict['uuid']
try:
UUID(uuid)
except ValueError:
raise httpexceptions.HTTPBadRequest(
'{} is not a valid uuid'.format(uuid))
statement, sql_args = get_baking_statuses_sql({'uuid': uuid})
with db_connect(cursor_factory=DictCursor) as db_conn:
with db_conn.cursor() as cursor:
cursor.execute(statement, sql_args)
modules = cursor.fetchall()
if len(modules) == 0:
raise httpexceptions.HTTPBadRequest(
'{} is not a book'.format(uuid))
states = []
collection_info = modules[0]
for row in modules:
message = '' | python | {
"resource": ""
} |
q261011 | _insert_metadata | validation | def _insert_metadata(cursor, model, publisher, message):
"""Insert a module with the given ``metadata``."""
params = model.metadata.copy()
params['publisher'] = publisher
params['publication_message'] = message
params['_portal_type'] = _model_to_portaltype(model)
params['summary'] = str(cnxepub.DocumentSummaryFormatter(model))
# Transform person structs to id lists for database array entry.
for person_field in ATTRIBUTED_ROLE_KEYS:
params[person_field] = [parse_user_uri(x['id'])
for x in params.get(person_field, [])]
params['parent_ident_hash'] = parse_parent_ident_hash(model)
# Assign the id and version if one is known.
if model.ident_hash is not None:
uuid, version = split_ident_hash(model.ident_hash,
split_version=True)
params['_uuid'] = uuid
params['_major_version'], params['_minor_version'] = version
# Lookup legacy ``moduleid``.
cursor.execute("SELECT moduleid FROM latest_modules WHERE uuid = %s",
(uuid,))
# There is the chance that a uuid and version have been set,
# but a previous publication does not exist. Therefore the
# moduleid will not be found. This happens on a pre-publication.
try:
moduleid = cursor.fetchone()[0]
except TypeError: # NoneType
moduleid = None
params['_moduleid'] = moduleid
    # Verify that uuid is reserved in document_controls. If not, add it.
cursor.execute("SELECT * from document_controls where uuid = %s",
(uuid,))
try:
cursor.fetchone()[0]
except TypeError: # NoneType
cursor.execute("INSERT INTO document_controls (uuid) VALUES (%s)",
(uuid,))
created = model.metadata.get('created', None)
# Format the statement to accept the identifiers.
stmt = MODULE_INSERTION_TEMPLATE.format(**{
'__uuid__': "%(_uuid)s::uuid",
| python | {
"resource": ""
} |
q261012 | _get_file_sha1 | validation | def _get_file_sha1(file):
    """Return the SHA1 hash of the given file-like object as ``file``.
This will seek the file back to 0 when it's finished.
"""
| python | {
"resource": ""
} |
q261013 | _insert_file | validation | def _insert_file(cursor, file, media_type):
"""Upsert the ``file`` and ``media_type`` into the files table.
Returns the ``fileid`` and ``sha1`` of the upserted file.
"""
resource_hash = _get_file_sha1(file)
cursor.execute("SELECT fileid FROM files WHERE sha1 = %s",
(resource_hash,))
try:
fileid = cursor.fetchone()[0]
except (IndexError, TypeError):
cursor.execute("INSERT INTO files (file, media_type) "
| python | {
"resource": ""
} |
q261014 | publish | validation | def publish(request):
"""Accept a publication request at form value 'epub'"""
if 'epub' not in request.POST:
raise httpexceptions.HTTPBadRequest("Missing EPUB in POST body.")
is_pre_publication = asbool(request.POST.get('pre-publication'))
epub_upload = request.POST['epub'].file
try:
epub = cnxepub.EPUB.from_file(epub_upload)
except: # noqa: E722
raise httpexceptions.HTTPBadRequest('Format not recognized.')
# Make a publication entry in the database for status checking
# the publication. This also creates publication entries for all
# of the content in the EPUB.
with db_connect() as db_conn:
with db_conn.cursor() as cursor:
epub_upload.seek(0) | python | {
"resource": ""
} |
q261015 | get_publication | validation | def get_publication(request):
"""Lookup publication state"""
publication_id = request.matchdict['id']
state, messages = check_publication_state(publication_id)
response_data = {
'publication': | python | {
"resource": ""
} |
q261016 | includeme | validation | def includeme(config):
"""Configures the caching manager"""
global cache_manager
settings = config.registry.settings
| python | {
"resource": ""
} |
q261017 | CaseDict.get | validation | def get(self, key, default=_sentinel):
"""
Gets the value from the key.
    If the key doesn't exist, the default value is returned if given, otherwise None.
:param key: The key
:param default: The default value
:return: The value
"""
| python | {
"resource": ""
} |
q261018 | CaseDict.pop | validation | def pop(self, key, default=_sentinel):
"""
Removes the specified key and returns the corresponding value.
If key is not found, the default is returned if given, otherwise KeyError is raised.
:param key: The key
| python | {
"resource": ""
} |
q261019 | reversals | validation | def reversals(series, left=False, right=False):
"""Iterate reversal points in the series.
A reversal point is a point in the series at which the first derivative
changes sign. Reversal is undefined at the first (last) point because the
derivative before (after) this point is undefined. The first and the last
points may be treated as reversals by setting the optional parameters
`left` and `right` to True.
Parameters
----------
series : iterable sequence of numbers
left: bool, optional
If True, yield the first point in the series (treat it as a reversal).
right: bool, optional
If True, yield the last point in the series (treat it as a reversal).
| python | {
"resource": ""
} |
q261020 | _sort_lows_and_highs | validation | def _sort_lows_and_highs(func):
"Decorator for extract_cycles"
@functools.wraps(func)
def wrapper(*args, **kwargs):
for low, high, mult in func(*args, **kwargs):
if low < high:
| python | {
"resource": ""
} |
q261021 | extract_cycles | validation | def extract_cycles(series, left=False, right=False):
"""Iterate cycles in the series.
Parameters
----------
series : iterable sequence of numbers
left: bool, optional
If True, treat the first point in the series as a reversal.
right: bool, optional
If True, treat the last point in the series as a reversal.
Yields
------
cycle : tuple
Each tuple contains three floats (low, high, mult), where low and high
define cycle amplitude and mult equals to 1.0 for full cycles and 0.5
for half cycles.
"""
points = deque()
for x in reversals(series, left=left, right=right):
points.append(x)
while len(points) >= 3:
# Form ranges X and Y from the three most recent points
X = abs(points[-2] - points[-1])
Y = abs(points[-3] - points[-2])
if X < Y:
# Read the next point
break
| python | {
"resource": ""
} |
q261022 | count_cycles | validation | def count_cycles(series, ndigits=None, left=False, right=False):
"""Count cycles in the series.
Parameters
----------
series : iterable sequence of numbers
ndigits : int, optional
Round cycle magnitudes to the given number of digits before counting.
left: bool, optional
If True, treat the first point in the series as a reversal.
right: bool, optional
If True, treat the last point in the series as a reversal.
Returns
-------
A sorted list containing pairs of cycle magnitude and count.
One-half cycles are counted as 0.5, so the returned counts may not be
| python | {
"resource": ""
} |
q261023 | render | validation | def render(node, strict=False):
"""Recipe to render a given FST node.
The FST is composed of branch nodes which are either lists or dicts
and of leaf nodes which are strings. Branch nodes can have other
    list, dict or leaf nodes as children.
To render a string, simply output it. To render a list, render each
of its elements in order. To render a dict, you must follow the
node's entry in the nodes_rendering_order dictionary and its
dependents constraints.
This function hides all this algorithmic complexity by returning
a structured rendering recipe, whatever the type of node. But even
better, you should subclass the RenderWalker which simplifies
drastically working with the rendered FST.
The recipe | python | {
"resource": ""
} |
q261024 | path_to_node | validation | def path_to_node(tree, path):
"""FST node located at the given path"""
if path is None:
return None
node | python | {
"resource": ""
} |
q261025 | PositionFinder.before_constant | validation | def before_constant(self, constant, key):
"""Determine if we're on the targeted node.
        If the targeted column is reached, `stop` and `path_found` are
        set. If the targeted line is passed, only `stop` is set. This
        prevents unnecessary tree travelling when the targeted column
is out of bounds.
"""
newlines_split = split_on_newlines(constant)
for c in newlines_split:
if is_newline(c):
self.current.advance_line()
# if target line is passed
| python | {
"resource": ""
} |
q261026 | get_prefix | validation | def get_prefix(multicodec):
"""
Returns prefix for a given multicodec
:param str multicodec: multicodec codec name
:return: the prefix for the given multicodec
:rtype: byte
:raises ValueError: if an invalid multicodec name is provided
"""
try:
prefix = | python | {
"resource": ""
} |
q261027 | add_prefix | validation | def add_prefix(multicodec, bytes_):
"""
Adds multicodec prefix to the given bytes input
:param str multicodec: multicodec to use for prefixing
:param bytes bytes_: data to prefix
| python | {
"resource": ""
} |
q261028 | remove_prefix | validation | def remove_prefix(bytes_):
"""
Removes prefix from a prefixed data
:param bytes bytes_: multicodec prefixed data bytes
:return: prefix removed data bytes
:rtype: bytes
| python | {
"resource": ""
} |
q261029 | get_codec | validation | def get_codec(bytes_):
"""
Gets the codec used for prefix the multicodec prefixed data
:param bytes bytes_: multicodec prefixed data bytes
:return: name of the multicodec used to prefix
:rtype: str
"""
prefix = extract_prefix(bytes_)
try:
| python | {
"resource": ""
} |
q261030 | capture | validation | def capture(
target_url,
user_agent="archiveis (https://github.com/pastpages/archiveis)",
proxies={}
):
"""
Archives the provided URL using archive.is
Returns the URL where the capture is stored.
"""
# Put together the URL that will save our request
domain = "http://archive.vn"
save_url = urljoin(domain, "/submit/")
# Configure the request headers
headers = {
'User-Agent': user_agent,
"host": "archive.vn",
}
# Request a unique identifier for our activity
logger.debug("Requesting {}".format(domain + "/"))
get_kwargs = dict(
timeout=120,
allow_redirects=True,
headers=headers,
)
if proxies:
get_kwargs['proxies'] = proxies
response = requests.get(domain + "/", **get_kwargs)
response.raise_for_status()
# It will need to be parsed from the homepage response headers
html = str(response.content)
try:
unique_id = html.split('name="submitid', 1)[1].split('value="', 1)[1].split('"', 1)[0]
logger.debug("Unique identifier: {}".format(unique_id))
except IndexError:
logger.warn("Unable to extract unique identifier from archive.is. Submitting without it.")
unique_id = None
# Send the capture request to archive.is with the unique id included
data = {
"url": target_url,
"anyway": 1,
}
if unique_id:
data.update({"submitid": unique_id})
post_kwargs = dict(
timeout=120,
allow_redirects=True,
headers=headers,
data=data
)
if proxies:
post_kwargs['proxies'] = | python | {
"resource": ""
} |
q261031 | cli | validation | def cli(url, user_agent):
"""
Archives the provided URL using archive.is.
"""
kwargs = {}
if user_agent:
kwargs['user_agent'] = | python | {
"resource": ""
} |
q261032 | LiveboxPlayTv.get_channel_image | validation | def get_channel_image(self, channel, img_size=300, skip_cache=False):
"""Get the logo for a channel"""
from bs4 import BeautifulSoup
from wikipedia.exceptions import PageError
import re
import wikipedia
wikipedia.set_lang('fr')
if not channel:
_LOGGER.error('Channel is not set. Could not retrieve image.')
return
# Check if the image is in cache
if channel in self._cache_channel_img and not skip_cache:
img = self._cache_channel_img[channel]
_LOGGER.debug('Cache hit: %s -> %s', channel, img)
return img
channel_info = self.get_channel_info(channel)
query = channel_info['wiki_page']
if not query:
_LOGGER.debug('Wiki page is not set for channel %s', channel)
return
_LOGGER.debug('Query: %s', query)
# If there is a max image size defined use it.
if 'max_img_size' in channel_info:
if img_size > channel_info['max_img_size']:
_LOGGER.info(
'Requested image size is bigger than the max, '
'setting it to %s', channel_info['max_img_size']
| python | {
"resource": ""
} |
q261033 | LessLexer.t_t_eopen | validation | def t_t_eopen(self, t):
r'~"|~\''
if t.value[1] == '"':
t.lexer.push_state('escapequotes')
elif | python | {
"resource": ""
} |
q261034 | LessLexer.t_t_isopen | validation | def t_t_isopen(self, t):
r'"|\''
if t.value[0] == '"':
t.lexer.push_state('istringquotes')
| python | {
"resource": ""
} |
q261035 | LessLexer.t_istringapostrophe_css_string | validation | def t_istringapostrophe_css_string(self, t):
r'[^\'@]+'
| python | {
"resource": ""
} |
q261036 | LessLexer.file | validation | def file(self, filename):
"""
Lex file.
"""
with open(filename) as f:
| python | {
"resource": ""
} |
q261037 | LessLexer.input | validation | def input(self, file):
"""
Load lexer with content from `file` which can be a path or a file
like object.
"""
if isinstance(file, string_types):
with open(file) as f: | python | {
"resource": ""
} |
q261038 | Scope._smixins | validation | def _smixins(self, name):
"""Inner wrapper to search for mixins by name.
"""
| python | {
"resource": ""
} |
q261039 | Scope._blocks | validation | def _blocks(self, name):
"""Inner wrapper to search for blocks by name.
"""
i = len(self)
while i >= 0:
i -= 1
if name in self[i]['__names__']:
for b in self[i]['__blocks__']:
r = b.raw()
if r and r == name:
return b
| python | {
"resource": ""
} |
q261040 | Mixin.parse_args | validation | def parse_args(self, args, scope):
"""Parse arguments to mixin. Add them to scope
        as variables. Sets up special variable @arguments
as well.
args:
args (list): arguments
scope (Scope): current scope
raises:
SyntaxError
"""
arguments = list(zip(args,
[' '] * len(args))) if args and args[0] else None
zl = itertools.zip_longest if sys.version_info[
0] == 3 else itertools.izip_longest
if self.args:
parsed = [
v if hasattr(v, 'parse') else v for v in copy.copy(self.args)
]
args = args if isinstance(args, list) else [args]
vars = [
self._parse_arg(var, arg, scope)
| python | {
"resource": ""
} |
q261041 | Color.mix | validation | def mix(self, color1, color2, weight=50, *args):
"""This algorithm factors in both the user-provided weight
and the difference between the alpha values of the two colors
to decide how to perform the weighted average of the two RGB values.
It works by first normalizing both parameters to be within [-1, 1],
where 1 indicates "only use color1", -1 indicates "only use color 0",
and all values in between indicated a proportionately weighted average.
Once we have the normalized variables w and a,
we apply the formula (w + a)/(1 + w*a)
to get the combined weight (in [-1, 1]) of color1.
This formula has two especially nice properties:
* When either w or a are -1 or 1, the combined weight is also that number
(cases where w * a == -1 are undefined, and handled as a special case).
* When a is 0, the combined weight is w, and vice versa
Finally, the weight of color1 is renormalized to be within [0, 1]
and the weight of color2 is given by 1 minus the weight of color1.
Copyright (c) 2006-2009 Hampton Catlin, Nathan Weizenbaum, and Chris Eppstein
http://sass-lang.com
args:
color1 (str): first color
color2 (str): second color
| python | {
"resource": ""
} |
q261042 | reverse_guard | validation | def reverse_guard(lst):
""" Reverse guard expression. not
(@a > 5) -> (@a =< 5)
Args:
lst | python | {
"resource": ""
} |
q261043 | away_from_zero_round | validation | def away_from_zero_round(value, ndigits=0):
"""Round half-way away from zero.
Python2's round() method.
"""
if sys.version_info[0] >= 3:
p = | python | {
"resource": ""
} |
q261044 | convergent_round | validation | def convergent_round(value, ndigits=0):
"""Convergent rounding.
    Round to nearest even, similar to Python3's round() method.
"""
if sys.version_info[0] < 3:
if value < 0.0:
return -convergent_round(-value)
| python | {
"resource": ""
} |
q261045 | permutations_with_replacement | validation | def permutations_with_replacement(iterable, r=None):
"""Return successive r length permutations of elements in the iterable.
    Similar to itertools.permutations but without | python | {
"resource": ""
} |
q261046 | LessParser.post_parse | validation | def post_parse(self):
""" Post parse cycle. nodejs version allows calls to mixins
not yet defined or known to the parser. We defer all calls
to mixins until after first cycle when all names are known.
"""
if self.result:
out = []
for pu in self.result:
try:
| python | {
"resource": ""
} |
q261047 | NextPage | validation | def NextPage(gh):
"""
Checks if a GitHub call returned multiple pages of data.
:param gh: GitHub() instance
:rtype: int
:return: number of next page or 0 if no next page
"""
header = dict(gh.getheaders())
if 'Link' in header:
parts = header['Link'].split(',')
for part in parts:
subparts = part.split(';')
sub = subparts[1].split('=')
if sub[0].strip() == 'rel':
if sub[1] == '"next"':
| python | {
"resource": ""
} |
q261048 | Fetcher.get_all_tags | validation | def get_all_tags(self):
"""
Fetch all tags for repository from Github.
:return: tags in repository
:rtype: list
"""
verbose = self.options.verbose
gh = self.github
user = self.options.user
repo = self.options.project
if verbose:
print("Fetching tags...")
tags = []
page = 1
while page > 0:
if verbose > 2:
print(".", end="")
| python | {
"resource": ""
} |
q261049 | Fetcher.fetch_closed_pull_requests | validation | def fetch_closed_pull_requests(self):
"""
Fetch all pull requests. We need them to detect "merged_at" parameter
:rtype: list
:return: all pull requests
"""
pull_requests = []
verbose = self.options.verbose
gh = self.github
user = self.options.user
repo = self.options.project
if verbose:
print("Fetching closed pull requests...")
page = 1
while page > 0:
if verbose > 2:
print(".", end="")
if self.options.release_branch:
rc, data = gh.repos[user][repo].pulls.get(
page=page, per_page=PER_PAGE_NUMBER, state='closed',
base=self.options.release_branch
)
else:
rc, data = gh.repos[user][repo].pulls.get(
page=page, per_page=PER_PAGE_NUMBER, | python | {
"resource": ""
} |
q261050 | Fetcher.fetch_repo_creation_date | validation | def fetch_repo_creation_date(self):
"""
Get the creation date of the repository from GitHub.
:rtype: str, str
:return: special tag name, creation date as ISO date string
"""
gh = self.github
user = self.options.user
repo = self.options.project
| python | {
"resource": ""
} |
q261051 | Fetcher.fetch_events_async | validation | def fetch_events_async(self, issues, tag_name):
"""
Fetch events for all issues and add them to self.events
:param list issues: all issues
:param str tag_name: name of the tag to fetch events for
:returns: Nothing
"""
if not issues:
return issues
max_simultaneous_requests = self.options.max_simultaneous_requests
verbose = self.options.verbose
gh = self.github
user = self.options.user
repo = self.options.project
self.events_cnt = 0
if verbose:
print("fetching events for {} {}... ".format(
len(issues), tag_name)
)
def worker(issue):
page = 1
issue['events'] = []
while page > 0:
rc, data = gh.repos[user][repo].issues[
issue['number']].events.get(
page=page, per_page=PER_PAGE_NUMBER)
if rc == 200:
issue['events'].extend(data)
self.events_cnt += len(data)
| python | {
"resource": ""
} |
q261052 | Fetcher.fetch_date_of_tag | validation | def fetch_date_of_tag(self, tag):
"""
Fetch time for tag from repository.
:param dict tag: dictionary with tag information
:rtype: str
:return: time of specified tag as ISO date string
"""
if self.options.verbose > 1:
| python | {
"resource": ""
} |
q261053 | Fetcher.fetch_commit | validation | def fetch_commit(self, event):
"""
Fetch commit data for specified event.
:param dict event: dictionary with event information
:rtype: dict
:return: dictionary with commit data
"""
gh = self.github
user = self.options.user
| python | {
"resource": ""
} |
q261054 | ChangelogGenerator.run | validation | def run(self):
"""
The entry point of this script to generate change log
'ChangelogGeneratorError' Is thrown when one
of the specified tags was not found in list of tags.
"""
if not self.options.project or not self.options.user:
print("Project and/or user missing. "
"For help run:\n pygcgen --help")
return
if not self.options.quiet:
print("Generating changelog...")
log = None
try:
| python | {
"resource": ""
} |
q261055 | parse | validation | def parse(data):
"""
Parse the given ChangeLog data into a list of Hashes.
@param [String] data File data from the ChangeLog.md
@return [Array<Hash>] Parsed data, e.g. [{ 'version' => ..., 'url' => ..., 'date' => ..., 'content' => | python | {
"resource": ""
} |
q261056 | DaemonContext._signal_handler_map | validation | def _signal_handler_map(self):
""" Create the signal handler map
create a dictionary with signal:handler mapping based on
self.signal_map
:return: dict
"""
result = {}
| python | {
"resource": ""
} |
q261057 | DaemonContext.open | validation | def open(self):
""" Daemonize this process
Do everything that is needed to become a Unix daemon.
:return: None
:raise: DaemonError
"""
if self.is_open:
return
try:
os.chdir(self.working_directory)
if self.chroot_directory:
os.chroot(self.chroot_directory)
os.setgid(self.gid)
os.setuid(self.uid)
os.umask(self.umask)
except OSError as err:
raise DaemonError('Setting up Environment failed: {0}'
.format(err))
if self.prevent_core:
try:
resource.setrlimit(resource.RLIMIT_CORE, (0, 0))
except Exception as err:
raise DaemonError('Could not disable core files: {0}'
.format(err))
if self.detach_process:
try:
if os.fork() > 0:
os._exit(0)
| python | {
"resource": ""
} |
q261058 | OptionsParser.user_and_project_from_git | validation | def user_and_project_from_git(self, options, arg0=None, arg1=None):
""" Detects user and project from git. """
user, project = self.user_project_from_option(options, arg0, arg1)
if user and project:
return user, project
try:
remote = subprocess.check_output(
[
'git', 'config', '--get',
'remote.{0}.url'.format(options.git_remote)
| python | {
"resource": ""
} |
q261059 | timestring_to_datetime | validation | def timestring_to_datetime(timestring):
"""
Convert an ISO formated date and time string to a datetime object.
:param str timestring: String with date and time in | python | {
"resource": ""
} |
q261060 | Generator.fetch_events_for_issues_and_pr | validation | def fetch_events_for_issues_and_pr(self):
"""
Fetch event for issues and pull requests
@return [Array] array of fetched issues | python | {
"resource": ""
} |
q261061 | Generator.fetch_tags_dates | validation | def fetch_tags_dates(self):
""" Async fetching of all tags dates. """
if self.options.verbose:
print(
"Fetching dates for {} tags...".format(len(self.filtered_tags))
)
def worker(tag):
self.get_time_of_tag(tag)
# Async fetching tags:
threads = []
max_threads = 50
cnt = len(self.filtered_tags)
for i in range(0, (cnt // max_threads) + 1):
for j in range(max_threads):
idx = i * 50 + j
if idx == cnt:
break
t = threading.Thread(target=worker,
args=(self.filtered_tags[idx],))
| python | {
"resource": ""
} |
q261062 | Generator.detect_actual_closed_dates | validation | def detect_actual_closed_dates(self, issues, kind):
"""
Find correct closed dates, if issues was closed by commits.
:param list issues: issues to check
:param str kind: either "issues" or "pull requests"
:rtype: list
:return: issues with updated closed dates
"""
if self.options.verbose:
print("Fetching closed dates for {} {}...".format(
len(issues), kind)
| python | {
"resource": ""
} |
q261063 | Generator.find_closed_date_by_commit | validation | def find_closed_date_by_commit(self, issue):
"""
Fill "actual_date" parameter of specified issue by closed date of
the commit, if it was closed by commit.
:param dict issue: issue to edit
"""
if not issue.get('events'):
return
# if it's PR -> then find "merged event", in case
# of usual issue -> find closed date
compare_string = "merged" if 'merged_at' in issue else "closed"
# reverse! - to find latest closed event. (event goes in date order)
# if it were reopened and closed again.
issue['events'].reverse()
found_date = False
for event in issue['events']:
| python | {
"resource": ""
} |
q261064 | Generator.set_date_from_event | validation | def set_date_from_event(self, event, issue):
"""
Set closed date from this issue.
:param dict event: event data
:param dict issue: issue data
"""
if not event.get('commit_id', None):
issue['actual_date'] = timestring_to_datetime(issue['closed_at'])
return
try:
commit = self.fetcher.fetch_commit(event)
issue['actual_date'] = timestring_to_datetime(
commit['author']['date']
| python | {
"resource": ""
} |
q261065 | Generator.encapsulate_string | validation | def encapsulate_string(raw_string):
"""
Encapsulate characters to make markdown look as expected.
:param str raw_string: string to encapsulate
:rtype: str
:return: encapsulated input string
"""
| python | {
"resource": ""
} |
q261066 | Generator.compound_changelog | validation | def compound_changelog(self):
"""
Main function to start change log generation
:rtype: str
:return: Generated change log file
"""
self.fetch_and_filter_tags()
tags_sorted = self.sort_tags_by_date(self.filtered_tags)
self.filtered_tags = tags_sorted
self.fetch_and_filter_issues_and_pr()
| python | {
"resource": ""
} |
q261067 | Generator.generate_sub_section | validation | def generate_sub_section(self, issues, prefix):
"""
Generate formated list of issues for changelog.
:param list issues: Issues to put in sub-section.
:param str prefix: Title of sub-section.
:rtype: str
:return: Generated ready-to-add sub-section.
| python | {
"resource": ""
} |
q261068 | Generator.generate_header | validation | def generate_header(self, newer_tag_name, newer_tag_link,
newer_tag_time,
older_tag_link, project_url):
"""
Generate a header for a tag section with specific parameters.
:param str newer_tag_name: Name (title) of newer tag.
:param str newer_tag_link: Tag name of newer tag, used for links.
Could be same as **newer_tag_name** or some
specific value, like `HEAD`.
:param datetime newer_tag_time: Date and time when
newer tag was created.
:param str older_tag_link: Tag name of older tag, used for links.
:param str project_url: URL for current project.
:rtype: str
:return: Generated ready-to-add tag section.
"""
log = ""
# Generate date string:
# noinspection PyUnresolvedReferences
time_string = newer_tag_time.strftime(self.options.date_format)
# Generate tag name and link
if self.options.release_url:
release_url = self.options.release_url.format(newer_tag_link)
else:
release_url = u"{project_url}/tree/{newer_tag_link}".format(
project_url=project_url, newer_tag_link=newer_tag_link)
if not self.options.unreleased_with_date and \
newer_tag_name == self.options.unreleased_label:
log += u"## [{newer_tag_name}]({release_url})\n\n".format(
| python | {
"resource": ""
} |
q261069 | Generator.generate_log_between_tags | validation | def generate_log_between_tags(self, older_tag, newer_tag):
"""
Generate log between 2 specified tags.
:param dict older_tag: All issues before this tag's date will be
excluded. May be special value, if new tag is
the first tag. (Means **older_tag** is when
the repo was created.)
:param dict newer_tag: All issues after this tag's date will be
excluded. May be title of unreleased section.
:rtype: str
:return: Generated ready-to-add tag section for newer tag.
| python | {
"resource": ""
} |
q261070 | Generator.filter_issues_for_tags | validation | def filter_issues_for_tags(self, newer_tag, older_tag):
"""
Apply all filters to issues and pull requests.
:param dict older_tag: All issues before this tag's date will be
excluded. May be special value, if new tag is
the first tag. (Means **older_tag** is when
the repo was created.)
:param dict newer_tag: All issues after this tag's date will be
excluded. May be title of unreleased section.
:rtype: list(dict), list(dict)
:return: Filtered issues and pull requests.
"""
filtered_pull_requests = self.delete_by_time(self.pull_requests,
older_tag, newer_tag)
filtered_issues = self.delete_by_time(self.issues, older_tag,
| python | {
"resource": ""
} |
q261071 | Generator.generate_log_for_all_tags | validation | def generate_log_for_all_tags(self):
"""
The full cycle of generation for whole project.
:rtype: str
:return: The complete change log for released tags.
"""
if self.options.verbose:
print("Generating log...")
self.issues2 = copy.deepcopy(self.issues)
log1 = ""
if self.options.with_unreleased:
log1 = self.generate_unreleased_section()
log = ""
for index in range(len(self.filtered_tags) - 1):
log += self.do_generate_log_for_all_tags_part1(log, index)
| python | {
"resource": ""
} |
q261072 | Generator.generate_unreleased_section | validation | def generate_unreleased_section(self):
"""
Generate log for unreleased closed issues.
:rtype: str
:return: Generated ready-to-add unreleased section.
"""
if not self.filtered_tags:
return ""
now = datetime.datetime.utcnow()
now = now.replace(tzinfo=dateutil.tz.tzutc())
| python | {
"resource": ""
} |
q261073 | Generator.get_string_for_issue | validation | def get_string_for_issue(self, issue):
"""
Parse issue and generate single line formatted issue line.
Example output:
- Add coveralls integration [\#223](https://github.com/skywinder/github-changelog-generator/pull/223) ([skywinder](https://github.com/skywinder))
- Add coveralls integration [\#223](https://github.com/skywinder/github-changelog-generator/pull/223) (@skywinder)
:param dict issue: Fetched issue from GitHub.
:rtype: str
:return: Markdown-formatted single issue.
"""
encapsulated_title = self.encapsulate_string(issue['title'])
try:
title_with_number = u"{0} [\\#{1}]({2})".format(
| python | {
"resource": ""
} |
q261074 | Generator.issue_line_with_user | validation | def issue_line_with_user(self, line, issue):
"""
If option author is enabled, a link to the profile of the author
of the pull reqest will be added to the issue line.
:param str line: String containing a markdown-formatted single issue.
:param dict issue: Fetched issue from GitHub.
:rtype: str
:return: Issue line with added author link.
"""
if not issue.get("pull_request") or not self.options.author:
return line
if not issue.get("user"):
| python | {
"resource": ""
} |
q261075 | Generator.generate_log_for_tag | validation | def generate_log_for_tag(self,
pull_requests,
issues,
newer_tag,
older_tag_name):
"""
Generates log for tag section with header and body.
:param list(dict) pull_requests: List of PR's in this tag section.
:param list(dict) issues: List of issues in this tag section.
:param dict newer_tag: Github data of tag for this section.
:param str older_tag_name: Older tag, used for the links.
May be special value, if **newer tag** is
the first tag. (Means **older_tag** is when
the repo was created.)
:rtype: str
:return: Ready-to-add and parsed tag section.
"""
newer_tag_link, newer_tag_name, \
newer_tag_time = self.detect_link_tag_time(newer_tag)
github_site = "https://github.com" or self.options.github_endpoint
project_url = "{0}/{1}/{2}".format(
| python | {
"resource": ""
} |
q261076 | Generator.issues_to_log | validation | def issues_to_log(self, issues, pull_requests):
"""
Generate ready-to-paste log from list of issues and pull requests.
:param list(dict) issues: List of issues in this tag section.
:param list(dict) pull_requests: List of PR's in this tag section.
:rtype: str
:return: Generated log for issues and pull requests.
"""
log = ""
sections_a, issues_a = self.parse_by_sections(
issues, pull_requests) | python | {
"resource": ""
} |
q261077 | Generator.exclude_issues_by_labels | validation | def exclude_issues_by_labels(self, issues):
"""
Delete all issues with labels from exclude-labels option.
:param list(dict) issues: All issues for tag.
:rtype: list(dict)
:return: Filtered issues.
"""
if not self.options.exclude_labels:
return copy.deepcopy(issues)
remove_issues = set()
exclude_labels = self.options.exclude_labels
include_issues = []
for issue in issues:
for label in issue["labels"]:
| python | {
"resource": ""
} |
q261078 | Generator.find_issues_to_add | validation | def find_issues_to_add(all_issues, tag_name):
"""
Add all issues, that should be in that tag, according to milestone.
:param list(dict) all_issues: All issues.
:param str tag_name: Name (title) of tag.
| python | {
"resource": ""
} |
q261079 | Generator.delete_by_time | validation | def delete_by_time(self, issues, older_tag, newer_tag):
"""
Filter issues that belong to specified tag range.
:param list(dict) issues: Issues to filter.
:param dict older_tag: All issues before this tag's date will be
excluded. May be special value, if **newer_tag**
is the first tag. (Means **older_tag** is when
the repo was created.)
:param dict newer_tag: All issues after this tag's date will | python | {
"resource": ""
} |
q261080 | Generator.include_issues_by_labels | validation | def include_issues_by_labels(self, all_issues):
"""
Include issues with labels, specified in self.options.include_labels.
:param list(dict) all_issues: All issues.
:rtype: list(dict)
:return: Filtered issues.
"""
included_by_labels = self.filter_by_include_labels(all_issues)
wo_labels = self.filter_wo_labels(all_issues)
il = set([f["number"] for f in included_by_labels])
| python | {
"resource": ""
} |
q261081 | Generator.filter_wo_labels | validation | def filter_wo_labels(self, all_issues):
"""
Filter all issues that don't have a label.
:rtype: list(dict)
:return: Issues without labels.
"""
issues_wo_labels = []
if not self.options.add_issues_wo_labels:
| python | {
"resource": ""
} |
q261082 | Generator.filter_by_include_labels | validation | def filter_by_include_labels(self, issues):
"""
Filter issues to include only issues with labels
specified in include_labels.
:param list(dict) issues: Pre-filtered issues.
:rtype: list(dict)
:return: Filtered issues.
| python | {
"resource": ""
} |
q261083 | Generator.get_filtered_pull_requests | validation | def get_filtered_pull_requests(self, pull_requests):
"""
This method fetches missing params for PR and filter them
by specified options. It include add all PR's with labels
from options.include_labels and exclude all from
options.exclude_labels.
:param list(dict) pull_requests: All pull requests.
:rtype: list(dict)
:return: Filtered pull requests.
| python | {
"resource": ""
} |
q261084 | Generator.filter_merged_pull_requests | validation | def filter_merged_pull_requests(self, pull_requests):
"""
This method filter only merged PR and fetch missing required
attributes for pull requests. Using merged date is more correct
than closed date.
:param list(dict) pull_requests: Pre-filtered pull requests.
:rtype: list(dict)
:return:
"""
if self.options.verbose:
print("Fetching merge date for pull requests...")
closed_pull_requests = self.fetcher.fetch_closed_pull_requests()
if not pull_requests:
return []
pulls = copy.deepcopy(pull_requests)
for pr in pulls:
| python | {
"resource": ""
} |
q261085 | Generator.fetch_and_filter_tags | validation | def fetch_and_filter_tags(self):
"""
Fetch and filter tags, fetch dates and sort them in time order.
"""
| python | {
"resource": ""
} |
q261086 | Generator.sort_tags_by_date | validation | def sort_tags_by_date(self, tags):
"""
Sort all tags by date.
:param list(dict) tags: All tags.
:rtype: list(dict)
:return: Sorted list of tags.
"""
if self.options.verbose:
| python | {
"resource": ""
} |
q261087 | Generator.get_time_of_tag | validation | def get_time_of_tag(self, tag):
"""
Get date and time for tag, fetching it if not already cached.
:param dict tag: Tag to get the datetime for.
:rtype: datetime
:return: datetime for specified tag.
"""
if not tag:
raise ChangelogGeneratorError("tag is nil")
name_of_tag = tag["name"]
time_for_name = self.tag_times_dict.get(name_of_tag, None)
if time_for_name:
return time_for_name
else:
time_string = self.fetcher.fetch_date_of_tag(tag)
try:
| python | {
"resource": ""
} |
q261088 | Generator.detect_link_tag_time | validation | def detect_link_tag_time(self, tag):
"""
Detect link, name and time for specified tag.
:param dict tag: Tag data.
:rtype: str, str, datetime
:return: Link, name and time of the tag.
"""
# if tag is nil - set current time
newer_tag_time = self.get_time_of_tag(tag) if tag \
else datetime.datetime.now()
# if it's future release tag - set this value
if tag["name"] == self.options.unreleased_label \
and self.options.future_release:
newer_tag_name = self.options.future_release
newer_tag_link = self.options.future_release
elif tag["name"] is not | python | {
"resource": ""
} |
q261089 | Generator.version_of_first_item | validation | def version_of_first_item(self):
"""
Try to detect the newest tag from self.options.base, otherwise
return a special value indicating the creation of the repo.
:rtype: str
:return: Tag name to use | python | {
"resource": ""
} |
q261090 | Generator.get_temp_tag_for_repo_creation | validation | def get_temp_tag_for_repo_creation(self):
"""
If not already cached, fetch the creation date of the repo, cache it
and return the special value indicating the creation of the repo.
:rtype: str
:return: value indicating the creation
"""
tag_date = self.tag_times_dict.get(REPO_CREATED_TAG_NAME, None)
| python | {
"resource": ""
} |
q261091 | Generator.filter_since_tag | validation | def filter_since_tag(self, all_tags):
"""
Filter tags according since_tag option.
:param list(dict) all_tags: All tags.
:rtype: list(dict)
:return: Filtered tags.
"""
tag = self.detect_since_tag()
if not tag or tag == REPO_CREATED_TAG_NAME:
return copy.deepcopy(all_tags)
filtered_tags = []
tag_names = [t["name"] for t in all_tags]
try:
idx = tag_names.index(tag)
except ValueError:
self.warn_if_tag_not_found(tag, | python | {
"resource": ""
} |
q261092 | Generator.filter_due_tag | validation | def filter_due_tag(self, all_tags):
"""
Filter tags according due_tag option.
:param list(dict) all_tags: Pre-filtered tags.
:rtype: list(dict)
:return: Filtered tags.
"""
filtered_tags = []
tag = self.options.due_tag
tag_names = [t["name"] for t in all_tags]
try:
idx = tag_names.index(tag)
except ValueError:
self.warn_if_tag_not_found(tag, "due-tag")
return copy.deepcopy(all_tags)
| python | {
"resource": ""
} |
q261093 | Generator.filter_between_tags | validation | def filter_between_tags(self, all_tags):
"""
Filter tags according between_tags option.
:param list(dict) all_tags: Pre-filtered tags.
:rtype: list(dict)
:return: Filtered tags.
"""
tag_names = [t["name"] for t in all_tags]
between_tags = []
for tag in self.options.between_tags:
try:
idx = tag_names.index(tag)
except ValueError:
raise ChangelogGeneratorError(
"ERROR: can't find tag {0}, specified with "
"--between-tags option.".format(tag))
between_tags.append(all_tags[idx])
between_tags = self.sort_tags_by_date(between_tags)
if len(between_tags) == 1:
| python | {
"resource": ""
} |
q261094 | Generator.filter_excluded_tags | validation | def filter_excluded_tags(self, all_tags):
"""
Filter tags according exclude_tags and exclude_tags_regex option.
:param list(dict) all_tags: Pre-filtered tags.
:rtype: list(dict)
:return: Filtered tags.
"""
filtered_tags = copy.deepcopy(all_tags)
if self.options.exclude_tags:
| python | {
"resource": ""
} |
q261095 | Generator.apply_exclude_tags_regex | validation | def apply_exclude_tags_regex(self, all_tags):
"""
Filter tags according exclude_tags_regex option.
:param list(dict) all_tags: Pre-filtered tags.
:rtype: list(dict)
:return: Filtered tags.
"""
filtered = []
for tag in all_tags:
| python | {
"resource": ""
} |
q261096 | Generator.apply_exclude_tags | validation | def apply_exclude_tags(self, all_tags):
"""
Filter tags according exclude_tags option.
:param list(dict) all_tags: Pre-filtered tags.
:rtype: list(dict)
:return: Filtered tags.
"""
filtered = copy.deepcopy(all_tags)
for tag in all_tags:
| python | {
"resource": ""
} |
q261097 | parse | validation | def parse(packet):
"""
Parses an APRS packet and returns a dict with decoded data
- All attributes are in metric units
"""
if not isinstance(packet, string_type_parse):
raise TypeError("Expected packet to be str/unicode/bytes, got %s", type(packet))
if len(packet) == 0:
raise ParseError("packet is empty", packet)
# attempt to detect encoding
if isinstance(packet, bytes):
packet = _unicode_packet(packet)
packet = packet.rstrip("\r\n")
logger.debug("Parsing: %s", packet)
# split into head and body
try:
(head, body) = packet.split(':', 1)
except:
raise ParseError("packet has no body", packet)
if len(body) == 0:
raise ParseError("packet body is empty", packet)
parsed = {
'raw': packet,
}
# parse head
try:
parsed.update(parse_header(head))
except ParseError as msg:
raise ParseError(str(msg), packet)
# parse body
packet_type = body[0]
body = body[1:]
if len(body) == 0 and packet_type != '>':
raise ParseError("packet body is empty after packet type character", packet)
# attempt to parse the body
try:
_try_toparse_body(packet_type, body, parsed)
# capture ParseErrors and attach the packet
| python | {
"resource": ""
} |
q261098 | to_decimal | validation | def to_decimal(text):
"""
Takes a base91 char string and returns decimal
"""
if not isinstance(text, string_type):
raise TypeError("expected str or unicode, %s given" % type(text))
if findall(r"[\x00-\x20\x7c-\xff]", text):
raise ValueError("invalid character in sequence")
text = text.lstrip('!')
decimal = 0
| python | {
"resource": ""
} |
q261099 | from_decimal | validation | def from_decimal(number, width=1):
"""
Takes a decimal and returns base91 char string.
With optional parameter for fix with output
"""
text = []
if not isinstance(number, int_type):
raise TypeError("Expected number to be int, got %s", type(number))
elif not isinstance(width, int_type):
raise TypeError("Expected width to be int, got %s", type(number))
elif number < 0:
raise ValueError("Expected | python | {
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.