_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3
values | text stringlengths 75 19.8k | language stringclasses 1
value | meta_information dict |
|---|---|---|---|---|---|
def get_providers(self, **kwargs):
    '''Get all providers registered.

    If keyword `ids` is present, get only the providers with these ids.
    If keyword `subject` is present, get only the providers that have this subject.

    .. code-block:: python

        # Get all providers with subject 'biology'
        registry.get_providers(subject='biology')

        # Get all providers with id 1 or 2
        registry.get_providers(ids=[1,2])

        # Get all providers with id 1 or 2 and subject 'biology'
        registry.get_providers(ids=[1,2], subject='biology')

    :param list ids: Only return providers with one of the Ids or :term:`URIs <uri>`.
    :param str subject: Only return providers with this subject.
    :returns: A list of :class:`providers <skosprovider.providers.VocabularyProvider>`
    '''
    if 'ids' in kwargs:
        # accept concept scheme URIs as well as plain provider ids
        ids = [self.concept_scheme_uri_map.get(id, id) for id in kwargs['ids']]
        providers = [
            self.providers[k] for k in self.providers.keys() if k in ids
        ]
    else:
        providers = list(self.providers.values())
    if 'subject' in kwargs:
        # keep only providers tagged with the requested subject
        providers = [p for p in providers if kwargs['subject'] in p.metadata['subject']]
    return providers
def find(self, query, **kwargs):
    '''Launch a query across all or a selection of providers.

    .. code-block:: python

        # Find anything that has a label of church in any provider.
        registry.find({'label': 'church'})

        # Find anything that has a label of church with the BUILDINGS provider.
        # Attention, this syntax was deprecated in version 0.3.0
        registry.find({'label': 'church'}, providers=['BUILDINGS'])

        # Find anything that has a label of church with the BUILDINGS provider.
        registry.find({'label': 'church'}, providers={'ids': ['BUILDINGS']})

        # Find anything that has a label of church with a provider
        # marked with the subject 'architecture'.
        registry.find({'label': 'church'}, providers={'subject': 'architecture'})

        # Find anything that has a label of church in any provider.
        # If possible, display the results with a Dutch label.
        registry.find({'label': 'church'}, language='nl')

    :param dict query: The query parameters that will be passed on to each
        :meth:`~skosprovider.providers.VocabularyProvider.find` method of
        the selected
        :class:`providers <skosprovider.providers.VocabularyProvider>`.
    :param dict providers: Optional. If present, it should be a dictionary.
        This dictionary can contain any of the keyword arguments available
        to the :meth:`get_providers` method. The query will then only
        be passed to the providers conforming to these arguments.
    :param string language: Optional. If present, it should be a
        :term:`language-tag`. This language-tag is passed on to the
        underlying providers and used when selecting the label to display
        for each concept.
    :returns: a list of :class:`dict`.
        Each dict has two keys: id and concepts.
    '''
    if 'providers' not in kwargs:
        providers = self.get_providers()
    else:
        pargs = kwargs['providers']
        if isinstance(pargs, list):
            # deprecated (pre 0.3.0) syntax: a bare list is treated as ids
            providers = self.get_providers(ids=pargs)
        else:
            providers = self.get_providers(**pargs)
    # only forward `language` to the providers; other kwargs stay local
    kwarguments = {}
    if 'language' in kwargs:
        kwarguments['language'] = kwargs['language']
    return [{'id': p.get_vocabulary_id(), 'concepts': p.find(query, **kwarguments)}
            for p in providers]
def get_all(self, **kwargs):
    '''Get all concepts from all providers.

    .. code-block:: python

        # get all concepts in all providers.
        registry.get_all()

        # get all concepts in all providers.
        # If possible, display the results with a Dutch label.
        registry.get_all(language='nl')

    :param string language: Optional. If present, it should be a
        :term:`language-tag`. This language-tag is passed on to the
        underlying providers and used when selecting the label to display
        for each concept.
    :returns: a list of :class:`dict`.
        Each dict has two keys: id and concepts.
    '''
    # Only the `language` keyword is forwarded to the providers.
    provider_args = {}
    if 'language' in kwargs:
        provider_args['language'] = kwargs['language']
    results = []
    for provider in self.providers.values():
        results.append({
            'id': provider.get_vocabulary_id(),
            'concepts': provider.get_all(**provider_args),
        })
    return results
def get_by_uri(self, uri):
    '''Get a concept or collection by its uri.

    Returns a single concept or collection if one exists with this uri.
    Returns False otherwise.

    :param string uri: The uri to find a concept or collection for.
    :raises ValueError: The uri is invalid.
    :rtype: :class:`skosprovider.skos.Concept` or
        :class:`skosprovider.skos.Collection`
    '''
    if not is_uri(uri):
        raise ValueError('%s is not a valid URI.' % uri)
    # First try providers whose concept scheme URI is a prefix of the
    # requested URI — they are the most likely to know it.
    for csuri in self.concept_scheme_uri_map.keys():
        if not uri.startswith(csuri):
            continue
        found = self.get_provider(csuri).get_by_uri(uri)
        if found:
            return found
    # Fall back to scanning every registered provider.
    for provider in self.providers.values():
        found = provider.get_by_uri(uri)
        if found:
            return found
    return False
def upload_backend(index='dev', user=None):
    """
    Build the backend and upload it to the remote server at the given index.

    :param index: name of the devpi index to publish to
    :param user: devpi user name (not referenced in this body; kept for
        CLI symmetry with the other tasks)
    """
    get_vars()
    use_devpi(index=index)
    # the backend sources live in a sibling directory of this fabfile
    with fab.lcd('../application'):
        fab.local('make upload')
def update_backend(use_pypi=False, index='dev', build=True, user=None, version=None):
    """
    Install the backend from the given devpi index at the given version on the target host and restart the service.

    If version is None, it defaults to the latest version.

    Optionally, build and upload the application first from local sources. This requires a
    full backend development environment on the machine running this command (pyramid etc.)

    :param use_pypi: install from PyPI instead of the devpi index
    :param index: devpi index to install from
    :param build: when truthy, build and upload the package first
    :param user: forwarded to :func:`upload_backend`
    :param version: exact version to pin; latest when None
    """
    get_vars()
    if value_asbool(build):
        upload_backend(index=index, user=user)
    with fab.cd('{apphome}'.format(**AV)):
        if value_asbool(use_pypi):
            command = 'bin/pip install --upgrade briefkasten'
        else:
            # NOTE(review): `user` is passed to format() but the template has
            # no {user} placeholder — confirm whether it should be used here.
            command = 'bin/pip install --upgrade --pre -i {ploy_default_publish_devpi}/briefkasten/{index}/+simple/ briefkasten'.format(
                index=index,
                user=user,
                **AV)
        if version:
            # works because 'briefkasten' is the last token of both commands
            command = '%s==%s' % (command, version)
        fab.sudo(command)
    briefkasten_ctl('restart')
q260906 | VocabularyProvider._sort | validation | def _sort(self, concepts, sort=None, language='any', reverse=False):
'''
Returns a sorted version of a list of concepts. Will leave the original
list unsorted.
:param list concepts: A list of concepts and collections.
:param string sort: What to sort on: `id`, `label` or `sortlabel`
:param string language: Language to use when sorting on `label` or
`sortlabel`.
:param boolean reverse: Reverse the sort order?
:rtype: list
'''
sorted = copy.copy(concepts)
if sort:
sorted.sort(key=methodcaller('_sortkey', sort, language), reverse=reverse)
return sorted | python | {
"resource": ""
} |
async def update(self) -> None:
    """Force update of alarm status and zones.

    Issues both status requests concurrently: S00 lists unsealed zones,
    S14 reports the arming status.
    """
    _LOGGER.debug("Requesting state update from server (S00, S14)")
    await asyncio.gather(
        # List unsealed Zones
        self.send_command('S00'),
        # Arming status update
        self.send_command('S14'),
    )
async def _update_loop(self) -> None:
    """Schedule a state update to keep the connection alive.

    Sleeps one interval before the first poll, then keeps polling until
    the client is closed.
    """
    await asyncio.sleep(self._update_interval)
    while not self._closed:
        await self.update()
        await asyncio.sleep(self._update_interval)
def _iterate_namespace_models(self, **kwargs) -> Iterable:
    """Return an iterator over the models to be converted to the namespace.

    Wraps the model query in a :mod:`tqdm` progress bar sized via a count
    query; *kwargs* are forwarded to :class:`tqdm.tqdm`.
    """
    return tqdm(
        self._get_query(self.namespace_model),
        total=self._count_model(self.namespace_model),
        **kwargs
    )
def _get_default_namespace(self) -> Optional[Namespace]:
    """Get the reference BEL namespace if it exists.

    Matches on the namespace URL; returns ``None`` when no namespace with
    that URL is stored.
    """
    return self._get_query(Namespace).filter(Namespace.url == self._get_namespace_url()).one_or_none()
def _make_namespace(self) -> Namespace:
    """Make a namespace and commit it with all of its entries."""
    namespace = Namespace(
        name=self._get_namespace_name(),
        keyword=self._get_namespace_keyword(),
        url=self._get_namespace_url(),
        # the version is simply the creation timestamp
        version=str(time.asctime()),
    )
    self.session.add(namespace)
    entries = self._get_namespace_entries(namespace)
    self.session.add_all(entries)
    # time the commit since large namespaces can take a while
    t = time.time()
    log.info('committing models')
    self.session.commit()
    log.info('committed models in %.2f seconds', time.time() - t)
    return namespace
def _get_old_entry_identifiers(namespace: Namespace) -> Set[NamespaceEntry]:
    """Convert a PyBEL generalized namespace entries to a set.

    Default to using the identifier, but can be overridden to use the name instead.

    .. note:: (review) the declared return type is ``Set[NamespaceEntry]``,
       but the body collects ``term.identifier`` values, i.e. most likely a
       set of strings — confirm before changing the annotation.

    >>> {term.identifier for term in namespace.entries}
    """
    return {term.identifier for term in namespace.entries}
def _update_namespace(self, namespace: Namespace) -> None:
    """Update an already-created namespace.

    Note: Only call this if namespace won't be none!
    """
    old_entry_identifiers = self._get_old_entry_identifiers(namespace)
    new_count = 0
    skip_count = 0
    for model in self._iterate_namespace_models():
        # skip models whose identifier is already present in the namespace
        if self._get_identifier(model) in old_entry_identifiers:
            continue
        entry = self._create_namespace_entry_from_model(model, namespace=namespace)
        # entries without a name cannot be used in a BEL namespace
        if entry is None or entry.name is None:
            skip_count += 1
            continue
        new_count += 1
        self.session.add(entry)
    t = time.time()
    log.info('got %d new entries. skipped %d entries missing names. committing models', new_count, skip_count)
    self.session.commit()
    log.info('committed models in %.2f seconds', time.time() - t)
def add_namespace_to_graph(self, graph: BELGraph) -> Namespace:
    """Add this manager's namespace to the graph.

    Uploads the namespace (populating first if necessary) and registers
    its URL under its keyword on the graph.
    """
    namespace = self.upload_bel_namespace()
    graph.namespace_url[namespace.keyword] = namespace.url
    # Add this manager as an annotation, too
    self._add_annotation_to_graph(graph)
    return namespace
def _add_annotation_to_graph(self, graph: BELGraph) -> None:
    """Record this manager's module name under the graph's ``bio2bel`` annotation."""
    annotations = graph.annotation_list
    if 'bio2bel' not in annotations:
        # first bio2bel manager to annotate this graph
        annotations['bio2bel'] = set()
    annotations['bio2bel'].add(self.module_name)
def upload_bel_namespace(self, update: bool = False) -> Namespace:
    """Upload the namespace to the PyBEL database.

    :param update: Should the namespace be updated first?
    """
    # the source database must be populated before exporting entries
    if not self.is_populated():
        self.populate()
    namespace = self._get_default_namespace()
    if namespace is None:
        log.info('making namespace for %s', self._get_namespace_name())
        return self._make_namespace()
    if update:
        self._update_namespace(namespace)
    return namespace
def drop_bel_namespace(self) -> Optional[Namespace]:
    """Remove the default namespace if it exists.

    :returns: the deleted namespace, or ``None`` when nothing was stored.
    """
    namespace = self._get_default_namespace()
    if namespace is not None:
        # delete entries one by one so tqdm can show progress
        for entry in tqdm(namespace.entries, desc=f'deleting entries in {self._get_namespace_name()}'):
            self.session.delete(entry)
        self.session.delete(namespace)
        log.info('committing deletions')
        self.session.commit()
        return namespace
def write_bel_namespace(self, file: TextIO, use_names: bool = False) -> None:
    """Write as a BEL namespace file.

    :param file: target file object
    :param use_names: key the namespace by names instead of identifiers
    :raises ValueError: if *use_names* is requested but the namespace has no names
    """
    if not self.is_populated():
        self.populate()
    if use_names and not self.has_names:
        raise ValueError
    values = (
        self._get_namespace_name_to_encoding(desc='writing names')
        if use_names else
        self._get_namespace_identifier_to_encoding(desc='writing identifiers')
    )
    write_namespace(
        namespace_name=self._get_namespace_name(),
        namespace_keyword=self._get_namespace_keyword(),
        namespace_query_url=self.identifiers_url,
        values=values,
        file=file,
    )
def write_bel_annotation(self, file: TextIO) -> None:
    """Write as a BEL annotation file.

    :param file: target file object
    """
    if not self.is_populated():
        self.populate()
    # annotations are always keyed by name
    values = self._get_namespace_name_to_encoding(desc='writing names')
    write_annotation(
        keyword=self._get_namespace_keyword(),
        citation_name=self._get_namespace_name(),
        description='',
        values=values,
        file=file,
    )
def write_bel_namespace_mappings(self, file: TextIO, **kwargs) -> None:
    """Write the identifier-to-name mapping as pretty-printed, key-sorted JSON."""
    mapping = self._get_namespace_identifier_to_name(**kwargs)
    file.write(json.dumps(mapping, indent=2, sort_keys=True))
def write_directory(self, directory: str) -> bool:
    """Write a BEL namespace for identifiers, names, name hash, and mappings to the given directory.

    :returns: True when new files were written; False when the stored hash
        shows the namespace is unchanged.
    """
    current_md5_hash = self.get_namespace_hash()
    md5_hash_path = os.path.join(directory, f'{self.module_name}.belns.md5')
    if not os.path.exists(md5_hash_path):
        old_md5_hash = None
    else:
        with open(md5_hash_path) as file:
            old_md5_hash = file.read().strip()
    # identical hash: nothing changed since the last export
    if old_md5_hash == current_md5_hash:
        return False
    with open(os.path.join(directory, f'{self.module_name}.belns'), 'w') as file:
        self.write_bel_namespace(file, use_names=False)
    with open(md5_hash_path, 'w') as file:
        print(current_md5_hash, file=file)
    # name-keyed exports only make sense when the namespace has names
    if self.has_names:
        with open(os.path.join(directory, f'{self.module_name}-names.belns'), 'w') as file:
            self.write_bel_namespace(file, use_names=True)
        with open(os.path.join(directory, f'{self.module_name}.belns.mapping'), 'w') as file:
            self.write_bel_namespace_mappings(file, desc='writing mapping')
    return True
def get_namespace_hash(self, hash_fn=hashlib.md5) -> str:
    """Hash the namespace's (key, encoding) pairs with *hash_fn* (MD5 by default).

    Uses names when available, identifiers otherwise.
    """
    if self.has_names:
        mapping = self._get_namespace_name_to_encoding(desc='getting hash')
    else:
        mapping = self._get_namespace_identifier_to_encoding(desc='getting hash')
    digest = hash_fn()
    for key, encoding in mapping.items():
        digest.update(f'{key}:{encoding}'.encode('utf8'))
    return digest.hexdigest()
def get_long_description():
    """Get the long_description from the README.rst file. Assume UTF-8 encoding."""
    readme_path = os.path.join(HERE, 'README.rst')
    with codecs.open(readme_path, encoding='utf-8') as readme:
        return readme.read()
def dropbox_post_factory(request):
    """receives a UUID via the request and returns either a fresh or an existing dropbox
    for it"""
    try:
        max_age = int(request.registry.settings.get('post_token_max_age_seconds'))
    except Exception:
        # fall back to five minutes when the setting is missing or invalid
        max_age = 300
    try:
        drop_id = parse_post_token(
            token=request.matchdict['token'],
            secret=request.registry.settings['post_secret'],
            max_age=max_age)
    except SignatureExpired:
        raise HTTPGone('dropbox expired')
    except Exception:  # don't be too specific on the reason for the error
        raise HTTPNotFound('no such dropbox')
    dropbox = request.registry.settings['dropbox_container'].get_dropbox(drop_id)
    # status >= 20 means processing has started; no further data accepted
    if dropbox.status_int >= 20:
        raise HTTPGone('dropbox already in processing, no longer accepts data')
    return dropbox
def dropbox_factory(request):
    """ expects the id of an existing dropbox and returns its instance"""
    try:
        container = request.registry.settings['dropbox_container']
        return container.get_dropbox(request.matchdict['drop_id'])
    except KeyError:
        # unknown drop id (or missing container setting) maps to a 404
        raise HTTPNotFound('no such dropbox')
def dropbox_editor_factory(request):
    """ this factory also requires the editor token"""
    dropbox = dropbox_factory(request)
    # NOTE(review): is_equal is presumably a constant-time comparison to
    # avoid timing leaks — confirm in its definition.
    if is_equal(dropbox.editor_token, request.matchdict['editor_token'].encode('utf-8')):
        return dropbox
    else:
        raise HTTPNotFound('invalid editor token')
def sanitize_filename(filename):
    """preserve the file ending, but replace the name with a random token """
    # NOTE(review): the old TODO claimed splitext reveals everything after
    # the first `.`; os.path.splitext actually splits at the *last* dot, so
    # only the final suffix (e.g. `.gz` of `foo.tar.gz`) is preserved.
    # Confirm `splitext` here really is os.path.splitext before dropping
    # this note.
    token = generate_drop_id()
    name, extension = splitext(filename)
    if extension:
        return '%s%s' % (token, extension)
    else:
        return token
def cleanup(self):
    """ ensures that no data leaks from drop after processing by
    removing all data except the status file"""
    # Remove each sensitive file independently: with a single try-block,
    # a missing `message` file would abort before `dirty.zip.pgp` is
    # removed, leaving encrypted payload data behind.
    for fs_name in (u'message', 'dirty.zip.pgp'):
        try:
            remove(join(self.fs_path, fs_name))
        except OSError:
            # the file may legitimately not exist (e.g. drop without message)
            pass
    shutil.rmtree(join(self.fs_path, u'clean'), ignore_errors=True)
    shutil.rmtree(join(self.fs_path, u'attach'), ignore_errors=True)
def _create_encrypted_zip(self, source='dirty', fs_target_dir=None):
    """ creates a zip file from the drop and encrypts it to the editors.
    the encrypted archive is created inside fs_target_dir"""
    # only editors with a usable GPG key can receive the backup
    backup_recipients = [r for r in self.editors if checkRecipient(self.gpg_context, r)]
    # this will be handled by watchdog, no need to send for each drop
    if not backup_recipients:
        self.status = u'500 no valid keys at all'
        return self.status
    # calculate paths
    fs_backup = join(self.fs_path, '%s.zip' % source)
    if fs_target_dir is None:
        # keep the encrypted archive inside the drop directory
        fs_backup_pgp = join(self.fs_path, '%s.zip.pgp' % source)
    else:
        fs_backup_pgp = join(fs_target_dir, '%s.zip.pgp' % self.drop_id)
    fs_source = dict(
        dirty=self.fs_dirty_attachments,
        clean=self.fs_cleansed_attachments
    )
    # create archive (uncompressed; contents are encrypted right after)
    with ZipFile(fs_backup, 'w', ZIP_STORED) as backup:
        if exists(join(self.fs_path, 'message')):
            backup.write(join(self.fs_path, 'message'), arcname='message')
        for fs_attachment in fs_source[source]:
            backup.write(fs_attachment, arcname=split(fs_attachment)[-1])
    # encrypt archive
    with open(fs_backup, "rb") as backup:
        self.gpg_context.encrypt_file(
            backup,
            backup_recipients,
            always_trust=True,
            output=fs_backup_pgp
        )
    # cleanup: the unencrypted zip must not remain on disk
    remove(fs_backup)
    return fs_backup_pgp
def _create_archive(self):
    """ creates an encrypted archive of the dropbox outside of the drop directory.
    """
    self.status = u'270 creating final encrypted backup of cleansed attachments'
    # archive the *cleansed* attachments into the container's archive dir
    return self._create_encrypted_zip(source='clean', fs_target_dir=self.container.fs_archive_cleansed)
def size_attachments(self):
    """returns the number of bytes that the cleansed attachments take up on disk"""
    return sum(
        stat(fs_attachment).st_size
        for fs_attachment in self.fs_cleansed_attachments
    )
def replies(self):
    """ returns a list of strings """
    # only the first reply (message_001.txt) is ever surfaced
    fs_reply_path = join(self.fs_replies_path, 'message_001.txt')
    if exists(fs_reply_path):
        # NOTE(review): `load` is imported elsewhere in this module
        # (json/yaml?) and the file handle is never explicitly closed —
        # confirm and consider a `with` block.
        return [load(open(fs_reply_path, 'r'))]
    else:
        return []
def message(self):
    """ returns the user submitted text
    """
    try:
        with open(join(self.fs_path, u'message')) as message_file:
            # NOTE(review): str.decode implies this runs on Python 2 —
            # under Python 3 these lines are already str and decode fails.
            return u''.join([line.decode('utf-8') for line in message_file.readlines()])
    except IOError:
        # no message file means the drop was submitted without text
        return u''
def fs_dirty_attachments(self):
    """ returns a list of absolute paths to the attachements"""
    container = self.fs_attachment_container
    if not exists(container):
        # no attachments were ever uploaded for this drop
        return []
    return [join(container, attachment) for attachment in listdir(container)]
def fs_cleansed_attachments(self):
    """ returns a list of absolute paths to the cleansed attachements"""
    container = self.fs_cleansed_attachment_container
    if not exists(container):
        # cleansing has not produced any output yet
        return []
    return [join(container, attachment) for attachment in listdir(container)]
def reset_cleansers(confirm=True):
    """destroys all cleanser slaves and their rollback snapshots, as well as the initial master
    snapshot - this allows re-running the jailhost deployment to recreate fresh cleansers."""
    if value_asbool(confirm) and not yesno("""\nObacht!
    This will destroy any existing and or currently running cleanser jails.
    Are you sure that you want to continue?"""):
        exit("Glad I asked...")
    get_vars()
    cleanser_count = AV['ploy_cleanser_count']
    # make sure no workers interfere:
    fab.run('ezjail-admin stop worker')
    # stop and nuke the cleanser slaves
    for cleanser_index in range(cleanser_count):
        # jail names are 1-based and zero padded (cleanser_01, ...)
        cindex = '{:02d}'.format(cleanser_index + 1)
        fab.run('ezjail-admin stop cleanser_{cindex}'.format(cindex=cindex))
        # warn_only: these commands fail harmlessly when a slave is
        # already partially removed
        with fab.warn_only():
            fab.run('zfs destroy tank/jails/cleanser_{cindex}@jdispatch_rollback'.format(cindex=cindex))
            fab.run('ezjail-admin delete -fw cleanser_{cindex}'.format(cindex=cindex))
            fab.run('umount -f /usr/jails/cleanser_{cindex}'.format(cindex=cindex))
            fab.run('rm -rf /usr/jails/cleanser_{cindex}'.format(cindex=cindex))
    with fab.warn_only():
        # remove master snapshot
        fab.run('zfs destroy -R tank/jails/cleanser@clonesource')
    # restart worker and cleanser to prepare for subsequent ansible configuration runs
    fab.run('ezjail-admin start worker')
    fab.run('ezjail-admin stop cleanser')
    fab.run('ezjail-admin start cleanser')
def reset_jails(confirm=True, keep_cleanser_master=True):
    """ stops, deletes and re-creates all jails.
    since the cleanser master is rather large, that one is omitted by default.
    """
    if value_asbool(confirm) and not yesno("""\nObacht!
    This will destroy all existing and or currently running jails on the host.
    Are you sure that you want to continue?"""):
        exit("Glad I asked...")
    # cleanser slaves must always be reset alongside the other jails
    reset_cleansers(confirm=False)
    jails = ['appserver', 'webserver', 'worker']
    if not value_asbool(keep_cleanser_master):
        jails.append('cleanser')
    with fab.warn_only():
        for jail in jails:
            fab.run('ezjail-admin delete -fw {jail}'.format(jail=jail))
        # remove authorized keys for no longer existing key (they are regenerated for each new worker)
        fab.run('rm /usr/jails/cleanser/usr/home/cleanser/.ssh/authorized_keys')
def _add_admin(self, app, **kwargs):
    """Add a Flask Admin interface to an application.

    Each entry in ``self.flask_admin_models`` is either a model class
    (exposed with the default :class:`ModelView`) or a ``(model, view)``
    tuple supplying a custom view class.

    :param flask.Flask app: A Flask application
    :param kwargs: Keyword arguments are passed through to :class:`flask_admin.Admin`
    :raises TypeError: if a tuple entry is not exactly a ``(model, view)`` pair
    :rtype: flask_admin.Admin
    """
    from flask_admin import Admin
    from flask_admin.contrib.sqla import ModelView
    admin = Admin(app, **kwargs)
    for flask_admin_model in self.flask_admin_models:
        if isinstance(flask_admin_model, tuple):  # assume its a 2 tuple
            if len(flask_admin_model) != 2:
                # name the offending entry instead of raising a bare TypeError
                raise TypeError(
                    'flask_admin_models tuple entries must be (model, view) '
                    'pairs, got: %r' % (flask_admin_model,)
                )
            model, view = flask_admin_model
            admin.add_view(view(model, self.session))
        else:
            admin.add_view(ModelView(flask_admin_model, self.session))
    return admin
def dropbox_form(request):
    """ generates a dropbox uid and renders the submission form with a signed version of that id"""
    from briefkasten import generate_post_token
    # the token signs the (future) drop id so the POST can be validated
    token = generate_post_token(secret=request.registry.settings['post_secret'])
    return dict(
        action=request.route_url('dropbox_form_submit', token=token),
        fileupload_url=request.route_url('dropbox_fileupload', token=token),
        **defaults(request))
def dropbox_fileupload(dropbox, request):
    """ accepts a single file upload and adds it to the dropbox as attachment"""
    attachment = request.POST['attachment']
    attached = dropbox.add_attachment(attachment)
    # response shape is what the in-browser uploader expects:
    # a `files` list with name/type per uploaded file
    return dict(
        files=[dict(
            name=attached,
            type=attachment.type,
        )]
    )
def dropbox_submission(dropbox, request):
    """ handles the form submission, redirects to the dropbox's status page."""
    try:
        data = dropbox_schema.deserialize(request.POST)
    except Exception:
        # invalid submissions are silently bounced back to the empty form
        return HTTPFound(location=request.route_url('dropbox_form'))
    # set the message
    dropbox.message = data.get('message')
    # recognize submission from watchdog
    # NOTE(review): the guard checks 'testing_secret' but the compared value
    # is read from 'test_submission_secret' — confirm this settings-key
    # mismatch is intentional.
    if 'testing_secret' in dropbox.settings:
        dropbox.from_watchdog = is_equal(
            unicode(dropbox.settings['test_submission_secret']),
            data.pop('testing_secret', u''))
    # a non-js client might have uploaded an attachment via the form's fileupload field:
    if data.get('upload') is not None:
        dropbox.add_attachment(data['upload'])
    # now we can call the process method
    dropbox.submit()
    drop_url = request.route_url('dropbox_view', drop_id=dropbox.drop_id)
    print("Created dropbox %s" % drop_url)
    return HTTPFound(location=drop_url)
def make_obo_getter(data_url: str,
                    data_path: str,
                    *,
                    preparsed_path: Optional[str] = None,
                    ) -> Callable[[Optional[str], bool, bool], MultiDiGraph]:
    """Build a function that handles downloading OBO data and parsing it into a NetworkX object.

    :param data_url: The URL of the data
    :param data_path: The path where the data should get stored
    :param preparsed_path: The optional path to cache a pre-parsed version
        (NOTE(review): stored as a gpickle despite the original "json" wording)
    """
    download_function = make_downloader(data_url, data_path)

    def get_obo(url: Optional[str] = None, cache: bool = True, force_download: bool = False) -> MultiDiGraph:
        """Download and parse a GO obo file with :mod:`obonet` into a MultiDiGraph.

        :param url: The URL (or file path) to download.
        :param cache: If true, the data is downloaded to the file system, else it is loaded from the internet
        :param force_download: If true, overwrites a previously cached file
        """
        # a cached pre-parsed pickle short-circuits download and parsing
        if preparsed_path is not None and os.path.exists(preparsed_path):
            return read_gpickle(preparsed_path)
        if url is None and cache:
            url = download_function(force_download=force_download)
        result = obonet.read_obo(url)
        if preparsed_path is not None:
            write_gpickle(result, preparsed_path)
        return result

    return get_obo
def belns(keyword: str, file: TextIO, encoding: Optional[str], use_names: bool):
    """Write as a BEL namespace.

    Downloads (and caches) the OBO file for *keyword* from the OBO Foundry
    PURL, parses it, and converts the graph to a BEL namespace file.
    """
    directory = get_data_dir(keyword)
    obo_url = f'http://purl.obolibrary.org/obo/{keyword}.obo'
    obo_path = os.path.join(directory, f'{keyword}.obo')
    obo_cache_path = os.path.join(directory, f'{keyword}.obo.pickle')
    obo_getter = make_obo_getter(obo_url, obo_path, preparsed_path=obo_cache_path)
    graph = obo_getter()
    convert_obo_graph_to_belns(
        graph,
        file=file,
        encoding=encoding,
        use_names=use_names,
    )
def belanno(keyword: str, file: TextIO):
    """Write as a BEL annotation.

    Same download/cache pipeline as :func:`belns`, but converts the parsed
    OBO graph to a BEL annotation file instead.
    """
    directory = get_data_dir(keyword)
    obo_url = f'http://purl.obolibrary.org/obo/{keyword}.obo'
    obo_path = os.path.join(directory, f'{keyword}.obo')
    obo_cache_path = os.path.join(directory, f'{keyword}.obo.pickle')
    obo_getter = make_obo_getter(obo_url, obo_path, preparsed_path=obo_cache_path)
    graph = obo_getter()
    convert_obo_graph_to_belanno(
        graph,
        file=file,
    )
def _store_helper(model: Action, session: Optional[Session] = None) -> None:
    """Persist *model*, using the given session or a freshly created one.

    NOTE(review): the session is closed even when supplied by the caller —
    confirm callers do not reuse it afterwards.
    """
    db = _make_session() if session is None else session
    db.add(model)
    db.commit()
    db.close()
def _make_session(connection: Optional[str] = None) -> Session:
    """Make a session.

    Falls back to the global Bio2BEL connection string, ensures all tables
    exist, and returns a fresh session bound to that engine.
    """
    if connection is None:
        connection = get_global_connection()
    engine = create_engine(connection)
    # make sure the schema exists before handing out a session
    create_all(engine)
    session_cls = sessionmaker(bind=engine)
    session = session_cls()
    return session
def create_all(engine, checkfirst=True):
    """Create the tables for Bio2BEL.

    :param engine: SQLAlchemy engine to bind to
    :param checkfirst: only create tables that do not already exist
    """
    Base.metadata.create_all(bind=engine, checkfirst=checkfirst)
def store_populate(cls, resource: str, session: Optional[Session] = None) -> 'Action':
    """Build and persist a "populate" event for *resource*.

    :param resource: The normalized name of the resource to store

    Example:
    >>> from bio2bel.models import Action
    >>> Action.store_populate('hgnc')
    """
    event = cls.make_populate(resource)
    _store_helper(event, session=session)
    return event
def store_populate_failed(cls, resource: str, session: Optional[Session] = None) -> 'Action':
    """Build and persist a "populate failed" event for *resource*.

    :param resource: The normalized name of the resource to store

    Example:
    >>> from bio2bel.models import Action
    >>> Action.store_populate_failed('hgnc')
    """
    event = cls.make_populate_failed(resource)
    _store_helper(event, session=session)
    return event
def store_drop(cls, resource: str, session: Optional[Session] = None) -> 'Action':
    """Build and persist a "drop" event for *resource*.

    :param resource: The normalized name of the resource to store

    Example:
    >>> from bio2bel.models import Action
    >>> Action.store_drop('hgnc')
    """
    event = cls.make_drop(resource)
    _store_helper(event, session=session)
    return event
def ls(cls, session: Optional[Session] = None) -> List['Action']:
    """Return all stored actions, most recent first."""
    db = session if session is not None else _make_session()
    results = db.query(cls).order_by(cls.created.desc()).all()
    db.close()
    return results
def count(cls, session: Optional[Session] = None) -> int:
    """Return the number of stored actions."""
    db = session if session is not None else _make_session()
    total = db.query(cls).count()
    db.close()
    return total
def get_data_dir(module_name: str) -> str:
    """Ensure the appropriate Bio2BEL data directory exists for the given module, then returns the file path.

    :param module_name: The name of the module. Ex: 'chembl'
    :return: The module's data directory
    """
    data_dir = os.path.join(BIO2BEL_DIR, module_name.lower())
    os.makedirs(data_dir, exist_ok=True)
    return data_dir
def get_module_config_cls(module_name: str) -> Type[_AbstractModuleConfig]:  # noqa: D202
    """Build a module configuration class.

    The generated class reads the shared Bio2BEL config paths plus the
    module-specific ``config.ini``.
    """

    class ModuleConfig(_AbstractModuleConfig):
        NAME = f'bio2bel:{module_name}'
        FILES = DEFAULT_CONFIG_PATHS + [
            os.path.join(DEFAULT_CONFIG_DIRECTORY, module_name, 'config.ini')
        ]

    return ModuleConfig
q260955 | get_connection | validation | def get_connection(module_name: str, connection: Optional[str] = None) -> str:
"""Return the SQLAlchemy connection string if it is set.
Order of operations:
1. Return the connection if given as a parameter
2. Check the environment for BIO2BEL_{module_name}_CONNECTION
3. Look in the bio2bel config file for module-specific connection. Create if doesn't exist. Check the
module-specific section for ``connection``
4. Look in the bio2bel module folder for a config file. Don't create if doesn't exist. Check the default section
for ``connection``
5. Check the environment for BIO2BEL_CONNECTION
6. Check the bio2bel config file for default
7. Fall back to standard default cache connection
:param module_name: The name of the module to get the configuration for
:param connection: get the SQLAlchemy connection string
:return: The SQLAlchemy connection string based on the configuration
"""
# 1. Use given connection
if connection is not None:
return connection
module_name = module_name.lower()
module_config_cls = get_module_config_cls(module_name)
module_config = module_config_cls.load()
return module_config.connection or config.connection | python | {
"resource": ""
} |
q260956 | get_modules | validation | def get_modules() -> Mapping:
"""Get all Bio2BEL modules."""
modules = {}
for entry_point in iter_entry_points(group='bio2bel', name=None):
entry = entry_point.name
try:
modules[entry] = entry_point.load()
except VersionConflict as exc:
log.warning('Version conflict in %s: %s', entry, exc)
continue
except UnknownExtra as exc:
log.warning('Unknown extra in %s: %s', entry, exc)
continue
except ImportError as exc:
log.exception('Issue with importing module %s: %s', entry, exc)
continue
return modules | python | {
"resource": ""
} |
q260957 | clear_cache | validation | def clear_cache(module_name: str, keep_database: bool = True) -> None:
"""Clear all downloaded files."""
data_dir = get_data_dir(module_name)
if not os.path.exists(data_dir):
return
for name in os.listdir(data_dir):
if name in {'config.ini', 'cfg.ini'}:
continue
if name == 'cache.db' and keep_database:
continue
path = os.path.join(data_dir, name)
if os.path.isdir(path):
shutil.rmtree(path)
else:
os.remove(path)
os.rmdir(data_dir) | python | {
"resource": ""
} |
q260958 | AbstractManager.drop_all | validation | def drop_all(self, check_first: bool = True):
"""Drop all tables from the database.
:param bool check_first: Defaults to True, only issue DROPs for tables confirmed to be
present in the target database. Defers to :meth:`sqlalchemy.sql.schema.MetaData.drop_all`
"""
self._metadata.drop_all(self.engine, checkfirst=check_first)
self._store_drop() | python | {
"resource": ""
} |
q260959 | label | validation | def label(labels=[], language='any', sortLabel=False):
'''
Provide a label for a list of labels.
The items in the list of labels are assumed to be either instances of
:class:`Label`, or dicts with at least the key `label` in them. These will
be passed to the :func:`dict_to_label` function.
This method tries to find a label by looking if there's
a pref label for the specified language. If there's no pref label,
it looks for an alt label. It disregards hidden labels.
While matching languages, preference will be given to exact matches. But,
if no exact match is present, an inexact match will be attempted. This might
be because a label in language `nl-BE` is being requested, but only `nl` or
even `nl-NL` is present. Similarly, when requesting `nl`, a label with
language `nl-NL` or even `nl-Latn-NL` will also be considered,
providing no label is present that has an exact match with the
requested language.
If language 'any' was specified, all labels will be considered,
regardless of language.
To find a label without a specified language, pass `None` as language.
If a language or None was specified, and no label could be found, this
method will automatically try to find a label in some other language.
Finally, if no label could be found, None is returned.
:param string language: The preferred language to receive the label in. This
should be a valid IANA language tag.
:param boolean sortLabel: Should sortLabels be considered or not? If True,
sortLabels will be preferred over prefLabels. Bear in mind that these
are still language dependent. So, it's possible to have a different
sortLabel per language.
:rtype: A :class:`Label` or `None` if no label could be found.
'''
if not labels:
return None
if not language:
language = 'und'
labels = [dict_to_label(l) for l in labels]
l = False
if sortLabel:
l = find_best_label_for_type(labels, language, 'sortLabel')
if not l:
l = find_best_label_for_type(labels, language, 'prefLabel')
if not l:
l = find_best_label_for_type(labels, language, 'altLabel')
if l:
return l
else:
return label(labels, 'any', sortLabel) if language != 'any' else None | python | {
"resource": ""
} |
q260960 | find_best_label_for_type | validation | def find_best_label_for_type(labels, language, labeltype):
'''
Find the best label for a certain labeltype.
:param list labels: A list of :class:`Label`.
:param str language: An IANA language string, eg. `nl` or `nl-BE`.
:param str labeltype: Type of label to look for, eg. `prefLabel`.
'''
typelabels = [l for l in labels if l.type == labeltype]
if not typelabels:
return False
if language == 'any':
return typelabels[0]
exact = filter_labels_by_language(typelabels, language)
if exact:
return exact[0]
inexact = filter_labels_by_language(typelabels, language, True)
if inexact:
return inexact[0]
return False | python | {
"resource": ""
} |
q260961 | filter_labels_by_language | validation | def filter_labels_by_language(labels, language, broader=False):
'''
Filter a list of labels, leaving only labels of a certain language.
:param list labels: A list of :class:`Label`.
:param str language: An IANA language string, eg. `nl` or `nl-BE`.
:param boolean broader: When true, will also match `nl-BE` when filtering
on `nl`. When false, only exact matches are considered.
'''
if language == 'any':
return labels
if broader:
language = tags.tag(language).language.format
return [l for l in labels if tags.tag(l.language).language.format == language]
else:
language = tags.tag(language).format
return [l for l in labels if tags.tag(l.language).format == language] | python | {
"resource": ""
} |
q260962 | ConceptScheme._sortkey | validation | def _sortkey(self, key='uri', language='any'):
'''
Provide a single sortkey for this conceptscheme.
:param string key: Either `uri`, `label` or `sortlabel`.
:param string language: The preferred language to receive the label in
if key is `label` or `sortlabel`. This should be a valid IANA language tag.
:rtype: :class:`str`
'''
if key == 'uri':
return self.uri
else:
l = label(self.labels, language, key == 'sortlabel')
return l.label.lower() if l else '' | python | {
"resource": ""
} |
q260963 | _iterate_managers | validation | def _iterate_managers(connection, skip):
"""Iterate over instantiated managers."""
for idx, name, manager_cls in _iterate_manage_classes(skip):
if name in skip:
continue
try:
manager = manager_cls(connection=connection)
except TypeError as e:
click.secho(f'Could not instantiate {name}: {e}', fg='red')
else:
yield idx, name, manager | python | {
"resource": ""
} |
q260964 | drop | validation | def drop(connection, skip):
"""Drop all."""
for idx, name, manager in _iterate_managers(connection, skip):
click.secho(f'dropping {name}', fg='cyan', bold=True)
manager.drop_all() | python | {
"resource": ""
} |
q260965 | clear | validation | def clear(skip):
"""Clear all caches."""
for name in sorted(MODULES):
if name in skip:
continue
click.secho(f'clearing cache for {name}', fg='cyan', bold=True)
clear_cache(name) | python | {
"resource": ""
} |
q260966 | sheet | validation | def sheet(connection, skip, file: TextIO):
"""Generate a summary sheet."""
from tabulate import tabulate
header = ['', 'Name', 'Description', 'Terms', 'Relations']
rows = []
for i, (idx, name, manager) in enumerate(_iterate_managers(connection, skip), start=1):
try:
if not manager.is_populated():
continue
except AttributeError:
click.secho(f'{name} does not implement is_populated', fg='red')
continue
terms, relations = None, None
if isinstance(manager, BELNamespaceManagerMixin):
terms = manager._count_model(manager.namespace_model)
if isinstance(manager, BELManagerMixin):
try:
relations = manager.count_relations()
except TypeError as e:
relations = str(e)
rows.append((i, name, manager.__doc__.split('\n')[0].strip().strip('.'), terms, relations))
print(tabulate(
rows,
headers=header,
# tablefmt="fancy_grid",
)) | python | {
"resource": ""
} |
q260967 | web | validation | def web(connection, host, port):
"""Run a combine web interface."""
from bio2bel.web.application import create_application
app = create_application(connection=connection)
app.run(host=host, port=port) | python | {
"resource": ""
} |
q260968 | actions | validation | def actions(connection):
"""List all actions."""
session = _make_session(connection=connection)
for action in Action.ls(session=session):
click.echo(f'{action.created} {action.action} {action.resource}') | python | {
"resource": ""
} |
q260969 | BELManagerMixin.count_relations | validation | def count_relations(self) -> int:
"""Count the number of BEL relations generated."""
if self.edge_model is ...:
raise Bio2BELMissingEdgeModelError('edge_edge model is undefined/count_bel_relations is not overridden')
elif isinstance(self.edge_model, list):
return sum(self._count_model(m) for m in self.edge_model)
else:
return self._count_model(self.edge_model) | python | {
"resource": ""
} |
q260970 | BELManagerMixin.to_indra_statements | validation | def to_indra_statements(self, *args, **kwargs):
"""Dump as a list of INDRA statements.
:rtype: List[indra.Statement]
"""
graph = self.to_bel(*args, **kwargs)
return to_indra_statements(graph) | python | {
"resource": ""
} |
q260971 | _convert_coordinatelist | validation | def _convert_coordinatelist(input_obj):
"""convert from 'list' or 'tuple' object to pgmagick.CoordinateList.
:type input_obj: list or tuple
"""
cdl = pgmagick.CoordinateList()
for obj in input_obj:
cdl.append(pgmagick.Coordinate(obj[0], obj[1]))
return cdl | python | {
"resource": ""
} |
q260972 | _convert_vpathlist | validation | def _convert_vpathlist(input_obj):
"""convert from 'list' or 'tuple' object to pgmagick.VPathList.
:type input_obj: list or tuple
"""
vpl = pgmagick.VPathList()
for obj in input_obj:
# FIXME
obj = pgmagick.PathMovetoAbs(pgmagick.Coordinate(obj[0], obj[1]))
vpl.append(obj)
return vpl | python | {
"resource": ""
} |
q260973 | Image.get_exif_info | validation | def get_exif_info(self):
"""return exif-tag dict
"""
_dict = {}
for tag in _EXIF_TAGS:
ret = self.img.attribute("EXIF:%s" % tag)
if ret and ret != 'unknown':
_dict[tag] = ret
return _dict | python | {
"resource": ""
} |
q260974 | Draw.bezier | validation | def bezier(self, points):
"""Draw a Bezier-curve.
:param points: ex.) ((5, 5), (6, 6), (7, 7))
:type points: list
"""
coordinates = pgmagick.CoordinateList()
for point in points:
x, y = float(point[0]), float(point[1])
coordinates.append(pgmagick.Coordinate(x, y))
self.drawer.append(pgmagick.DrawableBezier(coordinates)) | python | {
"resource": ""
} |
q260975 | Draw.scaling | validation | def scaling(self, x, y):
"""Scaling Draw Object
:param x: 0.0 ~ 1.0
:param y: 0.0 ~ 1.0
"""
self.drawer.append(pgmagick.DrawableScaling(float(x), float(y))) | python | {
"resource": ""
} |
q260976 | Draw.stroke_linecap | validation | def stroke_linecap(self, linecap):
"""set to stroke linecap.
:param linecap: 'undefined', 'butt', 'round', 'square'
:type linecap: str
"""
linecap = getattr(pgmagick.LineCap, "%sCap" % linecap.title())
linecap = pgmagick.DrawableStrokeLineCap(linecap)
self.drawer.append(linecap) | python | {
"resource": ""
} |
q260977 | Draw.stroke_linejoin | validation | def stroke_linejoin(self, linejoin):
"""set to stroke linejoin.
:param linejoin: 'undefined', 'miter', 'round', 'bevel'
:type linejoin: str
"""
linejoin = getattr(pgmagick.LineJoin, "%sJoin" % linejoin.title())
linejoin = pgmagick.DrawableStrokeLineJoin(linejoin)
self.drawer.append(linejoin) | python | {
"resource": ""
} |
q260978 | version | validation | def version():
"""Return version string."""
with io.open('pgmagick/_version.py') as input_file:
for line in input_file:
if line.startswith('__version__'):
return ast.parse(line).body[0].value.s | python | {
"resource": ""
} |
q260979 | delete_license_request | validation | def delete_license_request(request):
"""Submission to remove a license acceptance request."""
uuid_ = request.matchdict['uuid']
posted_uids = [x['uid'] for x in request.json.get('licensors', [])]
with db_connect() as db_conn:
with db_conn.cursor() as cursor:
remove_license_requests(cursor, uuid_, posted_uids)
resp = request.response
resp.status_int = 200
return resp | python | {
"resource": ""
} |
q260980 | delete_roles_request | validation | def delete_roles_request(request):
"""Submission to remove a role acceptance request."""
uuid_ = request.matchdict['uuid']
posted_roles = request.json
with db_connect() as db_conn:
with db_conn.cursor() as cursor:
remove_role_requests(cursor, uuid_, posted_roles)
resp = request.response
resp.status_int = 200
return resp | python | {
"resource": ""
} |
q260981 | delete_acl_request | validation | def delete_acl_request(request):
"""Submission to remove an ACL."""
uuid_ = request.matchdict['uuid']
posted = request.json
permissions = [(x['uid'], x['permission'],) for x in posted]
with db_connect() as db_conn:
with db_conn.cursor() as cursor:
remove_acl(cursor, uuid_, permissions)
resp = request.response
resp.status_int = 200
return resp | python | {
"resource": ""
} |
q260982 | processor | validation | def processor(): # pragma: no cover
"""Churns over PostgreSQL notifications on configured channels.
This requires the application be setup and the registry be available.
This function uses the database connection string and a list of
pre configured channels.
"""
registry = get_current_registry()
settings = registry.settings
connection_string = settings[CONNECTION_STRING]
channels = _get_channels(settings)
# Code adapted from
# http://initd.org/psycopg/docs/advanced.html#asynchronous-notifications
with psycopg2.connect(connection_string) as conn:
conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
with conn.cursor() as cursor:
for channel in channels:
cursor.execute('LISTEN {}'.format(channel))
logger.debug('Waiting for notifications on channel "{}"'
.format(channel))
registry.notify(ChannelProcessingStartUpEvent())
rlist = [conn] # wait until ready for reading
wlist = [] # wait until ready for writing
xlist = [] # wait for an "exceptional condition"
timeout = 5
while True:
if select.select(rlist, wlist, xlist, timeout) != ([], [], []):
conn.poll()
while conn.notifies:
notif = conn.notifies.pop(0)
logger.debug('Got NOTIFY: pid={} channel={} payload={}'
.format(notif.pid, notif.channel,
notif.payload))
event = create_pg_notify_event(notif)
try:
registry.notify(event)
except Exception:
logger.exception('Logging an uncaught exception') | python | {
"resource": ""
} |
q260983 | lookup_api_key_info | validation | def lookup_api_key_info():
"""Given a dbapi cursor, lookup all the api keys and their information."""
info = {}
with db_connect() as conn:
with conn.cursor() as cursor:
cursor.execute(ALL_KEY_INFO_SQL_STMT)
for row in cursor.fetchall():
id, key, name, groups = row
user_id = "api_key:{}".format(id)
info[key] = dict(id=id, user_id=user_id,
name=name, groups=groups)
return info | python | {
"resource": ""
} |
q260984 | includeme | validation | def includeme(config):
"""Configuration include fuction for this module"""
api_key_authn_policy = APIKeyAuthenticationPolicy()
config.include('openstax_accounts')
openstax_authn_policy = config.registry.getUtility(
IOpenstaxAccountsAuthenticationPolicy)
# Set up api & user authentication policies.
policies = [api_key_authn_policy, openstax_authn_policy]
authn_policy = MultiAuthenticationPolicy(policies)
config.set_authentication_policy(authn_policy)
# Set up the authorization policy.
authz_policy = ACLAuthorizationPolicy()
config.set_authorization_policy(authz_policy) | python | {
"resource": ""
} |
q260985 | expandvars_dict | validation | def expandvars_dict(settings):
"""Expands all environment variables in a settings dictionary."""
return dict(
(key, os.path.expandvars(value))
for key, value in settings.iteritems()
) | python | {
"resource": ""
} |
q260986 | task | validation | def task(**kwargs):
"""A function task decorator used in place of ``@celery_app.task``."""
def wrapper(wrapped):
def callback(scanner, name, obj):
celery_app = scanner.config.registry.celery_app
celery_app.task(**kwargs)(obj)
venusian.attach(wrapped, callback)
return wrapped
return wrapper | python | {
"resource": ""
} |
q260987 | _make_celery_app | validation | def _make_celery_app(config):
"""This exposes the celery app. The app is actually created as part
of the configuration. However, this does make the celery app functional
as a stand-alone celery application.
This puts the pyramid configuration object on the celery app to be
used for making the registry available to tasks running inside the
celery worker process pool. See ``CustomTask.__call__``.
"""
# Tack the pyramid config on the celery app for later use.
config.registry.celery_app.conf['pyramid_config'] = config
return config.registry.celery_app | python | {
"resource": ""
} |
q260988 | post_publication_processing | validation | def post_publication_processing(event, cursor):
"""Process post-publication events coming out of the database."""
module_ident, ident_hash = event.module_ident, event.ident_hash
celery_app = get_current_registry().celery_app
# Check baking is not already queued.
cursor.execute('SELECT result_id::text '
'FROM document_baking_result_associations '
'WHERE module_ident = %s', (module_ident,))
for result in cursor.fetchall():
state = celery_app.AsyncResult(result[0]).state
if state in ('QUEUED', 'STARTED', 'RETRY'):
logger.debug('Already queued module_ident={} ident_hash={}'.format(
module_ident, ident_hash))
return
logger.debug('Queued for processing module_ident={} ident_hash={}'.format(
module_ident, ident_hash))
recipe_ids = _get_recipe_ids(module_ident, cursor)
update_module_state(cursor, module_ident, 'processing', recipe_ids[0])
# Commit the state change before preceding.
cursor.connection.commit()
# Start of task
# FIXME Looking up the task isn't the most clear usage here.
task_name = 'cnxpublishing.subscribers.baking_processor'
baking_processor = celery_app.tasks[task_name]
result = baking_processor.delay(module_ident, ident_hash)
baking_processor.backend.store_result(result.id, None, 'QUEUED')
# Save the mapping between a celery task and this event.
track_baking_proc_state(result, module_ident, cursor) | python | {
"resource": ""
} |
q260989 | parse_archive_uri | validation | def parse_archive_uri(uri):
"""Given an archive URI, parse to a split ident-hash."""
parsed = urlparse(uri)
path = parsed.path.rstrip('/').split('/')
ident_hash = path[-1]
ident_hash = unquote(ident_hash)
return ident_hash | python | {
"resource": ""
} |
q260990 | declare_api_routes | validation | def declare_api_routes(config):
"""Declaration of routing"""
add_route = config.add_route
add_route('get-content', '/contents/{ident_hash}')
add_route('get-resource', '/resources/{hash}')
# User actions API
add_route('license-request', '/contents/{uuid}/licensors')
add_route('roles-request', '/contents/{uuid}/roles')
add_route('acl-request', '/contents/{uuid}/permissions')
# Publishing API
add_route('publications', '/publications')
add_route('get-publication', '/publications/{id}')
add_route('publication-license-acceptance',
'/publications/{id}/license-acceptances/{uid}')
add_route('publication-role-acceptance',
'/publications/{id}/role-acceptances/{uid}')
# TODO (8-May-12017) Remove because the term collate is being phased out.
add_route('collate-content', '/contents/{ident_hash}/collate-content')
add_route('bake-content', '/contents/{ident_hash}/baked')
# Moderation routes
add_route('moderation', '/moderations')
add_route('moderate', '/moderations/{id}')
add_route('moderation-rss', '/feeds/moderations.rss')
# API Key routes
add_route('api-keys', '/api-keys')
add_route('api-key', '/api-keys/{id}') | python | {
"resource": ""
} |
q260991 | declare_browsable_routes | validation | def declare_browsable_routes(config):
"""Declaration of routes that can be browsed by users."""
# This makes our routes slashed, which is good browser behavior.
config.add_notfound_view(default_exceptionresponse_view,
append_slash=True)
add_route = config.add_route
add_route('admin-index', '/a/')
add_route('admin-moderation', '/a/moderation/')
add_route('admin-api-keys', '/a/api-keys/')
add_route('admin-add-site-messages', '/a/site-messages/',
request_method='GET')
add_route('admin-add-site-messages-POST', '/a/site-messages/',
request_method='POST')
add_route('admin-delete-site-messages', '/a/site-messages/',
request_method='DELETE')
add_route('admin-edit-site-message', '/a/site-messages/{id}/',
request_method='GET')
add_route('admin-edit-site-message-POST', '/a/site-messages/{id}/',
request_method='POST')
add_route('admin-content-status', '/a/content-status/')
add_route('admin-content-status-single', '/a/content-status/{uuid}')
add_route('admin-print-style', '/a/print-style/')
add_route('admin-print-style-single', '/a/print-style/{style}') | python | {
"resource": ""
} |
q260992 | includeme | validation | def includeme(config):
"""Declare all routes."""
config.include('pyramid_jinja2')
config.add_jinja2_renderer('.html')
config.add_jinja2_renderer('.rss')
config.add_static_view(name='/a/static', path="cnxpublishing:static/")
# Commit the configuration otherwise the jija2_env won't have
# a `globals` assignment.
config.commit()
# Place a few globals in the template environment.
from cnxdb.ident_hash import join_ident_hash
for ext in ('.html', '.rss',):
jinja2_env = config.get_jinja2_environment(ext)
jinja2_env.globals.update(
join_ident_hash=join_ident_hash,
)
declare_api_routes(config)
declare_browsable_routes(config) | python | {
"resource": ""
} |
q260993 | _formatter_callback_factory | validation | def _formatter_callback_factory(): # pragma: no cover
"""Returns a list of includes to be given to `cnxepub.collation.collate`.
"""
includes = []
exercise_url_template = '{baseUrl}/api/exercises?q={field}:"{{itemCode}}"'
settings = get_current_registry().settings
exercise_base_url = settings.get('embeddables.exercise.base_url', None)
exercise_matches = [match.split(',', 1) for match in aslist(
settings.get('embeddables.exercise.match', ''), flatten=False)]
exercise_token = settings.get('embeddables.exercise.token', None)
mathml_url = settings.get('mathmlcloud.url', None)
memcache_servers = settings.get('memcache_servers')
if memcache_servers:
memcache_servers = memcache_servers.split()
else:
memcache_servers = None
if exercise_base_url and exercise_matches:
mc_client = None
if memcache_servers:
mc_client = memcache.Client(memcache_servers, debug=0)
for (exercise_match, exercise_field) in exercise_matches:
template = exercise_url_template.format(
baseUrl=exercise_base_url, field=exercise_field)
includes.append(exercise_callback_factory(exercise_match,
template,
mc_client,
exercise_token,
mathml_url))
return includes | python | {
"resource": ""
} |
q260994 | bake | validation | def bake(binder, recipe_id, publisher, message, cursor):
"""Given a `Binder` as `binder`, bake the contents and
persist those changes alongside the published content.
"""
recipe = _get_recipe(recipe_id, cursor)
includes = _formatter_callback_factory()
binder = collate_models(binder, ruleset=recipe, includes=includes)
def flatten_filter(model):
return (isinstance(model, cnxepub.CompositeDocument) or
(isinstance(model, cnxepub.Binder) and
model.metadata.get('type') == 'composite-chapter'))
def only_documents_filter(model):
return isinstance(model, cnxepub.Document) \
and not isinstance(model, cnxepub.CompositeDocument)
for doc in cnxepub.flatten_to(binder, flatten_filter):
publish_composite_model(cursor, doc, binder, publisher, message)
for doc in cnxepub.flatten_to(binder, only_documents_filter):
publish_collated_document(cursor, doc, binder)
tree = cnxepub.model_to_tree(binder)
publish_collated_tree(cursor, tree)
return [] | python | {
"resource": ""
} |
q260995 | db_connect | validation | def db_connect(connection_string=None, **kwargs):
"""Function to supply a database connection object."""
if connection_string is None:
connection_string = get_current_registry().settings[CONNECTION_STRING]
db_conn = psycopg2.connect(connection_string, **kwargs)
try:
with db_conn:
yield db_conn
finally:
db_conn.close() | python | {
"resource": ""
} |
q260996 | with_db_cursor | validation | def with_db_cursor(func):
"""Decorator that supplies a cursor to the function.
This passes in a psycopg2 Cursor as the argument 'cursor'.
It also accepts a cursor if one is given.
"""
@functools.wraps(func)
def wrapped(*args, **kwargs):
if 'cursor' in kwargs or func.func_code.co_argcount == len(args):
return func(*args, **kwargs)
with db_connect() as db_connection:
with db_connection.cursor() as cursor:
kwargs['cursor'] = cursor
return func(*args, **kwargs)
return wrapped | python | {
"resource": ""
} |
q260997 | _dissect_roles | validation | def _dissect_roles(metadata):
"""Given a model's ``metadata``, iterate over the roles.
Return values are the role identifier and role type as a tuple.
"""
for role_key in cnxepub.ATTRIBUTED_ROLE_KEYS:
for user in metadata.get(role_key, []):
if user['type'] != 'cnx-id':
raise ValueError("Archive only accepts Connexions users.")
uid = parse_user_uri(user['id'])
yield uid, role_key
raise StopIteration() | python | {
"resource": ""
} |
q260998 | obtain_licenses | validation | def obtain_licenses():
"""Obtain the licenses in a dictionary form, keyed by url."""
with db_connect() as db_conn:
with db_conn.cursor() as cursor:
cursor.execute("""\
SELECT combined_row.url, row_to_json(combined_row) FROM (
SELECT "code", "version", "name", "url", "is_valid_for_publication"
FROM licenses) AS combined_row""")
licenses = {r[0]: r[1] for r in cursor.fetchall()}
return licenses | python | {
"resource": ""
} |
q260999 | _validate_license | validation | def _validate_license(model):
"""Given the model, check the license is one valid for publication."""
license_mapping = obtain_licenses()
try:
license_url = model.metadata['license_url']
except KeyError:
raise exceptions.MissingRequiredMetadata('license_url')
try:
license = license_mapping[license_url]
except KeyError:
raise exceptions.InvalidLicense(license_url)
if not license['is_valid_for_publication']:
raise exceptions.InvalidLicense(license_url) | python | {
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.