text stringlengths 81 112k |
|---|
Get list of sets.
def sets(self):
    """Return the cached list of sets, or ``None`` when no cache is configured."""
    cache = self.cache
    if not cache:
        return None
    return cache.get(self.app.config['OAISERVER_CACHE_KEY'])
Set list of sets.
def sets(self, values):
    """Persist *values* as the sets list when a cache backend is configured."""
    cache = self.cache
    if not cache:
        return
    cache.set(self.app.config['OAISERVER_CACHE_KEY'], values)
Register signals.
def register_signals(self):
    """Connect record signals so the OAI information stays up to date."""
    from .receivers import OAIServerUpdater
    # Keep a reference on self so the handler can be disconnected later.
    updater = OAIServerUpdater()
    self.update_function = updater
    for signal in (records_signals.before_record_insert,
                   records_signals.before_record_update):
        signal.connect(updater, weak=False)
    if self.app.config['OAISERVER_REGISTER_SET_SIGNALS']:
        self.register_signals_oaiset()
Register OAISet signals to update records.
def register_signals_oaiset(self):
    """Attach SQLAlchemy listeners that update records when OAISets change."""
    from .models import OAISet
    from .receivers import after_insert_oai_set, \
        after_update_oai_set, after_delete_oai_set
    handlers = (('after_insert', after_insert_oai_set),
                ('after_update', after_update_oai_set),
                ('after_delete', after_delete_oai_set))
    for event_name, handler in handlers:
        listen(OAISet, event_name, handler)
Unregister signals.
def unregister_signals(self):
    """Disconnect the record signals registered by ``register_signals``."""
    # Nothing to do unless register_signals() ran first.
    if hasattr(self, 'update_function'):
        for signal in (records_signals.before_record_insert,
                       records_signals.before_record_update):
            signal.disconnect(self.update_function)
        self.unregister_signals_oaiset()
Unregister signals oaiset.
def unregister_signals_oaiset(self):
    """Remove the OAISet SQLAlchemy listeners, if they are registered."""
    from .models import OAISet
    from .receivers import after_insert_oai_set, \
        after_update_oai_set, after_delete_oai_set
    # The insert listener's presence implies all three were registered together.
    if contains(OAISet, 'after_insert', after_insert_oai_set):
        for event_name, handler in (('after_insert', after_insert_oai_set),
                                    ('after_update', after_update_oai_set),
                                    ('after_delete', after_delete_oai_set)):
            remove(OAISet, event_name, handler)
Initialize configuration.
:param app: An instance of :class:`flask.Flask`.
def init_config(self, app):
    """Initialize configuration.

    :param app: An instance of :class:`flask.Flask`.
    """
    base_template = app.config.get('BASE_TEMPLATE',
                                   'invenio_oaiserver/base.html')
    app.config.setdefault('OAISERVER_BASE_TEMPLATE', base_template)
    repository_name = app.config.get('THEME_SITENAME', 'Invenio-OAIServer')
    app.config.setdefault('OAISERVER_REPOSITORY_NAME', repository_name)
    # Fall back to a hostname-derived prefix, but warn the user about it.
    if 'OAISERVER_ID_PREFIX' not in app.config:
        import socket
        import warnings
        app.config.setdefault(
            'OAISERVER_ID_PREFIX',
            'oai:{0}:recid/'.format(socket.gethostname()))
        warnings.warn(
            """Please specify the OAISERVER_ID_PREFIX configuration."""
            """default value is: {0}""".format(
                app.config.get('OAISERVER_ID_PREFIX')))
    # Copy module-level defaults for every OAISERVER_* option.
    for key in dir(config):
        if key.startswith('OAISERVER_'):
            app.config.setdefault(key, getattr(config, key))
Extracts the values of a set of parameters, recursing into nested dictionaries.
def extract_params(params):
    """Extract the values of a set of parameters, recursing into nested
    dictionaries and lists."""
    if isinstance(params, dict):
        nested = params.values()
    elif isinstance(params, list):
        nested = params
    else:
        # Leaf value: wrap it as a single-element list.
        return [params]
    collected = []
    for item in nested:
        collected.extend(extract_params(item))
    return collected
Returns the unhashed signature string (secret + sorted list of param values) for an API call.
@param params: dictionary values to generate signature string
@param secret: secret string
def get_signature_string(params, secret):
    """Return the unhashed signature (secret + sorted param values) as bytes.

    @param params: dictionary values to generate signature string
    @param secret: secret string
    """
    values = sorted(str(value) for value in extract_params(params))
    return (secret + ''.join(values)).encode('utf-8')
Remotely send an email template to a single email address.
http://docs.sailthru.com/api/send
@param template: template string
@param email: Email value
@param _vars: a key/value hash of the replacement vars to use in the send. Each var may be referenced as {varname} within the template itself
@param options: optional dictionary to include replyto and/or test keys
@param limit: optional dictionary to name, time, and handle conflicts of limits
@param schedule_time: do not send the email immediately, but at some point in the future. Any date recognized by PHP's strtotime function is valid, but be sure to specify timezone or use a UTC time to avoid confusion
def send(self, template, email, _vars=None, options=None, schedule_time=None, limit=None):
    """Remotely send an email template to a single email address.

    http://docs.sailthru.com/api/send
    @param template: template string
    @param email: Email value
    @param _vars: key/value hash of replacement vars; each var may be
        referenced as {varname} within the template itself
    @param options: optional dictionary to include replyto and/or test keys
    @param limit: optional dictionary to name, time, and handle conflicts of limits
    @param schedule_time: send at a future time; specify a timezone or use UTC
    """
    data = {
        'template': template,
        'email': email,
        'vars': _vars or {},
        'options': dict(options) if options else {},
    }
    if limit:
        data['limit'] = dict(limit)
    if schedule_time is not None:
        data['schedule_time'] = schedule_time
    return self.api_post('send', data)
Remotely send an email template to multiple email addresses.
http://docs.sailthru.com/api/send
@param template: template string
@param emails: List with email values or comma separated email string
@param _vars: a key/value hash of the replacement vars to use in the send. Each var may be referenced as {varname} within the template itself
@param options: optional dictionary to include replyto and/or test keys
@param schedule_time: do not send the email immediately, but at some point in the future. Any date recognized by PHP's strtotime function is valid, but be sure to specify timezone or use a UTC time to avoid confusion
def multi_send(self, template, emails, _vars=None, evars=None, schedule_time=None, options=None):
    """Remotely send an email template to multiple email addresses.

    http://docs.sailthru.com/api/send
    @param template: template string
    @param emails: list with email values or comma separated email string
    @param _vars: key/value hash of replacement vars; each var may be
        referenced as {varname} within the template itself
    @param evars: per-email replacement vars
    @param options: optional dictionary to include replyto and/or test keys
    @param schedule_time: send at a future time; specify a timezone or use UTC
    """
    recipients = ','.join(emails) if isinstance(emails, list) else emails
    data = {'template': template,
            'email': recipients,
            'vars': dict(_vars or {}),
            'evars': dict(evars or {}),
            'options': dict(options or {})}
    if schedule_time is not None:
        data['schedule_time'] = schedule_time
    return self.api_post('send', data)
DEPRECATED!
Update information about one of your users, including adding and removing the user from lists.
http://docs.sailthru.com/api/email
def set_email(self, email, _vars=None, lists=None, templates=None, verified=0, optout=None, send=None, send_vars=None):
    """DEPRECATED!

    Update information about one of your users, including adding and
    removing the user from lists.
    http://docs.sailthru.com/api/email
    """
    data = {'email': email,
            'vars': dict(_vars or {}),
            'lists': lists or [],
            'templates': templates or [],
            'verified': int(verified)}
    for key, value in (('optout', optout), ('send', send)):
        if value is not None:
            data[key] = value
    if send_vars:
        data['send_vars'] = send_vars
    return self.api_post('email', data)
get user by a given id
http://getstarted.sailthru.com/api/user
def get_user(self, idvalue, options=None):
    """Get a user by the given id.

    http://getstarted.sailthru.com/api/user
    """
    payload = dict(options) if options else {}
    payload['id'] = idvalue
    return self.api_get('user', payload)
save user by a given id
http://getstarted.sailthru.com/api/user
def save_user(self, idvalue, options=None):
    """Save a user by the given id.

    http://getstarted.sailthru.com/api/user
    """
    payload = dict(options) if options else {}
    payload['id'] = idvalue
    return self.api_post('user', payload)
Schedule a mass mail blast
http://docs.sailthru.com/api/blast
@param name: name to give to this new blast
@param list: mailing list name to send to
@param schedule_time: when the blast should send. Dates in the past will be scheduled for immediate delivery. Any English textual datetime format known to PHP's strtotime function is acceptable, such as 2009-03-18 23:57:22 UTC, now (immediate delivery), +3 hours (3 hours from now), or February 14, 9:30 EST. Be sure to specify a timezone if you use an exact time.
@param from_name: name appearing in the "From" of the email
@param from_email: email address to use as the "from" - choose from any of your verified emails
@param subject: subject line of the email
@param content_html: HTML format version of the email
@param content_text: Text format version of the email
@param options: optional parameters dictionary
blast_id
copy_blast
copy_template
replyto
report_email
is_link_tracking
is_google_analytics
is_public
suppress_list
test_vars
email_hour_range
abtest
test_percent
data_feed_url
def schedule_blast(self, name, list, schedule_time, from_name, from_email, subject, content_html, content_text, options=None):
    """Schedule a mass mail blast.

    http://docs.sailthru.com/api/blast
    @param name: name to give to this new blast
    @param list: mailing list name to send to
    @param schedule_time: when the blast should send; past dates schedule
        immediate delivery, and exact times should carry a timezone
    @param from_name: name appearing in the "From" of the email
    @param from_email: verified email address to use as the "from"
    @param subject: subject line of the email
    @param content_html: HTML format version of the email
    @param content_text: Text format version of the email
    @param options: optional parameters dictionary (blast_id, copy_blast,
        copy_template, replyto, report_email, is_link_tracking,
        is_google_analytics, is_public, suppress_list, test_vars,
        email_hour_range, abtest, test_percent, data_feed_url)
    """
    payload = dict(options or {})
    payload.update({'name': name,
                    'list': list,
                    'schedule_time': schedule_time,
                    'from_name': from_name,
                    'from_email': from_email,
                    'subject': subject,
                    'content_html': content_html,
                    'content_text': content_text})
    return self.api_post('blast', payload)
Schedule a mass mail blast from template
http://docs.sailthru.com/api/blast
@param template: template to copy from
@param list_name: list to send to
@param schedule_time
@param options: additional optional params
def schedule_blast_from_template(self, template, list_name, schedule_time, options=None):
    """Schedule a mass mail blast from a template.

    http://docs.sailthru.com/api/blast
    @param template: template to copy from
    @param list_name: list to send to
    @param schedule_time: when the blast should send
    @param options: additional optional params
    """
    payload = dict(options or {})
    payload.update(copy_template=template, list=list_name,
                   schedule_time=schedule_time)
    return self.api_post('blast', payload)
Schedule a mass mail blast from previous blast
http://docs.sailthru.com/api/blast
@param blast_id: blast_id to copy from
@param schedule_time
@param options: additional optional params
def schedule_blast_from_blast(self, blast_id, schedule_time, options=None):
    """Schedule a mass mail blast from a previous blast.

    http://docs.sailthru.com/api/blast
    @param blast_id: blast_id to copy from
    @param schedule_time: when the blast should send
    @param options: additional optional params
    """
    payload = dict(options or {})
    payload.update(copy_blast=blast_id, schedule_time=schedule_time)
    return self.api_post('blast', payload)
Get detailed metadata information about a list.
def get_list(self, list_name, options=None):
    """Get detailed metadata information about a list."""
    payload = {'list': list_name}
    payload.update(options or {})
    return self.api_get('list', payload)
Upload a list. The list import job is queued and will happen shortly after the API request.
http://docs.sailthru.com/api/list
@param list: list name
@param emails: List of email values or comma separated string
def save_list(self, list_name, emails):
    """Upload a list; the import job is queued and runs shortly after the call.

    http://docs.sailthru.com/api/list
    @param list_name: list name
    @param emails: list of email values or comma separated string
    """
    emails_value = ','.join(emails) if isinstance(emails, list) else emails
    return self.api_post('list', {'list': list_name, 'emails': emails_value})
Fetch email contacts from a user's address book on one of the major email websites. Currently supports AOL, Gmail, Hotmail, and Yahoo! Mail.
def import_contacts(self, email, password, include_name=False):
    """Fetch email contacts from a user's address book on one of the major
    email websites (AOL, Gmail, Hotmail, and Yahoo! Mail)."""
    payload = {'email': email, 'password': password}
    if include_name:
        payload['names'] = 1
    return self.api_post('contacts', payload)
Push a new piece of content to Sailthru.
Expected names for the `images` argument's map are "full" and "thumb"
Expected format for `location` should be [longitude,latitude]
@param title: title string for the content
@param url: URL string for the content
@param images: map of image names
@param date: date string
@param expire_date: date string for when the content expires
@param description: description for the content
@param location: location of the content
@param price: price for the content
@param tags: list or comma separated string values
@param author: author for the content
@param site_name: site name for the content
@param spider: truthy value to force respidering content
@param vars: replaceable vars dictionary
def push_content(self, title, url,
                 images=None, date=None, expire_date=None,
                 description=None, location=None, price=None,
                 tags=None,
                 author=None, site_name=None,
                 spider=None, vars=None):
    """Push a new piece of content to Sailthru.

    Expected names for the `images` argument's map are "full" and "thumb".
    Expected format for `location` should be [longitude,latitude].
    @param title: title string for the content
    @param url: URL string for the content
    @param images: map of image names
    @param date: date string
    @param expire_date: date string for when the content expires
    @param description: description for the content
    @param location: location of the content
    @param price: price for the content
    @param tags: list or comma separated string values
    @param author: author for the content
    @param site_name: site name for the content
    @param spider: truthy value to force respidering content
    @param vars: replaceable vars dictionary
    """
    vars = vars or {}
    data = {'title': title, 'url': url}
    # BUG FIX: 'expire_date' and 'location' previously sent the value of
    # `date`, and 'site_name' sent `images` (copy-paste errors); each key
    # now carries its own argument.
    optional_fields = (('images', images),
                       ('date', date),
                       ('expire_date', expire_date),
                       ('location', location),
                       ('price', price),
                       ('description', description),
                       ('site_name', site_name),
                       ('author', author))
    for key, value in optional_fields:
        if value is not None:
            data[key] = value
    if spider:
        data['spider'] = 1
    if tags is not None:
        data['tags'] = ",".join(tags) if isinstance(tags, list) else tags
    if vars:
        data['vars'] = vars.copy()
    return self.api_post('content', data)
Add a new alert to a user. You can add either a realtime or a summary alert (daily/weekly).
http://docs.sailthru.com/api/alert
Usage:
email = 'praj@sailthru.com'
type = 'weekly'
template = 'default'
when = '+5 hours'
alert_options = {'match': {}, 'min': {}, 'max': {}, 'tags': []}
alert_options['match']['type'] = 'shoes'
alert_options['min']['price'] = 20000 #cents
alert_options['tags'] = ['red', 'blue', 'green']
response = client.save_alert(email, type, template, when, alert_options)
@param email: Email value
@param type: daily|weekly|realtime
@param template: template name
@param when: date string required for summary alert (daily/weekly)
@param options: dictionary value for adding tags, max price, min price, match type
def save_alert(self, email, type, template, when=None, options=None):
    """Add a new alert to a user: realtime, or a daily/weekly summary.

    http://docs.sailthru.com/api/alert
    Usage:
        alert_options = {'match': {'type': 'shoes'},
                         'min': {'price': 20000},  # cents
                         'tags': ['red', 'blue', 'green']}
        response = client.save_alert('praj@sailthru.com', 'weekly',
                                     'default', '+5 hours', alert_options)
    @param email: Email value
    @param type: daily|weekly|realtime
    @param template: template name
    @param when: date string required for summary alert (daily/weekly)
    @param options: dictionary value for adding tags, max price, min price,
        match type
    """
    payload = dict(options or {})
    payload.update(email=email, type=type, template=template)
    # 'when' only applies to summary alerts.
    if type in ('weekly', 'daily'):
        payload['when'] = when
    return self.api_post('alert', payload)
delete user alert
def delete_alert(self, email, alert_id):
    """Delete a user alert."""
    return self.api_delete('alert', {'email': email, 'alert_id': alert_id})
Record that a user has made a purchase, or has added items to their purchase total.
http://docs.sailthru.com/api/purchase
@param email: Email string
@param items: list of item dictionary with keys: id, title, price, qty, and url
@param message_id: message_id string
@param extid: external ID to track purchases
@param options: other options that can be set as per the API documentation
def purchase(self, email, items=None, incomplete=None, message_id=None, options=None, extid=None):
    """Record that a user has made a purchase, or added items to their total.

    http://docs.sailthru.com/api/purchase
    @param email: Email string
    @param items: list of item dictionaries with keys: id, title, price, qty, url
    @param message_id: message_id string
    @param extid: external ID to track purchases
    @param options: other options that can be set per the API documentation
    """
    payload = dict(options or {})
    payload['email'] = email
    # NOTE(review): historical default for missing items is {} (not []);
    # preserved here.
    payload['items'] = items or {}
    for key, value in (('incomplete', incomplete),
                       ('message_id', message_id),
                       ('extid', extid)):
        if value is not None:
            payload[key] = value
    return self.api_post('purchase', payload)
Retrieve information about a purchase using the system's unique ID or a client's ID
@param id_: a string that represents a unique_id or an extid.
@param key: a string that is either 'sid' or 'extid'.
def get_purchase(self, purchase_id, purchase_key='sid'):
    """Retrieve a purchase by the system's unique ID or a client's ID.

    @param purchase_id: a string that represents a unique_id or an extid.
    @param purchase_key: a string that is either 'sid' or 'extid'.
    """
    return self.api_get('purchase', {'purchase_id': purchase_id,
                                     'purchase_key': purchase_key})
Retrieve information about your subscriber counts on a particular list, on a particular day.
http://docs.sailthru.com/api/stat
def stats_list(self, list=None, date=None, headers=None):
    """Retrieve subscriber counts on a particular list, on a particular day.

    http://docs.sailthru.com/api/stat
    """
    payload = {'stat': 'list'}
    for key, value in (('list', list), ('date', date)):
        if value is not None:
            payload[key] = value
    return self._stats(payload, headers)
Retrieve information about a particular blast or aggregated information from all of blasts over a specified date range.
http://docs.sailthru.com/api/stat
def stats_blast(self, blast_id=None, start_date=None, end_date=None, options=None):
    """Retrieve stats for one blast, or aggregated stats over a date range.

    http://docs.sailthru.com/api/stat
    """
    payload = dict(options or {})
    for key, value in (('blast_id', blast_id),
                       ('start_date', start_date),
                       ('end_date', end_date)):
        if value is not None:
            payload[key] = value
    payload['stat'] = 'blast'
    return self._stats(payload)
Retrieve information about a particular transactional or aggregated information
from transactionals from that template over a specified date range.
http://docs.sailthru.com/api/stat
def stats_send(self, template, start_date, end_date, options=None):
    """Retrieve stats for a template's transactionals over a date range.

    http://docs.sailthru.com/api/stat
    """
    # BUG FIX: `options` used to be copied into `data` and then immediately
    # discarded by a dict re-assignment; merge the caller's options properly.
    data = dict(options or {})
    data.update({'template': template,
                 'start_date': start_date,
                 'end_date': end_date,
                 'stat': 'send'})
    return self._stats(data)
Returns true if the incoming request is an authenticated verify post.
def receive_verify_post(self, post_params):
    """Return True if the incoming request is an authenticated verify post."""
    if isinstance(post_params, dict):
        required_params = ['action', 'email', 'send_id', 'sig']
        if not self.check_for_valid_postback_actions(required_params, post_params):
            return False
    else:
        return False
    if post_params['action'] != 'verify':
        return False
    # Authenticate the postback against the shared secret.
    sig = post_params['sig']
    post_params = post_params.copy()
    del post_params['sig']
    if sig != get_signature_hash(post_params, self.secret):
        return False
    # Confirm the referenced send exists and targets the same address.
    send_response = self.get_send(post_params['send_id'])
    try:
        send_body = send_response.get_body()
        send_json = json.loads(send_body)
        # BUG FIX: previously tested 'email' against the raw body *string*,
        # which matches the substring anywhere in the payload; test the
        # parsed JSON object instead.
        if 'email' not in send_json:
            return False
        if send_json['email'] != post_params['email']:
            return False
    except ValueError:
        return False
    return True
Update postbacks
def receive_update_post(self, post_params):
    """Return True for an authenticated update postback."""
    if not isinstance(post_params, dict):
        return False
    if not self.check_for_valid_postback_actions(['action', 'email', 'sig'],
                                                 post_params):
        return False
    if post_params['action'] != 'update':
        return False
    # Verify the signature over the params minus the signature itself.
    params = dict(post_params)
    signature = params.pop('sig')
    return signature == get_signature_hash(params, self.secret)
Hard bounce postbacks
def receive_hardbounce_post(self, post_params):
    """Return True for an authenticated hard-bounce postback."""
    if not isinstance(post_params, dict):
        return False
    if not self.check_for_valid_postback_actions(['action', 'email', 'sig'],
                                                 post_params):
        return False
    if post_params['action'] != 'hardbounce':
        return False
    # Verify the signature over the params minus the signature itself.
    params = dict(post_params)
    signature = params.pop('sig')
    if signature != get_signature_hash(params, self.secret):
        return False
    # Bounces tied to a transactional send must reference a real send.
    if 'send_id' in params:
        send_response = self.get_send(params['send_id'])
        if not send_response.is_ok():
            return False
        send_obj = send_response.get_body()
        if not send_obj or 'email' not in send_obj:
            return False
    # Bounces tied to a blast must reference a real blast.
    if 'blast_id' in params:
        blast_response = self.get_blast(params['blast_id'])
        if not blast_response.is_ok():
            return False
        if not blast_response.get_body():
            return False
    return True
checks if post_params contain required keys
def check_for_valid_postback_actions(self, required_keys, post_params):
    """Return True when every required key is present in post_params."""
    return all(key in post_params for key in required_keys)
Perform an HTTP GET request, using the shared-secret auth hash.
@param action: API action call
@param data: dictionary values
def api_get(self, action, data, headers=None):
    """Perform an HTTP GET request, using the shared-secret auth hash.

    @param action: API action call
    @param data: dictionary values
    @param headers: optional extra HTTP headers
    """
    return self._api_request(action, data, 'GET', headers)
Perform an HTTP POST request, using the shared-secret auth hash.
@param action: API action call
@param data: dictionary values
def api_post(self, action, data, binary_data_param=None):
    """Perform an HTTP POST request, using the shared-secret auth hash.

    @param action: API action call
    @param data: dictionary values
    @param binary_data_param: optional list of keys naming file-upload params
    """
    # With file-upload params, switch to a multipart request.
    if binary_data_param:
        return self.api_post_multipart(action, data, binary_data_param)
    return self._api_request(action, data, 'POST')
Perform an HTTP Multipart POST request, using the shared-secret auth hash.
@param action: API action call
@param data: dictionary values
@param: binary_data_params: array of multipart keys
def api_post_multipart(self, action, data, binary_data_param):
    """Perform an HTTP multipart POST request, using the shared-secret auth hash.

    @param action: API action call
    @param data: dictionary values; entries named in `binary_data_param`
        hold file paths and are uploaded as attachments
    @param binary_data_param: list of multipart (file upload) keys
    """
    binary_data = {}
    data = data.copy()
    file_handles = []
    try:
        for param in binary_data_param:
            if param in data:
                # BUG FIX: open uploads in binary mode; text mode corrupts
                # (or fails to decode) non-text files such as images.
                file_handle = open(data[param], 'rb')
                file_handles.append(file_handle)
                binary_data[param] = file_handle
                del data[param]
        json_payload = self._prepare_json_payload(data)
        return self._http_request(action, json_payload, "POST", binary_data)
    finally:
        for file_handle in file_handles:
            file_handle.close()
Make Request to Sailthru API with given data and api key, format and signature hash
def _api_request(self, action, data, request_type, headers=None):
"""
Make Request to Sailthru API with given data and api key, format and signature hash
"""
if 'file' in data:
file_data = {'file': open(data['file'], 'rb')}
else:
file_data = None
return self._http_request(action, self._prepare_json_payload(data), request_type, file_data, headers) |
Get rate limit information for last API call
:param action: API endpoint
:param method: Http method, GET, POST or DELETE
:return: dict|None
def get_last_rate_limit_info(self, action, method):
    """Get rate limit information for the last API call.

    :param action: API endpoint
    :param method: HTTP method, GET, POST or DELETE
    :return: dict|None
    """
    per_action = self.last_rate_limit_info.get(action, {})
    return per_action.get(method.upper())
When having foreign key or m2m relationships between models A and B (B has foreign key to A named parent),
we want to have a form that sits on A's viewset but creates/edits B and sets it relationship to A
automatically.
In order to do so, define linked_forms on A's viewset containing a call to linked_form as follows:
@linked_forms()
class AViewSet(AngularFormMixin, ...):
linked_forms = {
'new-b': linked_form(BViewSet, link='parent')
}
Then, there will be a form definition on <aviewset>/pk/forms/new-b, with POST/PATCH operations pointing
to an automatically created endpoint <aviewset>/pk/linked-endpoint/new-b and detail-route named "new_b"
:param viewset: the foreign viewset
:param form_id: id of the form on the foreign viewset. If unset, use the default form
:param link: either a field name on the foreign viewset or a callable that will get (foreign_instance, this_instance)
:return: an internal definition of a linked form
def linked_form(viewset, form_id=None, link=None, link_id=None, method=None):
    """Build an internal definition of a linked form.

    When models A and B are related (B has a foreign key to A, e.g. "parent"),
    a form can sit on A's viewset yet create/edit B and set its relationship
    to A automatically.  Define ``linked_forms`` on A's viewset:

        @linked_forms()
        class AViewSet(AngularFormMixin, ...):
            linked_forms = {
                'new-b': linked_form(BViewSet, link='parent')
            }

    A form definition then appears on <aviewset>/pk/forms/new-b, with
    POST/PATCH operations pointing to an automatically created endpoint
    <aviewset>/pk/linked-endpoint/new-b and detail-route named "new_b".

    :param viewset: the foreign viewset
    :param form_id: id of the form on the foreign viewset; default form if unset
    :param link: either a field name on the foreign viewset or a callable that
        will get (foreign_instance, this_instance)
    :param link_id: passed through in the definition (consumer-defined)
    :param method: passed through in the definition (consumer-defined)
    :return: an internal definition of a linked form
    """
    return dict(viewset=viewset, form_id=form_id, link=link,
                link_id=link_id, method=method)
Mint record identifiers.
:param record_uuid: The record UUID.
:param data: The record data.
:returns: A :class:`invenio_pidstore.models.PersistentIdentifier` instance.
def oaiid_minter(record_uuid, data):
    """Mint record identifiers.

    :param record_uuid: The record UUID.
    :param data: The record data.
    :returns: A :class:`invenio_pidstore.models.PersistentIdentifier` instance.
    """
    pid_value = data.get('_oai', {}).get('id')
    if pid_value is None:
        # Derive the OAI id from the control number plus the configured prefix.
        fetcher_name = current_app.config.get(
            'OAISERVER_CONTROL_NUMBER_FETCHER', 'recid')
        control_number = current_pidstore.fetchers[fetcher_name](
            record_uuid, data)
        prefix = current_app.config.get('OAISERVER_ID_PREFIX', '')
        pid_value = prefix + str(control_number.pid_value)
    provider = OAIIDProvider.create(
        object_type='rec', object_uuid=record_uuid,
        pid_value=str(pid_value)
    )
    # Mirror the minted id back into the record payload.
    data.setdefault('_oai', {})
    data['_oai']['id'] = provider.pid.pid_value
    return provider.pid
Return formatter validation error.
def validation_error(exception):
    """Return formatter validation error."""
    messages = getattr(exception, 'messages', None)
    if messages is None:
        messages = getattr(exception, 'data', {'messages': None})['messages']

    def extract_errors():
        """Extract errors from exception."""
        if isinstance(messages, dict):
            for field, message in messages.items():
                code = 'badVerb' if field == 'verb' else 'badArgument'
                yield code, '\n'.join(message)
        else:
            for field in exception.field_names:
                code = 'badVerb' if field == 'verb' else 'badArgument'
                yield code, '\n'.join(messages)
            # No named fields at all: still report the messages.
            if not exception.field_names:
                yield 'badArgument', '\n'.join(messages)

    return (etree.tostring(xml.error(extract_errors())),
            422,
            {'Content-Type': 'text/xml'})
Response endpoint.
def response(args):
    """Response endpoint."""
    # Dispatch to the XML builder matching the OAI-PMH verb.
    verb_handler = getattr(xml, args['verb'].lower())
    body = etree.tostring(
        verb_handler(**args),
        pretty_print=True,
        xml_declaration=True,
        encoding='UTF-8',
    )
    result = make_response(body)
    result.headers['Content-Type'] = 'text/xml'
    return result
Create a new record identifier.
:param object_type: The object type. (Default: ``None``)
:param object_uuid: The object UUID. (Default: ``None``)
def create(cls, object_type=None, object_uuid=None, **kwargs):
    """Create a new record identifier.

    :param object_type: The object type. (Default: ``None``)
    :param object_uuid: The object UUID. (Default: ``None``)
    """
    assert 'pid_value' in kwargs
    if object_type and object_uuid:
        # A fully attached identifier is registered right away.
        kwargs['status'] = PIDStatus.REGISTERED
    else:
        kwargs.setdefault('status', cls.default_status)
    return super(OAIIDProvider, cls).create(
        object_type=object_type, object_uuid=object_uuid, **kwargs)
Update mappings with the percolator field.
.. note::
This is only needed from ElasticSearch v5 onwards, because percolators
are now just a special type of field inside mappings.
def _create_percolator_mapping(index, doc_type):
    """Update mappings with the percolator field.

    .. note::
        This is only needed from ElasticSearch v5 onwards, because percolators
        are now just a special type of field inside mappings.
    """
    if ES_VERSION[0] < 5:
        return
    current_search_client.indices.put_mapping(
        index=index, doc_type=doc_type,
        body=PERCOLATOR_MAPPING, ignore=[400, 404])
Get results for a percolate query.
def _percolate_query(index, doc_type, percolator_doc_type, document):
    """Get results for a percolate query.

    :param index: Index to percolate against.
    :param doc_type: Document type of *document*.
    :param percolator_doc_type: Document type holding the stored queries.
    :param document: The record document to match against stored queries.
    :returns: The list of matches (shape differs by ES version).

    .. note:: NOTE(review): for ES major versions other than 2, 5 and 6 this
        function falls through and implicitly returns ``None`` — confirm
        whether ES 7+ support is intended.
    """
    if ES_VERSION[0] in (2, 5):
        # ES 2/5 still expose a dedicated percolate API.
        results = current_search_client.percolate(
            index=index, doc_type=doc_type, allow_no_indices=True,
            ignore_unavailable=True, body={'doc': document}
        )
        return results['matches']
    elif ES_VERSION[0] == 6:
        # ES 6 replaces the API with a ``percolate`` query over the
        # percolator field.
        results = current_search_client.search(
            index=index, doc_type=percolator_doc_type, allow_no_indices=True,
            ignore_unavailable=True, body={
                'query': {
                    'percolate': {
                        'field': 'query',
                        'document_type': percolator_doc_type,
                        'document': document,
                    }
                }
            }
        )
        return results['hits']['hits']
Create new percolator associated with the new set.
def _new_percolator(spec, search_pattern):
    """Create new percolator associated with the new set.

    :param spec: OAI set spec; used to build the percolator document id.
    :param search_pattern: Query string defining the set's membership.
    """
    # Both a spec and a pattern are required; sets without a search pattern
    # are matched through the record's own ``_oai.sets`` list instead.
    if spec and search_pattern:
        query = query_string_parser(search_pattern=search_pattern).to_dict()
        for index in current_search.mappings.keys():
            # Create the percolator doc_type in the existing index for >= ES5
            # TODO: Consider doing this only once in app initialization
            percolator_doc_type = _get_percolator_doc_type(index)
            _create_percolator_mapping(index, percolator_doc_type)
            current_search_client.index(
                index=index, doc_type=percolator_doc_type,
                id='oaiset-{}'.format(spec),
                body={'query': query}
            )
Delete percolator associated with the new oaiset.
def _delete_percolator(spec, search_pattern):
    """Delete percolator associated with the new oaiset.

    :param spec: OAI set spec identifying the percolator document.
    :param search_pattern: Unused; kept for signature symmetry with
        :func:`_new_percolator`.
    """
    if spec:
        for index in current_search.mappings.keys():
            # Create the percolator doc_type in the existing index for >= ES5
            percolator_doc_type = _get_percolator_doc_type(index)
            _create_percolator_mapping(index, percolator_doc_type)
            # ignore 404: the percolator may never have been created.
            current_search_client.delete(
                index=index, doc_type=percolator_doc_type,
                id='oaiset-{}'.format(spec), ignore=[404]
            )
Build sets cache.
def _build_cache():
    """Build and return the cached list of pattern-less set specs.

    Only sets whose ``search_pattern`` is NULL are cached here; sets with a
    pattern are matched via the percolator instead.
    """
    sets = current_oaiserver.sets
    if sets is None:
        # build sets cache
        sets = current_oaiserver.sets = [
            oaiset.spec for oaiset in OAISet.query.filter(
                OAISet.search_pattern.is_(None)).all()]
    return sets
Find matching sets.
def get_record_sets(record):
    """Yield the specs of all sets matching *record*.

    Two kinds of sets are matched:

    * sets without a search pattern, which must already be listed in the
      record's ``_oai.sets``;
    * sets with a search pattern, resolved through a percolate query.

    :param record: The record to match.
    """
    # get lists of sets with search_pattern equals to None but already in the
    # set list inside the record
    record_sets = set(record.get('_oai', {}).get('sets', []))
    for spec in _build_cache():
        if spec in record_sets:
            yield spec
    # get list of sets that match using percolator
    index, doc_type = RecordIndexer().record_to_index(record)
    document = record.dumps()
    percolator_doc_type = _get_percolator_doc_type(index)
    _create_percolator_mapping(index, percolator_doc_type)
    results = _percolate_query(index, doc_type, percolator_doc_type, document)
    prefix = 'oaiset-'
    prefix_len = len(prefix)
    for match in results:
        set_name = match['_id']
        if set_name.startswith(prefix):
            name = set_name[prefix_len:]
            yield name
    # NOTE: a trailing ``raise StopIteration`` was removed here.  Under
    # PEP 479 (Python 3.7+) raising StopIteration inside a generator is
    # converted into a RuntimeError; simply falling off the end of the
    # function ends the iteration correctly.
Commit all records.
def _records_commit(record_ids):
    """Load each record by id and commit it.

    :param record_ids: Iterable of record UUIDs to commit.
    """
    for rid in record_ids:
        Record.get_record(rid).commit()
Update all affected records by OAISet change.
:param spec: The record spec.
:param search_pattern: The search pattern.
def update_affected_records(spec=None, search_pattern=None):
    """Update all affected records by OAISet change.

    Fans the affected record ids out to ``update_records_sets`` Celery tasks
    in chunks of ``OAISERVER_CELERY_TASK_CHUNK_SIZE``.

    :param spec: The record spec.
    :param search_pattern: The search pattern.
    """
    chunk_size = current_app.config['OAISERVER_CELERY_TASK_CHUNK_SIZE']
    record_ids = get_affected_records(spec=spec, search_pattern=search_pattern)
    # zip_longest over n copies of the same iterator yields fixed-size
    # chunks, padding the last one with None; filter(None, ...) drops the
    # padding before dispatching each chunk as its own task.
    group(
        update_records_sets.s(list(filter(None, chunk)))
        for chunk in zip_longest(*[iter(record_ids)] * chunk_size)
    )()
Create OAI-PMH envelope for response.
def envelope(**kwargs):
    """Create OAI-PMH envelope for response.

    Builds the ``<OAI-PMH>`` root with ``responseDate`` and ``request``
    children; every request argument becomes an attribute on ``request``.

    :returns: ``(ElementTree, root element)`` tuple.
    """
    e_oaipmh = Element(etree.QName(NS_OAIPMH, 'OAI-PMH'), nsmap=NSMAP)
    e_oaipmh.set(etree.QName(NS_XSI, 'schemaLocation'),
                 '{0} {1}'.format(NS_OAIPMH, NS_OAIPMH_XSD))
    e_tree = ElementTree(element=e_oaipmh)
    # Optionally attach an XSL stylesheet processing instruction before the
    # root element so browsers can render the XML.
    if current_app.config['OAISERVER_XSL_URL']:
        e_oaipmh.addprevious(etree.ProcessingInstruction(
            'xml-stylesheet', 'type="text/xsl" href="{0}"'
            .format(current_app.config['OAISERVER_XSL_URL'])))
    e_responseDate = SubElement(
        e_oaipmh, etree.QName(
            NS_OAIPMH, 'responseDate'))
    # date should be first possible moment
    e_responseDate.text = datetime_to_datestamp(datetime.utcnow())
    e_request = SubElement(e_oaipmh, etree.QName(NS_OAIPMH, 'request'))
    for key, value in kwargs.items():
        # Normalize non-string argument values to their OAI-PMH string form.
        if key == 'from_' or key == 'until':
            value = datetime_to_datestamp(value)
        elif key == 'resumptionToken':
            value = value['token']
        e_request.set(key, value)
    e_request.text = url_for('invenio_oaiserver.response', _external=True)
    return e_tree, e_oaipmh
Create error element.
def error(errors):
    """Build an OAI-PMH envelope containing one ``<error>`` per item.

    :param errors: Iterable of ``(code, message)`` pairs.
    :returns: The envelope ElementTree.
    """
    tree, root = envelope()
    for code, message in errors:
        e_err = SubElement(root, etree.QName(NS_OAIPMH, 'error'))
        e_err.set('code', code)
        e_err.text = message
    return tree
Create OAI-PMH envelope for response with verb.
def verb(**kwargs):
    """Create OAI-PMH envelope for response with verb.

    :returns: ``(ElementTree, verb element)`` tuple.
    """
    tree, root = envelope(**kwargs)
    verb_element = SubElement(root, etree.QName(NS_OAIPMH, kwargs['verb']))
    return tree, verb_element
Create OAI-PMH response for verb Identify.
def identify(**kwargs):
    """Create OAI-PMH response for verb Identify.

    Serializes the repository description (name, base URL, protocol version,
    admin emails, earliest datestamp, granularity, compressions and optional
    descriptions) from the application configuration.
    """
    cfg = current_app.config
    e_tree, e_identify = verb(**kwargs)
    e_repositoryName = SubElement(
        e_identify, etree.QName(NS_OAIPMH, 'repositoryName'))
    e_repositoryName.text = cfg['OAISERVER_REPOSITORY_NAME']
    e_baseURL = SubElement(e_identify, etree.QName(NS_OAIPMH, 'baseURL'))
    e_baseURL.text = url_for('invenio_oaiserver.response', _external=True)
    e_protocolVersion = SubElement(e_identify,
                                   etree.QName(NS_OAIPMH, 'protocolVersion'))
    e_protocolVersion.text = cfg['OAISERVER_PROTOCOL_VERSION']
    for adminEmail in cfg['OAISERVER_ADMIN_EMAILS']:
        e = SubElement(e_identify, etree.QName(NS_OAIPMH, 'adminEmail'))
        e.text = adminEmail
    e_earliestDatestamp = SubElement(
        e_identify, etree.QName(
            NS_OAIPMH, 'earliestDatestamp'))
    # Default to the minimum representable date; replaced below if a record
    # with a ``_created`` timestamp exists.
    earliest_date = datetime(MINYEAR, 1, 1)
    earliest_record = OAIServerSearch(
        index=current_app.config['OAISERVER_RECORD_INDEX']).sort({
            "_created": {"order": "asc"}})[0:1].execute()
    if len(earliest_record.hits.hits) > 0:
        created_date_str = earliest_record.hits.hits[0].get(
            "_source", {}).get('_created')
        if created_date_str:
            # Strip tzinfo so datetime_to_datestamp gets a naive UTC value.
            earliest_date = arrow.get(
                created_date_str).to('utc').datetime.replace(tzinfo=None)
    e_earliestDatestamp.text = datetime_to_datestamp(earliest_date)
    e_deletedRecord = SubElement(e_identify,
                                 etree.QName(NS_OAIPMH, 'deletedRecord'))
    e_deletedRecord.text = 'no'
    e_granularity = SubElement(e_identify,
                               etree.QName(NS_OAIPMH, 'granularity'))
    assert cfg['OAISERVER_GRANULARITY'] in DATETIME_FORMATS
    e_granularity.text = cfg['OAISERVER_GRANULARITY']
    compressions = cfg['OAISERVER_COMPRESSIONS']
    # 'identity' alone means "no compression" and is omitted per the spec.
    if compressions != ['identity']:
        for compression in compressions:
            e_compression = SubElement(e_identify,
                                       etree.QName(NS_OAIPMH, 'compression'))
            e_compression.text = compression
    for description in cfg.get('OAISERVER_DESCRIPTIONS', []):
        e_description = SubElement(e_identify,
                                   etree.QName(NS_OAIPMH, 'description'))
        # Descriptions are configured as raw XML strings.
        e_description.append(etree.fromstring(description))
    return e_tree
Attach resumption token element to a parent.
def resumption_token(parent, pagination, **kwargs):
    """Attach resumption token element to a parent.

    :param parent: Element to attach the ``<resumptionToken>`` to.
    :param pagination: Pagination object exposing ``page``, ``per_page``,
        ``has_next`` and ``total``.
    """
    # Do not add resumptionToken if all results fit to the first page.
    if pagination.page == 1 and not pagination.has_next:
        return
    token = serialize(pagination, **kwargs)
    e_resumptionToken = SubElement(parent, etree.QName(NS_OAIPMH,
                                                       'resumptionToken'))
    if pagination.total:
        expiration_date = datetime.utcnow() + timedelta(
            seconds=current_app.config[
                'OAISERVER_RESUMPTION_TOKEN_EXPIRE_TIME'
            ]
        )
        e_resumptionToken.set('expirationDate', datetime_to_datestamp(
            expiration_date
        ))
        # cursor is the zero-based index of the first record on this page.
        e_resumptionToken.set('cursor', str(
            (pagination.page - 1) * pagination.per_page
        ))
        e_resumptionToken.set('completeListSize', str(pagination.total))
    # An empty token (last page) leaves the element's text unset, which the
    # OAI-PMH spec uses to signal the end of the list.
    if token:
        e_resumptionToken.text = token
Create OAI-PMH response for ListSets verb.
def listsets(**kwargs):
    """Create OAI-PMH response for ListSets verb.

    Pages through all registered OAI sets and serializes each as a ``<set>``
    element with spec, name and an optional Dublin Core description.
    """
    e_tree, e_listsets = verb(**kwargs)
    page = kwargs.get('resumptionToken', {}).get('page', 1)
    size = current_app.config['OAISERVER_PAGE_SIZE']
    oai_sets = OAISet.query.paginate(page=page, per_page=size, error_out=False)
    for oai_set in oai_sets.items:
        e_set = SubElement(e_listsets, etree.QName(NS_OAIPMH, 'set'))
        e_setSpec = SubElement(e_set, etree.QName(NS_OAIPMH, 'setSpec'))
        e_setSpec.text = oai_set.spec
        e_setName = SubElement(e_set, etree.QName(NS_OAIPMH, 'setName'))
        e_setName.text = sanitize_unicode(oai_set.name)
        if oai_set.description:
            # Descriptions are wrapped in an oai_dc Dublin Core container.
            e_setDescription = SubElement(e_set, etree.QName(NS_OAIPMH,
                                                             'setDescription'))
            e_dc = SubElement(
                e_setDescription, etree.QName(NS_OAIDC, 'dc'),
                nsmap=NSMAP_DESCRIPTION
            )
            e_dc.set(etree.QName(NS_XSI, 'schemaLocation'), NS_OAIDC)
            e_description = SubElement(e_dc, etree.QName(NS_DC, 'description'))
            e_description.text = oai_set.description
    resumption_token(e_listsets, oai_sets, **kwargs)
    return e_tree
Create OAI-PMH response for ListMetadataFormats verb.
def listmetadataformats(**kwargs):
    """Create OAI-PMH response for ListMetadataFormats verb.

    Lists every format configured in ``OAISERVER_METADATA_FORMATS``.  When an
    ``identifier`` argument is present, the record must exist (the provider
    lookup raises otherwise).
    """
    cfg = current_app.config
    e_tree, e_listmetadataformats = verb(**kwargs)
    if 'identifier' in kwargs:
        # test if record exists
        OAIIDProvider.get(pid_value=kwargs['identifier'])
    for prefix, metadata in cfg.get('OAISERVER_METADATA_FORMATS', {}).items():
        e_metadataformat = SubElement(
            e_listmetadataformats, etree.QName(NS_OAIPMH, 'metadataFormat')
        )
        e_metadataprefix = SubElement(
            e_metadataformat, etree.QName(NS_OAIPMH, 'metadataPrefix')
        )
        e_metadataprefix.text = prefix
        e_schema = SubElement(
            e_metadataformat, etree.QName(NS_OAIPMH, 'schema')
        )
        e_schema.text = metadata['schema']
        e_metadataNamespace = SubElement(
            e_metadataformat, etree.QName(NS_OAIPMH, 'metadataNamespace')
        )
        e_metadataNamespace.text = metadata['namespace']
    return e_tree
Attach ``<header/>`` element to a parent.
def header(parent, identifier, datestamp, sets=None, deleted=False):
    """Attach ``<header/>`` element to a parent.

    :param parent: Element the header is appended to.
    :param identifier: OAI identifier of the record.
    :param datestamp: Datetime of the record's last update.
    :param sets: Optional iterable of set specs.
    :param deleted: Whether to mark the header as deleted.
    :returns: The created header element.
    """
    def _q(tag):
        # Shorthand for a qualified name in the OAI-PMH namespace.
        return etree.QName(NS_OAIPMH, tag)

    hdr = SubElement(parent, _q('header'))
    if deleted:
        hdr.set('status', 'deleted')
    SubElement(hdr, _q('identifier')).text = identifier
    SubElement(hdr, _q('datestamp')).text = datetime_to_datestamp(datestamp)
    for spec in sets or []:
        SubElement(hdr, _q('setSpec')).text = spec
    return hdr
Create OAI-PMH response for verb Identify.
def getrecord(**kwargs):
    """Create OAI-PMH response for verb GetRecord.

    Resolves the record by its OAI identifier, then serializes its header
    and metadata using the serializer registered for ``metadataPrefix``.
    """
    record_dumper = serializer(kwargs['metadataPrefix'])
    pid = OAIIDProvider.get(pid_value=kwargs['identifier']).pid
    record = Record.get_record(pid.object_uuid)
    e_tree, e_getrecord = verb(**kwargs)
    e_record = SubElement(e_getrecord, etree.QName(NS_OAIPMH, 'record'))
    header(
        e_record,
        identifier=pid.pid_value,
        datestamp=record.updated,
        sets=record.get('_oai', {}).get('sets', []),
    )
    e_metadata = SubElement(e_record,
                            etree.QName(NS_OAIPMH, 'metadata'))
    # The dumper expects the Elasticsearch hit shape, hence the _source wrap.
    e_metadata.append(record_dumper(pid, {'_source': record}))
    return e_tree
Create OAI-PMH response for verb ListIdentifiers.
def listidentifiers(**kwargs):
    """Create OAI-PMH response for verb ListIdentifiers.

    Emits a ``<header>`` per matching record plus a resumption token when
    more pages remain.
    """
    e_tree, e_listidentifiers = verb(**kwargs)
    result = get_records(**kwargs)
    for record in result.items:
        pid = oaiid_fetcher(record['id'], record['json']['_source'])
        header(
            e_listidentifiers,
            identifier=pid.pid_value,
            datestamp=record['updated'],
            sets=record['json']['_source'].get('_oai', {}).get('sets', []),
        )
    resumption_token(e_listidentifiers, result, **kwargs)
    return e_tree
Create OAI-PMH response for verb ListRecords.
def listrecords(**kwargs):
    """Create OAI-PMH response for verb ListRecords.

    Like ListIdentifiers but also serializes each record's metadata via the
    serializer registered for ``metadataPrefix``.
    """
    record_dumper = serializer(kwargs['metadataPrefix'])
    e_tree, e_listrecords = verb(**kwargs)
    result = get_records(**kwargs)
    for record in result.items:
        pid = oaiid_fetcher(record['id'], record['json']['_source'])
        e_record = SubElement(e_listrecords,
                              etree.QName(NS_OAIPMH, 'record'))
        header(
            e_record,
            identifier=pid.pid_value,
            datestamp=record['updated'],
            sets=record['json']['_source'].get('_oai', {}).get('sets', []),
        )
        e_metadata = SubElement(e_record, etree.QName(NS_OAIPMH, 'metadata'))
        e_metadata.append(record_dumper(pid, record['json']))
    resumption_token(e_listrecords, result, **kwargs)
    return e_tree
Fetch a record's identifier.
:param record_uuid: The record UUID.
:param data: The record data.
:returns: A :class:`invenio_pidstore.fetchers.FetchedPID` instance.
def oaiid_fetcher(record_uuid, data):
    """Fetch a record's identifier.

    :param record_uuid: The record UUID.
    :param data: The record data.
    :returns: A :class:`invenio_pidstore.fetchers.FetchedPID` instance.
    :raises PersistentIdentifierError: If the record has no ``_oai.id``.
    """
    pid_value = data.get('_oai', {}).get('id')
    if pid_value is None:
        raise PersistentIdentifierError()
    return FetchedPID(
        provider=OAIIDProvider,
        pid_type=OAIIDProvider.pid_type,
        pid_value=str(pid_value),
    )
Forbit updates of set identifier.
def validate_spec(self, key, value):
    """Forbid updates of set identifier.

    :param key: Name of the validated attribute (``spec``).
    :param value: The new value being assigned.
    :returns: *value* unchanged when the assignment is allowed.
    :raises OAISetSpecUpdateError: If an existing spec would be changed.
    """
    if self.spec and self.spec != value:
        raise OAISetSpecUpdateError("Updating spec is not allowed.")
    return value
Add a record to the OAISet.
:param record: Record to be added.
:type record: `invenio_records.api.Record` or derivative.
def add_record(self, record):
    """Add a record to the OAISet.

    :param record: Record to be added.
    :type record: `invenio_records.api.Record` or derivative.
    """
    # Ensure the nested ``_oai.sets`` list exists before appending.
    record.setdefault('_oai', {}).setdefault('sets', [])

    # Adding the same record twice is a programming error.
    assert not self.has_record(record)

    record['_oai']['sets'].append(self.spec)
Remove a record from the OAISet.
:param record: Record to be removed.
:type record: `invenio_records.api.Record` or derivative.
def remove_record(self, record):
    """Remove a record from the OAISet.

    :param record: Record to be removed.
    :type record: `invenio_records.api.Record` or derivative.
    """
    # Removing a record that is not in the set is a programming error.
    assert self.has_record(record)

    # Rebuild the list without this set's spec.
    record['_oai']['sets'] = [
        s for s in record['_oai']['sets'] if s != self.spec]
Initialize OAI-PMH server.
def oaiserver(sets, records):
    """Initialize OAI-PMH server test fixture.

    Creates *sets* OAI sets and *records* indexed records with matching
    titles so each set's search pattern matches one record.

    :param sets: Number of OAI sets to create.
    :param records: Number of records to create and index.

    NOTE(review): relies on ``app`` from the enclosing fixture scope —
    confirm it is a Flask application object.
    """
    from invenio_db import db
    from invenio_oaiserver.models import OAISet
    from invenio_records.api import Record
    # create a OAI Set
    with db.session.begin_nested():
        for i in range(sets):
            db.session.add(OAISet(
                spec='test{0}'.format(i),
                name='Test{0}'.format(i),
                description='test desc {0}'.format(i),
                search_pattern='title_statement.title:Test{0}'.format(i),
            ))
    # create a record
    schema = {
        'type': 'object',
        'properties': {
            'title_statement': {
                'type': 'object',
                'properties': {
                    'title': {
                        'type': 'string',
                    },
                },
            },
            'field': {'type': 'boolean'},
        },
    }
    with app.app_context():
        indexer = RecordIndexer()
        with db.session.begin_nested():
            for i in range(records):
                record_id = uuid.uuid4()
                # NOTE(review): ``$schema`` is set to the schema dict itself
                # rather than a URL — presumably intentional for tests.
                data = {
                    'title_statement': {'title': 'Test{0}'.format(i)},
                    '$schema': schema,
                }
                recid_minter(record_id, data)
                oaiid_minter(record_id, data)
                record = Record.create(data, id_=record_id)
                indexer.index(record)
        db.session.commit()
Return etree_dumper instances.
:param metadata_prefix: One of the metadata identifiers configured in
``OAISERVER_METADATA_FORMATS``.
def serializer(metadata_prefix):
    """Return etree_dumper instances.

    :param metadata_prefix: One of the metadata identifiers configured in
        ``OAISERVER_METADATA_FORMATS``.
    :returns: A callable producing an lxml element for a record.
    """
    metadataFormats = current_app.config['OAISERVER_METADATA_FORMATS']
    serializer_ = metadataFormats[metadata_prefix]['serializer']
    # A (import_path, kwargs) tuple configures a partially-applied dumper.
    if isinstance(serializer_, tuple):
        return partial(import_string(serializer_[0]), **serializer_[1])
    return import_string(serializer_)
Dump MARC21 compatible record.
:param pid: The :class:`invenio_pidstore.models.PersistentIdentifier`
instance.
:param record: The :class:`invenio_records.api.Record` instance.
:returns: A LXML Element instance.
def dumps_etree(pid, record, **kwargs):
    """Dump MARC21 compatible record.

    :param pid: The :class:`invenio_pidstore.models.PersistentIdentifier`
        instance.
    :param record: The :class:`invenio_records.api.Record` instance.
    :returns: A LXML Element instance.
    """
    # Imported lazily so dojson is only required when MARC21 output is used.
    from dojson.contrib.to_marc21 import to_marc21
    from dojson.contrib.to_marc21.utils import dumps_etree
    return dumps_etree(to_marc21.do(record['_source']), **kwargs)
Generate the eprints element for the identify response.
The eprints container is used by the e-print community to describe
the content and policies of repositories.
For the full specification and schema definition visit:
http://www.openarchives.org/OAI/2.0/guidelines-eprints.htm
def eprints_description(metadataPolicy, dataPolicy,
                        submissionPolicy=None, content=None):
    """Generate the eprints element for the identify response.

    The eprints container is used by the e-print community to describe
    the content and policies of repositories.
    For the full specification and schema definition visit:
    http://www.openarchives.org/OAI/2.0/guidelines-eprints.htm

    :param metadataPolicy: Mapping of child tag name to text for the
        ``metadataPolicy`` element (required).
    :param dataPolicy: Same mapping for ``dataPolicy`` (required).
    :param submissionPolicy: Optional mapping for ``submissionPolicy``.
    :param content: Optional mapping for ``content``.
    :returns: Pretty-printed XML as bytes/str (lxml ``tostring`` output).
    """
    eprints = Element(etree.QName(NS_EPRINTS[None], 'eprints'),
                      nsmap=NS_EPRINTS)
    eprints.set(etree.QName(ns['xsi'], 'schemaLocation'),
                '{0} {1}'.format(EPRINTS_SCHEMA_LOCATION,
                                 EPRINTS_SCHEMA_LOCATION_XSD))
    if content:
        contentElement = etree.Element('content')
        for key, value in content.items():
            contentElement.append(E(key, value))
        eprints.append(contentElement)
    metadataPolicyElement = etree.Element('metadataPolicy')
    for key, value in metadataPolicy.items():
        metadataPolicyElement.append(E(key, value))
    eprints.append(metadataPolicyElement)
    dataPolicyElement = etree.Element('dataPolicy')
    for key, value in dataPolicy.items():
        dataPolicyElement.append(E(key, value))
    eprints.append(dataPolicyElement)
    if submissionPolicy:
        submissionPolicyElement = etree.Element('submissionPolicy')
        for key, value in submissionPolicy.items():
            submissionPolicyElement.append(E(key, value))
        eprints.append(submissionPolicyElement)
    return etree.tostring(eprints, pretty_print=True)
Generate the oai-identifier element for the identify response.
The OAI identifier format is intended to provide persistent resource
identifiers for items in repositories that implement OAI-PMH.
For the full specification and schema definition visit:
http://www.openarchives.org/OAI/2.0/guidelines-oai-identifier.htm
def oai_identifier_description(scheme, repositoryIdentifier,
                               delimiter, sampleIdentifier):
    """Generate the oai-identifier element for the identify response.

    The OAI identifier format is intended to provide persistent resource
    identifiers for items in repositories that implement OAI-PMH.
    For the full specification and schema definition visit:
    http://www.openarchives.org/OAI/2.0/guidelines-oai-identifier.htm

    :param scheme: Identifier scheme (e.g. ``oai``).
    :param repositoryIdentifier: Repository's unique identifier.
    :param delimiter: Delimiter used inside identifiers.
    :param sampleIdentifier: An example of a full identifier.
    :returns: Pretty-printed XML (lxml ``tostring`` output).
    """
    oai_identifier = Element(etree.QName(NS_OAI_IDENTIFIER[None],
                                         'oai_identifier'),
                             nsmap=NS_OAI_IDENTIFIER)
    oai_identifier.set(etree.QName(ns['xsi'], 'schemaLocation'),
                       '{0} {1}'.format(OAI_IDENTIFIER_SCHEMA_LOCATION,
                                        OAI_IDENTIFIER_SCHEMA_LOCATION_XSD))
    oai_identifier.append(E('scheme', scheme))
    oai_identifier.append(E('repositoryIdentifier', repositoryIdentifier))
    oai_identifier.append(E('delimiter', delimiter))
    oai_identifier.append(E('sampleIdentifier', sampleIdentifier))
    return etree.tostring(oai_identifier, pretty_print=True)
Generate the friends element for the identify response.
The friends container is recommended for use by repositories
to list confederate repositories.
For the schema definition visit:
http://www.openarchives.org/OAI/2.0/guidelines-friends.htm
def friends_description(baseURLs):
    """Generate the friends element for the identify response.

    The friends container is recommended for use by repositories
    to list confederate repositories.
    For the schema definition visit:
    http://www.openarchives.org/OAI/2.0/guidelines-friends.htm

    :param baseURLs: Iterable of confederate repository base URLs.
    :returns: Pretty-printed XML (lxml ``tostring`` output).
    """
    friends = Element(etree.QName(NS_FRIENDS[None], 'friends'),
                      nsmap=NS_FRIENDS)
    friends.set(etree.QName(ns['xsi'], 'schemaLocation'),
                '{0} {1}'.format(FRIENDS_SCHEMA_LOCATION,
                                 FRIENDS_SCHEMA_LOCATION_XSD))
    for baseURL in baseURLs:
        friends.append(E('baseURL', baseURL))
    return etree.tostring(friends, pretty_print=True)
Update records on OAISet insertion.
def after_insert_oai_set(mapper, connection, target):
    """Update records on OAISet insertion.

    SQLAlchemy ``after_insert`` listener: registers a percolator for the new
    set, then schedules the Celery task that updates affected records.
    """
    _new_percolator(spec=target.spec, search_pattern=target.search_pattern)
    # HACK: give the search index time to register the percolator before the
    # async task runs — a blocking sleep in a DB event listener is fragile.
    sleep(2)
    update_affected_records.delay(
        search_pattern=target.search_pattern
    )
Update records on OAISet update.
def after_update_oai_set(mapper, connection, target):
    """Update records on OAISet update.

    SQLAlchemy ``after_update`` listener: replaces the percolator and then
    schedules the record-update task.
    """
    _delete_percolator(spec=target.spec, search_pattern=target.search_pattern)
    _new_percolator(spec=target.spec, search_pattern=target.search_pattern)
    # HACK: allow the percolator change to propagate before the task runs.
    sleep(2)
    update_affected_records.delay(
        spec=target.spec, search_pattern=target.search_pattern
    )
Update records on OAISet deletion.
def after_delete_oai_set(mapper, connection, target):
    """Update records on OAISet deletion.

    SQLAlchemy ``after_delete`` listener: removes the percolator and then
    schedules the cleanup of records that referenced this set.
    """
    _delete_percolator(spec=target.spec, search_pattern=target.search_pattern)
    # HACK: allow the percolator deletion to propagate before the task runs.
    sleep(2)
    update_affected_records.delay(
        spec=target.spec
    )
Elasticsearch query string parser.
def query_string_parser(search_pattern):
    """Elasticsearch query string parser.

    :param search_pattern: Query string to parse.
    :returns: The parsed query object produced by the configured parser.
    """
    # Lazily resolve and memoize the parser on the extension proxy so the
    # import-string lookup happens only once per application.
    if not hasattr(current_oaiserver, 'query_parser'):
        query_parser = current_app.config['OAISERVER_QUERY_PARSER']
        if isinstance(query_parser, six.string_types):
            query_parser = import_string(query_parser)
        current_oaiserver.query_parser = query_parser
    return current_oaiserver.query_parser('query_string', query=search_pattern)
Get list of affected records.
:param spec: The record spec.
:param search_pattern: The search pattern.
:returns: An iterator to lazily find results.
def get_affected_records(spec=None, search_pattern=None):
    """Get list of affected records.

    :param spec: The record spec.
    :param search_pattern: The search pattern.
    :returns: An iterator to lazily find results.
    """
    # spec pattern query
    # ---------- ---------- -------
    # None None None
    # None Y Y
    # X None X
    # X '' X
    # X Y X OR Y
    if spec is None and search_pattern is None:
        # Nothing to match on: end the generator.  A bare ``return`` is used
        # instead of ``raise StopIteration``, which under PEP 479
        # (Python 3.7+) would escape the generator as a RuntimeError.
        return
    queries = []
    if spec is not None:
        queries.append(Q('match', **{'_oai.sets': spec}))
    if search_pattern:
        queries.append(query_string_parser(search_pattern=search_pattern))
    search = OAIServerSearch(
        index=current_app.config['OAISERVER_RECORD_INDEX'],
    ).query(Q('bool', should=queries))
    for result in search.scan():
        yield result.meta.id
Get records paginated.
def get_records(**kwargs):
    """Get records paginated.

    Runs a scrolled Elasticsearch search (or continues an existing scroll
    when the resumption token carries a ``scroll_id``) and wraps the raw
    response in a minimal pagination object.

    :returns: A ``Pagination`` instance exposing ``items``, ``total``,
        ``has_next`` and ``next_num``.
    """
    page_ = kwargs.get('resumptionToken', {}).get('page', 1)
    size_ = current_app.config['OAISERVER_PAGE_SIZE']
    scroll = current_app.config['OAISERVER_RESUMPTION_TOKEN_EXPIRE_TIME']
    scroll_id = kwargs.get('resumptionToken', {}).get('scroll_id')
    if scroll_id is None:
        # First page: build a fresh scrolled search with optional set and
        # date-range filters.
        search = OAIServerSearch(
            index=current_app.config['OAISERVER_RECORD_INDEX'],
        ).params(
            scroll='{0}s'.format(scroll),
        ).extra(
            version=True,
        )[(page_-1)*size_:page_*size_]
        if 'set' in kwargs:
            search = search.query('match', **{'_oai.sets': kwargs['set']})
        time_range = {}
        if 'from_' in kwargs:
            time_range['gte'] = kwargs['from_']
        if 'until' in kwargs:
            time_range['lte'] = kwargs['until']
        if time_range:
            search = search.filter('range', **{'_updated': time_range})
        response = search.execute().to_dict()
    else:
        # Subsequent pages: continue the existing scroll context.
        response = current_search_client.scroll(
            scroll_id=scroll_id,
            scroll='{0}s'.format(scroll),
        )

    class Pagination(object):
        """Dummy pagination class."""

        # Shared page geometry captured from the enclosing call.
        page = page_
        per_page = size_

        def __init__(self, response):
            """Initialize pagination."""
            self.response = response
            self.total = response['hits']['total']
            self._scroll_id = response.get('_scroll_id')

            # clean descriptor on last page
            if not self.has_next:
                current_search_client.clear_scroll(
                    scroll_id=self._scroll_id
                )
                self._scroll_id = None

        @cached_property
        def has_next(self):
            """Return True if there is next page."""
            return self.page * self.per_page <= self.total

        @cached_property
        def next_num(self):
            """Return next page number."""
            return self.page + 1 if self.has_next else None

        @property
        def items(self):
            """Return iterator."""
            from datetime import datetime
            for result in self.response['hits']['hits']:
                # Skip records that were never minted an OAI id.
                if '_oai' in result['_source']:
                    yield {
                        'id': result['_id'],
                        'json': result,
                        'updated': datetime.strptime(
                            result['_source']['_updated'][:19],
                            '%Y-%m-%dT%H:%M:%S'
                        ),
                    }

    return Pagination(response)
Look for an existing path matching filename.
Try to resolve relative to the module location if the path cannot by found
using "normal" resolution.
def get_file_path(filename, local=True, relative_to_module=None, my_dir=my_dir):
    """
    Look for an existing path matching filename.

    Try to resolve relative to the module location if the path cannot by found
    using "normal" resolution.

    :param filename: Path or filename to resolve.
    :param local: If true, expand ``~`` and absolutize before checking.
    :param relative_to_module: Optional module whose directory is used as
        the fallback base instead of *my_dir*.
    :param my_dir: Fallback base directory (defaults to the module-level
        ``my_dir``).
    :raises AssertionError: If the file cannot be found either way.
    """
    # override my_dir if module is provided
    if relative_to_module is not None:
        my_dir = os.path.dirname(relative_to_module.__file__)
    user_path = result = filename
    if local:
        user_path = os.path.expanduser(filename)
        result = os.path.abspath(user_path)
    if os.path.exists(result):
        return result  # The file was found normally
    # otherwise look relative to the module.
    result = os.path.join(my_dir, filename)
    assert os.path.exists(result), "no such file " + repr((filename, result, user_path))
    return result
Load a javascript file to the Jupyter notebook context,
unless it was already loaded.
def load_if_not_loaded(widget, filenames, verbose=False, delay=0.1, force=False, local=True, evaluator=None):
    """
    Load a javascript file to the Jupyter notebook context,
    unless it was already loaded.

    :param widget: Widget used by *evaluator* to run the JS.
    :param filenames: Iterable of javascript file names to load.
    :param verbose: Print progress messages when true.
    :param delay: Seconds to sleep after loading so the JS interpreter
        can catch up.
    :param force: Reload even if a file is recorded as already loaded.
    :param local: Passed through to the file lookup.
    :param evaluator: Callable ``(widget, js_text)``; defaults to the
        module-level ``EVALUATOR``.
    """
    if evaluator is None:
        evaluator = EVALUATOR  # default if not specified.
    for filename in filenames:
        loaded = False
        # ``LOADED_JAVASCRIPT`` is the module-level registry of files
        # already evaluated in this kernel.
        if force or not filename in LOADED_JAVASCRIPT:
            js_text = get_text_from_file_name(filename, local)
            if verbose:
                print("loading javascript file", filename, "with", evaluator)
            evaluator(widget, js_text)
            LOADED_JAVASCRIPT.add(filename)
            loaded = True
        else:
            if verbose:
                print ("not reloading javascript file", filename)
        if loaded and delay > 0:
            if verbose:
                print ("delaying to allow JS interpreter to sync.")
            time.sleep(delay)
Proxy to set a property of the widget element.
def _set(self, name, value):
    """Proxy to set a property of the widget element.

    :param name: Property name on the widget element.
    :param value: Value to assign.
    :returns: The wrapped widget command.
    """
    command = self.widget_element._set(name, value)
    return self.widget(command)
Strips the outer tag, if text starts with a tag. Not entity aware;
designed to quickly strip outer tags from lxml cleaner output. Only
checks for <p> and <div> outer tags.
def strip_outer_tag(text):
    """Strips the outer tag, if text starts with a tag. Not entity aware;
    designed to quickly strip outer tags from lxml cleaner output. Only
    checks for <p> and <div> outer tags.

    :param text: The (possibly tag-wrapped) string; returned unchanged when
        it is falsy or not a string.

    NOTE(review): uses ``basestring`` — this is Python 2 code.  Also note a
    mismatched pair like ``<p>...</div>`` would be stripped too, which the
    "not entity aware / quick" docstring suggests is an accepted shortcut.
    """
    if not text or not isinstance(text, basestring):
        return text
    stripped = text.strip()
    if (stripped.startswith('<p>') or stripped.startswith('<div>')) and \
       (stripped.endswith('</p>') or stripped.endswith('</div>')):
        # Slice between the first '>' and the last '<' to drop the outer tag.
        return stripped[stripped.index('>')+1:stripped.rindex('<')]
    return text
If an author contains an email and a name in it, make sure it is in
the format: "name (email)".
def munge_author(author):
    """If an author contains an email and a name in it, make sure it is in
    the format: "name (email)".

    :param author: Raw author string, possibly containing an email address.
    :returns: ``"name (email)"`` when an email is found, otherwise the
        original string.
    """
    # this loveliness is from feedparser but was not usable as a function
    if '@' in author:
        emailmatch = re.search(r"(([a-zA-Z0-9\_\-\.\+]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?))(\?subject=\S+)?", author, re.UNICODE)
        if emailmatch:
            email = emailmatch.group(0)
            # probably a better way to do the following, but it passes all the tests
            # Remove the email and any now-empty bracket pairs around it.
            author = author.replace(email, u'')
            author = author.replace(u'()', u'')
            author = author.replace(u'<>', u'')
            author = author.strip()
            # Strip a single pair of leftover surrounding parentheses.
            if author and (author[0] == u'('):
                author = author[1:]
            if author and (author[-1] == u')'):
                author = author[:-1]
            author = author.strip()
            return '%s (%s)' % (author, email)
    return author
Determine the base url for a root element.
def base_url(root):
    """Determine the base url for a root element.

    Scans the root element's attributes for one ending in ``base`` (e.g.
    ``xml:base``) whose value looks like an http URL.

    :param root: An lxml element.
    :returns: The base URL string, or ``None`` if not found.

    NOTE(review): ``iteritems`` means this is Python 2 / old-lxml code.
    """
    for attr, value in root.attrib.iteritems():
        if attr.endswith('base') and 'http' in value:
            return value
    return None
Return a tag and its namespace separately.
def clean_ns(tag):
    """Split an lxml tag like ``{namespace}local`` into its two parts.

    :param tag: Tag string, with or without a ``{namespace}`` prefix.
    :returns: ``(namespace, local_name)``; namespace is ``''`` when absent.
    """
    if '}' not in tag:
        return '', tag
    pieces = tag.split('}')
    return pieces[0].strip('{'), pieces[-1]
A safe xpath that only uses namespaces if available.
def xpath(node, query, namespaces=None):
    """A safe xpath that only uses namespaces if available.

    :param node: Element to query.
    :param query: XPath expression.
    :param namespaces: Optional prefix-to-URI mapping; ignored when empty or
        when it contains the literal key ``'None'`` (an unusable prefix).

    NOTE: the default was changed from the mutable ``{}`` to ``None`` —
    a shared mutable default is a classic Python pitfall; behavior for all
    callers is unchanged since the dict was only read, never mutated.
    """
    if namespaces and 'None' not in namespaces:
        return node.xpath(query, namespaces=namespaces)
    return node.xpath(query)
Return the inner text of a node. If a node has no sub elements, this
is just node.text. Otherwise, it's node.text + sub-element-text +
node.tail.
def innertext(node):
    """Return the inner text of a node. If a node has no sub elements, this
    is just node.text. Otherwise, it's node.text + sub-element-text +
    node.tail.

    :param node: An lxml element.

    NOTE(review): ``etree.tostring`` returns bytes on Python 3, which would
    break this concatenation — consistent with the rest of this module
    being Python 2 code.
    """
    # len(node) counts child elements; no children means plain text content.
    if not len(node):
        return node.text
    return (node.text or '') + ''.join([etree.tostring(c) for c in node]) + (node.tail or '')
Parse a document and return a feedparser dictionary with attr key access.
If clean_html is False, the html in the feed will not be cleaned. If
clean_html is True, a sane version of lxml.html.clean.Cleaner will be used.
If it is a Cleaner object, that cleaner will be used. If unix_timestamp is
True, the date information will be a numerical unix timestamp rather than a
struct_time. If encoding is provided, the encoding of the document will be
manually set to that.
def parse(document, clean_html=True, unix_timestamp=False, encoding=None):
    """Parse a document and return a feedparser dictionary with attr key access.

    If clean_html is False, the html in the feed will not be cleaned.  If
    clean_html is True, a sane version of lxml.html.clean.Cleaner will be used.
    If it is a Cleaner object, that cleaner will be used.  If unix_timestamp is
    True, the date information will be a numerical unix timestamp rather than a
    struct_time.  If encoding is provided, the encoding of the document will be
    manually set to that.

    :returns: A ``feedparser.FeedParserDict`` with ``feed``, ``entries`` and
        the usual ``bozo``/``bozo_exception`` error-reporting keys.
    """
    # clean_html may be a bool (choose a default cleaner) or a Cleaner.
    if isinstance(clean_html, bool):
        cleaner = default_cleaner if clean_html else fake_cleaner
    else:
        cleaner = clean_html
    result = feedparser.FeedParserDict()
    result['feed'] = feedparser.FeedParserDict()
    result['entries'] = []
    result['bozo'] = 0
    try:
        parser = SpeedParser(document, cleaner, unix_timestamp, encoding)
        parser.update(result)
    except Exception as e:
        # On a decode error with encoding=True, detect the real encoding and
        # retry once with the re-encoded document.
        if isinstance(e, UnicodeDecodeError) and encoding is True:
            encoding = chardet.detect(document)['encoding']
            document = document.decode(encoding, 'replace').encode('utf-8')
            return parse(document, clean_html, unix_timestamp, encoding)
        # Mirror feedparser's error contract: never raise, set bozo instead.
        import traceback
        result['bozo'] = 1
        result['bozo_exception'] = e
        result['bozo_tb'] = traceback.format_exc()
    return result
An attempt to parse pieces of an entry out w/o xpath, by looping
over the entry root's children and slotting them into the right places.
This is going to be way messier than SpeedParserEntries, and maybe
less cleanly usable, but it should be faster.
def parse_entry(self, entry):
    """An attempt to parse pieces of an entry out w/o xpath, by looping
    over the entry root's children and slotting them into the right places.
    This is going to be way messier than SpeedParserEntries, and maybe
    less cleanly usable, but it should be faster.

    :param entry: lxml element for one feed entry.
    :returns: A ``feedparser.FeedParserDict`` for the entry.
    """
    e = feedparser.FeedParserDict()
    tag_map = self.tag_map
    nslookup = self.nslookup
    for child in entry.getchildren():
        if isinstance(child, etree._Comment):
            continue
        ns, tag = clean_ns(child.tag)
        # First try a bare-tag handler (e.g. 'title' -> parse_title).
        mapping = tag_map.get(tag, None)
        if mapping:
            getattr(self, 'parse_%s' % mapping)(child, e, nslookup.get(ns, ns))
        if not ns:
            continue
        # Then try a namespaced handler keyed on 'prefix:tag'.
        fulltag = '%s:%s' % (nslookup.get(ns, ''), tag)
        mapping = tag_map.get(fulltag, None)
        if mapping:
            getattr(self, 'parse_%s' % mapping)(child, e, nslookup[ns])
    # Mirror feedparser's summary <-> content mirroring behavior.
    lacks_summary = 'summary' not in e or e['summary'] is None
    lacks_content = 'content' not in e or not bool(e.get('content', None))
    if not lacks_summary and lacks_content:
        e['content'] = [{'value': e.summary}]
    # feedparser sometimes copies the first content value into the
    # summary field when summary was completely missing; we want
    # to do that as well, but avoid the case where summary was given as ''
    if lacks_summary and not lacks_content:
        e['summary'] = e['content'][0]['value']
    if e.get('summary', False) is None:
        e['summary'] = u''
    # support feed entries that have a guid but no link
    if 'guid' in e and 'link' not in e:
        e['link'] = full_href(e['guid'], self.baseurl)
    return e
Find any changed path and update all changed modification times.
def changed_path(self):
    """Find any changed path and update all changed modification times.

    Returns a human-readable description of the last change detected, or
    None when nothing changed. Side effects: stored modification times
    are refreshed, newly discovered files in watched folders are added,
    and (when enabled) the Python-module and JavaScript watches rerun.
    """
    result = None
    # Pass 1: detect modifications among the files already tracked.
    for path, last_seen in list(self.paths_to_modification_times.items()):
        current = os.path.getmtime(path)
        if current > last_seen:
            result = "Watch file has been modified: " + repr(path)
            self.paths_to_modification_times[path] = current
    # Pass 2: detect files that newly appeared in the watched folders.
    for folder in self.folder_paths:
        for filename in os.listdir(folder):
            candidate = os.path.join(folder, filename)
            not_tracked = candidate not in self.paths_to_modification_times
            if not_tracked and os.path.isfile(candidate):
                result = "New file in watched folder: " + repr(candidate)
                self.add(candidate)
    if self.check_python_modules:
        self.add_all_modules()  # refresh the tracked Python modules
    if self.check_javascript:
        self.watch_javascript()
    return result
Parse a variety of ISO-8601-compatible formats like 20040105
def _parse_date_iso8601(dateString):
    '''Parse a variety of ISO-8601-compatible formats like 20040105.

    Tries each precompiled matcher in the module-level _iso8601_matches
    list, fills in any missing date fields from the current GMT time,
    applies the captured timezone offset, and returns a 9-tuple in local
    time via time.localtime(time.mktime(...)). Returns None when nothing
    matches or the timezone marker is malformed.
    '''
    m = None
    for _iso8601_match in _iso8601_matches:
        m = _iso8601_match(dateString)
        if m:
            break
    if not m:
        return
    if m.span() == (0, 0):
        # A zero-width match means the pattern matched the empty string;
        # treat it the same as no match at all.
        return
    params = m.groupdict()
    ordinal = params.get('ordinal', 0)
    if ordinal:
        ordinal = int(ordinal)
    else:
        ordinal = 0
    year = params.get('year', '--')
    if not year or year == '--':
        # Missing year: assume the current (GMT) year.
        year = time.gmtime()[0]
    elif len(year) == 2:
        # ISO 8601 assumes current century, i.e. 93 -> 2093, NOT 1993
        year = 100 * int(time.gmtime()[0] / 100) + int(year)
    else:
        year = int(year)
    month = params.get('month', '-')
    if not month or month == '-':
        # ordinals are NOT normalized by mktime, we simulate them
        # by setting month=1, day=ordinal
        if ordinal:
            month = 1
        else:
            month = time.gmtime()[1]
    month = int(month)
    day = params.get('day', 0)
    if not day:
        # see above
        if ordinal:
            day = ordinal
        elif params.get('century', 0) or \
            params.get('year', 0) or params.get('month', 0):
            # Any explicit higher-order field implies day 1 of that span.
            day = 1
        else:
            day = time.gmtime()[2]
    else:
        day = int(day)
    # special case of the century - is the first year of the 21st century
    # 2000 or 2001 ? The debate goes on...
    if 'century' in params:
        year = (int(params['century']) - 1) * 100 + 1
    # in ISO 8601 most fields are optional
    for field in ['hour', 'minute', 'second', 'tzhour', 'tzmin']:
        if not params.get(field, None):
            params[field] = 0
    hour = int(params.get('hour', 0))
    minute = int(params.get('minute', 0))
    # Fractional seconds are truncated toward zero.
    second = int(float(params.get('second', 0)))
    # weekday is normalized by mktime(), we can ignore it
    weekday = 0
    daylight_savings_flag = -1
    tm = [year, month, day, hour, minute, second, weekday,
          ordinal, daylight_savings_flag]
    # ISO 8601 time zone adjustments: shift hours/minutes toward UTC and
    # let mktime() re-normalize any overflow/underflow.
    tz = params.get('tz')
    if tz and tz != 'Z':
        if tz[0] == '-':
            tm[3] += int(params.get('tzhour', 0))
            tm[4] += int(params.get('tzmin', 0))
        elif tz[0] == '+':
            tm[3] -= int(params.get('tzhour', 0))
            tm[4] -= int(params.get('tzmin', 0))
        else:
            # Unrecognized timezone marker: refuse to guess.
            return None
    # Python's time.mktime() is a wrapper around the ANSI C mktime(3c)
    # which is guaranteed to normalize d/m/y/h/m/s.
    # Many implementations have bugs, but we'll pretend they don't.
    return time.localtime(time.mktime(tuple(tm)))
Parse a string according to the OnBlog 8-bit date format
def _parse_date_onblog(dateString):
    '''Parse a string according to the OnBlog 8-bit date format.

    Rewrites the matched timestamp as W3C-DTF with a fixed +09:00 (KST)
    offset and delegates to _parse_date_w3dtf(). Returns None when the
    string does not match _korean_onblog_date_re.
    '''
    match = _korean_onblog_date_re.match(dateString)
    if match is None:
        return
    fields = {'year': match.group(1), 'month': match.group(2),
              'day': match.group(3), 'hour': match.group(4),
              'minute': match.group(5), 'second': match.group(6),
              'zonediff': '+09:00'}
    w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % fields
    return _parse_date_w3dtf(w3dtfdate)
Parse a string according to the Nate 8-bit date format
def _parse_date_nate(dateString):
    '''Parse a string according to the Nate 8-bit date format.

    Rewrites the matched Korean-style timestamp as W3C-DTF with a fixed
    +09:00 (KST) offset and delegates to _parse_date_w3dtf(). Returns
    None when the string does not match _korean_nate_date_re.
    '''
    m = _korean_nate_date_re.match(dateString)
    if not m:
        return
    hour = int(m.group(5))
    ampm = m.group(4)
    if ampm == _korean_pm:
        # NOTE(review): a literal "12 PM" would become hour 24 here;
        # presumably the source format uses 0-11 hours -- confirm.
        hour += 12
    # Zero-pad to the two digits W3C-DTF requires.
    hour = str(hour).zfill(2)
    w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
                {'year': m.group(1), 'month': m.group(2), 'day': m.group(3),
                 'hour': hour, 'minute': m.group(6), 'second': m.group(7),
                 'zonediff': '+09:00'}
    return _parse_date_w3dtf(w3dtfdate)
Parse a string according to a Greek 8-bit date format.
def _parse_date_greek(dateString):
    '''Parse a string according to a Greek 8-bit date format.

    Translates the localized weekday/month names via the module-level
    _greek_wdays/_greek_months tables, then hands the rebuilt string to
    _parse_date_rfc822(). A KeyError from an unknown name propagates to
    the caller (parse_date treats it as "no parse"). Returns None when
    the string does not match _greek_date_format_re.
    '''
    match = _greek_date_format_re.match(dateString)
    if match is None:
        return
    fields = {'wday': _greek_wdays[match.group(1)],
              'day': match.group(2),
              'month': _greek_months[match.group(3)],
              'year': match.group(4),
              'hour': match.group(5),
              'minute': match.group(6),
              'second': match.group(7),
              'zonediff': match.group(8)}
    rfc822date = '%(wday)s, %(day)s %(month)s %(year)s %(hour)s:%(minute)s:%(second)s %(zonediff)s' % fields
    return _parse_date_rfc822(rfc822date)
Parse a string according to a Hungarian 8-bit date format.
def _parse_date_hungarian(dateString):
    '''Parse a string according to a Hungarian 8-bit date format.

    Rewrites the matched timestamp as W3C-DTF (no seconds field in this
    format) and delegates to _parse_date_w3dtf(). Returns None when the
    string does not match or the month name is unknown.
    '''
    match = _hungarian_date_format_re.match(dateString)
    if match is None or match.group(2) not in _hungarian_months:
        return None
    day = match.group(3)
    hour = match.group(4)
    # Zero-pad single-digit day/hour values for W3C-DTF.
    if len(day) == 1:
        day = '0' + day
    if len(hour) == 1:
        hour = '0' + hour
    fields = {'year': match.group(1),
              'month': _hungarian_months[match.group(2)],
              'day': day,
              'hour': hour,
              'minute': match.group(5),
              'zonediff': match.group(6)}
    w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s%(zonediff)s' % fields
    return _parse_date_w3dtf(w3dtfdate)
Parse an RFC822, RFC1123, RFC2822, or asctime-style date
def _parse_date_rfc822(dateString):
    '''Parse an RFC822, RFC1123, RFC2822, or asctime-style date.

    Massages the tokens into a shape rfc822.parsedate_tz accepts, then
    returns a 9-tuple in GMT via time.gmtime(). Returns None when the
    string is empty or parsedate_tz cannot parse it.
    '''
    data = dateString.split()
    if not data:
        return None
    # Drop a leading day-of-week token ("Fri," / "Fri." / "fri").
    if data[0][-1] in (',', '.') or data[0].lower() in rfc822._daynames:
        del data[0]
    if len(data) == 4:
        # Four tokens: a timezone may be glued onto the time with '+';
        # split it into its own token, or append an empty zone.
        s = data[3]
        i = s.find('+')
        if i > 0:
            data[3:] = [s[:i], s[i+1:]]
        else:
            data.append('')
        dateString = " ".join(data)
    # Account for the Etc/GMT timezone by stripping 'Etc/'
    elif len(data) == 5 and data[4].lower().startswith('etc/'):
        data[4] = data[4][4:]
        dateString = " ".join(data)
    if len(data) < 5:
        # Date-only strings get midnight GMT appended.
        dateString += ' 00:00:00 GMT'
    tm = rfc822.parsedate_tz(dateString)
    if tm:
        # Jython doesn't adjust for 2-digit years like CPython does,
        # so account for it by shifting the year so that it's in the
        # range 1970-2069 (1970 being the year of the Unix epoch).
        if tm[0] < 100:
            tm = (tm[0] + (1900, 2000)[tm[0] < 70],) + tm[1:]
        return time.gmtime(rfc822.mktime_tz(tm))
parse a date in yyyy/mm/dd hh:mm:ss TTT format
# Matches e.g. "Fri, 2006/09/15 08:19:53 EDT". Compiled once at import
# time instead of on every call.
_perforce_date_re = re.compile(
    r'(\w{,3}), (\d{,4})/(\d{,2})/(\d{2}) (\d{,2}):(\d{2}):(\d{2}) (\w{,3})')
_perforce_months = ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
                    'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec')

def _parse_date_perforce(aDateString):
    """Parse a date in yyyy/mm/dd hh:mm:ss TTT format.

    Rewrites the Perforce-style timestamp as an RFC 822 date and parses
    it with rfc822.parsedate_tz, returning a 9-tuple in GMT. Returns
    None when the string does not match or cannot be parsed.
    """
    m = _perforce_date_re.search(aDateString)
    if m is None:
        return None
    dow, year, month, day, hour, minute, second, tz = m.groups()
    dateString = "%s, %s %s %s %s:%s:%s %s" % (
        dow, day, _perforce_months[int(month) - 1], year,
        hour, minute, second, tz)
    tm = rfc822.parsedate_tz(dateString)
    if tm:
        return time.gmtime(rfc822.mktime_tz(tm))
    return None
Parses a variety of date formats into a 9-tuple in GMT
def parse_date(dateString):
    '''Parses a variety of date formats into a 9-tuple in GMT.

    Tries each handler in the module-level _date_handlers list in order
    and returns the first well-formed 9-tuple produced. Handlers that
    raise KeyError/OverflowError/ValueError, or that return anything
    other than a 9-tuple, are skipped. Returns None for empty input or
    when no handler succeeds.
    '''
    if not dateString:
        return None
    for handler in _date_handlers:
        try:
            parsed = handler(dateString)
        except (KeyError, OverflowError, ValueError):
            continue
        # Accept only a well-formed 9-tuple; otherwise try the next one.
        if parsed and len(parsed) == 9:
            return parsed
    return None
wrapper to allow output redirects for handle_chunk.
def handle_chunk_wrapper(self, status, name, content, file_info):
    """Wrapper to allow output redirects for handle_chunk.

    When self.output is set, handle_chunk runs inside that object's
    ``with`` context (with a short diagnostic print); otherwise it is
    called directly.
    """
    redirect = self.output
    if redirect is None:
        self.handle_chunk(status, name, content, file_info)
    else:
        with redirect:
            print("handling chunk " + repr(type(content)))
            self.handle_chunk(status, name, content, file_info)
Handle one chunk of the file. Override this method for piecewise delivery or error handling.
def handle_chunk(self, status, name, content, file_info):
    """Handle one chunk of the file.

    Override this method for piecewise delivery or error handling.
    ``status`` is one of "error" (raise JavaScriptError and drop any
    buffered chunks), "more" (buffer the chunk and report progress), or
    "done" (assemble all chunks and invoke the content callback); any
    other value is an assertion failure.
    """
    if status == "error":
        msg = repr(file_info.get("message"))
        exc = JavaScriptError(msg)
        exc.file_info = file_info
        self.status = "Javascript sent exception " + msg
        # Drop any partially collected data before propagating.
        self.chunk_collector = []
        raise exc
    if status == "more":
        # Intermediate chunk: accumulate and report progress.
        self.chunk_collector.append(content)
        self.progress_callback(self.chunk_collector, file_info)
        return
    assert status == "done", "Unknown status " + repr(status)
    # Final chunk: keep a reference to the raw chunk list, assemble the
    # full payload, and reset the collector for the next transfer.
    self.save_chunks = self.chunk_collector
    self.chunk_collector.append(content)
    all_content = self.combine_chunks(self.chunk_collector)
    self.chunk_collector = []
    callback = self.content_callback
    if callback is None:
        callback = self.default_content_callback
    self.status = "calling " + repr(callback)
    try:
        callback(self.widget, name, all_content)
    except Exception as err:
        self.status += "\n" + repr(callback) + " raised " + repr(err)
        raise
Return a URL for a user to login/register with ORCID.
Parameters
----------
:param scope: string or iterable of strings
The scope(s) of the authorization request.
For example '/authenticate'
:param redirect_uri: string
The URI to which the user's browser should be redirected after the
login.
:param state: string
An arbitrary token to prevent CSRF. See the OAuth 2 docs for
details.
:param family_names: string
The user's family name, used to fill the registration form.
:param given_names: string
The user's given name, used to fill the registration form.
:param email: string
The user's email address, used to fill the sign-in or registration
form.
:param lang: string
The language in which to display the authorization page.
:param show_login: bool
Determines whether the log-in or registration form will be shown by
default.
Returns
-------
:returns: string
The URL ready to be offered as a link to the user.
def get_login_url(self, scope, redirect_uri, state=None,
                  family_names=None, given_names=None, email=None,
                  lang=None, show_login=None):
    """Return a URL for a user to login/register with ORCID.

    :param scope: string or iterable of strings -- the scope(s) of the
        authorization request, e.g. '/authenticate'.
    :param redirect_uri: string -- where the user's browser should be
        redirected after the login.
    :param state: string -- arbitrary anti-CSRF token (see the OAuth 2
        docs for details).
    :param family_names: string -- pre-fills the registration form.
    :param given_names: string -- pre-fills the registration form.
    :param email: string -- pre-fills the sign-in/registration form.
    :param lang: string -- language of the authorization page.
    :param show_login: bool -- whether the log-in or the registration
        form is shown by default.
    :returns: string -- the URL ready to be offered as a link.
    """
    # A collection of scopes collapses into one sorted, de-duplicated,
    # space-separated string.
    if not isinstance(scope, string_types):
        scope = " ".join(sorted(set(scope)))
    query = [("client_id", self._key), ("scope", scope),
             ("response_type", "code"), ("redirect_uri", redirect_uri)]
    if state:
        query.append(("state", state))
    # Names may contain non-ASCII characters; encode them before the
    # query string is built.
    for key, value in (("family_names", family_names),
                       ("given_names", given_names)):
        if value:
            query.append((key, value.encode("utf-8")))
    if email:
        query.append(("email", email))
    if lang:
        query.append(("lang", lang))
    if show_login is not None:
        query.append(("show_login", "true" if show_login else "false"))
    return self._login_or_register_endpoint + "?" + urlencode(query)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.