_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3
values | text stringlengths 75 19.8k | language stringclasses 1
value | meta_information dict |
|---|---|---|---|---|---|
q263100 | get_item_creator | validation | def get_item_creator(item_type):
"""Get item creator according to the registered item type.
:param item_type: The type of item to be checked.
:type item_type: types.TypeType.
:returns: Creator function. None if type not found.
"""
if item_type not in Pipe.pipe_item_types:
for registered_type in Pipe.pipe_item_types:
if issubclass(item_type, registered_type):
return Pipe.pipe_item_types[registered_type]
return None
else:
return Pipe.pipe_item_types[item_type] | python | {
"resource": ""
} |
q263101 | Pipe.clone | validation | def clone(self):
"""Self-cloning. All its next Pipe objects are cloned too.
:returns: cloned object
"""
new_object = copy.copy(self)
if new_object.next:
new_object.next = new_object.next.clone()
return new_object | python | {
"resource": ""
} |
q263102 | Pipe.append | validation | def append(self, next):
"""Append next object to pipe tail.
:param next: The Pipe object to be appended to tail.
:type next: Pipe object.
"""
next.chained = True
if self.next:
self.next.append(next)
else:
self.next = next | python | {
"resource": ""
} |
q263103 | Pipe.iter | validation | def iter(self, prev=None):
"""Return a generator as an iterator object.
:param prev: Previous Pipe object which used for data input.
:returns: A generator for iteration.
"""
if self.next:
generator = self.next.iter(self.func(prev, *self.args, **self.kw))
else:
generator = self.func(prev, *self.args, **self.kw)
return generator | python | {
"resource": ""
} |
q263104 | PipeFunction.reduce | validation | def reduce(func):
"""Wrap a reduce function to Pipe object. Reduce function is a function
with at least two arguments. It works like built-in reduce function.
It takes first argument for accumulated result, second argument for
the new data to process. A keyword-based argument named 'init' is
optional. If init is provided, it is used for the initial value of
accumulated result. Or, the initial value is None.
The wrapped reducer yields the accumulated result after processing
each item, so the last yielded value is the final reduction.
:param func: The reduce function to be wrapped.
:type func: function object
:param args: The default arguments to be used for the reduce function.
:param kw: The default keyword arguments to be used for the reduce function.
:returns: Pipe object
"""
def wrapper(prev, *argv, **kw):
accum_value = None if 'init' not in kw else kw.pop('init')
if prev is None:
raise TypeError('A reducer must have input.')
for i in prev:
accum_value = func(accum_value, i, *argv, **kw)
yield accum_value
return Pipe(wrapper) | python | {
"resource": ""
} |
q263105 | _list_networks | validation | def _list_networks():
"""Return a dictionary of network name to active status bools.
Sample virsh net-list output::
Name State Autostart
-----------------------------------------
default active yes
juju-test inactive no
foobar inactive no
Parsing the above would return::
{"default": True, "juju-test": False, "foobar": False}
See: http://goo.gl/kXwfC
"""
output = core.run("virsh net-list --all")
networks = {}
# Take the header off and normalize whitespace.
net_lines = [n.strip() for n in output.splitlines()[2:]]
for line in net_lines:
if not line:
continue
name, state, auto = line.split()
networks[name] = state == "active"
return networks | python | {
"resource": ""
} |
q263106 | Captain.flush | validation | def flush(self, line):
"""flush the line to stdout"""
# TODO -- maybe use echo?
sys.stdout.write(line)
sys.stdout.flush() | python | {
"resource": ""
} |
q263107 | Captain.execute | validation | def execute(self, arg_str='', **kwargs):
"""runs the passed in arguments and returns an iterator on the output of
running command"""
cmd = "{} {} {}".format(self.cmd_prefix, self.script, arg_str)
expected_ret_code = kwargs.pop('code', 0)
# any kwargs with all capital letters should be considered environment
# variables
environ = self.environ
for k in list(kwargs.keys()):
if k.isupper():
environ[k] = kwargs.pop(k)
# we will allow overriding of these values
kwargs.setdefault("stderr", subprocess.STDOUT)
# we will not allow these to be overridden via kwargs
kwargs["shell"] = True
kwargs["stdout"] = subprocess.PIPE
kwargs["cwd"] = self.cwd
kwargs["env"] = environ
process = None
self.buf = deque(maxlen=self.bufsize)
try:
process = subprocess.Popen(
cmd,
**kwargs
)
# another round of links
# http://stackoverflow.com/a/17413045/5006 (what I used)
# http://stackoverflow.com/questions/2715847/
for line in iter(process.stdout.readline, b""):
line = line.decode(self.encoding)
self.buf.append(line.rstrip())
yield line
process.wait()
if process.returncode != expected_ret_code:
if process.returncode > 0:
raise RuntimeError("{} returned {} with output: {}".format(
cmd,
process.returncode,
self.output
))
except subprocess.CalledProcessError as e:
if e.returncode != expected_ret_code:
raise RuntimeError("{} returned {} with output: {}".format(
cmd,
e.returncode,
self.output
))
finally:
if process:
process.stdout.close() | python | {
"resource": ""
} |
q263108 | get_request_subfields | validation | def get_request_subfields(root):
"""Build a basic 035 subfield with basic information from the OAI-PMH request.
:param root: ElementTree root node
:return: list of subfield tuples [(..),(..)]
"""
request = root.find('request')
responsedate = root.find('responseDate')
subs = [("9", request.text),
("h", responsedate.text),
("m", request.attrib["metadataPrefix"])]
return subs | python | {
"resource": ""
} |
q263109 | strip_xml_namespace | validation | def strip_xml_namespace(root):
"""Strip out namespace data from an ElementTree.
This function is recursive and will traverse all
subnodes to the root element
@param root: the root element
@return: the same root element, minus namespace
"""
try:
root.tag = root.tag.split('}')[1]
except IndexError:
pass
for element in root.getchildren():
strip_xml_namespace(element) | python | {
"resource": ""
} |
q263110 | ConfigDict.load_dict | validation | def load_dict(self, source, namespace=''):
""" Load values from a dictionary structure. Nesting can be used to
represent namespaces.
>>> c = ConfigDict()
>>> c.load_dict({'some': {'namespace': {'key': 'value'} } })
{'some.namespace.key': 'value'}
"""
for key, value in source.items():
if isinstance(key, str):
nskey = (namespace + '.' + key).strip('.')
if isinstance(value, dict):
self.load_dict(value, namespace=nskey)
else:
self[nskey] = value
else:
raise TypeError('Key has type %r (not a string)' % type(key))
return self | python | {
"resource": ""
} |
q263111 | json | validation | def json(request, *args, **kwargs):
"""
The oembed endpoint, or the url to which requests for metadata are passed.
Third parties will want to access this view with URLs for your site's
content and be returned OEmbed metadata.
"""
# coerce to dictionary
params = dict(request.GET.items())
callback = params.pop('callback', None)
url = params.pop('url', None)
if not url:
return HttpResponseBadRequest('Required parameter missing: URL')
try:
provider = oembed.site.provider_for_url(url)
if not provider.provides:
raise OEmbedMissingEndpoint()
except OEmbedMissingEndpoint:
raise Http404('No provider found for %s' % url)
query = dict([(smart_str(k), smart_str(v)) for k, v in params.items() if v])
try:
resource = oembed.site.embed(url, **query)
except OEmbedException, e:
raise Http404('Error embedding %s: %s' % (url, str(e)))
response = HttpResponse(mimetype='application/json')
json = resource.json
if callback:
response.write('%s(%s)' % (defaultfilters.force_escape(callback), json))
else:
response.write(json)
return response | python | {
"resource": ""
} |
q263112 | consume_json | validation | def consume_json(request):
"""
Extract and return oembed content for given urls.
Required GET params:
urls - list of urls to consume
Optional GET params:
width - maxwidth attribute for oembed content
height - maxheight attribute for oembed content
template_dir - template_dir to use when rendering oembed
Returns:
list of dictionaries with oembed metadata and renderings, json encoded
"""
client = OEmbedConsumer()
urls = request.GET.getlist('urls')
width = request.GET.get('width')
height = request.GET.get('height')
template_dir = request.GET.get('template_dir')
output = {}
ctx = RequestContext(request)
for url in urls:
try:
provider = oembed.site.provider_for_url(url)
except OEmbedMissingEndpoint:
oembeds = None
rendered = None
else:
oembeds = url
rendered = client.parse_text(url, width, height, context=ctx, template_dir=template_dir)
output[url] = {
'oembeds': oembeds,
'rendered': rendered,
}
return HttpResponse(simplejson.dumps(output), mimetype='application/json') | python | {
"resource": ""
} |
q263113 | oembed_schema | validation | def oembed_schema(request):
"""
A site profile detailing valid endpoints for a given domain. Allows for
better auto-discovery of embeddable content.
OEmbed-able content lives at a URL that maps to a provider.
"""
current_domain = Site.objects.get_current().domain
url_schemes = [] # a list of dictionaries for all the urls we can match
endpoint = reverse('oembed_json') # the public endpoint for our oembeds
providers = oembed.site.get_providers()
for provider in providers:
# first make sure this provider class is exposed at the public endpoint
if not provider.provides:
continue
match = None
if isinstance(provider, DjangoProvider):
# django providers define their regex_list by using urlreversing
url_pattern = resolver.reverse_dict.get(provider._meta.named_view)
# this regex replacement is set to be non-greedy, which results
# in things like /news/*/*/*/*/ -- this is more explicit
if url_pattern:
regex = re.sub(r'%\(.+?\)s', '*', url_pattern[0][0][0])
match = 'http://%s/%s' % (current_domain, regex)
elif isinstance(provider, HTTPProvider):
match = provider.url_scheme
else:
match = provider.regex
if match:
url_schemes.append({
'type': provider.resource_type,
'matches': match,
'endpoint': endpoint
})
url_schemes.sort(key=lambda item: item['matches'])
response = HttpResponse(mimetype='application/json')
response.write(simplejson.dumps(url_schemes))
return response | python | {
"resource": ""
} |
q263114 | main | validation | def main(path):
'''scan path directory and any subdirectories for valid captain scripts'''
basepath = os.path.abspath(os.path.expanduser(str(path)))
echo.h2("Available scripts in {}".format(basepath))
echo.br()
for root_dir, dirs, files in os.walk(basepath, topdown=True):
for f in fnmatch.filter(files, '*.py'):
try:
filepath = os.path.join(root_dir, f)
# super edge case, this makes sure the python script won't start
# an interactive console session which would cause the session
# to start and not allow the for loop to complete
with open(filepath, encoding="UTF-8") as fp:
body = fp.read()
is_console = "InteractiveConsole" in body
is_console = is_console or "code" in body
is_console = is_console and "interact(" in body
if is_console:
continue
s = captain.Script(filepath)
if s.can_run_from_cli():
rel_filepath = s.call_path(basepath)
p = s.parser
echo.h3(rel_filepath)
desc = p.description
if desc:
echo.indent(desc, indent=(" " * 4))
subcommands = s.subcommands
if subcommands:
echo.br()
echo.indent("Subcommands:", indent=(" " * 4))
for sc in subcommands.keys():
echo.indent(sc, indent=(" " * 6))
echo.br()
except captain.ParseError:
pass
except Exception as e:
#echo.exception(e)
#echo.err("Failed to parse {} because {}", f, e.message)
echo.err("Failed to parse {}", f)
echo.verbose(e.message)
echo.br() | python | {
"resource": ""
} |
q263115 | ZipTaxClient.make_request_data | validation | def make_request_data(self, zipcode, city, state):
""" Make the request params given location data """
data = {'key': self.api_key,
'postalcode': str(zipcode),
'city': city,
'state': state
}
data = ZipTaxClient._clean_request_data(data)
return data | python | {
"resource": ""
} |
q263116 | ZipTaxClient.process_response | validation | def process_response(self, resp, multiple_rates):
""" Get the tax rate from the ZipTax response """
self._check_for_exceptions(resp, multiple_rates)
rates = {}
for result in resp['results']:
rate = ZipTaxClient._cast_tax_rate(result['taxSales'])
rates[result['geoCity']] = rate
if not multiple_rates:
return rates[list(rates.keys())[0]]
return rates | python | {
"resource": ""
} |
q263117 | ZipTaxClient._check_for_exceptions | validation | def _check_for_exceptions(self, resp, multiple_rates):
""" Check if there are exceptions that should be raised """
if resp['rCode'] != 100:
raise exceptions.get_exception_for_code(resp['rCode'])(resp)
results = resp['results']
if len(results) == 0:
raise exceptions.ZipTaxNoResults('No results found')
if len(results) > 1 and not multiple_rates:
# It's fine if all the taxes are the same
rates = [result['taxSales'] for result in results]
if len(set(rates)) != 1:
raise exceptions.ZipTaxMultipleResults('Multiple results found but requested only one') | python | {
"resource": ""
} |
q263118 | get_all_text | validation | def get_all_text(node):
"""Recursively extract all text from node."""
if node.nodeType == node.TEXT_NODE:
return node.data
else:
text_string = ""
for child_node in node.childNodes:
text_string += get_all_text(child_node)
return text_string | python | {
"resource": ""
} |
q263119 | ProviderSite.register | validation | def register(self, provider_class):
"""
Registers a provider with the site.
"""
if not issubclass(provider_class, BaseProvider):
raise TypeError('%s is not a subclass of BaseProvider' % provider_class.__name__)
if provider_class in self._registered_providers:
raise AlreadyRegistered('%s is already registered' % provider_class.__name__)
if issubclass(provider_class, DjangoProvider):
# set up signal handler for cache invalidation
signals.post_save.connect(
self.invalidate_stored_oembeds,
sender=provider_class._meta.model
)
# don't build the regex yet - if not all urlconfs have been loaded
# and processed at this point, the DjangoProvider instances will fail
# when attempting to reverse urlpatterns that haven't been created.
# Rather, the regex-list will be populated once, on-demand.
self._registered_providers.append(provider_class)
# flag for re-population
self.invalidate_providers() | python | {
"resource": ""
} |
q263120 | ProviderSite.unregister | validation | def unregister(self, provider_class):
"""
Unregisters a provider from the site.
"""
if not issubclass(provider_class, BaseProvider):
raise TypeError('%s must be a subclass of BaseProvider' % provider_class.__name__)
if provider_class not in self._registered_providers:
raise NotRegistered('%s is not registered' % provider_class.__name__)
self._registered_providers.remove(provider_class)
# flag for repopulation
self.invalidate_providers() | python | {
"resource": ""
} |
q263121 | ProviderSite.populate | validation | def populate(self):
"""
Populate the internal registry's dictionary with the regexes for each
provider instance
"""
self._registry = {}
for provider_class in self._registered_providers:
instance = provider_class()
self._registry[instance] = instance.regex
for stored_provider in StoredProvider.objects.active():
self._registry[stored_provider] = stored_provider.regex
self._populated = True | python | {
"resource": ""
} |
q263122 | ProviderSite.provider_for_url | validation | def provider_for_url(self, url):
"""
Find the right provider for a URL
"""
for provider, regex in self.get_registry().items():
if re.match(regex, url) is not None:
return provider
raise OEmbedMissingEndpoint('No endpoint matches URL: %s' % url) | python | {
"resource": ""
} |
q263123 | ProviderSite.invalidate_stored_oembeds | validation | def invalidate_stored_oembeds(self, sender, instance, created, **kwargs):
"""
A hook for django-based oembed providers to delete any stored oembeds
"""
ctype = ContentType.objects.get_for_model(instance)
StoredOEmbed.objects.filter(
object_id=instance.pk,
content_type=ctype).delete() | python | {
"resource": ""
} |
q263124 | ProviderSite.embed | validation | def embed(self, url, **kwargs):
"""
The heart of the matter
"""
try:
# first figure out the provider
provider = self.provider_for_url(url)
except OEmbedMissingEndpoint:
raise
else:
try:
# check the database for a cached response, because of certain
# race conditions that exist with get_or_create(), do a filter
# lookup and just grab the first item
stored_match = StoredOEmbed.objects.filter(
match=url,
maxwidth=kwargs.get('maxwidth', None),
maxheight=kwargs.get('maxheight', None),
date_expires__gte=datetime.datetime.now())[0]
return OEmbedResource.create_json(stored_match.response_json)
except IndexError:
# query the endpoint and cache response in db
# prevent None from being passed in as a GET param
params = dict([(k, v) for k, v in kwargs.items() if v])
# request an oembed resource for the url
resource = provider.request_resource(url, **params)
try:
cache_age = int(resource.cache_age)
if cache_age < MIN_OEMBED_TTL:
cache_age = MIN_OEMBED_TTL
except:
cache_age = DEFAULT_OEMBED_TTL
date_expires = datetime.datetime.now() + datetime.timedelta(seconds=cache_age)
stored_oembed, created = StoredOEmbed.objects.get_or_create(
match=url,
maxwidth=kwargs.get('maxwidth', None),
maxheight=kwargs.get('maxheight', None))
stored_oembed.response_json = resource.json
stored_oembed.resource_type = resource.type
stored_oembed.date_expires = date_expires
if resource.content_object:
stored_oembed.content_object = resource.content_object
stored_oembed.save()
return resource | python | {
"resource": ""
} |
q263125 | ProviderSite.autodiscover | validation | def autodiscover(self, url):
"""
Load up StoredProviders from url if it is an oembed scheme
"""
headers, response = fetch_url(url)
if headers['content-type'].split(';')[0] in ('application/json', 'text/javascript'):
provider_data = json.loads(response)
return self.store_providers(provider_data) | python | {
"resource": ""
} |
q263126 | ProviderSite.store_providers | validation | def store_providers(self, provider_data):
"""
Iterate over the returned json and try to sort out any new providers
"""
if not hasattr(provider_data, '__iter__'):
raise OEmbedException('Autodiscovered response not iterable')
provider_pks = []
for provider in provider_data:
if 'endpoint' not in provider or \
'matches' not in provider:
continue
resource_type = provider.get('type')
if resource_type not in RESOURCE_TYPES:
continue
stored_provider, created = StoredProvider.objects.get_or_create(
wildcard_regex=provider['matches']
)
if created:
stored_provider.endpoint_url = relative_to_full(
provider['endpoint'],
provider['matches']
)
stored_provider.resource_type = resource_type
stored_provider.save()
provider_pks.append(stored_provider.pk)
return StoredProvider.objects.filter(pk__in=provider_pks) | python | {
"resource": ""
} |
q263127 | DjangoProvider.map_attr | validation | def map_attr(self, mapping, attr, obj):
"""
A kind of cheesy method that allows for callables or attributes to
be used interchangably
"""
if attr not in mapping and hasattr(self, attr):
if not callable(getattr(self, attr)):
mapping[attr] = getattr(self, attr)
else:
mapping[attr] = getattr(self, attr)(obj) | python | {
"resource": ""
} |
q263128 | DjangoProvider.get_image | validation | def get_image(self, obj):
"""
Return an ImageFileField instance
"""
if self._meta.image_field:
return getattr(obj, self._meta.image_field) | python | {
"resource": ""
} |
q263129 | DjangoProvider.map_to_dictionary | validation | def map_to_dictionary(self, url, obj, **kwargs):
"""
Build a dictionary of metadata for the requested object.
"""
maxwidth = kwargs.get('maxwidth', None)
maxheight = kwargs.get('maxheight', None)
provider_url, provider_name = self.provider_from_url(url)
mapping = {
'version': '1.0',
'url': url,
'provider_name': provider_name,
'provider_url': provider_url,
'type': self.resource_type
}
# a hook
self.preprocess(obj, mapping, **kwargs)
# resize image if we have a photo, otherwise use the given maximums
if self.resource_type == 'photo' and self.get_image(obj):
self.resize_photo(obj, mapping, maxwidth, maxheight)
elif self.resource_type in ('video', 'rich', 'photo'):
width, height = size_to_nearest(
maxwidth,
maxheight,
self._meta.valid_sizes,
self._meta.force_fit
)
mapping.update(width=width, height=height)
# create a thumbnail
if self.get_image(obj):
self.thumbnail(obj, mapping)
# map attributes to the mapping dictionary. if the attribute is
# a callable, it must have an argument signature of
# (self, obj)
for attr in ('title', 'author_name', 'author_url', 'html'):
self.map_attr(mapping, attr, obj)
# fix any urls
if 'url' in mapping:
mapping['url'] = relative_to_full(mapping['url'], url)
if 'thumbnail_url' in mapping:
mapping['thumbnail_url'] = relative_to_full(mapping['thumbnail_url'], url)
if 'html' not in mapping and mapping['type'] in ('video', 'rich'):
mapping['html'] = self.render_html(obj, context=Context(mapping))
# a hook
self.postprocess(obj, mapping, **kwargs)
return mapping | python | {
"resource": ""
} |
q263130 | DjangoDateBasedProvider.get_object | validation | def get_object(self, url, month_format='%b', day_format='%d'):
"""
Parses the date from a url and uses it in the query. For objects which
are unique for date.
"""
params = self.get_params(url)
try:
year = params[self._meta.year_part]
month = params[self._meta.month_part]
day = params[self._meta.day_part]
except KeyError:
try:
# named lookups failed, so try to get the date using the first
# three parameters
year, month, day = params['_0'], params['_1'], params['_2']
except KeyError:
raise OEmbedException('Error extracting date from url parameters')
try:
tt = time.strptime('%s-%s-%s' % (year, month, day),
'%s-%s-%s' % ('%Y', month_format, day_format))
date = datetime.date(*tt[:3])
except ValueError:
raise OEmbedException('Error parsing date from: %s' % url)
# apply the date-specific lookups
if isinstance(self._meta.model._meta.get_field(self._meta.date_field), DateTimeField):
min_date = datetime.datetime.combine(date, datetime.time.min)
max_date = datetime.datetime.combine(date, datetime.time.max)
query = {'%s__range' % self._meta.date_field: (min_date, max_date)}
else:
query = {self._meta.date_field: date}
# apply the regular search lookups
for key, value in self._meta.fields_to_match.iteritems():
try:
query[value] = params[key]
except KeyError:
raise OEmbedException('%s was not found in the urlpattern parameters. Valid names are: %s' % (key, ', '.join(params.keys())))
try:
obj = self.get_queryset().get(**query)
except self._meta.model.DoesNotExist:
raise OEmbedException('Requested object not found')
return obj | python | {
"resource": ""
} |
q263131 | Inspire2CDS.get_record | validation | def get_record(self):
"""Override the base."""
self.recid = self.get_recid()
self.remove_controlfields()
self.update_system_numbers()
self.add_systemnumber("Inspire", recid=self.recid)
self.add_control_number("003", "SzGeCERN")
self.update_collections()
self.update_languages()
self.update_reportnumbers()
self.update_authors()
self.update_journals()
self.update_subject_categories("INSPIRE", "SzGeCERN", "categories_cds")
self.update_pagenumber()
self.update_notes()
self.update_experiments()
self.update_isbn()
self.update_dois()
self.update_links_and_ffts()
self.update_date()
self.update_date_year()
self.update_hidden_notes()
self.update_oai_info()
self.update_cnum()
self.update_conference_info()
self.fields_list = [
"909", "541", "961",
"970", "690", "695",
"981",
]
self.strip_fields()
if "ANNOUNCEMENT" in self.collections:
self.update_conference_111()
self.update_conference_links()
record_add_field(self.record, "690", ind1="C", subfields=[("a", "CONFERENCE")])
if "THESIS" in self.collections:
self.update_thesis_information()
self.update_thesis_supervisors()
if "PROCEEDINGS" in self.collections:
# Special proceeding syntax
self.update_title_to_proceeding()
self.update_author_to_proceeding()
record_add_field(self.record, "690", ind1="C", subfields=[("a", "CONFERENCE")])
# 690 tags
if self.tag_as_cern:
record_add_field(self.record, "690", ind1="C", subfields=[("a", "CERN")])
return self.record | python | {
"resource": ""
} |
q263132 | Inspire2CDS.update_oai_info | validation | def update_oai_info(self):
"""Add the 909 OAI info to 035."""
for field in record_get_field_instances(self.record, '909', ind1="C", ind2="O"):
new_subs = []
for tag, value in field[0]:
if tag == "o":
new_subs.append(("a", value))
else:
new_subs.append((tag, value))
if value in ["CERN", "CDS", "ForCDS"]:
self.tag_as_cern = True
record_add_field(self.record, '024', ind1="8", subfields=new_subs)
record_delete_fields(self.record, '909') | python | {
"resource": ""
} |
q263133 | Inspire2CDS.update_cnum | validation | def update_cnum(self):
"""Check if we shall add cnum in 035."""
if "ConferencePaper" not in self.collections:
cnums = record_get_field_values(self.record, '773', code="w")
for cnum in cnums:
cnum_subs = [
("9", "INSPIRE-CNUM"),
("a", cnum)
]
record_add_field(self.record, "035", subfields=cnum_subs) | python | {
"resource": ""
} |
q263134 | Inspire2CDS.update_hidden_notes | validation | def update_hidden_notes(self):
"""Remove hidden notes and tag as CERN if detected."""
if not self.tag_as_cern:
notes = record_get_field_instances(self.record,
tag="595")
for field in notes:
for dummy, value in field[0]:
if value == "CDS":
self.tag_as_cern = True
record_delete_fields(self.record, tag="595") | python | {
"resource": ""
} |
q263135 | Inspire2CDS.update_notes | validation | def update_notes(self):
"""Remove INSPIRE specific notes."""
fields = record_get_field_instances(self.record, '500')
for field in fields:
subs = field_get_subfields(field)
for sub in subs.get('a', []):
sub = sub.strip() # remove any spaces before/after
if sub.startswith("*") and sub.endswith("*"):
record_delete_field(self.record, tag="500",
field_position_global=field[4]) | python | {
"resource": ""
} |
q263136 | Inspire2CDS.update_title_to_proceeding | validation | def update_title_to_proceeding(self):
"""Move title info from 245 to 111 proceeding style."""
titles = record_get_field_instances(self.record,
tag="245")
for title in titles:
subs = field_get_subfields(title)
new_subs = []
if "a" in subs:
new_subs.append(("a", subs['a'][0]))
if "b" in subs:
new_subs.append(("c", subs['b'][0]))
record_add_field(self.record,
tag="111",
subfields=new_subs)
record_delete_fields(self.record, tag="245")
record_delete_fields(self.record, tag="246") | python | {
"resource": ""
} |
q263137 | Inspire2CDS.update_reportnumbers | validation | def update_reportnumbers(self):
"""Update reportnumbers."""
report_037_fields = record_get_field_instances(self.record, '037')
for field in report_037_fields:
subs = field_get_subfields(field)
for val in subs.get("a", []):
if "arXiv" not in val:
record_delete_field(self.record,
tag="037",
field_position_global=field[4])
new_subs = [(code, val[0]) for code, val in subs.items()]
record_add_field(self.record, "088", subfields=new_subs)
break | python | {
"resource": ""
} |
q263138 | Inspire2CDS.update_isbn | validation | def update_isbn(self):
"""Remove dashes from ISBN."""
isbns = record_get_field_instances(self.record, '020')
for field in isbns:
for idx, (key, value) in enumerate(field[0]):
if key == 'a':
field[0][idx] = ('a', value.replace("-", "").strip()) | python | {
"resource": ""
} |
q263139 | Inspire2CDS.update_dois | validation | def update_dois(self):
"""Remove duplicate BibMatch DOIs."""
dois = record_get_field_instances(self.record, '024', ind1="7")
all_dois = {}
for field in dois:
subs = field_get_subfield_instances(field)
subs_dict = dict(subs)
if subs_dict.get('a'):
if subs_dict['a'] in all_dois:
record_delete_field(self.record, tag='024', ind1='7', field_position_global=field[4])
continue
all_dois[subs_dict['a']] = field | python | {
"resource": ""
} |
q263140 | Inspire2CDS.update_date_year | validation | def update_date_year(self):
"""260 Date normalization."""
dates = record_get_field_instances(self.record, '260')
for field in dates:
for idx, (key, value) in enumerate(field[0]):
if key == 'c':
field[0][idx] = ('c', value[:4])
elif key == 't':
del field[0][idx]
if not dates:
published_years = record_get_field_values(self.record, "773", code="y")
if published_years:
record_add_field(
self.record, "260", subfields=[("c", published_years[0][:4])])
else:
other_years = record_get_field_values(self.record, "269", code="c")
if other_years:
record_add_field(
self.record, "260", subfields=[("c", other_years[0][:4])]) | python | {
"resource": ""
} |
q263141 | Inspire2CDS.update_languages | validation | def update_languages(self):
"""041 Language."""
language_fields = record_get_field_instances(self.record, '041')
language = "eng"
record_delete_fields(self.record, "041")
for field in language_fields:
subs = field_get_subfields(field)
if 'a' in subs:
language = self.get_config_item(subs['a'][0], "languages")
break
new_subs = [('a', language)]
record_add_field(self.record, "041", subfields=new_subs) | python | {
"resource": ""
} |
q263142 | generate_dirlist_html | validation | def generate_dirlist_html(FS, filepath):
"""
Generate directory listing HTML
Arguments:
FS (FS): filesystem object to read files from
filepath (str): path to generate directory listings for
Keyword Arguments:
list_dir (callable: list[str]): list file names in a directory
isdir (callable: bool): os.path.isdir
Yields:
str: lines of an HTML table
"""
yield '<table class="dirlist">'
if filepath == '/':
filepath = ''
for name in FS.listdir(filepath):
full_path = pathjoin(filepath, name)
if FS.isdir(full_path):
full_path = full_path + '/'
yield u'<tr><td><a href="{0}">{0}</a></td></tr>'.format(
cgi.escape(full_path)) # TODO XXX
yield '</table>' | python | {
"resource": ""
} |
q263143 | check_pkgs_integrity | validation | def check_pkgs_integrity(filelist, logger, ftp_connector,
timeout=120, sleep_time=10):
"""
Check that files are no longer being uploaded to the server
(i.e. their remote sizes have stabilized).
@timeout - time after which the script will register an error.
"""
ref_1 = []
ref_2 = []
i = 1
print >> sys.stdout, "\nChecking packages integrity."
for filename in filelist:
# ref_1.append(self.get_remote_file_size(filename))
get_remote_file_size(ftp_connector, filename, ref_1)
print >> sys.stdout, "\nGoing to sleep for %i sec." % (sleep_time,)
time.sleep(sleep_time)
while sleep_time*i < timeout:
for filename in filelist:
# ref_2.append(self.get_remote_file_size(filename))
get_remote_file_size(ftp_connector, filename, ref_2)
if ref_1 == ref_2:
print >> sys.stdout, "\nIntegrity OK:)"
logger.info("Packages integrity OK.")
break
else:
print >> sys.stdout, "\nWaiting %d time for itegrity..." % (i,)
logger.info("\nWaiting %d time for itegrity..." % (i,))
i += 1
ref_1, ref_2 = ref_2, []
time.sleep(sleep_time)
else:
not_finished_files = []
for count, val1 in enumerate(ref_1):
if val1 != ref_2[count]:
not_finished_files.append(filelist[count])
print >> sys.stdout, "\nOMG, OMG something wrong with integrity."
logger.error("Integrity check faild for files %s"
% (not_finished_files,)) | python | {
"resource": ""
} |
q263144 | fix_name_capitalization | validation | def fix_name_capitalization(lastname, givennames):
""" Convert capital letters to lowercase, keeping the first letter of each name capitalized. """
lastnames = lastname.split()
if len(lastnames) == 1:
if '-' in lastname:
names = lastname.split('-')
names = map(lambda a: a[0] + a[1:].lower(), names)
lastname = '-'.join(names)
else:
lastname = lastname[0] + lastname[1:].lower()
else:
names = []
for name in lastnames:
if re.search(r'[A-Z]\.', name):
names.append(name)
else:
names.append(name[0] + name[1:].lower())
lastname = ' '.join(names)
lastname = collapse_initials(lastname)
names = []
for name in givennames:
if re.search(r'[A-Z]\.', name):
names.append(name)
else:
names.append(name[0] + name[1:].lower())
givennames = ' '.join(names)
return lastname, givennames | python | {
"resource": ""
} |
q263145 | OEmbedConsumer.extract_oembeds | validation | def extract_oembeds(self, text, maxwidth=None, maxheight=None, resource_type=None):
"""
Scans a block of text and extracts oembed data on any urls,
returning it in a list of dictionaries
"""
parser = text_parser()
urls = parser.extract_urls(text)
return self.handle_extracted_urls(urls, maxwidth, maxheight, resource_type) | python | {
"resource": ""
} |
q263146 | OEmbedConsumer.strip | validation | def strip(self, text, *args, **kwargs):
"""
Try to maintain parity with what is extracted by extract since strip
will most likely be used in conjunction with extract
"""
if OEMBED_DEFAULT_PARSE_HTML:
extracted = self.extract_oembeds_html(text, *args, **kwargs)
else:
extracted = self.extract_oembeds(text, *args, **kwargs)
matches = [r['original_url'] for r in extracted]
match_handler = lambda m: m.group() not in matches and m.group() or ''
return re.sub(URL_RE, match_handler, text) | python | {
"resource": ""
} |
def autodiscover():
    """
    Automatically build the provider index.

    Imports ``<app>.oembed_providers`` for every installed Django app that
    defines such a module, letting each module register its providers as an
    import side effect.
    """
    import imp
    from django.conf import settings
    for app in settings.INSTALLED_APPS:
        try:
            app_path = __import__(app, {}, {}, [app.split('.')[-1]]).__path__
        except AttributeError:
            # Not a package (no __path__), so it cannot contain the module.
            continue
        try:
            imp.find_module('oembed_providers', app_path)
        except ImportError:
            continue
        __import__("%s.oembed_providers" % app)
"resource": ""
} |
def select(options=None):
    """ pass in a list of options, promt the user to select one, and return the selected option or None

    :param options: list of displayable choices (1-based menu is printed)
    :returns: the chosen element, or None on empty input / invalid number
    """
    if not options:
        return None
    # Right-align menu numbers to the width of the largest index.
    width = len(str(len(options)))
    for x,option in enumerate(options):
        sys.stdout.write('{:{width}}) {}\n'.format(x+1,option, width=width))
    sys.stdout.write('{:>{width}} '.format('#?', width=width+1))
    sys.stdout.flush()
    if sys.stdin.isatty():
        # regular prompt
        try:
            response = raw_input().strip()
        except (EOFError, KeyboardInterrupt):
            # handle ctrl-d, ctrl-c
            response = ''
    else:
        # try connecting to current tty, when using pipes
        # NOTE(review): this rebinds the global sys.stdin and never closes the
        # tty handle -- confirm that is acceptable for this CLI.
        sys.stdin = open("/dev/tty")
        try:
            response = ''
            while True:
                response += sys.stdin.read(1)
                if response.endswith('\n'):
                    break
        except (EOFError, KeyboardInterrupt):
            sys.stdout.flush()
            pass
    try:
        # Convert the 1-based menu answer to a 0-based index.
        response = int(response) - 1
    except ValueError:
        return None
    if response < 0 or response >= len(options):
        return None
    return options[response]
"resource": ""
} |
def main():
    """Command-line entry point.

    Builds one argparse sub-parser per publisher (all/elsevier/oxford/
    springer) and dispatches the parsed settings to ``call_package``.
    """
    argparser = ArgumentParser()
    subparsers = argparser.add_subparsers(dest='selected_subparser')
    all_parser = subparsers.add_parser('all')
    elsevier_parser = subparsers.add_parser('elsevier')
    oxford_parser = subparsers.add_parser('oxford')
    springer_parser = subparsers.add_parser('springer')
    all_parser.add_argument('--update-credentials', action='store_true')
    elsevier_parser.add_argument('--run-locally', action='store_true')
    elsevier_parser.add_argument('--package-name')
    elsevier_parser.add_argument('--path')
    elsevier_parser.add_argument('--CONSYN', action='store_true')
    elsevier_parser.add_argument('--update-credentials', action='store_true')
    elsevier_parser.add_argument('--extract-nations', action='store_true')
    oxford_parser.add_argument('--dont-empty-ftp', action='store_true')
    oxford_parser.add_argument('--package-name')
    oxford_parser.add_argument('--path')
    oxford_parser.add_argument('--update-credentials', action='store_true')
    oxford_parser.add_argument('--extract-nations', action='store_true')
    springer_parser.add_argument('--package-name')
    springer_parser.add_argument('--path')
    springer_parser.add_argument('--update-credentials', action='store_true')
    springer_parser.add_argument('--extract-nations', action='store_true')
    '''
    Transforms the argparse arguments from Namespace to dict and then to Bunch
    Therefore it is not necessary to access the arguments using the dict syntax
    The settings can be called like regular vars on the settings object
    '''
    settings = Bunch(vars(argparser.parse_args()))
    call_package(settings)
"resource": ""
} |
def get_record(self, record):
    """ Reads a dom xml element in oaidc format and
    returns the bibrecord object

    :param record: DOM element for one OAI-DC record (stored on self.document
        so the private _get_* accessors can read it)
    :returns: a MARC record with language, publisher/date, title, copyright,
        subject, authors, conference (773) and collection (980) fields
    """
    self.document = record
    rec = create_record()
    language = self._get_language()
    # English is the default; only tag non-English records.
    if language and language != 'en':
        record_add_field(rec, '041', subfields=[('a', language)])
    publisher = self._get_publisher()
    date = self._get_date()
    if publisher and date:
        record_add_field(rec, '260', subfields=[('b', publisher),
                                                ('c', date)])
    elif publisher:
        record_add_field(rec, '260', subfields=[('b', publisher)])
    elif date:
        record_add_field(rec, '260', subfields=[('c', date)])
    title = self._get_title()
    if title:
        record_add_field(rec, '245', subfields=[('a', title)])
    record_copyright = self._get_copyright()
    if record_copyright:
        record_add_field(rec, '540', subfields=[('a', record_copyright)])
    subject = self._get_subject()
    if subject:
        record_add_field(rec, '650', ind1='1', ind2='7', subfields=[('a', subject),
                                                                    ('2', 'PoS')])
    authors = self._get_authors()
    # MARC convention: first author goes to 100, the rest to 700.
    first_author = True
    for author in authors:
        subfields = [('a', author[0])]
        for affiliation in author[1]:
            subfields.append(('v', affiliation))
        if first_author:
            record_add_field(rec, '100', subfields=subfields)
            first_author = False
        else:
            record_add_field(rec, '700', subfields=subfields)
    # Identifier looks like "oai:pos...:CONF/NNN"; split out conference and
    # contribution -- TODO confirm the exact identifier layout.
    identifier = self.get_identifier()
    conference = identifier.split(':')[2]
    conference = conference.split('/')[0]
    contribution = identifier.split(':')[2]
    contribution = contribution.split('/')[1]
    record_add_field(rec, '773', subfields=[('p', 'PoS'),
                                            ('v', conference.replace(' ', '')),
                                            ('c', contribution),
                                            ('y', date[:4])])
    record_add_field(rec, '980', subfields=[('a', 'ConferencePaper')])
    record_add_field(rec, '980', subfields=[('a', 'HEP')])
    return rec
"resource": ""
} |
def progress(length, **kwargs):
    """display a progress that can update in place

    example --
        total_length = 1000
        with echo.progress(total_length) as p:
            for x in range(total_length):
                # do something crazy
                p.update(x)

    length -- int -- the total size of what you will be updating progress on

    NOTE(review): this is a generator with a single yield, presumably wrapped
    by ``contextlib.contextmanager`` at the decorator site -- confirm.
    """
    quiet = False  # NOTE(review): assigned but never used in this body.
    progress_class = kwargs.pop("progress_class", Progress)
    kwargs["write_method"] = istdout.info
    kwargs["width"] = kwargs.get("width", globals()["WIDTH"])
    kwargs["length"] = length
    pbar = progress_class(**kwargs)
    pbar.update(0)
    yield pbar
    # Force the bar to 100% when the with-block exits.
    pbar.update(length)
    br()
"resource": ""
} |
def err(format_msg, *args, **kwargs):
    '''print format_msg to stderr

    format_msg -- string -- str.format() template filled with *args/**kwargs
    exc_info -- bool (popped from kwargs) -- include exception traceback
    '''
    exc_info = kwargs.pop("exc_info", False)
    stderr.warning(str(format_msg).format(*args, **kwargs), exc_info=exc_info)
"resource": ""
} |
def banner(*lines, **kwargs):
    """prints a banner

    sep -- string -- the character that will be on the line on the top and bottom
        and before any of the lines, defaults to *
    width -- integer -- the line width, defaults to the module-level WIDTH
    """
    sep = kwargs.get("sep", "*")
    count = kwargs.get("width", globals()["WIDTH"])
    out(sep * count)
    if lines:
        # Blank separator row between the border and the content lines.
        out(sep)
        for line in lines:
            out("{} {}".format(sep, line))
        out(sep)
    out(sep * count)
"resource": ""
} |
def table(*columns, **kwargs):
    """
    format columned data so we can easily print it out on a console, this just takes
    columns of data and it will format it into properly aligned columns, it's not
    fancy, but it works for most type of strings that I need it for, like server name
    lists.

    other formatting options:
        http://stackoverflow.com/a/8234511/5006
    other packages that probably do this way better:
        https://stackoverflow.com/a/26937531/5006

    :Example:
        >>> echo.table([(1, 2), (3, 4), (5, 6), (7, 8), (9, 0)])
        1  2
        3  4
        5  6
        7  8
        9  0
        >>> echo.table([1, 3, 5, 7, 9], [2, 4, 6, 8, 0])
        1  2
        3  4
        5  6
        7  8
        9  0

    :param *columns: can either be a list of rows or multiple lists representing each
        column in the table
    :param **kwargs: dict
        prefix -- string -- what you want before each row (eg, a tab)
        buf_count -- integer -- how many spaces between longest col value and its neighbor
        headers -- list -- the headers you want, must match column count
        widths -- list -- the widths of each column you want to use, this doesn't have
            to match column count, so you can do something like [0, 5] to set the
            width of the second column
        width -- int -- similar to widths except it will set this value for all columns
    """
    ret = []
    prefix = kwargs.get('prefix', '')
    buf_count = kwargs.get('buf_count', 2)
    if len(columns) == 1:
        # One argument: it is already a list of rows.
        columns = list(columns[0])
    else:
        # without the list the zip iterator gets spent, I'm sure I can make this
        # better
        columns = list(zip(*columns))

    headers = kwargs.get("headers", [])
    if headers:
        columns.insert(0, headers)

    # we have to go through all the rows and calculate the length of each
    # column of each row
    widths = kwargs.get("widths", [])
    row_counts = Counter()
    for i in range(len(widths)):
        row_counts[i] = int(widths[i])

    # NOTE(review): this assignment is shadowed by the identical one below
    # and its value is never read -- looks redundant.
    width = int(kwargs.get("width", 0))

    for row in columns:
        for i, c in enumerate(row):
            if isinstance(c, basestring):
                cl = len(c)
            else:
                cl = len(str(c))
            if cl > row_counts[i]:
                row_counts[i] = cl

    width = int(kwargs.get("width", 0))
    if width:
        # Enforce a minimum width for every column.
        for i in row_counts:
            if row_counts[i] < width:
                row_counts[i] = width

    # actually go through and format each row
    def colstr(c):
        # Stringify non-string cells for formatting.
        if isinstance(c, basestring): return c
        return str(c)

    def rowstr(row, prefix, row_counts):
        row_format = prefix
        cols = list(map(colstr, row))
        for i in range(len(row_counts)):
            c = cols[i]
            # build the format string for each row, we use the row_counts found
            # above to decide how much padding each column should get
            # https://stackoverflow.com/a/9536084/5006
            if re.match(r"^\d+(?:\.\d+)?$", c):
                # Numeric cells are right-aligned; first column gets no buffer.
                if i == 0:
                    row_format += "{:>" + str(row_counts[i]) + "}"
                else:
                    row_format += "{:>" + str(row_counts[i] + buf_count) + "}"
            else:
                row_format += "{:<" + str(row_counts[i] + buf_count) + "}"
        return row_format.format(*cols)

    for row in columns:
        ret.append(rowstr(row, prefix, row_counts))
    out(os.linesep.join(ret))
"resource": ""
} |
def prompt(question, choices=None):
    """echo a prompt to the user and wait for an answer

    question -- string -- the prompt for the user
    choices -- list -- if given, only exit when prompt matches one of the choices
    return -- string -- the answer that was given by the user
    """
    # NOTE(review): re.match anchors at the start, so this only skips the
    # ": " suffix when question is a single whitespace char -- presumably the
    # intent was "ends with whitespace" (re.search); confirm.
    if not re.match("\s$", question):
        question = "{}: ".format(question)

    while True:
        # raw_input only exists on Python 2.
        if sys.version_info[0] > 2:
            answer = input(question)
        else:
            answer = raw_input(question)

        if not choices or answer in choices:
            break

    return answer
"resource": ""
} |
def get_records(self, url):
    """
    Returns the records listed in the webpage given as
    parameter as a xml String.

    @param url: the url of the Journal, Book, Protocol or Reference work
    """
    page = urllib2.urlopen(url)
    pages = [BeautifulSoup(page)]
    #content spread over several pages?
    numpag = pages[0].body.findAll('span', attrs={'class': 'number-of-pages'})
    if len(numpag) > 0:
        if re.search('^\d+$', numpag[0].string):
            # Fetch pages 2..N; page 1 is already loaded above.
            for i in range(int(numpag[0].string)-1):
                page = urllib2.urlopen('%s/page/%i' % (url, i+2))
                pages.append(BeautifulSoup(page))
        else:
            print("number of pages %s not an integer" % (numpag[0].string))
    impl = getDOMImplementation()
    doc = impl.createDocument(None, "collection", None)
    links = []
    for page in pages:
        # Record links appear either as <p class="title"> or <h3 class="title">.
        links += page.body.findAll('p', attrs={'class': 'title'})
        links += page.body.findAll('h3', attrs={'class': 'title'})
    for link in links:
        record = self._get_record(link)
        doc.firstChild.appendChild(record)
    return doc.toprettyxml()
"resource": ""
} |
def connect(self):
    """Logs into the specified ftp server and returns connector.

    Retries up to CFG_FTP_CONNECTION_ATTEMPTS times on socket timeouts,
    sleeping CFG_FTP_TIMEOUT_SLEEP_DURATION seconds between attempts.

    :raises LoginException: when the connection ultimately fails
    """
    for tried_connection_count in range(CFG_FTP_CONNECTION_ATTEMPTS):
        try:
            self.ftp = FtpHandler(self.config.OXFORD.URL,
                                  self.config.OXFORD.LOGIN,
                                  self.config.OXFORD.PASSWORD)
            self.logger.debug(("Successful connection to the "
                               "Oxford University Press server"))
            return
        except socket_timeout_exception as err:
            self.logger.error(('Failed to connect %d of %d times. '
                               'Will sleep for %d seconds and try again.')
                              % (tried_connection_count+1,
                                 CFG_FTP_CONNECTION_ATTEMPTS,
                                 CFG_FTP_TIMEOUT_SLEEP_DURATION))
            time.sleep(CFG_FTP_TIMEOUT_SLEEP_DURATION)
        except Exception as err:
            # Non-timeout failure: give up immediately.
            self.logger.error(('Failed to connect to the Oxford '
                               'University Press server. %s') % (err,))
            break
    # NOTE(review): on Python 3 the except-clause name `err` is unset after
    # the handler, so this raise can itself fail with NameError when every
    # attempt times out -- confirm intended behavior.
    raise LoginException(err)
"resource": ""
} |
def schedule_mode(self, mode):
    """
    Set the thermostat mode

    :param mode: The desired mode integer value.
                 Auto = 1
                 Temporary hold = 2
                 Permanent hold = 3
    :raises Exception: if mode is not one of the three supported values
    """
    modes = [config.SCHEDULE_RUN, config.SCHEDULE_TEMPORARY_HOLD, config.SCHEDULE_HOLD]
    if mode not in modes:
        raise Exception("Invalid mode. Please use one of: {}".format(modes))

    self.set_data({"ScheduleMode": mode})
"resource": ""
} |
def set_target_fahrenheit(self, fahrenheit, mode=config.SCHEDULE_HOLD):
    """
    Set the target temperature to the desired fahrenheit, with more granular control of the
    hold mode

    :param fahrenheit: The desired temperature in F
    :param mode: The desired mode to operate in (defaults to permanent hold)
    """
    # Convert to NuHeat's internal temperature unit before sending.
    temperature = fahrenheit_to_nuheat(fahrenheit)
    self.set_target_temperature(temperature, mode)
"resource": ""
} |
def set_target_celsius(self, celsius, mode=config.SCHEDULE_HOLD):
    """
    Set the target temperature to the desired celsius, with more granular control of the hold
    mode

    :param celsius: The desired temperature in C
    :param mode: The desired mode to operate in (defaults to permanent hold)
    """
    # Convert to NuHeat's internal temperature unit before sending.
    temperature = celsius_to_nuheat(celsius)
    self.set_target_temperature(temperature, mode)
"resource": ""
} |
def set_target_temperature(self, temperature, mode=config.SCHEDULE_HOLD):
    """
    Updates the target temperature on the NuHeat API

    :param temperature: The desired temperature in NuHeat format
    :param mode: SCHEDULE_TEMPORARY_HOLD resumes the programmed schedule at
        the next event; SCHEDULE_HOLD holds the temperature permanently
    :raises Exception: if mode is not a supported hold mode
    """
    # Clamp to the thermostat's supported range rather than rejecting.
    if temperature < self.min_temperature:
        temperature = self.min_temperature

    if temperature > self.max_temperature:
        temperature = self.max_temperature

    modes = [config.SCHEDULE_TEMPORARY_HOLD, config.SCHEDULE_HOLD]
    if mode not in modes:
        raise Exception("Invalid mode. Please use one of: {}".format(modes))

    self.set_data({
        "SetPointTemp": temperature,
        "ScheduleMode": mode
    })
"resource": ""
} |
def load_config(filename=None, section_option_dict={}):
    """
    This function returns a Bunch object from the stated config file.

    !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    NOTE:
    The values are not evaluated by default.
    !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

    filename:
        The desired config file to read.
        The config file must be written in a syntax readable to the
        ConfigParser module -> INI syntax

        [sectionA]
        optionA1 = ...
        optionA2 = ...

    section_option_dict:
        A dictionary that contains keys, which are associated to the sections
        in the config file, and values, which are a list of the desired
        options.
        If empty, everything will be loaded.
        If the lists are empty, everything from the sections will be loaded.

        Example:
        dict = {'sectionA': ['optionA1', 'optionA2', ...],
                'sectionB': ['optionB1', 'optionB2', ...]}

        config = get_config('config.cfg', dict)
        config.sectionA.optionA1

    Other:
        Bunch can be found in configparser.py

    NOTE(review): the mutable default ``section_option_dict={}`` is shared
    across calls; it is only read here, but confirm it is never mutated by
    ``_prepare_working_dict``.
    """
    config = ConfigParser()
    config.read(filename)

    working_dict = _prepare_working_dict(config, section_option_dict)
    tmp_dict = {}
    # iteritems() makes this Python-2 only.
    for section, options in working_dict.iteritems():
        tmp_dict[section] = {}
        for option in options:
            tmp_dict[section][option] = config.get(section, option)

    return Bunch(tmp_dict)
"resource": ""
} |
def authenticate(self):
    """
    Authenticate against the NuHeat API

    A successful call stores the returned session id on self._session_id;
    subsequent calls are no-ops while a session id is cached.

    :raises Exception: when the API response carries no SessionId
    """
    if self._session_id:
        _LOGGER.debug("Using existing NuHeat session")
        return

    _LOGGER.debug("Creating NuHeat session")
    post_data = {
        "Email": self.username,
        "Password": self.password,
        "application": "0"
    }
    data = self.request(config.AUTH_URL, method="POST", data=post_data)
    session_id = data.get("SessionId")
    if not session_id:
        raise Exception("Authentication error")

    self._session_id = session_id
"resource": ""
} |
def request(self, url, method="GET", data=None, params=None, retry=True):
    """
    Make a request to the NuHeat API

    :param url: The URL to request
    :param method: The type of request to make (GET, POST)
    :param data: Data to be sent along with POST requests
    :param params: Querystring parameters
    :param retry: Attempt to re-authenticate and retry request if necessary
    :returns: parsed JSON body, or the raw Response when the body is not JSON
    :raises requests.HTTPError: for non-401 error status codes
    """
    headers = config.REQUEST_HEADERS

    # Attach the cached session id to every authenticated request.
    if params and self._session_id:
        params['sessionid'] = self._session_id

    if method == "GET":
        response = requests.get(url, headers=headers, params=params)
    elif method == "POST":
        response = requests.post(url, headers=headers, params=params, data=data)

    # Handle expired sessions
    if response.status_code == 401 and retry:
        _LOGGER.warn("NuHeat APIrequest unauthorized [401]. Try to re-authenticate.")
        self._session_id = None
        self.authenticate()
        # retry=False prevents infinite recursion on repeated 401s.
        return self.request(url, method=method, data=data, params=params, retry=False)

    response.raise_for_status()
    try:
        return response.json()
    except ValueError:
        # No JSON object
        return response
"resource": ""
} |
def handle_starttag(self, tag, attrs):
    """Serialize *tag* with its attributes if it is a MathML element.

    Non-MathML start tags are silently dropped.
    """
    if tag not in self.mathml_elements:
        return
    rendered_attrs = "".join(' {0}="{1}"'.format(name, value)
                             for name, value in attrs)
    self.fed.append("<{0}{1}>".format(tag, rendered_attrs))
"resource": ""
} |
def handle_endtag(self, tag):
    """Serialize the closing tag when *tag* is a MathML element; drop others."""
    if tag not in self.mathml_elements:
        return
    self.fed.append("</{0}>".format(tag))
"resource": ""
} |
def html_to_text(cls, html):
    """Return stripped HTML, keeping only MathML.

    :param html: raw HTML string to feed through the parser
    :returns: text with non-MathML markup removed and the remainder
        re-escaped for XML (MathML tags preserved)

    NOTE(review): signature takes ``cls`` and instantiates it, so this is
    presumably decorated with @classmethod outside this view -- confirm.
    """
    s = cls()
    s.feed(html)
    # Unescape entities first so escape_for_xml normalizes them uniformly.
    unescaped_data = s.unescape(s.get_data())
    return escape_for_xml(unescaped_data, tags_to_keep=s.mathml_elements)
"resource": ""
} |
def is_instance(self):
    """Return True if the wrapped callback is an instance of a class.

    A callback counts as an instance when it is neither a class, a plain
    function, nor a method -- e.g. an object implementing ``__call__``.

    Cleanup: the unused ``ret = False`` seed and the large commented-out
    Python 2 fallback were removed; the live logic is unchanged.
    """
    if self.is_class():
        return False
    # Neither a function nor a method -> assume a callable instance.
    return (not inspect.isfunction(self.callback)
            and not inspect.ismethod(self.callback))
"resource": ""
} |
def is_function(self):
    """return True if callback is a vanilla plain jane function"""
    if self.is_instance():
        return False
    if self.is_class():
        return False
    return isinstance(self.callback, (Callable, classmethod))
"resource": ""
} |
def merge_kwargs(self, kwargs):
    """these kwargs come from the @arg decorator, they are then merged into any
    keyword arguments that were automatically generated from the main function
    introspection

    :param kwargs: dict of argparse add_argument() keyword arguments
    """
    if kwargs:
        self.parser_kwargs.update(kwargs)

    #self.parser_kwargs['dest'] = self.name
    self.parser_kwargs.setdefault('dest', self.name)

    # special handling of any passed in values
    if 'default' in kwargs:
        # NOTE -- this doesn't use .set_default() because that is meant to
        # parse from the function definition so it actually has different syntax
        # than what the .set_default() method does. eg, @arg("--foo", default=[1, 2]) means
        # that the default value should be an array with 1 and 2 in it, where main(foo=[1, 2])
        # means foo should be constrained to choices=[1, 2]
        self.parser_kwargs["default"] = kwargs["default"]
        self.parser_kwargs["required"] = False

    elif 'action' in kwargs:
        # store_true/store_false flags and version actions can never be
        # required; everything else defaults to required.
        if kwargs['action'] in set(['store_false', 'store_true']):
            self.parser_kwargs['required'] = False

        elif kwargs['action'] in set(['version']):
            self.parser_kwargs.pop('required', False)

    else:
        self.parser_kwargs.setdefault("required", True)
"resource": ""
} |
def merge_from_list(self, list_args):
    """find any matching parser_args from list_args and merge them into this
    instance

    list_args -- list -- an array of (args, kwargs) tuples
    """
    def xs(name, parser_args, list_args):
        """build the generator of matching list_args"""
        for args, kwargs in list_args:
            # Match either on a shared flag string...
            if len(set(args) & parser_args) > 0:
                yield args, kwargs

            else:
                # ...or on an explicit dest= that equals this kwarg's name.
                if 'dest' in kwargs:
                    if kwargs['dest'] == name:
                        yield args, kwargs

    for args, kwargs in xs(self.name, self.parser_args, list_args):
        self.merge_args(args)
        self.merge_kwargs(kwargs)
"resource": ""
} |
q263172 | HelpFormatter._fill_text | validation | def _fill_text(self, text, width, indent):
"""Overridden to not get rid of newlines
https://github.com/python/cpython/blob/2.7/Lib/argparse.py#L620"""
lines = []
for line in text.splitlines(False):
if line:
# https://docs.python.org/2/library/textwrap.html
lines.extend(textwrap.wrap(
line.strip(),
width,
initial_indent=indent,
subsequent_indent=indent
))
else:
lines.append(line)
text = "\n".join(lines)
return text | python | {
"resource": ""
} |
def make_user_agent(component=None):
    """ create string suitable for HTTP User-Agent header

    :param component: optional component name appended after the
        "project/version" token
    :returns: e.g. "harvestingkit/0.x component"
    """
    packageinfo = pkg_resources.require("harvestingkit")[0]
    useragent = "{0}/{1}".format(packageinfo.project_name, packageinfo.version)
    if component is not None:
        useragent += " {0}".format(component)
    return useragent
"resource": ""
} |
def record_add_field(rec, tag, ind1='', ind2='', subfields=[],
                     controlfield_value=''):
    """Add a MARCXML datafield as a new child to a XML document.

    :param rec: parent lxml element the new field is appended to
    :param tag: 3-digit MARC tag
    :param ind1, ind2: MARC indicators (datafields only)
    :param subfields: list of (code, value) tuples
    :param controlfield_value: when given, a <controlfield> is created
        instead of a <datafield> and subfields/indicators are ignored
    :returns: rec, with the new element appended
    """
    if controlfield_value:
        doc = etree.Element("controlfield",
                            attrib={
                                "tag": tag,
                            })
        # unicode() makes this Python-2 only.
        doc.text = unicode(controlfield_value)
    else:
        doc = etree.Element("datafield",
                            attrib={
                                "tag": tag,
                                "ind1": ind1,
                                "ind2": ind2,
                            })
        for code, value in subfields:
            field = etree.SubElement(doc, "subfield", attrib={"code": code})
            field.text = value
    rec.append(doc)
    return rec
"resource": ""
} |
def record_xml_output(rec, pretty=True):
    """Given a document, return XML prettified.

    :param rec: lxml element tree of a MARCXML record
    :param pretty: when True, apply the hand-rolled indentation below
    :returns: serialized XML string with escaped MathML tags restored
    """
    from .html_utils import MathMLParser
    ret = etree.tostring(rec, xml_declaration=False)
    # Special MathML handling
    # Un-escape '&lt;' only in front of known MathML tag names, then all '&gt;'.
    ret = re.sub("(&lt;)(([\/]?{0}))".format("|[\/]?".join(MathMLParser.mathml_elements)), '<\g<2>', ret)
    ret = re.sub("&gt;", '>', ret)
    if pretty:
        # We are doing our own prettyfication as etree pretty_print is too insane.
        ret = ret.replace('</datafield>', '  </datafield>\n')
        ret = re.sub(r'<datafield(.*?)>', r'  <datafield\1>\n', ret)
        ret = ret.replace('</subfield>', '</subfield>\n')
        ret = ret.replace('<subfield', '    <subfield')
        ret = ret.replace('record>', 'record>\n')
    return ret
"resource": ""
} |
def escape_for_xml(data, tags_to_keep=None):
    """Escape ``&`` and ``<`` so *data* is valid XML text.

    When *tags_to_keep* is given, ``<`` characters that open or close one
    of those tags are preserved; every other ``<`` is escaped.
    """
    data = re.sub("&", "&amp;", data)
    if not tags_to_keep:
        return re.sub("<", "&lt;", data)
    kept = "|".join(tags_to_keep)
    return re.sub(r"(<)(?![\/]?({0})\b)".format(kept), '&lt;', data)
"resource": ""
} |
def format_arxiv_id(arxiv_id):
    """Properly format arXiv IDs.

    New-style bare ids get the ``arXiv:`` prefix; old-style prefixed
    identifiers (no dot) have the prefix stripped; anything else -- and
    falsy input -- is returned unchanged.
    """
    if not arxiv_id:
        return arxiv_id
    if "/" not in arxiv_id and "arXiv" not in arxiv_id:
        return "arXiv:%s" % (arxiv_id,)
    if '.' not in arxiv_id and arxiv_id.lower().startswith('arxiv:'):
        return arxiv_id[6:]  # strip away arxiv: for old identifiers
    return arxiv_id
"resource": ""
} |
def fix_journal_name(journal, knowledge_base):
    """Convert journal name to Inspire's short form.

    Returns a ``(journal, volume)`` tuple: a single trailing capital
    letter preceded by '.' or ' ' (e.g. the D in "Phys. Rev. D") is split
    off as the volume letter before the knowledge-base lookup.
    """
    if not journal or not knowledge_base or len(journal) < 2:
        return journal or '', ''
    volume = ''
    last, before_last = journal[-1], journal[-2]
    if 'A' <= last <= 'Z' and before_last in ('.', ' '):
        volume = last
        journal = journal[:-1]
    journal = journal.strip()
    # Look up the name verbatim (upper-cased first), then without dots.
    for key in (journal.upper(), journal):
        if key in knowledge_base:
            journal = knowledge_base[key].strip()
            break
    else:
        if '.' in journal:
            nodots = journal.replace('. ', ' ').replace('.', ' ').strip().upper()
            if nodots in knowledge_base:
                journal = knowledge_base[nodots].strip()
    return journal.replace('. ', '.'), volume
"resource": ""
} |
def add_nations_field(authors_subfields):
    """Add correct nations field according to mapping in NATIONS_DEFAULT_MAP.

    Scans the author's affiliation subfields (code 'v'), maps each
    comma-separated affiliation token to a country, and appends the sorted
    unique countries as 'w' subfields. When nothing maps, a single
    ('w', 'HUMAN CHECK') marker is appended instead.

    :param authors_subfields: list of (code, value) tuples, mutated in place
    """
    from .config import NATIONS_DEFAULT_MAP
    result = []
    for field in authors_subfields:
        if field[0] == 'v':
            values = [x.replace('.', '') for x in field[1].split(', ')]
            possible_affs = filter(lambda x: x is not None,
                                   map(NATIONS_DEFAULT_MAP.get, values))
            if 'CERN' in possible_affs and 'Switzerland' in possible_affs:
                # Don't use remove in case of multiple Switzerlands
                possible_affs = [x for x in possible_affs
                                 if x != 'Switzerland']
            result.extend(possible_affs)
    # De-duplicate and keep a stable alphabetical order.
    result = sorted(list(set(result)))
    if result:
        authors_subfields.extend([('w', res) for res in result])
    else:
        authors_subfields.append(('w', 'HUMAN CHECK'))
"resource": ""
} |
def fix_dashes(string):
    """Fix bad Unicode special dashes in string.

    Replaces several exotic dash codepoints with ASCII '-', transliterates
    the rest to ASCII via unidecode, and collapses runs of dashes.
    """
    string = string.replace(u'\u05BE', '-')
    string = string.replace(u'\u1806', '-')
    string = string.replace(u'\u2E3A', '-')
    string = string.replace(u'\u2E3B', '-')
    # NOTE(review): unidecode transliterates ALL non-ASCII characters, not
    # just dashes -- confirm that side effect is intended here.
    string = unidecode(string)
    return re.sub(r'--+', '-', string)
"resource": ""
} |
def fix_title_capitalization(title):
    """Try to capitalize properly a title string."""
    # Mixed case already -> assume the title is correct and keep it.
    if re.search("[A-Z]", title) and re.search("[a-z]", title):
        return title

    def _fix(word):
        if word.upper() in COMMON_ACRONYMS:
            return word.upper()
        if len(word) > 3:
            return word.capitalize()
        return word.lower()

    words = re.split(' +', title)
    return " ".join([words[0].capitalize()] + [_fix(w) for w in words[1:]])
"resource": ""
} |
def convert_html_subscripts_to_latex(text):
    """Convert some HTML tags to latex equivalents.

    ``<sub>x</sub>`` becomes ``$_{x}$`` and ``<sup>x</sup>`` becomes
    ``$^{x}$``; matching is non-greedy so adjacent tags stay separate.
    """
    text = re.sub("<sub>(.*?)</sub>", r"$_{\1}$", text)
    return re.sub("<sup>(.*?)</sup>", r"$^{\1}$", text)
"resource": ""
} |
def download_file(from_url, to_filename=None,
                  chunk_size=1024 * 8, retry_count=3):
    """Download URL to a file.

    :param from_url: URL to fetch
    :param to_filename: destination path; a temporary file is created when
        omitted
    :param chunk_size: bytes per streamed chunk
    :param retry_count: connection-level retries mounted on the session
    :returns: the path the content was written to
    """
    if not to_filename:
        to_filename = get_temporary_file()

    # Mount a retrying adapter for this URL's prefix only.
    session = requests.Session()
    adapter = requests.adapters.HTTPAdapter(max_retries=retry_count)
    session.mount(from_url, adapter)
    response = session.get(from_url, stream=True)
    with open(to_filename, 'wb') as fd:
        for chunk in response.iter_content(chunk_size):
            fd.write(chunk)
    return to_filename
"resource": ""
} |
def run_shell_command(commands, **kwargs):
    """Run a shell command.

    :param commands: argv-style list passed to subprocess.Popen
    :param kwargs: extra Popen keyword arguments
    :returns: (returncode, stdout, stderr) tuple
    """
    process = subprocess.Popen(commands,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE,
                               **kwargs)
    stdout, stderr = process.communicate()
    return process.returncode, stdout, stderr
"resource": ""
} |
def create_logger(name,
                  filename=None,
                  logging_level=logging.DEBUG):
    """Create a logger object.

    Always attaches a stream handler; additionally logs to *filename*
    when one is given. Note: calling this twice with the same name adds
    duplicate handlers.
    """
    fmt = logging.Formatter(('%(asctime)s - %(name)s - '
                             '%(levelname)-8s - %(message)s'))
    log = logging.getLogger(name)
    handlers = []
    if filename:
        handlers.append(logging.FileHandler(filename=filename))
    handlers.append(logging.StreamHandler())
    for handler in handlers:
        handler.setFormatter(fmt)
        log.addHandler(handler)
    log.setLevel(logging_level)
    return log
"resource": ""
} |
q263186 | _do_unzip | validation | def _do_unzip(zipped_file, output_directory):
"""Perform the actual uncompression."""
z = zipfile.ZipFile(zipped_file)
for path in z.namelist():
relative_path = os.path.join(output_directory, path)
dirname, dummy = os.path.split(relative_path)
try:
if relative_path.endswith(os.sep) and not os.path.exists(dirname):
os.makedirs(relative_path)
elif not os.path.exists(relative_path):
dirname = os.path.join(output_directory, os.path.dirname(path))
if os.path.dirname(path) and not os.path.exists(dirname):
os.makedirs(dirname)
fd = open(relative_path, "w")
fd.write(z.read(path))
fd.close()
except IOError, e:
raise e
return output_directory | python | {
"resource": ""
} |
def locate(pattern, root=os.curdir):
    """Locate all files matching supplied filename pattern recursively.

    Yields absolute paths of files under *root* whose basename matches
    the fnmatch-style *pattern*.
    """
    for dirpath, dummy, filenames in os.walk(os.path.abspath(root)):
        for hit in fnmatch.filter(filenames, pattern):
            yield os.path.join(dirpath, hit)
"resource": ""
} |
def punctuate_authorname(an):
    """Punctuate author names properly.

    Expects input in the form 'Bloggs, J K' and will return 'Bloggs, J. K.'.
    Single-letter tokens get a trailing period; everything else is kept.
    """
    comma_parts = [p for p in an.strip().split(',') if p != '']
    formatted_parts = []
    for part in comma_parts:
        tokens = part.strip().split(' ')
        formatted_parts.append(
            ' '.join(tok + '.' if len(tok) == 1 else tok for tok in tokens)
        )
    return ', '.join(formatted_parts).strip()
"resource": ""
} |
def convert_date_to_iso(value):
    """Convert a date-value to the ISO date standard.

    Tries the known input formats in order; unparseable values are
    returned unchanged.
    """
    for fmt in ("%d %b %Y", "%Y/%m/%d"):
        try:
            return datetime.strptime(value, fmt).strftime("%Y-%m-%d")
        except ValueError:
            continue
    return value
"resource": ""
} |
def convert_date_from_iso_to_human(value):
    """Convert a date-value to the ISO date standard for humans.

    Accepts "YYYY-MM-DD" or "YYYY MM DD"; anything that does not split
    into exactly three pieces is returned unchanged.
    """
    for separator in ("-", " "):
        pieces = value.split(separator)
        if len(pieces) == 3:
            year, month, day = pieces
            break
    else:
        # Neither separator produced three fields.
        return value
    try:
        date_object = datetime(int(year), int(month), int(day))
    except TypeError:
        return value
    return date_object.strftime("%d %b %Y")
"resource": ""
} |
def convert_images(image_list):
    """Convert list of images to PNG format.

    @param: image_list ([string, string, ...]): the list of image files
       extracted from the tarball in step 1

    @return: image_list ([str, str, ...]): The list of image files when all
       have been converted to PNG format.

    :raises Exception: with ImageMagick's stderr when a conversion fails
    """
    png_output_contains = 'PNG image'
    ret_list = []
    for image_file in image_list:
        if os.path.isdir(image_file):
            continue

        # BUGFIX: run_shell_command(commands, **kwargs) takes a single
        # argument list; the old call run_shell_command('file %s',
        # (image_file,)) passed the tuple as a second positional argument
        # and raised TypeError. Use the argv-list form instead.
        dummy1, cmd_out, dummy2 = run_shell_command(['file', image_file])
        if cmd_out.find(png_output_contains) > -1:
            # Already a PNG, keep it as-is.
            ret_list.append(image_file)
        else:
            # we're just going to assume that ImageMagick can convert all
            # the image types that we may be faced with
            # for sure it can do EPS->PNG and JPG->PNG and PS->PNG
            # and PSTEX->PNG
            converted_image_file = get_converted_image_name(image_file)
            cmd_list = ['convert', image_file, converted_image_file]
            dummy1, cmd_out, cmd_err = run_shell_command(cmd_list)
            if cmd_err == '':
                ret_list.append(converted_image_file)
            else:
                raise Exception(cmd_err)
    return ret_list
"resource": ""
} |
def get_temporary_file(prefix="tmp_",
                       suffix="",
                       directory=None):
    """Generate a safe and closed filepath.

    Creates a file via mkstemp, closes its descriptor immediately, and
    returns the path.

    Fixes vs. the original: ``except IOError, e`` (a SyntaxError on
    Python 3) is now ``as e``, and the cleanup no longer references
    ``filepath`` before assignment when mkstemp itself fails.
    """
    filepath = None
    try:
        file_fd, filepath = mkstemp(prefix=prefix,
                                    suffix=suffix,
                                    dir=directory)
        os.close(file_fd)
    except IOError as e:
        # Best-effort cleanup; filepath is None if mkstemp never returned.
        if filepath is not None:
            try:
                os.remove(filepath)
            except Exception:
                pass
        raise e
    return filepath
"resource": ""
} |
def return_letters_from_string(text):
    """Get letters from string only.

    Idiom cleanup: character-by-character ``+=`` concatenation is
    quadratic; a generator expression with str.join builds the result in
    one pass.
    """
    return "".join(ch for ch in text if ch.isalpha())
"resource": ""
} |
def license_is_oa(license):
    """Return True if license is compatible with Open Access.

    Idiom cleanup: any() short-circuits on the first OA pattern that
    matches, replacing the manual loop with explicit True/False returns.
    """
    return any(re.search(oal, license) for oal in OA_LICENSES)
"resource": ""
} |
def _crawl_elsevier_and_find_issue_xml(self):
    """
    Information about the current volume, issue, etc. is available
    in a file called issue.xml that is available in a higher directory.

    Populates self._found_issues with every directory whose issue.xml
    could be normalized; failures are logged and skipped.
    """
    self._found_issues = []
    if not self.path and not self.package_name:
        for issue in self.conn._get_issues():
            # BUGFIX: str.rstrip('/issue.xml') strips a *character set*
            # ('/', 'i', 's', ...), not the literal suffix, and could eat
            # trailing letters of the directory name. Strip the suffix.
            if issue.endswith('/issue.xml'):
                dirname = issue[:-len('/issue.xml')]
            else:
                dirname = issue
            try:
                self._normalize_issue_dir_with_dtd(dirname)
                self._found_issues.append(dirname)
            except Exception as err:
                register_exception()
                print("ERROR: can't normalize %s: %s" % (dirname, err))
    else:
        def visit(dummy, dirname, names):
            # os.path.walk-style callback (Python 2) -- confirm `walk` here
            # is not os.walk, whose signature differs.
            if "issue.xml" in names:
                try:
                    self._normalize_issue_dir_with_dtd(dirname)
                    self._found_issues.append(dirname)
                except Exception as err:
                    register_exception()
                    print("ERROR: can't normalize %s: %s"
                          % (dirname, err))
        walk(self.path, visit, None)
"resource": ""
} |
def _normalize_issue_dir_with_dtd(self, path):
    """
    issue.xml from Elsevier assume the existence of a local DTD.
    This procedure install the DTDs next to the issue.xml file
    and normalize it using xmllint in order to resolve all namespaces
    and references.
    """
    # Nothing to do when a normalized copy is already present.
    if exists(join(path, 'resolved_issue.xml')):
        return
    issue_xml_content = open(join(path, 'issue.xml')).read()
    # Install every DTD package that the issue file references.
    dtd_found = False
    for dtd_name in ('si510.dtd', 'si520.dtd', 'si540.dtd'):
        if dtd_name in issue_xml_content:
            self._extract_correct_dtd_package(dtd_name.split('.')[0], path)
            dtd_found = True
    if not dtd_found:
        message = ("It looks like the path " + path +
                   " does not contain an si510, si520 or si540 in issue.xml file")
        self.logger.error(message)
        raise ValueError(message)
    # Resolve entities and namespaces via xmllint into resolved_issue.xml.
    command = ["xmllint", "--format", "--loaddtd",
               join(path, 'issue.xml'),
               "--output", join(path, 'resolved_issue.xml')]
    dummy, dummy, cmd_err = run_shell_command(command)
    if cmd_err:
        message = "Error in cleaning %s: %s" % (
            join(path, 'issue.xml'), cmd_err)
        self.logger.error(message)
        raise ValueError(message)
"resource": ""
} |
def _normalize_article_dir_with_dtd(self, path):
    """
    main.xml from Elsevier assume the existence of a local DTD.
    This procedure install the DTDs next to the main.xml file
    and normalize it using xmllint in order to resolve all namespaces
    and references.

    :param path: directory containing the article's main.xml.
    :raises ValueError: if no known DTD is referenced in main.xml or
        if xmllint fails to normalize the file.
    """
    # Nothing to do when a normalized copy is already present.
    if exists(join(path, 'resolved_main.xml')):
        return
    main_xml_content = open(join(path, 'main.xml')).read()
    arts = ['art501.dtd', 'art510.dtd', 'art520.dtd', 'art540.dtd']
    tmp_extracted = 0
    for art in arts:
        if art in main_xml_content:
            self._extract_correct_dtd_package(art.split('.')[0], path)
            tmp_extracted = 1
    if not tmp_extracted:
        message = "It looks like the path " + path
        # Fix: the leading space was missing, producing messages like
        # ".../somedirdoes not contain...".  Now consistent with
        # _normalize_issue_dir_with_dtd.
        message += " does not contain an art501, art510, art520 or art540 in main.xml file"
        self.logger.error(message)
        raise ValueError(message)
    # Resolve entities and namespaces via xmllint into resolved_main.xml.
    command = ["xmllint", "--format", "--loaddtd",
               join(path, 'main.xml'),
               "--output", join(path, 'resolved_main.xml')]
    dummy, dummy, cmd_err = run_shell_command(command)
    if cmd_err:
        message = "Error in cleaning %s: %s" % (
            join(path, 'main.xml'), cmd_err)
        self.logger.error(message)
        raise ValueError(message)
"resource": ""
} |
def get_publication_date(self, xml_doc):
    """Return the best effort start_date.

    Looks, in order, at prism:coverDate, prism:coverDisplayDate and
    oa:openAccessEffective, normalizing to 'YYYY-MM-DD' (or 'YYYY-MM'
    when no day information is present).  Returns '' when nothing
    parseable is found.
    """
    start_date = get_value_in_tag(xml_doc, "prism:coverDate")
    if not start_date:
        start_date = get_value_in_tag(xml_doc, "prism:coverDisplayDate")
        if not start_date:
            start_date = get_value_in_tag(xml_doc, 'oa:openAccessEffective')
            if start_date:
                start_date = datetime.datetime.strptime(
                    start_date, "%Y-%m-%dT%H:%M:%SZ"
                )
                return start_date.strftime("%Y-%m-%d")
        import dateutil.parser
        # dateutil.parser.parse can't process dates like "April-June 2016";
        # collapse the range to its first month.  (Raw string fixes the
        # invalid '\s'/'\-' escape warnings of the original pattern.)
        start_date = re.sub(r'([A-Z][a-z]+)[\s\-][A-Z][a-z]+ (\d{4})',
                            r'\1 \2', start_date)
        try:
            date = dateutil.parser.parse(start_date)
        except ValueError:
            return ''
        # Special case where we ignore the deduced day form dateutil
        # in case it was not given in the first place.
        if len(start_date.split(" ")) == 3:
            return date.strftime("%Y-%m-%d")
        else:
            return date.strftime("%Y-%m")
    else:
        # Fix: `len(start_date) is 8` compared ints by identity -- it only
        # worked because of CPython's small-int cache and raises a
        # SyntaxWarning on modern Pythons.  Use '==' instead.
        if len(start_date) == 8:
            start_date = time.strftime(
                '%Y-%m-%d', time.strptime(start_date, '%Y%m%d'))
        elif len(start_date) == 6:
            start_date = time.strftime(
                '%Y-%m', time.strptime(start_date, '%Y%m'))
        return start_date
"resource": ""
} |
def extract_oembeds(text, args=None):
    """
    Extract oembed resources from a block of text. Returns a list
    of dictionaries.

    Max width & height can be specified:
    {% for embed in block_of_text|extract_oembeds:"400x300" %}

    Resource type can be specified:
    {% for photo_embed in block_of_text|extract_oembeds:"photo" %}

    Or both:
    {% for embed in block_of_text|extract_oembeds:"400x300xphoto" %}
    """
    resource_type = width = height = None
    if args:
        parts = args.lower().split('x')
        # "WxHxTYPE" gives 3 parts and a bare "TYPE" gives 1 part:
        # in both cases the last element is the resource type.
        if len(parts) in (3, 1):
            resource_type = parts.pop()
        # Whatever remains (if anything) is the width/height pair.
        if len(parts) == 2:
            width, height = (int(part) for part in parts)
    client = OEmbedConsumer()
    return client.extract(text, width, height, resource_type)
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.