_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q263200
do_oembed
validation
def do_oembed(parser, token):
    """
    A node which parses everything between its two nodes, and replaces any
    links with OEmbed-provided objects, if possible.

    Supports two optional arguments, the maximum width and height,
    specified like so:

    {% oembed 640x480 %}http://www.viddler.com/explore/SYSTM/videos/49/{% endoembed %}

    and/or the name of a sub template directory to render templates from:

    {% oembed 320x240 in "comments" %}http://www.viddler.com/explore/SYSTM/videos/49/{% endoembed %}

    or:

    {% oembed in "comments" %}http://www.viddler.com/explore/SYSTM/videos/49/{% endoembed %}

    either of those will render templates in oembed/comments/oembedtype.html

    Additionally, you can specify a context variable to drop the rendered
    text in:

    {% oembed 600x400 in "comments" as var_name %}...{% endoembed %}
    {% oembed as var_name %}...{% endoembed %}

    :raises template.TemplateSyntaxError: on any malformed argument set
    """
    args = token.split_contents()
    template_dir = None
    var_name = None
    if len(args) > 2:
        if len(args) == 3 and args[1] == 'in':
            template_dir = args[2]
        elif len(args) == 3 and args[1] == 'as':
            var_name = args[2]
        elif len(args) == 4 and args[2] == 'in':
            template_dir = args[3]
        elif len(args) == 4 and args[2] == 'as':
            var_name = args[3]
        # BUGFIX: also require args[2] == 'in' for the six-token form so a
        # malformed tag raises below instead of silently treating args[3]
        # as the template dir.
        elif len(args) == 6 and args[2] == 'in' and args[4] == 'as':
            template_dir = args[3]
            var_name = args[5]
        else:
            raise template.TemplateSyntaxError("OEmbed either takes a single " \
                "(optional) argument: WIDTHxHEIGHT, where WIDTH and HEIGHT " \
                "are positive integers, and or an optional 'in " \
                " \"template_dir\"' argument set.")
    if template_dir:
        # the template dir argument must arrive as a quoted string literal
        if not (template_dir[0] == template_dir[-1] and
                template_dir[0] in ('"', "'")):
            raise template.TemplateSyntaxError("template_dir must be quoted")
        template_dir = template_dir[1:-1]
    if len(args) >= 2 and 'x' in args[1]:
        width, height = args[1].lower().split('x')
        # BUGFIX: was ``if not width and height:`` which, by operator
        # precedence, only fired when width was empty AND height non-empty —
        # e.g. "640x" slipped through with an empty height.
        if not (width and height):
            raise template.TemplateSyntaxError("OEmbed's optional WIDTHxHEIGH" \
                "T argument requires WIDTH and HEIGHT to be positive integers.")
    else:
        width, height = None, None
    nodelist = parser.parse(('endoembed',))
    parser.delete_first_token()
    return OEmbedNode(nodelist, width, height, template_dir, var_name)
python
{ "resource": "" }
q263201
do_autodiscover
validation
def do_autodiscover(parser, token):
    """Generate a <link> tag with oembed autodiscovery bits for an object.

    {% oembed_autodiscover video %}

    :raises template.TemplateSyntaxError: unless exactly one argument
        (the object) is supplied
    """
    bits = token.split_contents()
    if len(bits) != 2:
        raise template.TemplateSyntaxError('%s takes an object as its parameter.' % bits[0])
    return OEmbedAutodiscoverNode(bits[1])
python
{ "resource": "" }
q263202
do_url_scheme
validation
def do_url_scheme(parser, token):
    """Generate a <link> tag with oembed autodiscovery bits.

    {% oembed_url_scheme %}

    :raises template.TemplateSyntaxError: if any argument is supplied
    """
    bits = token.split_contents()
    if len(bits) != 1:
        raise template.TemplateSyntaxError('%s takes no parameters.' % bits[0])
    return OEmbedURLSchemeNode()
python
{ "resource": "" }
q263203
Script.parser
validation
def parser(self):
    """Return the argument parser for the current script.

    With subcommands, a top-level Parser is built from the module's
    docstring and one sub-parser per subcommand (underscores in names
    become dashes, help text taken from each callback's docstring).
    Without subcommands, a single Parser wraps the main callback.
    """
    mod = self.module
    if not self.subcommands:
        return Parser(callback=self.callbacks[self.function_name], module=mod)
    top_parser = Parser(description=inspect.getdoc(mod), module=mod)
    subparsers = top_parser.add_subparsers()
    for raw_name, callback in self.subcommands.items():
        subparsers.add_parser(
            raw_name.replace("_", "-"),
            callback=callback,
            help=inspect.getdoc(callback),
        )
    return top_parser
python
{ "resource": "" }
q263204
Script.module
validation
def module(self): """load the module so we can actually run the script's function""" # we have to guard this value because: # https://thingspython.wordpress.com/2010/09/27/another-super-wrinkle-raising-typeerror/ if not hasattr(self, '_module'): if "__main__" in sys.modules: mod = sys.modules["__main__"] path = self.normalize_path(mod.__file__) if os.path.splitext(path) == os.path.splitext(self.path): self._module = mod else: # http://stackoverflow.com/questions/67631/how-to-import-a-module-given-the-full-path self._module = imp.load_source('captain_script', self.path) #self._module = imp.load_source(self.module_name, self.path) return self._module
python
{ "resource": "" }
q263205
Script.body
validation
def body(self):
    """Return the script's source text, loading it lazily on first access."""
    try:
        return self._body
    except AttributeError:
        # first access: pull the source from the loaded module and cache it
        self._body = inspect.getsource(self.module)
        return self._body
python
{ "resource": "" }
q263206
Script.run
validation
def run(self, raw_args):
    """parse and import the script, and then run the script's main function

    :param raw_args: the raw command line arguments to parse
    :return: integer exit code (2 when argument parsing fails)
    """
    parser = self.parser
    args, kwargs = parser.parse_callback_args(raw_args)
    # the callback to invoke is smuggled through the parsed kwargs
    callback = kwargs.pop("main_callback")
    if parser.has_injected_quiet():
        # NOTE(review): this is the project's own logging helper, not the
        # stdlib logging module (which has no inject_quiet) — confirm.
        levels = kwargs.pop("quiet_inject", "")
        logging.inject_quiet(levels)
    try:
        ret_code = callback(*args, **kwargs)
        # a falsy return value (None, 0, "") normalizes to exit code 0
        ret_code = int(ret_code) if ret_code else 0
    except ArgError as e:
        # exit code 2 mirrors argparse's own error handling:
        # https://hg.python.org/cpython/file/2.7/Lib/argparse.py#l2374
        echo.err("{}: error: {}", parser.prog, str(e))
        ret_code = 2
    return ret_code
python
{ "resource": "" }
q263207
Script.call_path
validation
def call_path(self, basepath):
    """Return the minimal path needed to execute this script from *basepath*.

    example --
        basepath = /foo/bar
        self.path = /foo/bar/che/baz.py
        self.call_path(basepath) # che/baz.py

    For package entry points (__init__.py / __main__.py) the containing
    directory is returned instead of the file itself.

    basepath -- string -- the directory you would be calling this script in
    return -- string -- the relative path, or self.path when basepath is falsy
    """
    rel = os.path.relpath(self.path, basepath) if basepath else self.path
    if self.name in ('__init__.py', '__main__.py'):
        rel = os.path.dirname(rel)
    return rel
python
{ "resource": "" }
q263208
Script.parse
validation
def parse(self):
    """load the script and set the parser and argument info

    I feel that this is way too brittle to be used long term, I think it just
    might be best to import the stupid module, the thing I don't like about
    that is then we import basically everything, which seems bad?

    Scans the script's AST (without importing it) for top-level names that
    look like main/main_* — function defs, bare names, assignment targets
    and import names/aliases — then resolves the matches to callables on
    the imported module.

    :return: True when at least one callback was found
    :raises ParseError: when no main function could be located
    """
    if self.parsed:
        return
    self.callbacks = {}
    # search for main and any main_* callable objects
    regex = re.compile("^{}_?".format(self.function_name), flags=re.I)
    mains = set()
    body = self.body
    ast_tree = ast.parse(self.body, self.path)
    for n in ast_tree.body:
        # function/class definitions carry a .name
        if hasattr(n, 'name'):
            if regex.match(n.name):
                mains.add(n.name)
        # bare expression statements referencing a name
        if hasattr(n, 'value'):
            ns = n.value
            if hasattr(ns, 'id'):
                if regex.match(ns.id):
                    mains.add(ns.id)
        # assignments: only the first target is inspected
        if hasattr(n, 'targets'):
            ns = n.targets[0]
            if hasattr(ns, 'id'):
                if regex.match(ns.id):
                    mains.add(ns.id)
        # imports: both the imported name and any "as" alias count
        if hasattr(n, 'names'):
            for ns in n.names:
                if hasattr(ns, 'name'):
                    if regex.match(ns.name):
                        mains.add(ns.name)
                if getattr(ns, 'asname', None):
                    if regex.match(ns.asname):
                        mains.add(ns.asname)
    if len(mains) > 0:
        module = self.module
        for function_name in mains:
            cb = getattr(module, function_name, None)
            # AST matches that are not callables on the module are dropped
            if cb and callable(cb):
                self.callbacks[function_name] = cb
    else:
        raise ParseError("no main function found")
    self.parsed = True
    return len(self.callbacks) > 0
python
{ "resource": "" }
q263209
Script.can_run_from_cli
validation
def can_run_from_cli(self):
    """return True if this script can be run from the command line

    Finds calls to this module's ``exit`` helper in the script's AST, then
    confirms a matching ``name(`` call site actually appears in the raw
    source text.

    :return: bool
    """
    ret = False
    ast_tree = ast.parse(self.body, self.path)
    calls = self._find_calls(ast_tree, __name__, "exit")
    for call in calls:
        # FIX: raw string — "\(" in a regular string is an invalid escape
        # sequence (SyntaxWarning on modern Python); the pattern value is
        # unchanged.
        if re.search(r"{}\(".format(re.escape(call)), self.body):
            ret = True
            break
    return ret
python
{ "resource": "" }
q263210
register_field
validation
def register_field(cls, field):
    """Register *field* on *cls* with the FieldRegistry and connect a
    post-save signal for the model so embeds get processed after save.
    """
    FieldRegistry.add_field(cls, field)
    # unique dispatch_uid so connecting twice for the same field is a no-op
    uid = '%s.%s.%s' % (cls._meta.app_label, cls._meta.module_name, field.name)
    signals.post_save.connect(handle_save_embeds, sender=cls, dispatch_uid=uid)
python
{ "resource": "" }
q263211
EmbeddedMediaField.contribute_to_class
validation
def contribute_to_class(self, cls, name):
    """Attach the field to *cls* and register its save signal.

    Model inheritance offers no 'contribute_to_class' style hook for child
    models, so a fake virtual field is added here; it gets contributed to
    every subclass and takes care of creating the signal there as well.
    """
    super(EmbeddedMediaField, self).contribute_to_class(cls, name)
    register_field(cls, self)
    # add a virtual field that will create signals on any/all subclasses
    cls._meta.add_virtual_field(EmbeddedSignalCreator(self))
python
{ "resource": "" }
q263212
fetch_url
validation
def fetch_url(url, method='GET', user_agent='django-oembed', timeout=SOCKET_TIMEOUT):
    """
    Fetch response headers and data from a URL, raising a generic exception
    for any kind of failure.

    :param url: the URL to fetch
    :param method: HTTP method to use
    :param user_agent: value sent in the User-Agent header
    :param timeout: socket timeout in seconds
    :return: (headers, raw) as returned by httplib2
    :raises OEmbedHTTPException: on any error while fetching
    """
    sock = httplib2.Http(timeout=timeout)
    request_headers = {
        'User-Agent': user_agent,
        'Accept-Encoding': 'gzip'}
    try:
        headers, raw = sock.request(url, headers=request_headers,
                                    method=method)
    # BUGFIX: a bare ``except:`` also swallowed KeyboardInterrupt and
    # SystemExit; only genuine errors should become OEmbedHTTPException.
    except Exception:
        raise OEmbedHTTPException('Error fetching %s' % url)
    return headers, raw
python
{ "resource": "" }
q263213
relative_to_full
validation
def relative_to_full(url, example_url):
    """
    Given a url which may or may not be a relative url, convert it to a
    full url path given another full url as an example.
    """
    # already absolute — nothing to do
    if re.match(r'https?:\/\/', url):
        return url
    domain = get_domain(example_url)
    return '%s%s' % (domain, url) if domain else url
python
{ "resource": "" }
q263214
mock_request
validation
def mock_request():
    """
    Generate a fake request object to allow oEmbeds to use context
    processors.  Only SERVER_NAME is populated, from the current Site.
    """
    request = HttpRequest()
    request.META['SERVER_NAME'] = Site.objects.get_current().domain
    return request
python
{ "resource": "" }
q263215
load_class
validation
def load_class(path):
    """
    Dynamically load a class given a string of the format

        package.Class
    """
    package, class_name = path.rsplit('.', 1)
    imported = import_module(package)
    return getattr(imported, class_name)
python
{ "resource": "" }
q263216
CDS2Inspire.get_record
validation
def get_record(self):
    """Override the base get_record.

    Runs the CDS -> INSPIRE conversion pipeline over self.record: prunes
    the record to a whitelist of MARC tags, applies the individual
    update_* steps in order, adds one 980 collection field per detected
    collection and strips controlfields before returning.

    :return: the converted record structure
    """
    self.update_system_numbers()
    self.add_systemnumber("CDS")
    # whitelist of MARC tags retained by keep_only_fields()
    self.fields_list = [
        "024", "041", "035", "037", "088", "100", "110", "111", "242",
        "245", "246", "260", "269", "300", "502", "650", "653", "693",
        "700", "710", "773", "856", "520", "500", "980"
    ]
    self.keep_only_fields()
    # determine_collections() must run before the THESIS/NOTE checks below
    self.determine_collections()
    self.add_cms_link()
    self.update_languages()
    self.update_reportnumbers()
    self.update_date()
    self.update_pagenumber()
    self.update_authors()
    self.update_subject_categories("SzGeCERN", "INSPIRE", "categories_inspire")
    self.update_keywords()
    self.update_experiments()
    self.update_collaboration()
    self.update_journals()
    self.update_links_and_ffts()
    # collection-specific post-processing
    if 'THESIS' in self.collections:
        self.update_thesis_supervisors()
        self.update_thesis_information()
    if 'NOTE' in self.collections:
        self.add_notes()
    # one 980__a per collection marks the record's collections downstream
    for collection in self.collections:
        record_add_field(self.record,
                         tag='980',
                         subfields=[('a', collection)])
    self.remove_controlfields()
    return self.record
python
{ "resource": "" }
q263217
CDS2Inspire.add_cms_link
validation
def add_cms_link(self):
    """Special handling if record is a CMS NOTE."""
    intnote = record_get_field_values(self.record, '690',
                                      filter_subfield_code="a",
                                      filter_subfield_value='INTNOTE')
    if not intnote:
        return
    # build a weblib link from every CMS report number in 088__a
    for report_number in record_get_field_values(self.record,
                                                 tag='088',
                                                 filter_subfield_code="a"):
        if 'CMS' in report_number:
            url = ('http://weblib.cern.ch/abstract?CERN-CMS' +
                   report_number.split('CMS', 1)[-1])
            record_add_field(self.record, tag='856', ind1='4',
                             subfields=[('u', url)])
python
{ "resource": "" }
q263218
CDS2Inspire.update_reportnumbers
validation
def update_reportnumbers(self):
    """Handle reportnumbers.

    Converts CDS 088 report numbers into INSPIRE 037 fields (plus a 595
    internal note for P0/CM-P0 numbers), then post-processes existing 037
    fields: arXiv numbers gain a ('9','arXiv') source and a category from
    695__a, and SIS- internal numbers are dropped.
    """
    rep_088_fields = record_get_field_instances(self.record, '088')
    for field in rep_088_fields:
        subs = field_get_subfields(field)
        if '9' in subs:
            for val in subs['9']:
                # P0/CM-P0 numbers become a CERN-internal 595 note
                if val.startswith('P0') or val.startswith('CM-P0'):
                    sf = [('9', 'CERN'), ('b', val)]
                    record_add_field(self.record, '595', subfields=sf)
        # field[0] is the subfield list of (code, value) pairs
        for key, val in field[0]:
            if key in ['a', '9'] and not val.startswith('SIS-'):
                record_add_field(
                    self.record, '037', subfields=[('a', val)])
    record_delete_fields(self.record, "088")
    # 037 Externals also...
    rep_037_fields = record_get_field_instances(self.record, '037')
    for field in rep_037_fields:
        subs = field_get_subfields(field)
        if 'a' in subs:
            for value in subs['a']:
                if 'arXiv' in value:
                    new_subs = [('a', value), ('9', 'arXiv')]
                    # pick the first 695__a as the arXiv category
                    for fld in record_get_field_instances(self.record, '695'):
                        for key, val in field_get_subfield_instances(fld):
                            if key == 'a':
                                new_subs.append(('c', val))
                                break
                    nf = create_field(subfields=new_subs)
                    # field[4] is the field's global position in the record
                    record_replace_field(self.record, '037', nf, field[4])
        for key, val in field[0]:
            # SIS- numbers are CDS-internal and must not reach INSPIRE
            if key in ['a', '9'] and val.startswith('SIS-'):
                record_delete_field(
                    self.record, '037', field_position_global=field[4])
python
{ "resource": "" }
q263219
CDS2Inspire.update_keywords
validation
def update_keywords(self):
    """653 Free Keywords."""
    for field in record_get_field_instances(self.record, '653', ind1='1'):
        subs = field_get_subfields(field)
        # rebuild each keyword as a ('9','author') + ('a', keyword) pair
        rebuilt = []
        for keyword in subs.get('a', []):
            rebuilt.append(('9', 'author'))
            rebuilt.append(('a', keyword))
    	# replace the field in place, keyed by its global position
        replacement = create_field(subfields=rebuilt, ind1='1')
        record_replace_field(
            self.record, '653', replacement, field_position_global=field[4])
python
{ "resource": "" }
q263220
CDS2Inspire.update_collaboration
validation
def update_collaboration(self):
    """710 Collaboration.

    Strips $5 subfields and 'CERN. Geneva' values from every 710 field,
    deleting fields that end up empty.
    """
    for field in record_get_field_instances(self.record, '710'):
        subs = field_get_subfield_instances(field)
        # BUGFIX: the original enumerated a copy of ``subs`` but popped
        # from ``subs`` by those indices — after the first pop every later
        # index was stale and the wrong subfields were removed.  Filter
        # in place instead (slice assignment keeps mutating the same list
        # object the field holds).
        subs[:] = [
            (code, value) for code, value in subs
            if code != '5' and not value.startswith('CERN. Geneva')
        ]
        if not subs:
            record_delete_field(self.record, tag='710',
                                field_position_global=field[4])
python
{ "resource": "" }
q263221
create_field
validation
def create_field(subfields=None, ind1=' ', ind2=' ', controlfield_value='',
                 global_position=-1):
    """
    Return a field tuple built from the provided elements.

    The global position defaults arbitrarily to -1.
    """
    washed_ind1, washed_ind2 = _wash_indicators(ind1, ind2)
    field = (
        [] if subfields is None else subfields,
        washed_ind1,
        washed_ind2,
        controlfield_value,
        global_position,
    )
    _check_field_validity(field)
    return field
python
{ "resource": "" }
q263222
create_records
validation
def create_records(marcxml, verbose=CFG_BIBRECORD_DEFAULT_VERBOSE_LEVEL,
                   correct=CFG_BIBRECORD_DEFAULT_CORRECT, parser='',
                   keep_singletons=CFG_BIBRECORD_KEEP_SINGLETONS):
    """
    Create a list of records from the marcxml description.

    :returns: a list of objects initiated by the function create_record().
        Please see that function's docstring.
    """
    # DOTALL so <record> elements spanning multiple lines are matched
    record_pattern = re.compile('<record.*?>.*?</record>', re.DOTALL)
    return [
        create_record(chunk, verbose=verbose, correct=correct,
                      parser=parser, keep_singletons=keep_singletons)
        for chunk in record_pattern.findall(marcxml)
    ]
python
{ "resource": "" }
q263223
create_record
validation
def create_record(marcxml=None, verbose=CFG_BIBRECORD_DEFAULT_VERBOSE_LEVEL,
                  correct=CFG_BIBRECORD_DEFAULT_CORRECT, parser='',
                  sort_fields_by_indicators=False,
                  keep_singletons=CFG_BIBRECORD_KEEP_SINGLETONS):
    """Create a record object from the marcxml description (lxml parser).

    The record structure is::

        Record    := {tag : [Field]}
        Field     := (Subfields, ind1, ind2, value, global_position)
        Subfields := [(code, value)]

    :param marcxml: an XML string representation of the record to create
    :param verbose: the level of verbosity: 0 (silent), 1-2 (warnings),
        3 (strict: stop when errors)
    :param correct: 1 to enable correction of marcxml syntax. Else 0.
    :return: a tuple (record, status_code, list_of_errors) where
        status_code is 0 when there are errors, 1 when no errors.
        NOTE: when *marcxml* is None an empty dict is returned instead.
    """
    if marcxml is None:
        return {}
    try:
        record = _create_record_lxml(marcxml, verbose, correct,
                                     keep_singletons=keep_singletons)
    except InvenioBibRecordParserError as exc:
        return (None, 0, str(exc))
    if sort_fields_by_indicators:
        _record_sort_by_indicators(record)
    errors = []
    if correct:
        # Correct the structure of the record.
        errors = _correct_record(record)
    return (record, int(not errors), errors)
python
{ "resource": "" }
q263224
filter_field_instances
validation
def filter_field_instances(field_instances, filter_subcode, filter_value,
                           filter_mode='e'):
    """Filter the given field instances by subfield content.

    Keeps only the instances that contain *filter_subcode* with
    *filter_value*, where matching depends on *filter_mode*:

    - 'e' - exact match of the subfield value
    - 's' - substring match in the subfield value
    - 'r' - regular expression match against the subfield value

    Example:
    record_filter_field(record_get_field_instances(rec, '999', '%', '%'),
                        'y', '2001')

    :param field_instances: output from record_get_field_instances
    :param filter_subcode: name of the subfield
    :type filter_subcode: string
    :param filter_value: value of the subfield
    :type filter_value: string
    :param filter_mode: 'e', 's' or 'r'
    """
    matched = []
    if filter_mode == 'e':
        wanted = (filter_subcode, filter_value)
        matched = [inst for inst in field_instances if wanted in inst[0]]
    elif filter_mode == 's':
        for inst in field_instances:
            if any(code == filter_subcode and filter_value in value
                   for code, value in inst[0]):
                matched.append(inst)
    elif filter_mode == 'r':
        pattern = re.compile(filter_value)
        for inst in field_instances:
            if any(code == filter_subcode and pattern.match(value) is not None
                   for code, value in inst[0]):
                matched.append(inst)
    return matched
python
{ "resource": "" }
q263225
record_drop_duplicate_fields
validation
def record_drop_duplicate_fields(record):
    """
    Return a record where all the duplicate fields have been removed.

    Fields are considered identical considering also the order of their
    subfields.  Surviving fields are renumbered with fresh global
    positions, counting across tags in sorted tag order.
    """
    deduped = {}
    position = 0
    for tag in sorted(record.keys()):
        seen = set()
        kept = []
        for full_field in record[tag]:
            # hashable signature: subfields (order-sensitive) + indicators
            # + controlfield value; the old global position is ignored
            signature = (tuple(full_field[0]),) + full_field[1:4]
            if signature not in seen:
                seen.add(signature)
                position += 1
                kept.append(full_field[:4] + (position,))
        deduped[tag] = kept
    return deduped
python
{ "resource": "" }
q263226
records_identical
validation
def records_identical(rec1, rec2, skip_005=True, ignore_field_order=False,
                      ignore_subfield_order=False,
                      ignore_duplicate_subfields=False,
                      ignore_duplicate_controlfields=False):
    """
    Return True if rec1 is identical to rec2.

    It does so regardless of a difference in the 005 tag (i.e. the
    timestamp).

    :param skip_005: ignore the 005 (timestamp) tag entirely
    :param ignore_field_order: compare fields sorted by content instead of
        by global position
    :param ignore_subfield_order: compare each field's subfields sorted
    :param ignore_duplicate_subfields: compare each field's subfields as
        sets (duplicates collapse)
    :param ignore_duplicate_controlfields: for 00x tags compare only the
        set of controlfield values
    """
    rec1_keys = set(rec1.keys())
    rec2_keys = set(rec2.keys())
    if skip_005:
        rec1_keys.discard("005")
        rec2_keys.discard("005")
    if rec1_keys != rec2_keys:
        return False
    for key in rec1_keys:
        if ignore_duplicate_controlfields and key.startswith('00'):
            # controlfields: only the set of values (field[3]) matters
            if set(field[3] for field in rec1[key]) != \
                    set(field[3] for field in rec2[key]):
                return False
            continue
        rec1_fields = rec1[key]
        rec2_fields = rec2[key]
        if len(rec1_fields) != len(rec2_fields):
            # They already differs in length...
            return False
        if ignore_field_order:
            # We sort the fields, first by indicators and then by anything else
            rec1_fields = sorted(
                rec1_fields,
                key=lambda elem: (elem[1], elem[2], elem[3], elem[0]))
            rec2_fields = sorted(
                rec2_fields,
                key=lambda elem: (elem[1], elem[2], elem[3], elem[0]))
        else:
            # We sort the fields, first by indicators, then by global position
            # and then by anything else
            rec1_fields = sorted(
                rec1_fields,
                key=lambda elem: (elem[1], elem[2], elem[4], elem[3], elem[0]))
            rec2_fields = sorted(
                rec2_fields,
                key=lambda elem: (elem[1], elem[2], elem[4], elem[3], elem[0]))
        # fields are now aligned pairwise; any mismatch fails the whole check
        for field1, field2 in zip(rec1_fields, rec2_fields):
            if ignore_duplicate_subfields:
                if field1[1:4] != field2[1:4] or \
                        set(field1[0]) != set(field2[0]):
                    return False
            elif ignore_subfield_order:
                if field1[1:4] != field2[1:4] or \
                        sorted(field1[0]) != sorted(field2[0]):
                    return False
            elif field1[:4] != field2[:4]:
                return False
    return True
python
{ "resource": "" }
q263227
record_get_field_instances
validation
def record_get_field_instances(rec, tag="", ind1=" ", ind2=" "):
    """
    Return the list of field instances for the specified tag and indicators.

    Return empty list if not found.
    If tag is empty string, returns all fields as (tag, fields) pairs.

    Parameters (tag, ind1, ind2) can contain wildcard %.

    :param rec: a record structure as returned by create_record()
    :param tag: a 3 characters long string
    :param ind1: a 1 character long string
    :param ind2: a 1 character long string
    :return: a list of field tuples (Subfields, ind1, ind2, value,
             field_position_global) where subfields is list of (code, value)
    """
    if not rec:
        return []
    if not tag:
        # FIX: wrap in list() so the documented list is actually returned on
        # Python 3, where dict.items() is a view.
        return list(rec.items())
    out = []
    ind1, ind2 = _wash_indicators(ind1, ind2)
    if '%' in tag:
        # Wildcard in tag. Check all possible
        for field_tag in rec:
            if _tag_matches_pattern(field_tag, tag):
                for possible_field_instance in rec[field_tag]:
                    if (ind1 in ('%', possible_field_instance[1]) and
                            ind2 in ('%', possible_field_instance[2])):
                        out.append(possible_field_instance)
    else:
        # Completely defined tag. Use dict
        for possible_field_instance in rec.get(tag, []):
            if (ind1 in ('%', possible_field_instance[1]) and
                    ind2 in ('%', possible_field_instance[2])):
                out.append(possible_field_instance)
    return out
python
{ "resource": "" }
q263228
record_delete_field
validation
def record_delete_field(rec, tag, ind1=' ', ind2=' ',
                        field_position_global=None,
                        field_position_local=None):
    """
    Delete the field with the given position.

    If global field position is specified, deletes the field with the
    corresponding global field position.
    If field_position_local is specified, deletes the field with the
    corresponding local field position and tag.
    Else deletes all the fields matching tag and optionally ind1 and ind2.

    If both field_position_global and field_position_local are present,
    then field_position_local takes precedence.

    :param rec: the record data structure
    :param tag: the tag of the field to be deleted
    :param ind1: the first indicator of the field to be deleted
    :param ind2: the second indicator of the field to be deleted
    :param field_position_global: the global field position (record wise)
    :param field_position_local: the local field position (tag wise)
    :return: the list of deleted fields
    """
    error = _validate_record_field_positions_global(rec)
    if error:
        # FIXME one should write a message here.
        pass
    if tag not in rec:
        # NOTE(review): the docstring promises a list of deleted fields, but
        # this path returns False — callers must cope with both; confirm.
        return False
    ind1, ind2 = _wash_indicators(ind1, ind2)
    deleted = []
    newfields = []
    if field_position_global is None and field_position_local is None:
        # Remove all fields with tag 'tag'.
        for field in rec[tag]:
            if field[1] != ind1 or field[2] != ind2:
                newfields.append(field)
            else:
                deleted.append(field)
        rec[tag] = newfields
    elif field_position_global is not None:
        # Remove the field with 'field_position_global'.
        # NOTE(review): precedence here means a field is kept when
        # (ind1 differs AND ind2 differs) OR its position differs — so a
        # field at the target position with only ONE mismatched indicator
        # is still deleted.  Looks suspicious; confirm intent.
        for field in rec[tag]:
            if (field[1] != ind1 and field[2] != ind2 or
                    field[4] != field_position_global):
                newfields.append(field)
            else:
                deleted.append(field)
        rec[tag] = newfields
    elif field_position_local is not None:
        # Remove the field with 'field_position_local'.
        # NOTE(review): the locally deleted field is never appended to
        # ``deleted``, so this branch always returns [] on success.
        try:
            del rec[tag][field_position_local]
        except IndexError:
            return []
    if not rec[tag]:
        # Tag is now empty, remove it.
        del rec[tag]
    return deleted
python
{ "resource": "" }
q263229
record_add_fields
validation
def record_add_fields(rec, tag, fields, field_position_local=None,
                      field_position_global=None):
    """
    Add the fields into the record at the required position.

    The position is specified by the tag and the field_position_local in
    the list of fields.

    :param rec: a record structure
    :param tag: the tag of the fields to be moved
    :param field_position_local: the field_position_local to which the
        field will be inserted. If not specified, appends the fields to
        the tag.
    :param a: list of fields to be added
    :return: -1 if the operation failed, or the field_position_local if it
        was successful
    """
    if field_position_local is None and field_position_global is None:
        # append mode: keep the incoming order
        for field in fields:
            record_add_field(
                rec, tag, ind1=field[1],
                ind2=field[2], subfields=field[0],
                controlfield_value=field[3])
    else:
        # insert mode: every field goes in at the SAME position, so the
        # list is reversed first to preserve the original relative order.
        # NOTE(review): reverse() mutates the caller's list in place —
        # confirm callers do not reuse ``fields`` afterwards.
        fields.reverse()
        for field in fields:
            record_add_field(
                rec, tag, ind1=field[1], ind2=field[2],
                subfields=field[0], controlfield_value=field[3],
                field_position_local=field_position_local,
                field_position_global=field_position_global)
    # NOTE(review): this returns the *parameter* unchanged — None in append
    # mode — not the position actually used by record_add_field; confirm.
    return field_position_local
python
{ "resource": "" }
q263230
record_move_fields
validation
def record_move_fields(rec, tag, field_positions_local,
                       field_position_local=None):
    """
    Move some fields to the position specified by 'field_position_local'.

    :param rec: a record structure as returned by create_record()
    :param tag: the tag of the fields to be moved
    :param field_positions_local: the positions of the fields to move
    :param field_position_local: insert the field before that
        field_position_local. If unspecified, appends the fields
    :return: the field_position_local is the operation was successful
    """
    # delete-then-reinsert: the deleted fields are re-added at the target
    removed = record_delete_fields(
        rec, tag, field_positions_local=field_positions_local)
    return record_add_fields(
        rec, tag, removed, field_position_local=field_position_local)
python
{ "resource": "" }
q263231
record_delete_subfield
validation
def record_delete_subfield(rec, tag, subfield_code, ind1=' ', ind2=' '):
    """Delete all subfields with subfield_code in the record."""
    ind1, ind2 = _wash_indicators(ind1, ind2)
    for field in rec.get(tag, []):
        if (field[1], field[2]) == (ind1, ind2):
            # slice-assign so the field keeps the same subfield list object
            field[0][:] = [sf for sf in field[0] if sf[0] != subfield_code]
python
{ "resource": "" }
q263232
record_get_field
validation
def record_get_field(rec, tag, field_position_global=None,
                     field_position_local=None):
    """
    Return the matching field.

    Exactly one of *field_position_global* or *field_position_local* must
    be provided.

    :return: the field tuple (subfields, ind1, ind2, value,
        global_position), where subfields is a list of (code, value)
    :raises InvenioBibRecordFieldError: if zero or both positions are
        given, or no matching field exists
    """
    if field_position_global is None and field_position_local is None:
        raise InvenioBibRecordFieldError(
            "A field position is required to "
            "complete this operation.")
    elif field_position_global is not None and \
            field_position_local is not None:
        raise InvenioBibRecordFieldError(
            "Only one field position is required "
            "to complete this operation.")
    # BUGFIX: was ``elif field_position_global:`` — a global position of 0
    # fell through to the local branch and crashed with ``rec[tag][None]``.
    elif field_position_global is not None:
        if tag not in rec:
            raise InvenioBibRecordFieldError("No tag '%s' in record." % tag)
        for field in rec[tag]:
            if field[4] == field_position_global:
                return field
        raise InvenioBibRecordFieldError(
            "No field has the tag '%s' and the "
            "global field position '%d'." % (tag, field_position_global))
    else:
        try:
            return rec[tag][field_position_local]
        except KeyError:
            raise InvenioBibRecordFieldError("No tag '%s' in record." % tag)
        except IndexError:
            raise InvenioBibRecordFieldError(
                "No field has the tag '%s' and "
                "the local field position '%d'." % (tag, field_position_local))
python
{ "resource": "" }
q263233
record_replace_field
validation
def record_replace_field(rec, tag, new_field, field_position_global=None,
                         field_position_local=None):
    """Replace a field with a new field.

    Exactly one of *field_position_global* or *field_position_local* must
    be provided.

    :raises InvenioBibRecordFieldError: if zero or both positions are
        given, or no matching field exists
    """
    if field_position_global is None and field_position_local is None:
        raise InvenioBibRecordFieldError(
            "A field position is required to "
            "complete this operation.")
    elif field_position_global is not None and \
            field_position_local is not None:
        raise InvenioBibRecordFieldError(
            "Only one field position is required "
            "to complete this operation.")
    # BUGFIX: was ``elif field_position_global:`` — a global position of 0
    # fell through to the local branch and did ``rec[tag][None] = new_field``.
    elif field_position_global is not None:
        if tag not in rec:
            raise InvenioBibRecordFieldError("No tag '%s' in record." % tag)
        replaced = False
        for position, field in enumerate(rec[tag]):
            if field[4] == field_position_global:
                rec[tag][position] = new_field
                replaced = True
        if not replaced:
            raise InvenioBibRecordFieldError(
                "No field has the tag '%s' and "
                "the global field position '%d'." %
                (tag, field_position_global))
    else:
        try:
            rec[tag][field_position_local] = new_field
        except KeyError:
            raise InvenioBibRecordFieldError("No tag '%s' in record." % tag)
        except IndexError:
            raise InvenioBibRecordFieldError(
                "No field has the tag '%s' and "
                "the local field position '%d'." % (tag, field_position_local))
python
{ "resource": "" }
q263234
record_get_subfields
validation
def record_get_subfields(rec, tag, field_position_global=None,
                         field_position_local=None):
    """
    Return the subfields of the matching field.

    One has to enter either a global field position or a local field
    position.

    :return: a list of subfield tuples (subfield code, value).
    :rtype: list
    """
    # field[0] is the subfield list; position validation is delegated to
    # record_get_field
    return record_get_field(
        rec, tag,
        field_position_global=field_position_global,
        field_position_local=field_position_local)[0]
python
{ "resource": "" }
q263235
record_delete_subfield_from
validation
def record_delete_subfield_from(rec, tag, subfield_position,
                                field_position_global=None,
                                field_position_local=None):
    """
    Delete subfield from position specified.

    Specify the subfield by tag, field number and subfield position.
    If the deletion empties the field, the field is removed; if that
    empties the tag, the tag is removed too.

    :raises InvenioBibRecordFieldError: if the subfield position does not
        exist in the selected field
    """
    subfields = record_get_subfields(
        rec, tag,
        field_position_global=field_position_global,
        field_position_local=field_position_local)
    try:
        del subfields[subfield_position]
    except IndexError:
        raise InvenioBibRecordFieldError(
            "The record does not contain the subfield "
            "'%(subfieldIndex)s' inside the field (local: "
            "'%(fieldIndexLocal)s, global: '%(fieldIndexGlobal)s' ) of tag "
            "'%(tag)s'." %
            {"subfieldIndex": subfield_position,
             "fieldIndexLocal": str(field_position_local),
             "fieldIndexGlobal": str(field_position_global),
             "tag": tag})
    if not subfields:
        # last subfield gone: drop the now-empty field
        if field_position_global is not None:
            # NOTE(review): deleting during enumerate skips the element
            # after each deletion — harmless only if global positions are
            # unique within the tag; confirm.
            for position, field in enumerate(rec[tag]):
                if field[4] == field_position_global:
                    del rec[tag][position]
        else:
            del rec[tag][field_position_local]
        if not rec[tag]:
            del rec[tag]
python
{ "resource": "" }
q263236
record_add_subfield_into
validation
def record_add_subfield_into(rec, tag, subfield_code, value,
                             subfield_position=None,
                             field_position_global=None,
                             field_position_local=None):
    """Add subfield into specified position.

    Specify the subfield by tag, field number and optionally by subfield
    position; without a subfield position the subfield is appended.
    """
    subfields = record_get_subfields(
        rec, tag,
        field_position_global=field_position_global,
        field_position_local=field_position_local)
    # inserting at len() is equivalent to appending
    insert_at = len(subfields) if subfield_position is None else subfield_position
    subfields.insert(insert_at, (subfield_code, value))
python
{ "resource": "" }
q263237
record_modify_controlfield
validation
def record_modify_controlfield(rec, tag, controlfield_value,
                               field_position_global=None,
                               field_position_local=None):
    """Modify controlfield at position specified by tag and field number."""
    old_field = record_get_field(
        rec, tag,
        field_position_global=field_position_global,
        field_position_local=field_position_local)
    # keep subfields/indicators/global position, swap the value (index 3)
    updated_field = old_field[:3] + (controlfield_value, old_field[4])
    record_replace_field(
        rec, tag, updated_field,
        field_position_global=field_position_global,
        field_position_local=field_position_local)
python
{ "resource": "" }
q263238
record_modify_subfield
validation
def record_modify_subfield(rec, tag, subfield_code, value, subfield_position,
                           field_position_global=None,
                           field_position_local=None):
    """Overwrite one subfield, addressed by tag, field and subfield position.

    :raise InvenioBibRecordFieldError: if the subfield position is invalid
    """
    subfields = record_get_subfields(
        rec, tag,
        field_position_global=field_position_global,
        field_position_local=field_position_local)

    replacement = (subfield_code, value)
    try:
        subfields[subfield_position] = replacement
    except IndexError:
        raise InvenioBibRecordFieldError(
            "There is no subfield with position '%d'." % subfield_position)
python
{ "resource": "" }
q263239
record_move_subfield
validation
def record_move_subfield(rec, tag, subfield_position, new_subfield_position,
                         field_position_global=None,
                         field_position_local=None):
    """Move a subfield to a new position inside the same field.

    The subfield is addressed by tag, field number and subfield position.

    :raise InvenioBibRecordFieldError: if the source position is invalid
    """
    subfields = record_get_subfields(
        rec, tag,
        field_position_global=field_position_global,
        field_position_local=field_position_local)
    try:
        moved = subfields.pop(subfield_position)
    except IndexError:
        raise InvenioBibRecordFieldError(
            "There is no subfield with position '%d'." % subfield_position)
    # list.insert never raises IndexError, so it can live outside the try.
    subfields.insert(new_subfield_position, moved)
python
{ "resource": "" }
q263240
record_xml_output
validation
def record_xml_output(rec, tags=None, order_fn=None):
    """Generate the MARCXML representation of record ``rec``.

    :param rec: record structure
    :param tags: restrict output to these tags (001 is always included)
    :param order_fn: optional ordering function forwarded to
        record_order_fields
    :return: XML as a string
    """
    if tags is None:
        tags = []
    elif isinstance(tags, str):
        tags = [tags]
    if tags and '001' not in tags:
        # Add the missing controlfield.
        tags.append('001')

    # Pair every selected field with its tag.
    fields = []
    if rec is not None:
        for tag in rec:
            if tags and tag not in tags:
                continue
            for field in rec[tag]:
                fields.append((tag, field))

    if order_fn is None:
        record_order_fields(fields)
    else:
        record_order_fields(fields, order_fn)

    body = [field_xml_output(field, tag) for tag, field in fields]
    return '\n'.join(['<record>'] + body + ['</record>'])
python
{ "resource": "" }
q263241
field_xml_output
validation
def field_xml_output(field, tag):
    """Render a single field as MARCXML and return it as a string."""
    if field[3]:
        # A non-empty controlfield value means this is a controlfield.
        return ' <controlfield tag="%s">%s</controlfield>' % (
            tag, MathMLParser.html_to_text(field[3]))

    lines = [' <datafield tag="%s" ind1="%s" ind2="%s">' % (
        tag, field[1], field[2])]
    for subfield in field[0]:
        lines.append(_subfield_xml_output(subfield))
    lines.append(' </datafield>')
    return '\n'.join(lines)
python
{ "resource": "" }
q263242
print_rec
validation
def print_rec(rec, format=1, tags=None):
    """Print a record.

    :param format: 1 XML, 2 HTML (not implemented)
    :param tags: list of tags to be printed
    """
    if tags is None:
        tags = []
    if format != 1:
        # Only the XML format is implemented.
        return ''
    return record_xml_output(rec, tags)
python
{ "resource": "" }
q263243
print_recs
validation
def print_recs(listofrec, format=1, tags=None):
    """Print a list of records.

    :param format: 1 XML, 2 HTML (not implemented)
    :param tags: list of tags to be printed
           if 'listofrec' is not a list it returns empty string
    """
    if tags is None:
        tags = []
    # isinstance() instead of comparing type(...).__name__ (anti-idiom).
    if not isinstance(listofrec, list):
        return ""
    # join() instead of repeated concatenation (quadratic); each record is
    # still preceded by a newline, as before.
    return "".join("\n" + print_rec(rec, format, tags) for rec in listofrec)
python
{ "resource": "" }
q263244
record_find_field
validation
def record_find_field(rec, tag, field, strict=False):
    """Return global and local position of the first matching field.

    :param rec: A record dictionary structure
    :type rec: dictionary
    :param tag: The tag of the field to search for
    :type tag: string
    :param field: A field tuple as returned by create_field()
    :type field: tuple
    :param strict: A boolean describing the search method. If strict
                   is False, then the order of the subfields doesn't
                   matter. Default search method is strict.
    :type strict: boolean
    :return: A tuple of (global_position, local_position) or a tuple
             (None, None) if the field is not present.
    :rtype: tuple
    :raise InvenioBibRecordFieldError: If the provided field is invalid.
    """
    # Validate up front; the exception propagates to the caller as-is.
    # (The previous try/except merely re-raised the same exception.)
    _check_field_validity(field)

    for local_position, candidate in enumerate(rec.get(tag, [])):
        if _compare_fields(field, candidate, strict):
            return (candidate[4], local_position)

    return (None, None)
python
{ "resource": "" }
q263245
record_match_subfields
validation
def record_match_subfields(rec, tag, ind1=" ", ind2=" ", sub_key=None,
                           sub_value='', sub_key2=None, sub_value2='',
                           case_sensitive=True):
    """Find subfield instances in a particular field.

    It tests values in 1 of 3 possible ways:
     - Does a subfield code exist? (ie does 773__a exist?)
     - Does a subfield have a particular value? (ie 773__a == 'PhysX')
     - Do a pair of subfields have particular values?
        (ie 035__2 == 'CDS' and 035__a == '123456')

    Parameters:
    * rec - dictionary: a bibrecord structure
    * tag - string: the tag of the field (ie '773')
    * ind1, ind2 - char: a single characters for the MARC indicators
    * sub_key - char: subfield key to find
    * sub_value - string: subfield value of that key
    * sub_key2 - char: key of subfield to compare against
    * sub_value2 - string: expected value of second subfield
    * case_sensitive - bool: be case sensitive when matching values

    :return: false if no match found, else provides the field position (int)
    """
    if sub_key is None:
        raise TypeError("None object passed for parameter sub_key.")

    # BUGFIX: compare with ==, not 'is' — string identity is a CPython
    # implementation detail and must never be relied on for equality.
    if sub_key2 is not None and sub_value2 == '':
        raise TypeError("Parameter sub_key2 defined but sub_value2 is "
                        "empty, function requires a value for comparison.")
    ind1, ind2 = _wash_indicators(ind1, ind2)

    if not case_sensitive:
        sub_value = sub_value.lower()
        sub_value2 = sub_value2.lower()

    for field in record_get_field_instances(rec, tag, ind1, ind2):
        subfields = dict(field_get_subfield_instances(field))
        if not case_sensitive:
            # items() (not py2-only iteritems); only values are rebound,
            # no keys are added or removed, so in-place update is safe.
            for k, v in subfields.items():
                subfields[k] = v.lower()
        if sub_key in subfields:
            if sub_value == '':
                # Only existence of the subfield code was requested.
                return field[4]
            if sub_value == subfields[sub_key]:
                if sub_key2 is None:
                    return field[4]
                if sub_key2 in subfields and \
                        sub_value2 == subfields[sub_key2]:
                    return field[4]
    return False
python
{ "resource": "" }
q263246
record_strip_empty_volatile_subfields
validation
def record_strip_empty_volatile_subfields(rec):
    """Remove unchanged volatile subfields from the record."""
    for fields in rec.values():
        for field in fields:
            kept = [subfield for subfield in field[0]
                    if not subfield[1].startswith("VOLATILE:")]
            # Slice-assign so the field tuple's subfield list is mutated
            # in place.
            field[0][:] = kept
python
{ "resource": "" }
q263247
record_make_all_subfields_volatile
validation
def record_make_all_subfields_volatile(rec):
    """Mark every subfield of the record as volatile."""
    for tag in rec.keys():
        for local_position, field in enumerate(rec[tag]):
            for subfield_index, (code, value) in enumerate(field[0]):
                if value.startswith("VOLATILE:"):
                    # Already volatile — leave untouched.
                    continue
                record_modify_subfield(rec, tag, code, "VOLATILE:" + value,
                                       subfield_index,
                                       field_position_local=local_position)
python
{ "resource": "" }
q263248
record_strip_empty_fields
validation
def record_strip_empty_fields(rec, tag=None): """ Remove empty subfields and fields from the record. If 'tag' is not None, only a specific tag of the record will be stripped, otherwise the whole record. :param rec: A record dictionary structure :type rec: dictionary :param tag: The tag of the field to strip empty fields from :type tag: string """ # Check whole record if tag is None: tags = rec.keys() for tag in tags: record_strip_empty_fields(rec, tag) # Check specific tag of the record elif tag in rec: # in case of a controlfield if tag[:2] == '00': if len(rec[tag]) == 0 or not rec[tag][0][3]: del rec[tag] #in case of a normal field else: fields = [] for field in rec[tag]: subfields = [] for subfield in field[0]: # check if the subfield has been given a value if subfield[1]: # Always strip values subfield = (subfield[0], subfield[1].strip()) subfields.append(subfield) if len(subfields) > 0: new_field = create_field(subfields, field[1], field[2], field[3]) fields.append(new_field) if len(fields) > 0: rec[tag] = fields else: del rec[tag]
python
{ "resource": "" }
q263249
record_strip_controlfields
validation
def record_strip_controlfields(rec):
    """Remove all non-empty controlfields from the record.

    :param rec: A record dictionary structure
    :type rec: dictionary
    """
    # Iterate over a snapshot of the keys: we delete entries during the
    # loop, which would invalidate a live keys() view on Python 3.
    for tag in list(rec.keys()):
        if tag[:2] == '00' and rec[tag][0][3]:
            del rec[tag]
python
{ "resource": "" }
q263250
record_order_subfields
validation
def record_order_subfields(rec, tag=None):
    """Order subfields of a record alphabetically based on subfield code.

    If 'tag' is not None, only a specific tag of the record will be
    reordered, otherwise the whole record.

    :param rec: bibrecord
    :type rec: bibrec
    :param tag: tag where the subfields will be ordered
    :type tag: str
    """
    if rec is None:
        return rec
    if tag is None:
        # Snapshot the keys before recursing.
        for tag in list(rec.keys()):
            record_order_subfields(rec, tag)
    elif tag in rec:
        # enumerate() instead of xrange(len(...)): idiomatic, and xrange
        # does not exist on Python 3.
        for i, field in enumerate(rec[tag]):
            # Order subfields alphabetically by subfield code
            ordered_subfields = sorted(field[0],
                                       key=lambda subfield: subfield[0])
            rec[tag][i] = (ordered_subfields, field[1], field[2], field[3],
                          field[4])
python
{ "resource": "" }
q263251
_compare_fields
validation
def _compare_fields(field1, field2, strict=True): """ Compare 2 fields. If strict is True, then the order of the subfield will be taken care of, if not then the order of the subfields doesn't matter. :return: True if the field are equivalent, False otherwise. """ if strict: # Return a simple equal test on the field minus the position. return field1[:4] == field2[:4] else: if field1[1:4] != field2[1:4]: # Different indicators or controlfield value. return False else: # Compare subfields in a loose way. return set(field1[0]) == set(field2[0])
python
{ "resource": "" }
q263252
_check_field_validity
validation
def _check_field_validity(field): """ Check if a field is well-formed. :param field: A field tuple as returned by create_field() :type field: tuple :raise InvenioBibRecordFieldError: If the field is invalid. """ if type(field) not in (list, tuple): raise InvenioBibRecordFieldError( "Field of type '%s' should be either " "a list or a tuple." % type(field)) if len(field) != 5: raise InvenioBibRecordFieldError( "Field of length '%d' should have 5 " "elements." % len(field)) if type(field[0]) not in (list, tuple): raise InvenioBibRecordFieldError( "Subfields of type '%s' should be " "either a list or a tuple." % type(field[0])) if type(field[1]) is not str: raise InvenioBibRecordFieldError( "Indicator 1 of type '%s' should be " "a string." % type(field[1])) if type(field[2]) is not str: raise InvenioBibRecordFieldError( "Indicator 2 of type '%s' should be " "a string." % type(field[2])) if type(field[3]) is not str: raise InvenioBibRecordFieldError( "Controlfield value of type '%s' " "should be a string." % type(field[3])) if type(field[4]) is not int: raise InvenioBibRecordFieldError( "Global position of type '%s' should " "be an int." % type(field[4])) for subfield in field[0]: if (type(subfield) not in (list, tuple) or len(subfield) != 2 or type(subfield[0]) is not str or type(subfield[1]) is not str): raise InvenioBibRecordFieldError( "Subfields are malformed. " "Should a list of tuples of 2 strings.")
python
{ "resource": "" }
q263253
_shift_field_positions_global
validation
def _shift_field_positions_global(record, start, delta=1): """ Shift all global field positions. Shift all global field positions with global field positions higher or equal to 'start' from the value 'delta'. """ if not delta: return for tag, fields in record.items(): newfields = [] for field in fields: if field[4] < start: newfields.append(field) else: # Increment the global field position by delta. newfields.append(tuple(list(field[:4]) + [field[4] + delta])) record[tag] = newfields
python
{ "resource": "" }
q263254
_tag_matches_pattern
validation
def _tag_matches_pattern(tag, pattern): """Return true if MARC 'tag' matches a 'pattern'. 'pattern' is plain text, with % as wildcard Both parameters must be 3 characters long strings. .. doctest:: >>> _tag_matches_pattern("909", "909") True >>> _tag_matches_pattern("909", "9%9") True >>> _tag_matches_pattern("909", "9%8") False :param tag: a 3 characters long string :param pattern: a 3 characters long string :return: False or True """ for char1, char2 in zip(tag, pattern): if char2 not in ('%', char1): return False return True
python
{ "resource": "" }
q263255
_validate_record_field_positions_global
validation
def _validate_record_field_positions_global(record): """ Check if the global field positions in the record are valid. I.e., no duplicate global field positions and local field positions in the list of fields are ascending. :param record: the record data structure :return: the first error found as a string or None if no error was found """ all_fields = [] for tag, fields in record.items(): previous_field_position_global = -1 for field in fields: if field[4] < previous_field_position_global: return ("Non ascending global field positions in tag '%s'." % tag) previous_field_position_global = field[4] if field[4] in all_fields: return ("Duplicate global field position '%d' in tag '%s'" % (field[4], tag))
python
{ "resource": "" }
q263256
_record_sort_by_indicators
validation
def _record_sort_by_indicators(record):
    """Sort each tag's fields by their indicator pair."""
    for tag in record:
        # Rebinding an existing key while iterating is safe (no keys are
        # added or removed).
        record[tag] = _fields_sort_by_indicators(record[tag])
python
{ "resource": "" }
q263257
_fields_sort_by_indicators
validation
def _fields_sort_by_indicators(fields): """Sort a set of fields by their indicators. Return a sorted list with correct global field positions. """ field_dict = {} field_positions_global = [] for field in fields: field_dict.setdefault(field[1:3], []).append(field) field_positions_global.append(field[4]) indicators = field_dict.keys() indicators.sort() field_list = [] for indicator in indicators: for field in field_dict[indicator]: field_list.append(field[:4] + (field_positions_global.pop(0),)) return field_list
python
{ "resource": "" }
q263258
_create_record_lxml
validation
def _create_record_lxml(marcxml,
                        verbose=CFG_BIBRECORD_DEFAULT_VERBOSE_LEVEL,
                        correct=CFG_BIBRECORD_DEFAULT_CORRECT,
                        keep_singletons=CFG_BIBRECORD_KEEP_SINGLETONS):
    """Create a record object using the LXML parser.

    If correct == 1, then perform DTD validation
    If correct == 0, then do not perform DTD validation

    If verbose == 0, the parser will not give warnings.
    If 1 <= verbose <= 3, the parser will not give errors, but will warn
    the user about possible mistakes (implement me!)
    If verbose > 3 then the parser will be strict and will stop in case
    of well-formedness errors or DTD errors.

    :return: record dict mapping tag -> list of
        (subfields, ind1, ind2, controlfield_value, global_position)
    :raise InvenioBibRecordParserError: when lxml fails to parse the input
    """
    parser = etree.XMLParser(dtd_validation=correct,
                             recover=(verbose <= 3))
    if correct:
        # Wrap in a collection element so the document validates.
        marcxml = '<?xml version="1.0" encoding="UTF-8"?>\n' \
                  '<collection>\n%s\n</collection>' % (marcxml,)

    try:
        tree = etree.parse(StringIO(marcxml), parser)
        # parser errors are located in parser.error_log
        # if 1 <= verbose <=3 then show them to the user?
        # if verbose == 0 then continue
        # if verbose >3 then an exception will be thrown
    except Exception as e:
        raise InvenioBibRecordParserError(str(e))

    record = {}
    field_position_global = 0

    # Controlfields: no indicators, no subfields, just a text value.
    # Missing tag attributes become '!' (fixed up later by _correct_record).
    controlfield_iterator = tree.iter(tag='{*}controlfield')
    for controlfield in controlfield_iterator:
        tag = controlfield.attrib.get('tag', '!').encode("UTF-8")
        ind1 = ' '
        ind2 = ' '
        text = controlfield.text
        if text is None:
            text = ''
        else:
            text = text.encode("UTF-8")
        subfields = []
        if text or keep_singletons:
            field_position_global += 1
            record.setdefault(tag, []).append((subfields, ind1, ind2, text,
                                               field_position_global))

    # Datafields: indicators plus a list of (code, value) subfields.
    datafield_iterator = tree.iter(tag='{*}datafield')
    for datafield in datafield_iterator:
        tag = datafield.attrib.get('tag', '!').encode("UTF-8")
        ind1 = datafield.attrib.get('ind1', '!').encode("UTF-8")
        ind2 = datafield.attrib.get('ind2', '!').encode("UTF-8")
        # Normalize empty or '_' indicators to a single blank.
        if ind1 in ('', '_'):
            ind1 = ' '
        if ind2 in ('', '_'):
            ind2 = ' '
        subfields = []
        subfield_iterator = datafield.iter(tag='{*}subfield')
        for subfield in subfield_iterator:
            code = subfield.attrib.get('code', '!').encode("UTF-8")
            text = subfield.text
            if text is None:
                text = ''
            else:
                text = text.encode("UTF-8")
            if text or keep_singletons:
                subfields.append((code, text))
        if subfields or keep_singletons:
            # Datafields carry no controlfield value.
            text = ''
            field_position_global += 1
            record.setdefault(tag, []).append((subfields, ind1, ind2, text,
                                               field_position_global))

    return record
python
{ "resource": "" }
q263259
_get_children_by_tag_name
validation
def _get_children_by_tag_name(node, name): """Retrieve all children from node 'node' with name 'name'.""" try: return [child for child in node.childNodes if child.nodeName == name] except TypeError: return []
python
{ "resource": "" }
q263260
_get_children_as_string
validation
def _get_children_as_string(node): """Iterate through all the children of a node. Returns one string containing the values from all the text-nodes recursively. """ out = [] if node: for child in node: if child.nodeType == child.TEXT_NODE: out.append(child.data) else: out.append(_get_children_as_string(child.childNodes)) return ''.join(out)
python
{ "resource": "" }
q263261
_correct_record
validation
def _correct_record(record):
    """Check and correct the structure of the record.

    Replaces missing tags with '000', missing subfield codes with '' and
    missing indicators with a blank, collecting a numeric error code
    (optionally with a detail string) for each problem found.

    :param record: the record data structure
    :return: a list of errors found
    """
    errors = []

    # NOTE(review): keys are popped and re-added while iterating keys();
    # safe on Python 2 (keys() is a list) — confirm for Python 3.
    for tag in record.keys():
        # Tags longer than 3 characters are compared against a
        # zero-padded upper bound of the same length.
        upper_bound = '999'
        n = len(tag)

        if n > 3:
            i = n - 3
            while i > 0:
                upper_bound = '%s%s' % ('0', upper_bound)
                i -= 1

        # Missing tag. Replace it with dummy tag '000'.
        if tag == '!':
            errors.append((1, '(field number(s): ' +
                           str([f[4] for f in record[tag]]) + ')'))
            record['000'] = record.pop(tag)
            tag = '000'
        elif not ('001' <= tag <= upper_bound or
                  tag in ('FMT', 'FFT', 'BDR', 'BDM')):
            # Tag out of the valid MARC range and not a known special tag.
            errors.append(2)
            record['000'] = record.pop(tag)
            tag = '000'

        fields = []
        for field in record[tag]:
            # Datafield without any subfield.
            if field[0] == [] and field[3] == '':
                errors.append((8, '(field number: ' + str(field[4]) + ')'))

            subfields = []
            for subfield in field[0]:
                if subfield[0] == '!':
                    # Missing subfield code: replace with empty code.
                    errors.append((3, '(field number: ' + str(field[4]) +
                                   ')'))
                    newsub = ('', subfield[1])
                else:
                    newsub = subfield
                subfields.append(newsub)

            if field[1] == '!':
                # Missing first indicator: replace with a blank.
                errors.append((4, '(field number: ' + str(field[4]) + ')'))
                ind1 = " "
            else:
                ind1 = field[1]

            if field[2] == '!':
                # Missing second indicator: replace with a blank.
                errors.append((5, '(field number: ' + str(field[4]) + ')'))
                ind2 = " "
            else:
                ind2 = field[2]
            fields.append((subfields, ind1, ind2, field[3], field[4]))

        record[tag] = fields

    return errors
python
{ "resource": "" }
q263262
_warning
validation
def _warning(code):
    """Return a warning message of code 'code'.

    If code = (cd, str) it returns the warning message of code 'cd' and
    appends str at the end.
    """
    if isinstance(code, str):
        return code

    message = ''
    if isinstance(code, tuple):
        # BUGFIX: unpack the tuple unconditionally. The previous
        # isinstance(code[0], str) guard silently dropped the appended
        # detail string for integer warning codes (the form produced by
        # _correct_record) and then looked up the whole tuple in the
        # message table, always yielding ''.
        message = code[1]
        code = code[0]

    return CFG_BIBRECORD_WARNING_MSGS.get(code, '') + message
python
{ "resource": "" }
q263263
_compare_lists
validation
def _compare_lists(list1, list2, custom_cmp): """Compare twolists using given comparing function. :param list1: first list to compare :param list2: second list to compare :param custom_cmp: a function taking two arguments (element of list 1, element of list 2) and :return: True or False depending if the values are the same """ if len(list1) != len(list2): return False for element1, element2 in zip(list1, list2): if not custom_cmp(element1, element2): return False return True
python
{ "resource": "" }
q263264
BibRecordPackage.parse
validation
def parse(self, path_to_xml=None):
    """Parse an XML document and clean any namespaces.

    Accepts a plain MARCXML <collection>, a single <record>, or an
    OAI-PMH response (ListRecords/GetRecord). Parsed records are stored
    in self.records; OAI-deleted entries go to self.deleted_records.
    """
    if not path_to_xml:
        if not self.path:
            self.logger.error("No path defined!")
            return
        path_to_xml = self.path

    root = self._clean_xml(path_to_xml)

    # See first of this XML is clean or OAI request
    if root.tag.lower() == 'collection':
        tree = ET.ElementTree(root)
        self.records = element_tree_collection_to_records(tree)
    elif root.tag.lower() == 'record':
        # Wrap the single record in a collection for uniform handling.
        new_root = ET.Element('collection')
        new_root.append(root)
        tree = ET.ElementTree(new_root)
        self.records = element_tree_collection_to_records(tree)
    else:
        # We have an OAI request
        header_subs = get_request_subfields(root)

        records = root.find('ListRecords')
        if records is None:
            records = root.find('GetRecord')
        if records is None:
            raise ValueError("Cannot find ListRecords or GetRecord!")

        tree = ET.ElementTree(records)
        for record, is_deleted in element_tree_oai_records(tree,
                                                           header_subs):
            if is_deleted:
                # It was OAI deleted. Create special record
                self.deleted_records.append(
                    self.create_deleted_record(record)
                )
            else:
                self.records.append(record)
python
{ "resource": "" }
q263265
BibRecordPackage._clean_xml
validation
def _clean_xml(self, path_to_xml):
    """Clean MARCXML harvested from OAI.

    Allows the xml to be used with BibUpload or BibRecord.

    :param path_to_xml: either XML as a string or path to an XML file
    :return: namespace-stripped ElementTree root of the data
    """
    try:
        # Accept both a filesystem path and a raw XML string.
        if os.path.isfile(path_to_xml):
            tree = ET.parse(path_to_xml)
            root = tree.getroot()
        else:
            root = ET.fromstring(path_to_xml)
    except Exception, e:  # Python 2 except syntax; file predates Python 3.
        self.logger.error("Could not read OAI XML, aborting filter!")
        raise e
    strip_xml_namespace(root)
    return root
python
{ "resource": "" }
q263266
BibRecordPackage.create_deleted_record
validation
def create_deleted_record(self, record):
    """Generate the record deletion if deleted form OAI-PMH."""
    identifier = record_get_field_value(record, tag="037", code="a")
    parts = identifier.split(":")
    recid = parts[-1]
    # The source sits between the scheme and the recid; fall back when
    # the identifier has no such segment.
    source = parts[1] if len(parts) > 1 else "Unknown"
    record_add_field(record, "035",
                     subfields=[("9", source), ("a", recid)])
    record_add_field(record, "980",
                     subfields=[("c", "DELETED")])
    return record
python
{ "resource": "" }
q263267
YesssSMS._login
validation
def _login(self, session, get_request=False):
    """Return a logged-in session for yesss.at.

    :param session: a requests session to authenticate
    :param get_request: when True, also return the login response
    :raises AccountSuspendedError: if the page reports a locked account
    :raises LoginError: if the credentials are rejected
    """
    req = session.post(self._login_url, data=self._logindata)
    # Login failed if the error marker appears in the page, the server
    # answered 403, or we were bounced back to the login URL.
    if _LOGIN_ERROR_STRING in req.text or \
            req.status_code == 403 or \
            req.url == _LOGIN_URL:
        err_mess = "YesssSMS: login failed, username or password wrong"

        if _LOGIN_LOCKED_MESS in req.text:
            err_mess += ", page says: " + _LOGIN_LOCKED_MESS_ENG
            self._suspended = True
            raise self.AccountSuspendedError(err_mess)
        raise self.LoginError(err_mess)

    self._suspended = False  # login worked

    return (session, req) if get_request else session
python
{ "resource": "" }
q263268
YesssSMS.login_data_valid
validation
def login_data_valid(self):
    """Check for working login data."""
    try:
        with self._login(requests.Session()) as sess:
            sess.get(self._logout_url)
    except self.LoginError:
        return False
    return True
python
{ "resource": "" }
q263269
YesssSMS.send
validation
def send(self, recipient, message):
    """Send an SMS.

    :param recipient: destination phone number (str)
    :param message: text to send
    :raises LoginError: if login data is missing
    :raises NoRecipientError / EmptyMessageError: on missing arguments
    :raises SMSSendingError / UnsupportedCharsError: on delivery failure
    """
    if self._logindata['login_rufnummer'] is None or \
            self._logindata['login_passwort'] is None:
        err_mess = "YesssSMS: Login data required"
        raise self.LoginError(err_mess)
    if not recipient:
        raise self.NoRecipientError("YesssSMS: recipient number missing")
    if not isinstance(recipient, str):
        raise ValueError("YesssSMS: str expected as recipient number")
    if not message:
        raise self.EmptyMessageError("YesssSMS: message is empty")

    with self._login(requests.Session()) as sess:
        sms_data = {'to_nummer': recipient, 'nachricht': message}
        req = sess.post(self._websms_url, data=sms_data)

        if not (req.status_code == 200 or req.status_code == 302):
            raise self.SMSSendingError("YesssSMS: error sending SMS")

        # A 200 status alone does not imply success: inspect the
        # response body for failure markers.
        if _UNSUPPORTED_CHARS_STRING in req.text:
            raise self.UnsupportedCharsError(
                "YesssSMS: message contains unsupported character(s)")
        if _SMS_SENDING_SUCCESSFUL_STRING not in req.text:
            raise self.SMSSendingError("YesssSMS: error sending SMS")
        sess.get(self._logout_url)
python
{ "resource": "" }
q263270
WorldScientific.get_date
validation
def get_date(self, filename):
    """Return the date of the article in file.

    Falls back to today's date (YYYY-MM-DD) when no date can be
    extracted from the document.
    """
    try:
        self.document = parse(filename)
        return self._get_date()
    except DateNotFoundException:
        print("Date problem found in {0}".format(filename))
        return datetime.datetime.strftime(datetime.datetime.now(),
                                          "%Y-%m-%d")
python
{ "resource": "" }
q263271
WorldScientific.get_collection
validation
def get_collection(self, journal):
    """Return this articles' collection.

    Returns the list of 980 subfields: conference papers, review
    articles and regular published articles get different sets.
    """
    conference = ''
    # The last <conference> element found wins.
    for tag in self.document.getElementsByTagName('conference'):
        conference = xml_to_text(tag)
    if conference or journal == "International Journal of Modern Physics: Conference Series":
        return [('a', 'HEP'), ('a', 'ConferencePaper')]
    elif self._get_article_type() == "review-article":
        return [('a', 'HEP'), ('a', 'Review')]
    else:
        return [('a', 'HEP'), ('a', 'Published')]
python
{ "resource": "" }
q263272
WorldScientific._attach_fulltext
validation
def _attach_fulltext(self, rec, doi):
    """Attach fulltext FFT."""
    fulltext_url = os.path.join(self.url_prefix, doi)
    record_add_field(rec, 'FFT',
                     subfields=[('a', fulltext_url),
                                ('t', 'INSPIRE-PUBLIC'),
                                ('d', 'Fulltext')])
python
{ "resource": "" }
q263273
MARCXMLConversion.convert_all
validation
def convert_all(cls, records):
    """Convert the list of bibrecs into one MARCXML.

    >>> from harvestingkit.bibrecord import BibRecordPackage
    >>> from harvestingkit.inspire_cds_package import Inspire2CDS
    >>> bibrecs = BibRecordPackage("inspire.xml")
    >>> bibrecs.parse()
    >>> xml = Inspire2CDS.convert_all(bibrecs.get_records())

    :param records: list of BibRecord dicts
    :type records: list

    :returns: MARCXML as string
    """
    parts = ["<collection>"]
    parts.extend(cls(rec).convert() for rec in records)
    parts.append("</collection>")
    return "\n".join(parts)
python
{ "resource": "" }
q263274
MARCXMLConversion.from_source
validation
def from_source(cls, source):
    """Yield single conversion objects from a MARCXML file or string.

    >>> from harvestingkit.inspire_cds_package import Inspire2CDS
    >>> for record in Inspire2CDS.from_source("inspire.xml"):
    >>>     xml = record.convert()
    """
    package = BibRecordPackage(source)
    package.parse()
    for bibrec in package.get_records():
        yield cls(bibrec)
python
{ "resource": "" }
q263275
MARCXMLConversion.get_config_item
validation
def get_config_item(cls, key, kb_name, allow_substring=True):
    """Return the opposite mapping by searching the imported KB.

    Falls back to substring matching against the KB keys when allowed,
    and to the key itself when nothing matches.
    """
    config_dict = cls.kbs.get(kb_name, None)
    if config_dict:
        if key in config_dict:
            return config_dict[key]
        if allow_substring:
            # First entry whose key contains ``key`` wins.
            for existing_key, mapped_value in config_dict.items():
                if key in existing_key:
                    return mapped_value
    return key
python
{ "resource": "" }
q263276
MARCXMLConversion.load_config
validation
def load_config(from_key, to_key):
    """Load configuration from config.

    Builds, for every knowledge base in ``mappings['config']``, a lookup
    dict that maps each entry's ``from_key`` value to its ``to_key``
    value.

    Meant to run only once per system process as class variable in
    subclasses.

    :param from_key: entry key to map from (e.g. 'inspire')
    :param to_key: entry key to map to (e.g. 'cds')
    :return: dict of {kb_name: {from_value: to_value}}
    """
    from .mappings import mappings
    kbs = {}
    # NOTE: iteritems() is Python 2 only.
    for key, values in mappings['config'].iteritems():
        parse_dict = {}
        for mapping in values:
            # {'inspire': 'Norwegian', 'cds': 'nno'}
            # -> {"Norwegian": "nno"}
            parse_dict[mapping[from_key]] = mapping[to_key]
        kbs[key] = parse_dict
    return kbs
python
{ "resource": "" }
q263277
MARCXMLConversion.match
validation
def match(self, query=None, **kwargs):
    """Try to match the current record to the database.

    :param query: optional search pattern with %-style placeholders
        filled from kwargs; defaults to an 035 lookup on this record's
        001 recid
    :return: result of perform_request_search (record IDs)
    """
    from invenio.search_engine import perform_request_search
    if not query:
        # We use default setup
        recid = self.record["001"][0][3]
        return perform_request_search(p="035:%s" % (recid,),
                                      of="id")
    else:
        if "recid" not in kwargs:
            kwargs["recid"] = self.record["001"][0][3]
        return perform_request_search(p=query % kwargs,
                                      of="id")
python
{ "resource": "" }
q263278
MARCXMLConversion.keep_only_fields
validation
def keep_only_fields(self):
    """Keep only fields listed in field_list."""
    # Iterate over a snapshot: record_delete_fields removes keys, which
    # would invalidate a live keys() view on Python 3.
    for tag in list(self.record.keys()):
        if tag not in self.fields_list:
            record_delete_fields(self.record, tag)
python
{ "resource": "" }
q263279
MARCXMLConversion.strip_fields
validation
def strip_fields(self):
    """Clear any fields listed in field_list."""
    # Iterate over a snapshot: record_delete_fields removes keys, which
    # would invalidate a live keys() view on Python 3.
    for tag in list(self.record.keys()):
        if tag in self.fields_list:
            record_delete_fields(self.record, tag)
python
{ "resource": "" }
q263280
MARCXMLConversion.add_systemnumber
validation
def add_systemnumber(self, source, recid=None):
    """Add 035 number from 001 recid with given source."""
    recid = recid or self.get_recid()
    if recid and not self.hidden:
        record_add_field(
            self.record,
            tag='035',
            subfields=[('9', source), ('a', recid)])
python
{ "resource": "" }
q263281
MARCXMLConversion.add_control_number
validation
def add_control_number(self, tag, value):
    """Add a control-number 00x for given tag with value."""
    record_add_field(self.record, tag,
                     controlfield_value=value)
python
{ "resource": "" }
q263282
MARCXMLConversion.update_subject_categories
validation
def update_subject_categories(self, primary, secondary, kb):
    """650 Translate Categories.

    Rewrites every 650 1_7 field: when the KB maps the 'a' subfield to a
    different value, the field is re-added with the ``secondary`` scheme
    and the translated term, otherwise with the ``primary`` scheme and
    the original term.
    """
    category_fields = record_get_field_instances(self.record,
                                                 tag='650',
                                                 ind1='1',
                                                 ind2='7')
    record_delete_fields(self.record, "650")
    for field in category_fields:
        for idx, (key, value) in enumerate(field[0]):
            if key == 'a':
                new_value = self.get_config_item(value, kb)
                if new_value != value:
                    new_subs = [('2', secondary), ('a', new_value)]
                else:
                    new_subs = [('2', primary), ('a', value)]
                record_add_field(self.record, "650", ind1="1", ind2="7",
                                 subfields=new_subs)
                # Only the first 'a' subfield of each field is used.
                break
python
{ "resource": "" }
q263283
FtpHandler.connect
validation
def connect(self):
    """Connect and log in to the FTP server.

    connect() is called with no arguments, so the host/port already
    configured on self._ftp are used (presumably set in __init__ —
    confirm); credentials come from the instance.
    """
    self._ftp.connect()
    self._ftp.login(user=self._username, passwd=self._passwd)
python
{ "resource": "" }
q263284
FtpHandler.download
validation
def download(self, source_file, target_folder=''):
    """Download a file from the FTP server to target folder.

    :param source_file: the absolute path for the file on the server
                        it can be the one of the files coming from
                        FtpHandler.dir().
    :type source_file: string
    :param target_folder: relative or absolute path of the
                          destination folder default is the
                          working directory.
    :type target_folder: string
    """
    current_folder = self._ftp.pwd()
    if not target_folder.startswith('/'):  # relative path
        target_folder = join(getcwd(), target_folder)
    folder = os.path.dirname(source_file)
    self.cd(folder)
    # Mirror the server-side folder layout under target_folder.
    if folder.startswith("/"):
        folder = folder[1:]
    destination_folder = join(target_folder, folder)
    if not os.path.exists(destination_folder):
        print("Creating folder", destination_folder)
        os.makedirs(destination_folder)
    source_file = os.path.basename(source_file)
    destination = join(destination_folder, source_file)
    try:
        with open(destination, 'wb') as result:
            self._ftp.retrbinary('RETR %s' % (source_file,),
                                 result.write)
    except error_perm as e:  # source_file is a folder
        # Remove the empty file created by open() before re-raising.
        print(e)
        remove(join(target_folder, source_file))
        raise
    self._ftp.cwd(current_folder)
python
{ "resource": "" }
q263285
FtpHandler.cd
validation
def cd(self, folder):
    """Change the working directory on the server.

    :param folder: the desired directory.
    :type folder: string
    """
    if folder.startswith('/'):
        # Absolute path: a single cwd call suffices.
        self._ftp.cwd(folder)
        return
    # Relative path: descend one component at a time.
    for component in folder.split('/'):
        if component:
            self._ftp.cwd(component)
python
{ "resource": "" }
q263286
FtpHandler.ls
validation
def ls(self, folder=''):
    """List the files and folders of a specific directory.

    Default is the current working directory.

    :param folder: the folder to be listed.
    :type folder: string
    :returns: a tuple with the list of files in the folder
              and the list of subfolders in the folder.
    """
    current_folder = self._ftp.pwd()
    self.cd(folder)
    contents = []
    self._ftp.retrlines('LIST', lambda a: contents.append(a))
    # Unix-style LIST output: lines starting with '-' are files, with
    # 'd' directories. NOTE(review): on Python 3 filter/map return lazy
    # iterators, not lists — this code assumes Python 2.
    files = filter(lambda a: a.split()[0].startswith('-'), contents)
    folders = filter(lambda a: a.split()[0].startswith('d'), contents)
    # Keep only the name part (columns 9+), rejoining names with spaces.
    files = map(lambda a: ' '.join(a.split()[8:]), files)
    folders = map(lambda a: ' '.join(a.split()[8:]), folders)
    self._ftp.cwd(current_folder)
    return files, folders
python
{ "resource": "" }
q263287
FtpHandler.mkdir
validation
def mkdir(self, folder):
    """
    Creates a folder in the server

    :param folder: the folder to be created.
    :type folder: string
    """
    current_folder = self._ftp.pwd()
    # Walk the path one component at a time, creating every
    # component that cannot be entered.
    for component in folder.split('/'):
        try:
            self.cd(component)
        except error_perm:  # the component does not exist yet
            self._ftp.mkd(component)
            self.cd(component)
    self.cd(current_folder)
python
{ "resource": "" }
q263288
FtpHandler.rm
validation
def rm(self, filename):
    """
    Delete a file from the server.

    :param filename: the file to be deleted.
    :type filename: string
    """
    try:
        self._ftp.delete(filename)
        return
    except error_perm:
        # The target is either a directory or it does not exist;
        # probe it with cd to tell the two cases apart.
        pass
    try:
        current_folder = self._ftp.pwd()
        self.cd(filename)
    except error_perm:
        print('550 Delete operation failed %s '
              'does not exist!' % (filename,))
    else:
        self.cd(current_folder)
        print('550 Delete operation failed %s '
              'is a folder. Use rmdir function '
              'to delete it.' % (filename,))
python
{ "resource": "" }
q263289
FtpHandler.rmdir
validation
def rmdir(self, foldername):
    """
    Delete a folder from the server, recursing into non-empty folders.

    :param foldername: the folder to be deleted.
    :type foldername: string
    """
    current_folder = self._ftp.pwd()
    try:
        self.cd(foldername)
    except error_perm:
        print('550 Delete operation failed folder %s '
              'does not exist!' % (foldername,))
    else:
        self.cd(current_folder)
        try:
            self._ftp.rmd(foldername)
        except error_perm:  # folder not empty
            self.cd(foldername)
            files, subfolders = self.ls()
            # Explicit loops instead of map(): map() is lazy on
            # Python 3, so the original never actually executed the
            # deletions.  (On Python 2 the loops behave the same.)
            for filename in files:
                self._ftp.delete(filename)
            for subfolder in subfolders:
                self.rmdir(subfolder)
            self.cd(current_folder)
            self._ftp.rmd(foldername)
python
{ "resource": "" }
q263290
FtpHandler.get_filesize
validation
def get_filesize(self, filename):
    """
    Returns the filesize of a file

    :param filename: the full path to the file on the server.
    :type filename: string

    :returns: string representation of the filesize.
    """
    sizes = []

    def record_size(listing_line):
        # The 5th whitespace-separated field of a LIST line holds
        # the size in bytes.
        sizes.append(listing_line.split()[4])

    self._ftp.dir(filename, record_size)
    return sizes[0]
python
{ "resource": "" }
q263291
FtpHandler.upload
validation
def upload(self, filename, location=''):
    """
    Uploads a file on the server to the desired location

    :param filename: the name of the file to be uploaded.
    :type filename: string
    :param location: the directory in which the file
                     will be stored.
    :type location: string
    """
    current_folder = self._ftp.pwd()
    self.mkdir(location)
    self.cd(location)
    # 'with' guarantees the local handle is closed even when the
    # transfer fails (the original leaked it on error).
    with open(filename, 'rb') as fl:
        # Store under the bare file name, not the local path.
        remote_name = filename.split('/')[-1]
        self._ftp.storbinary('STOR %s' % remote_name, fl)
    self.cd(current_folder)
python
{ "resource": "" }
q263292
TextBlockParser.parse_data
validation
def parse_data(self, text, maxwidth, maxheight, template_dir,
               context, urlize_all_links):
    """
    Parses a block of text indiscriminately, replacing every URL that
    an OEmbed provider recognizes with its rendered embed (or, when
    urlize_all_links is set, wrapping unrecognized URLs in an anchor).
    """
    # create a dictionary of user urls -> rendered responses
    replacements = {}
    user_urls = set(re.findall(URL_RE, text))

    for user_url in user_urls:
        try:
            resource = oembed.site.embed(user_url, maxwidth=maxwidth,
                                         maxheight=maxheight)
        except OEmbedException:
            if urlize_all_links:
                replacements[user_url] = \
                    '<a href="%(LINK)s">%(LINK)s</a>' % {'LINK': user_url}
        else:
            context['minwidth'] = min(maxwidth, resource.width)
            context['minheight'] = min(maxheight, resource.height)

            replacement = self.render_oembed(
                resource,
                user_url,
                template_dir=template_dir,
                context=context
            )
            replacements[user_url] = replacement.strip()

    # go through the text recording URLs that can be replaced
    # taking note of their start & end indexes
    user_urls = re.finditer(URL_RE, text)
    matches = []
    for match in user_urls:
        if match.group() in replacements:
            matches.append([match.start(), match.end(), match.group()])

    # replace the URLs in order, offsetting the indices each go
    for indx, (start, end, user_url) in enumerate(matches):
        replacement = replacements[user_url]
        difference = len(replacement) - len(user_url)

        # insert the replacement between two slices of text
        # surrounding the original url
        text = text[:start] + replacement + text[end:]

        # iterate through the rest of the matches offsetting their
        # indices based on the difference between replacement/original.
        # range() instead of xrange(): xrange does not exist on
        # Python 3 (NameError); range() is equivalent here on both.
        for j in range(indx + 1, len(matches)):
            matches[j][0] += difference
            matches[j][1] += difference

    return mark_safe(text)
python
{ "resource": "" }
q263293
TextParser.parse_data
validation
def parse_data(self, text, maxwidth, maxheight, template_dir,
               context, urlize_all_links):
    """
    Parses a block of text rendering links that occur on their own line
    normally but rendering inline links using a special template dir
    """
    block_parser = TextBlockParser()

    rendered_lines = []
    for raw_line in text.splitlines():
        if not STANDALONE_URL_RE.match(raw_line):
            # Lines with inline URLs are delegated to the block parser
            # using the special 'inline' template directory.
            rendered_lines.append(block_parser.parse(
                raw_line, maxwidth, maxheight, 'inline', context,
                urlize_all_links))
            continue
        user_url = raw_line.strip()
        try:
            resource = oembed.site.embed(user_url, maxwidth=maxwidth,
                                         maxheight=maxheight)
            context['minwidth'] = min(maxwidth, resource.width)
            context['minheight'] = min(maxheight, resource.height)
        except OEmbedException:
            if urlize_all_links:
                rendered_lines.append(
                    '<a href="%(LINK)s">%(LINK)s</a>' % {'LINK': user_url})
            else:
                # No provider and no urlizing: keep the line as-is.
                rendered_lines.append(raw_line)
            continue
        rendered_lines.append(self.render_oembed(
            resource, user_url, template_dir=template_dir,
            context=context))
    return mark_safe('\n'.join(rendered_lines))
python
{ "resource": "" }
q263294
login
validation
def login(email=None, password=None, api_key=None, application='Default',
          url=None, verify_ssl_certificate=True):
    """
    Do the legwork of logging into the Midas Server instance, storing
    the API key and token.

    :param email: (optional) Email address to login with. If not set,
        the console will be prompted.
    :type email: None | string
    :param password: (optional) User password to login with. If not set
        and no 'api_key' is set, the console will be prompted.
    :type password: None | string
    :param api_key: (optional) API key to login with. If not set,
        password login with be used.
    :type api_key: None | string
    :param application: (optional) Application name to be used with
        'api_key'.
    :type application: string
    :param url: (optional) URL address of the Midas Server instance to
        login to. If not set, the console will be prompted.
    :type url: None | string
    :param verify_ssl_certificate: (optional) If True, the SSL
        certificate will be verified
    :type verify_ssl_certificate: bool
    :returns: API token.
    :rtype: string
    """
    # raw_input exists only on Python 2; fall back to input on Python 3.
    try:
        prompt = raw_input
    except NameError:
        prompt = input

    if url is None:
        url = prompt('Server URL: ')
    url = url.rstrip('/')

    if session.communicator is None:
        session.communicator = Communicator(url)
    else:
        session.communicator.url = url
    session.communicator.verify_ssl_certificate = verify_ssl_certificate

    session.email = email if email is not None else prompt('Email: ')

    if api_key is not None:
        session.api_key = api_key
        session.application = application
    else:
        if password is None:
            password = getpass.getpass()
        session.api_key = session.communicator.get_default_api_key(
            session.email, password)
        # The default API key is always bound to the 'Default'
        # application, regardless of the 'application' argument.
        session.application = 'Default'

    return renew_token()
python
{ "resource": "" }
q263295
renew_token
validation
def renew_token():
    """
    Renew or get a token to use for transactions with the Midas Server
    instance.

    :returns: API token.
    :rtype: string
    """
    session.token = session.communicator.login_with_api_key(
        session.email, session.api_key, application=session.application)
    # HACK inherited from the original: a suspiciously short "token"
    # actually signals that multi-factor authentication is enabled,
    # and a one-time password is required to obtain the real token.
    if len(session.token) < 10:
        otp = getpass.getpass('One-Time Password: ')
        session.token = session.communicator.mfa_otp_login(
            session.token, otp)
    return session.token
python
{ "resource": "" }
q263296
_create_or_reuse_item
validation
def _create_or_reuse_item(local_file, parent_folder_id,
                          reuse_existing=False):
    """
    Create an item from the local file in the Midas Server folder
    corresponding to the parent folder id.

    :param local_file: full path to a file on the local file system
    :type local_file: string
    :param parent_folder_id: id of parent folder on the Midas Server
        instance, where the item will be added
    :type parent_folder_id: int | long
    :param reuse_existing: (optional) whether to accept an existing
        item of the same name in the same location, or create a new
        one instead
    :type reuse_existing: bool
    """
    item_name = os.path.basename(local_file)

    if reuse_existing:
        # Reuse the first item in the folder carrying the same name.
        children = session.communicator.folder_children(
            session.token, parent_folder_id)
        for existing in children['items']:
            if existing['name'] == item_name:
                return existing['item_id']

    # No match found (or reuse disabled): create a fresh item.
    created = session.communicator.create_item(
        session.token, item_name, parent_folder_id)
    return created['item_id']
python
{ "resource": "" }
q263297
_create_or_reuse_folder
validation
def _create_or_reuse_folder(local_folder, parent_folder_id,
                            reuse_existing=False):
    """
    Create a folder from the local file in the midas folder
    corresponding to the parent folder id.

    :param local_folder: full path to a directory on the local file
        system
    :type local_folder: string
    :param parent_folder_id: id of parent folder on the Midas Server
        instance, where the folder will be added
    :type parent_folder_id: int | long
    :param reuse_existing: (optional) whether to accept an existing
        folder of the same name in the same location, or create a new
        one instead
    :type reuse_existing: bool
    """
    folder_name = os.path.basename(local_folder)

    if reuse_existing:
        # Reuse the first subfolder carrying the same name.
        children = session.communicator.folder_children(
            session.token, parent_folder_id)
        for existing in children['folders']:
            if existing['name'] == folder_name:
                return existing['folder_id']

    # No match found (or reuse disabled): create a fresh folder.
    created = session.communicator.create_folder(
        session.token, folder_name, parent_folder_id)
    return created['folder_id']
python
{ "resource": "" }
q263298
_streaming_file_md5
validation
def _streaming_file_md5(file_path): """ Create and return a hex checksum using the MD5 sum of the passed in file. This will stream the file, rather than load it all into memory. :param file_path: full path to the file :type file_path: string :returns: a hex checksum :rtype: string """ md5 = hashlib.md5() with open(file_path, 'rb') as f: # iter needs an empty byte string for the returned iterator to halt at # EOF for chunk in iter(lambda: f.read(128 * md5.block_size), b''): md5.update(chunk) return md5.hexdigest()
python
{ "resource": "" }
q263299
_create_bitstream
validation
def _create_bitstream(file_path, local_file, item_id, log_ind=None):
    """
    Create a bitstream in the given item.

    :param file_path: full path to the local file
    :type file_path: string
    :param local_file: name of the local file
    :type local_file: string
    :param item_id: id of the item that receives the bitstream
    :param log_ind: (optional) any additional message to log upon
        creation of the bitstream
    :type log_ind: None | string
    """
    checksum = _streaming_file_md5(file_path)
    upload_token = session.communicator.generate_upload_token(
        session.token, item_id, local_file, checksum)

    if upload_token == '':
        # An empty token means the server already has a bitstream with
        # this checksum; only a link is added -- no upload needed.
        log_trace = 'Adding a bitstream link in this item to an ' \
                    'existing bitstream from {0}'.format(file_path)
    else:
        log_trace = 'Uploading bitstream from {0}'.format(file_path)
        session.communicator.perform_upload(
            upload_token, local_file, filepath=file_path, itemid=item_id)

    if log_ind is not None:
        log_trace += log_ind
    print(log_trace)
python
{ "resource": "" }