Search is not available for this dataset
text stringlengths 75 104k |
|---|
def request(self, url, method="GET", data=None, params=None, retry=True):
"""
Make a request to the NuHeat API
:param url: The URL to request
:param method: The type of request to make (GET, POST)
:param data: Data to be sent along with POST requests
:param params: Query... |
def handle_starttag(self, tag, attrs):
"""Return representation of html start tag and attributes."""
if tag in self.mathml_elements:
final_attr = ""
for key, value in attrs:
final_attr += ' {0}="{1}"'.format(key, value)
self.fed.append("<{0}{1}>".forma... |
def handle_endtag(self, tag):
    """Append the textual form of an HTML end tag.

    Only end tags for MathML elements are kept; any other tag is
    silently dropped.
    """
    if tag not in self.mathml_elements:
        return
    self.fed.append("</{0}>".format(tag))
def html_to_text(cls, html):
    """Return *html* stripped of markup, keeping only MathML tags.

    A fresh parser instance is fed the raw HTML; the collected data is
    unescaped and then re-escaped for XML while preserving MathML.
    """
    parser = cls()
    parser.feed(html)
    plain = parser.unescape(parser.get_data())
    return escape_for_xml(plain, tags_to_keep=parser.mathml_elements)
def is_instance(self):
"""return True if callback is an instance of a class"""
ret = False
val = self.callback
if self.is_class(): return False
ret = not inspect.isfunction(val) and not inspect.ismethod(val)
# if is_py2:
# ret = isinstance(val, types.Instance... |
def is_function(self):
    """Return True if callback is a vanilla plain jane function."""
    # Instances and classes are ruled out first; what remains must
    # simply be callable (or an explicit classmethod wrapper).
    is_other_kind = self.is_instance() or self.is_class()
    if is_other_kind:
        return False
    return isinstance(self.callback, (Callable, classmethod))
def merge_kwargs(self, kwargs):
"""these kwargs come from the @arg decorator, they are then merged into any
keyword arguments that were automatically generated from the main function
introspection"""
if kwargs:
self.parser_kwargs.update(kwargs)
#self.parser_kwargs['d... |
def merge_from_list(self, list_args):
"""find any matching parser_args from list_args and merge them into this
instance
list_args -- list -- an array of (args, kwargs) tuples
"""
def xs(name, parser_args, list_args):
"""build the generator of matching list_args"""
... |
def set_default(self, na):
"""this is used for introspection from the main() method when there is an
argument with a default value, this figures out how to set up the ArgParse
arguments"""
kwargs = {}
if isinstance(na, (type, types.FunctionType)):
# if foo=some_func t... |
def _fill_text(self, text, width, indent):
"""Overridden to not get rid of newlines
https://github.com/python/cpython/blob/2.7/Lib/argparse.py#L620"""
lines = []
for line in text.splitlines(False):
if line:
# https://docs.python.org/2/library/textwrap.html
... |
def parse_callback_args(self, raw_args):
"""This is the method that is called from Script.run(), this is the insertion
point for parsing all the arguments though on init this will find all args it
can, so this method pulls already found args from class variables"""
args = []
arg_... |
def find_args(self):
"""when a new parser is created this is the method that is called from its
__init__ method to find all the arguments"""
arg_info = self.arg_info
main = self.callback
cbi = CallbackInspect(main)
all_arg_names = set()
decorator_args = cbi.args
... |
def normalize_quiet_arg(self, arg_strings):
"""This is a hack to allow `--quiet` and `--quiet DI` to work correctly,
basically it goes through all arg_strings and if it finds --quiet it checks
the next argument to see if it is some combination of DIWEC, if it is then
it combines it to `-... |
def make_user_agent(component=None):
""" create string suitable for HTTP User-Agent header """
packageinfo = pkg_resources.require("harvestingkit")[0]
useragent = "{0}/{1}".format(packageinfo.project_name, packageinfo.version)
if component is not None:
useragent += " {0}".format(component)
r... |
def record_add_field(rec, tag, ind1='', ind2='', subfields=[],
controlfield_value=''):
"""Add a MARCXML datafield as a new child to a XML document."""
if controlfield_value:
doc = etree.Element("controlfield",
attrib={
"tag... |
def record_xml_output(rec, pretty=True):
"""Given a document, return XML prettified."""
from .html_utils import MathMLParser
ret = etree.tostring(rec, xml_declaration=False)
# Special MathML handling
ret = re.sub("(<)(([\/]?{0}))".format("|[\/]?".join(MathMLParser.mathml_elements)), '<\g<2>', re... |
def escape_for_xml(data, tags_to_keep=None):
"""Transform & and < to XML valid & and <.
Pass a list of tags as string to enable replacement of
'<' globally but keep any XML tags in the list.
"""
data = re.sub("&", "&", data)
if tags_to_keep:
data = re.sub(r"(<)(?![\/]?({0})\b)... |
def format_arxiv_id(arxiv_id):
"""Properly format arXiv IDs."""
if arxiv_id and "/" not in arxiv_id and "arXiv" not in arxiv_id:
return "arXiv:%s" % (arxiv_id,)
elif arxiv_id and '.' not in arxiv_id and arxiv_id.lower().startswith('arxiv:'):
return arxiv_id[6:] # strip away arxiv: for old i... |
def collapse_initials(name):
    """Remove the space between initials, eg T. A. --> T.A."""
    # Only names containing at least one dot can hold initials at all.
    if "." in name:
        name = re.sub(r'([A-Z]\.)[\s\-]+(?=[A-Z]\.)', r'\1', name)
    return name
def fix_journal_name(journal, knowledge_base):
"""Convert journal name to Inspire's short form."""
if not journal:
return '', ''
if not knowledge_base:
return journal, ''
if len(journal) < 2:
return journal, ''
volume = ''
if (journal[-1] <= 'Z' and journal[-1] >= 'A') \
... |
def add_nations_field(authors_subfields):
"""Add correct nations field according to mapping in NATIONS_DEFAULT_MAP."""
from .config import NATIONS_DEFAULT_MAP
result = []
for field in authors_subfields:
if field[0] == 'v':
values = [x.replace('.', '') for x in field[1].split(', ')]
... |
def fix_dashes(string):
"""Fix bad Unicode special dashes in string."""
string = string.replace(u'\u05BE', '-')
string = string.replace(u'\u1806', '-')
string = string.replace(u'\u2E3A', '-')
string = string.replace(u'\u2E3B', '-')
string = unidecode(string)
return re.sub(r'--+', '-', string... |
def fix_title_capitalization(title):
"""Try to capitalize properly a title string."""
if re.search("[A-Z]", title) and re.search("[a-z]", title):
return title
word_list = re.split(' +', title)
final = [word_list[0].capitalize()]
for word in word_list[1:]:
if word.upper() in COMMON_AC... |
def convert_html_subscripts_to_latex(text):
    """Convert some HTML tags to latex equivalents."""
    # Table-driven: each (pattern, replacement) pair maps one HTML
    # tag pair onto its LaTeX math-mode form.
    conversions = (
        ("<sub>(.*?)</sub>", r"$_{\1}$"),
        ("<sup>(.*?)</sup>", r"$^{\1}$"),
    )
    for pattern, replacement in conversions:
        text = re.sub(pattern, replacement, text)
    return text
def download_file(from_url, to_filename=None,
chunk_size=1024 * 8, retry_count=3):
"""Download URL to a file."""
if not to_filename:
to_filename = get_temporary_file()
session = requests.Session()
adapter = requests.adapters.HTTPAdapter(max_retries=retry_count)
session.mou... |
def run_shell_command(commands, **kwargs):
    """Run a shell command.

    :param commands: the command and its arguments (list or string).
    :param kwargs: extra keyword arguments forwarded to ``subprocess.Popen``.
    :returns: a ``(returncode, stdout, stderr)`` tuple.
    """
    process = subprocess.Popen(commands,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE,
                               **kwargs)
    out, err = process.communicate()
    return process.returncode, out, err
def create_logger(name,
filename=None,
logging_level=logging.DEBUG):
"""Create a logger object."""
logger = logging.getLogger(name)
formatter = logging.Formatter(('%(asctime)s - %(name)s - '
'%(levelname)-8s - %(message)s'))
if file... |
def unzip(zipped_file, output_directory=None,
prefix="harvestingkit_unzip_", suffix=""):
"""Uncompress a zipped file from given filepath to an (optional) location.
If no location is given, a temporary folder will be generated inside
CFG_TMPDIR, prefixed with "apsharvest_unzip_".
"""
if no... |
def _do_unzip(zipped_file, output_directory):
"""Perform the actual uncompression."""
z = zipfile.ZipFile(zipped_file)
for path in z.namelist():
relative_path = os.path.join(output_directory, path)
dirname, dummy = os.path.split(relative_path)
try:
if relative_path.endswi... |
def locate(pattern, root=os.curdir):
    """Yield every file under *root* whose name matches *pattern*.

    The search is recursive and the yielded paths are absolute
    (built from the absolute form of *root*).
    """
    top = os.path.abspath(root)
    for dirpath, _dirnames, filenames in os.walk(top):
        for match in fnmatch.filter(filenames, pattern):
            yield os.path.join(dirpath, match)
def punctuate_authorname(an):
"""Punctuate author names properly.
Expects input in the form 'Bloggs, J K' and will return 'Bloggs, J. K.'.
"""
name = an.strip()
parts = [x for x in name.split(',') if x != '']
ret_str = ''
for idx, part in enumerate(parts):
subparts = part.strip().sp... |
def convert_date_to_iso(value):
"""Convert a date-value to the ISO date standard."""
date_formats = ["%d %b %Y", "%Y/%m/%d"]
for dformat in date_formats:
try:
date = datetime.strptime(value, dformat)
return date.strftime("%Y-%m-%d")
except ValueError:
pass... |
def convert_date_from_iso_to_human(value):
"""Convert a date-value to the ISO date standard for humans."""
try:
year, month, day = value.split("-")
except ValueError:
# Not separated by "-". Space?
try:
year, month, day = value.split(" ")
except ValueError:
... |
def convert_images(image_list):
"""Convert list of images to PNG format.
@param: image_list ([string, string, ...]): the list of image files
extracted from the tarball in step 1
@return: image_list ([str, str, ...]): The list of image files when all
have been converted to PNG format.
"... |
def get_temporary_file(prefix="tmp_",
suffix="",
directory=None):
"""Generate a safe and closed filepath."""
try:
file_fd, filepath = mkstemp(prefix=prefix,
suffix=suffix,
dir=directory)... |
def return_letters_from_string(text):
    """Return only the alphabetic characters of *text*, in order.

    :param text: any string.
    :returns: a string containing the letters of *text* with every
        non-alphabetic character removed.
    """
    # str.join over a generator replaces the original quadratic
    # character-by-character ``+=`` string build.
    return "".join(ch for ch in text if ch.isalpha())
def license_is_oa(license):
    """Return True if license is compatible with Open Access.

    :param license: the license string to check (note: the parameter
        shadows the ``license`` builtin; kept for API compatibility).
    :returns: True if any pattern in ``OA_LICENSES`` matches, else False.
    """
    # any() expresses the search directly instead of a manual
    # loop-and-flag; semantics are unchanged.
    return any(re.search(oal, license) for oal in OA_LICENSES)
def _extract_package(self):
"""
Extract a package in a new temporary directory.
"""
self.path = mkdtemp(prefix="scoap3_package_", dir=CFG_TMPSHAREDDIR)
self.logger.debug("Extracting package: %s" % (self.package_name,))
scoap3utils_extract_package(self.package_name, self.p... |
def _crawl_elsevier_and_find_main_xml(self):
"""
A package contains several subdirectory corresponding to each article.
An article is actually identified by the existence of a main.pdf and
a main.xml in a given directory.
"""
self.found_articles = []
if not self.p... |
def _crawl_elsevier_and_find_issue_xml(self):
"""
Information about the current volume, issue, etc. is available
in a file called issue.xml that is available in a higher directory.
"""
self._found_issues = []
if not self.path and not self.package_name:
for iss... |
def _normalize_issue_dir_with_dtd(self, path):
"""
issue.xml from Elsevier assume the existence of a local DTD.
This procedure install the DTDs next to the issue.xml file
and normalize it using xmllint in order to resolve all namespaces
and references.
"""
if exis... |
def _normalize_article_dir_with_dtd(self, path):
"""
main.xml from Elsevier assume the existence of a local DTD.
This procedure install the DTDs next to the main.xml file
and normalize it using xmllint in order to resolve all namespaces
and references.
"""
if exis... |
def get_publication_date(self, xml_doc):
"""Return the best effort start_date."""
start_date = get_value_in_tag(xml_doc, "prism:coverDate")
if not start_date:
start_date = get_value_in_tag(xml_doc, "prism:coverDisplayDate")
if not start_date:
start_date = ... |
def get_record(self, path=None, no_pdf=False,
test=False, refextract_callback=None):
"""Convert a record to MARCXML format.
:param path: path to a record.
:type path: string
:param test: flag to determine if it is a test call.
:type test: bool
:param r... |
def extract_oembeds(text, args=None):
"""
Extract oembed resources from a block of text. Returns a list
of dictionaries.
Max width & height can be specified:
{% for embed in block_of_text|extract_oembeds:"400x300" %}
Resource type can be specified:
{% for photo_embed in block_of_text|extr... |
def strip_oembeds(text, args=None):
"""
Take a block of text and strip all the embeds from it, optionally taking
a maxwidth, maxheight / resource_type
Usage:
{{ post.content|strip_embeds }}
{{ post.content|strip_embeds:"600x600xphoto" }}
{{ post.content|strip_embeds:"video" }}... |
def do_oembed(parser, token):
"""
A node which parses everything between its two nodes, and replaces any links
with OEmbed-provided objects, if possible.
Supports two optional argument, which is the maximum width and height,
specified like so:
{% oembed 640x480 %}http://www.viddler.com/explore... |
def do_autodiscover(parser, token):
"""
Generates a <link> tag with oembed autodiscovery bits for an object.
{% oembed_autodiscover video %}
"""
args = token.split_contents()
if len(args) != 2:
raise template.TemplateSyntaxError('%s takes an object as its parameter.' % args[0])
... |
def do_url_scheme(parser, token):
    """
    Generates a <link> tag with oembed autodiscovery bits.
    {% oembed_url_scheme %}
    """
    pieces = token.split_contents()
    # The tag accepts no arguments: the split must contain only its name.
    if len(pieces) != 1:
        raise template.TemplateSyntaxError('%s takes no parameters.' % pieces[0])
    return OEmbedURLSchemeNode()
def exit(mod_name=""):
"""A stand-in for the normal sys.exit()
all the magic happens here, when this is called at the end of a script it will
figure out all the available commands and arguments that can be passed in,
then handle exiting the script and returning the status code.
:Example:
... |
def parser(self):
"""return the parser for the current name"""
module = self.module
subcommands = self.subcommands
if subcommands:
module_desc = inspect.getdoc(module)
parser = Parser(description=module_desc, module=module)
subparsers = parser.add_sub... |
def module(self):
"""load the module so we can actually run the script's function"""
# we have to guard this value because:
# https://thingspython.wordpress.com/2010/09/27/another-super-wrinkle-raising-typeerror/
if not hasattr(self, '_module'):
if "__main__" in sys.modules:
... |
def body(self):
    """get the contents of the script"""
    # EAFP caching: fetch the memoized source, computing it on the
    # first AttributeError only.
    try:
        return self._body
    except AttributeError:
        self._body = inspect.getsource(self.module)
        return self._body
def run(self, raw_args):
"""parse and import the script, and then run the script's main function"""
parser = self.parser
args, kwargs = parser.parse_callback_args(raw_args)
callback = kwargs.pop("main_callback")
if parser.has_injected_quiet():
levels = kwargs.pop("qu... |
def call_path(self, basepath):
"""return that path to be able to call this script from the passed in
basename
example --
basepath = /foo/bar
self.path = /foo/bar/che/baz.py
self.call_path(basepath) # che/baz.py
basepath -- string -- the directory yo... |
def parse(self):
"""load the script and set the parser and argument info
I feel that this is way too brittle to be used long term, I think it just
might be best to import the stupid module, the thing I don't like about that
is then we import basically everything, which seems bad?
... |
def can_run_from_cli(self):
"""return True if this script can be run from the command line"""
ret = False
ast_tree = ast.parse(self.body, self.path)
calls = self._find_calls(ast_tree, __name__, "exit")
for call in calls:
if re.search("{}\(".format(re.escape(call)), se... |
def register_field(cls, field):
"""
Handles registering the fields with the FieldRegistry and creating a
post-save signal for the model.
"""
FieldRegistry.add_field(cls, field)
signals.post_save.connect(handle_save_embeds, sender=cls,
dispatch_uid='%s.%s.%s' % \
(cl... |
def contribute_to_class(self, cls, name):
"""
I need a way to ensure that this signal gets created for all child
models, and since model inheritance doesn't have a 'contrubite_to_class'
style hook, I am creating a fake virtual field which will be added to
all subclasses and handl... |
def render_oembed(self, oembed_resource, original_url, template_dir=None,
context=None):
"""
Render the oembed resource and return as a string.
Template directory will always fall back to 'oembed/[type].html', but
a custom template dir can be passed in usin... |
def parse(self, text, maxwidth=None, maxheight=None, template_dir=None,
context=None, urlize_all_links=CONSUMER_URLIZE_ALL):
"""
Scans a block of text, replacing anything matching a provider pattern
with an OEmbed html snippet, if possible.
Templates should be stor... |
def size_to_nearest(width=None, height=None, allowed_sizes=OEMBED_ALLOWED_SIZES,
force_fit=False):
"""
Generate some dimensions for resizing an object. This function DOES NOT handle
scaling, it simply calculates maximums. These values should then be passed to
the resize() method wh... |
def fetch_url(url, method='GET', user_agent='django-oembed', timeout=SOCKET_TIMEOUT):
"""
Fetch response headers and data from a URL, raising a generic exception
for any kind of failure.
"""
sock = httplib2.Http(timeout=timeout)
request_headers = {
'User-Agent': user_agent,
'Acce... |
def relative_to_full(url, example_url):
"""
Given a url which may or may not be a relative url, convert it to a full
url path given another full url as an example
"""
if re.match('https?:\/\/', url):
return url
domain = get_domain(example_url)
if domain:
return '%s%s' % (doma... |
def mock_request():
    """
    Generate a fake request object to allow oEmbeds to use context processors.
    """
    site = Site.objects.get_current()
    fake_request = HttpRequest()
    # Context processors only need a server name; borrow the current
    # site's domain for it.
    fake_request.META['SERVER_NAME'] = site.domain
    return fake_request
def load_class(path):
    """
    dynamically load a class given a string of the format
    package.Class
    """
    # Split on the final dot only: everything before it is the module
    # path, everything after is the attribute to fetch.
    module_path, class_name = path.rsplit('.', 1)
    return getattr(import_module(module_path), class_name)
def cleaned_sites():
"""
Create a list of tuples mapping domains from the sites table to their
site name. The domains will be cleaned into regexes that may be
more permissive than the site domain is in the db.
[(domain_regex, domain_name, domain_string), ...]
"""
mappings = {}
for ... |
def get_record(self, fileName, ref_extract_callback=None):
"""
Gets the Marc xml of the files in xaml_jp directory
:param fileName: the name of the file to parse.
:type fileName: string
:param refextract_callback: callback to be used to extract
... |
def get_record_rich(self, filename, ref_extract_callback=None):
"""
Gets the Marc xml of the files in xaml_rich directory
:param fileName: the name of the file to parse.
:type fileName: string
:returns: a string with the marc xml version of the file.
"""
self.do... |
def get_record(self):
"""Override the base get_record."""
self.update_system_numbers()
self.add_systemnumber("CDS")
self.fields_list = [
"024", "041", "035", "037", "088", "100",
"110", "111", "242", "245", "246", "260",
"269", "300", "502", "650", "65... |
def determine_collections(self):
"""Try to determine which collections this record should belong to."""
for value in record_get_field_values(self.record, '980', code='a'):
if 'NOTE' in value.upper():
self.collections.add('NOTE')
if 'THESIS' in value.upper():
... |
def is_published(self):
"""Check fields 980 and 773 to see if the record has already been published.
:return: True is published, else False
"""
field980 = record_get_field_instances(self.record, '980')
field773 = record_get_field_instances(self.record, '773')
for f980 in... |
def add_cms_link(self):
"""Special handling if record is a CMS NOTE."""
intnote = record_get_field_values(self.record, '690',
filter_subfield_code="a",
filter_subfield_value='INTNOTE')
if intnote:
val... |
def update_system_numbers(self):
"""035 Externals."""
scn_035_fields = record_get_field_instances(self.record, '035')
forbidden_values = ["cercer",
"inspire",
"xx",
"cern annual report",
... |
def update_reportnumbers(self):
"""Handle reportnumbers. """
rep_088_fields = record_get_field_instances(self.record, '088')
for field in rep_088_fields:
subs = field_get_subfields(field)
if '9' in subs:
for val in subs['9']:
if val.sta... |
def update_date(self):
"""269 Date normalization."""
for field in record_get_field_instances(self.record, '269'):
for idx, (key, value) in enumerate(field[0]):
if key == "c":
field[0][idx] = ("c", convert_date_to_iso(value))
record_dele... |
def update_pagenumber(self):
"""300 page number."""
for field in record_get_field_instances(self.record, '300'):
for idx, (key, value) in enumerate(field[0]):
if key == 'a':
if "mult." not in value and value != " p":
field[0][idx] =... |
def update_authors(self):
"""100 & 700 punctuate author names."""
author_names = record_get_field_instances(self.record, '100')
author_names.extend(record_get_field_instances(self.record, '700'))
for field in author_names:
subs = field_get_subfields(field)
if 'i' ... |
def update_thesis_supervisors(self):
    """700 -> 701 Thesis supervisors."""
    # Copy every 700 field's subfields into a new 701 field, then drop
    # all the original 700 fields in one pass.
    fields_700 = record_get_field_instances(self.record, '700')
    for field in fields_700:
        record_add_field(self.record, '701', subfields=field[0])
    record_delete_fields(self.record, '700')
def update_thesis_information(self):
"""501 degree info - move subfields."""
fields_501 = record_get_field_instances(self.record, '502')
for idx, field in enumerate(fields_501):
new_subs = []
for key, value in field[0]:
if key == 'a':
n... |
def update_keywords(self):
"""653 Free Keywords."""
for field in record_get_field_instances(self.record, '653', ind1='1'):
subs = field_get_subfields(field)
new_subs = []
if 'a' in subs:
for val in subs['a']:
new_subs.extend([('9', ... |
def update_experiments(self):
"""Experiment mapping."""
# 693 Remove if 'not applicable'
for field in record_get_field_instances(self.record, '693'):
subs = field_get_subfields(field)
all_subs = subs.get('a', []) + subs.get('e', [])
if 'not applicable' in [x.l... |
def update_collaboration(self):
"""710 Collaboration."""
for field in record_get_field_instances(self.record, '710'):
subs = field_get_subfield_instances(field)
for idx, (key, value) in enumerate(subs[:]):
if key == '5':
subs.pop(idx)
... |
def update_journals(self):
"""773 journal translations."""
for field in record_get_field_instances(self.record, '773'):
subs = field_get_subfield_instances(field)
new_subs = []
for idx, (key, value) in enumerate(subs):
if key == 'p':
... |
def update_links_and_ffts(self):
"""FFT (856) Dealing with graphs."""
figure_counter = 0
for field in record_get_field_instances(self.record,
tag='856',
ind1='4'):
subs = field_get_subfiel... |
def _extract_packages(self):
"""
Extract a package in a new directory.
"""
self.path_unpacked = []
if not hasattr(self, "retrieved_packages_unpacked"):
self.retrieved_packages_unpacked = [self.package_name]
for path in self.retrieved_packages_unpacked:
... |
def _crawl_springer_and_find_main_xml(self):
"""
A package contains several subdirectory corresponding to each article.
An article is actually identified by the existence of a main.pdf and
a main.xml in a given directory.
"""
self.found_articles = []
def visit(ar... |
def _normalize_article_dir_with_dtd(self, path):
"""
TODO: main.xml from Springer assume the existence of a local DTD.
This procedure install the DTDs next to the main.xml file
and normalize it using xmllint in order to resolve all namespaces
and references.
"""
f... |
def add_all_filters(pelican):
"""Add (register) all filters to Pelican."""
pelican.env.filters.update({'datetime': filters.datetime})
pelican.env.filters.update({'article_date': filters.article_date})
pelican.env.filters.update({'breaking_spaces': filters.breaking_spaces})
pelican.env.filters.update... |
def create_field(subfields=None, ind1=' ', ind2=' ', controlfield_value='',
global_position=-1):
"""
Return a field created with the provided elements.
Global position is set arbitrary to -1.
"""
if subfields is None:
subfields = []
ind1, ind2 = _wash_indicators(ind1, ... |
def create_records(marcxml, verbose=CFG_BIBRECORD_DEFAULT_VERBOSE_LEVEL,
correct=CFG_BIBRECORD_DEFAULT_CORRECT, parser='',
keep_singletons=CFG_BIBRECORD_KEEP_SINGLETONS):
"""
Create a list of records from the marcxml description.
:returns: a list of objects initiated b... |
def create_record(marcxml=None, verbose=CFG_BIBRECORD_DEFAULT_VERBOSE_LEVEL,
correct=CFG_BIBRECORD_DEFAULT_CORRECT, parser='',
sort_fields_by_indicators=False,
keep_singletons=CFG_BIBRECORD_KEEP_SINGLETONS):
"""Create a record object from the marcxml description... |
def filter_field_instances(field_instances, filter_subcode, filter_value,
filter_mode='e'):
"""Filter the given field.
Filters given field and returns only that field instances that contain
filter_subcode with given filter_value. As an input for search function
accepts output... |
def record_drop_duplicate_fields(record):
"""
Return a record where all the duplicate fields have been removed.
Fields are considered identical considering also the order of their
subfields.
"""
out = {}
position = 0
tags = sorted(record.keys())
for tag in tags:
fields = rec... |
def records_identical(rec1, rec2, skip_005=True, ignore_field_order=False,
ignore_subfield_order=False,
ignore_duplicate_subfields=False,
ignore_duplicate_controlfields=False):
"""
Return True if rec1 is identical to rec2.
It does so regardl... |
def record_get_field_instances(rec, tag="", ind1=" ", ind2=" "):
"""
Return the list of field instances for the specified tag and indications.
Return empty list if not found.
If tag is empty string, returns all fields
Parameters (tag, ind1, ind2) can contain wildcard %.
:param rec: a record s... |
def record_add_field(rec, tag, ind1=' ', ind2=' ', controlfield_value='',
subfields=None, field_position_global=None,
field_position_local=None):
"""
Add a new field into the record.
If field_position_global or field_position_local is specified then
this method... |
def record_delete_field(rec, tag, ind1=' ', ind2=' ',
field_position_global=None, field_position_local=None):
"""
Delete the field with the given position.
If global field position is specified, deletes the field with the
corresponding global field position.
If field_positio... |
def record_delete_fields(rec, tag, field_positions_local=None):
"""
Delete all/some fields defined with MARC tag 'tag' from record 'rec'.
:param rec: a record structure.
:type rec: tuple
:param tag: three letter field.
:type tag: string
:param field_position_local: if set, it is the list of... |
def record_add_fields(rec, tag, fields, field_position_local=None,
field_position_global=None):
"""
Add the fields into the record at the required position.
The position is specified by the tag and the field_position_local in the
list of fields.
:param rec: a record structure... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.