docstring stringlengths 52 499 | function stringlengths 67 35.2k | __index_level_0__ int64 52.6k 1.16M |
|---|---|---|
Returns the same date 1 year ago.
Args:
date (datetime or datetime.date)
Returns:
(datetime or datetime.date)
Raises:
def last_year(date_):
    """Return the same date one year earlier.

    Feb 29 maps to Feb 28 of the previous (non-leap) year.

    Args:
        date_ (datetime.datetime or datetime.date)

    Returns:
        Same type as ``date_`` (datetime stays datetime, date stays date),
        shifted one year back.

    Raises:
        -
    """
    day = 28 if date_.day == 29 and date_.month == 2 else date_.day
    # replace() keeps the concrete input type, so a datetime keeps its
    # time component instead of being collapsed to a bare date.
    return date_.replace(year=date_.year - 1, day=day)
Turns a string into a datetime.time object. This will only work if the
format can be "guessed", so the string must have one of the formats from
VALID_TIME_FORMATS_TEXT.
Args:
time_str (str) a string that represents a date
Returns:
datetime.time object
Raises:
ValueError if ... | def timestr2time(time_str):
if any(c not in '0123456789:' for c in time_str):
raise ValueError('Illegal character in time string')
if time_str.count(':') == 2:
h, m, s = time_str.split(':')
elif time_str.count(':') == 1:
h, m = time_str.split(':')
s = '00'
elif len(t... | 1,126,088 |
Turns a datetime.time object into a string. The string must have one of the
formats from VALID_TIME_FORMATS_TEXT to make it compatible with
timestr2time.
Args:
time (datetime.time) the time to be translated
fmt (str) a format string.
Returns:
(str) that represents a time.
R... | def time2timestr(time, fmt='hhmmss'):
if fmt.count(':') == 2:
if not fmt.index('h') < fmt.index('m') < fmt.index('s'):
raise ValueError('Invalid format string. {}'.format(
VALID_TIME_FORMATS_TEXT))
h, m, s = fmt.split(':')
elif fmt.count(':') == 1:
if... | 1,126,089 |
Find all PDF files in the specified directory.
Args:
source_directory (str): The source directory.
Returns:
list(str): Filepaths to all PDF files in the specified directory.
Raises:
ValueError | def _get_pdf_filenames_at(source_directory):
if not os.path.isdir(source_directory):
raise ValueError("%s is not a directory!" % source_directory)
return [os.path.join(source_directory, filename)
for filename in os.listdir(source_directory)
if filename.endswith(PDF_EXTENSION... | 1,126,418 |
Compress a single PDF file.
Args:
filepath (str): Path to the PDF file.
output_path (str): Output path.
ghostscript_binary (str): Name/alias of the Ghostscript binary.
Raises:
ValueError
FileNotFoundError | def compress_pdf(filepath, output_path, ghostscript_binary):
if not filepath.endswith(PDF_EXTENSION):
raise ValueError("Filename must end with .pdf!\n%s does not." % filepath)
try:
file_size = os.stat(filepath).st_size
if file_size < FILE_SIZE_LOWER_LIMIT:
LOGGER.info(NO... | 1,126,419 |
Compress all PDF files in the current directory and place the output in the
given output directory. This is a generator function that first yields the amount
of files to be compressed, and then yields the output path of each file.
Args:
source_directory (str): Filepath to the source directory.
... | def compress_multiple_pdfs(source_directory, output_directory, ghostscript_binary):
source_paths = _get_pdf_filenames_at(source_directory)
yield len(source_paths)
for source_path in source_paths:
output = os.path.join(output_directory, os.path.basename(source_path))
compress_pdf(source_... | 1,126,420 |
Returns a triplestore connection
args:
attr_name: The name the connection will be assigned in the
config manager
params: The parameters of the connection
kwargs:
log_level: logging level to use | def make_tstore_conn(params, **kwargs):
log.setLevel(params.get('log_level', __LOG_LEVEL__))
log.debug("\n%s", params)
params.update(kwargs)
try:
vendor = RdfwConnections['triplestore'][params.get('vendor')]
except KeyError:
vendor = RdfwConnections['triplestore']['blazegraph']
... | 1,126,895 |
returns the specified connection
args:
conn_name: the name of the connection | def get(self, conn_name, default=None, **kwargs):
if isinstance(conn_name, RdfwConnections):
return conn_name
try:
return self.conns[conn_name]
except KeyError:
if default:
return self.get(default, **kwargs)
raise LookupEr... | 1,126,903 |
Takes a list of connections and sets them in the manager
args:
conn_list: list of connection definitions | def load(self, conn_list, **kwargs):
for conn in conn_list:
conn['delay_check'] = kwargs.get('delay_check', False)
self.set_conn(**conn)
if kwargs.get('delay_check'):
test = self.wait_for_conns(**kwargs)
if not test:
log.critical("... | 1,126,904 |
delays until all connections are working
args:
timeout: number of seconds to keep trying to connect. Error out when
timeout is reached
start_delay: number of seconds to wait before checking status
interval: number of seconds to wait betwee... | def wait_for_conns(self, timeout=60, start_delay=0, interval=5, **kwargs):
log.setLevel(kwargs.get('log_level',self.log_level))
timestamp = time.time()
last_check = time.time() + start_delay - interval
last_delay_notification = time.time() - interval
timeout += 1
... | 1,126,906 |
generate datasets list to activate
args:
settings: dictionary
from settings file
argv: list
from sys.argv | def generate_datasets_list(settings, argv):
datasets_string_list = settings["DATASETS_LIST"]
datasets_list = []
if len(argv) == 2:
try:
datasets_items = datasets_string_list.iteritems()
except AttributeError:
datasets_items = datasets_string_list.items()
... | 1,127,060 |
Return source of the `link` whether it is filename or url.
Args:
link (str): Filename or URL.
Returns:
str: Content.
Raises:
def _get_source(link):
    """Return the content of `link`, whether it is a filename or a URL.

    Args:
        link (str): Filename or URL.

    Returns:
        str: Content.

    Raises:
        UserWarning: When the `link` couldn't be resolved.
    """
    if link.startswith(("http://", "https://")):
        return httpkie.Downloader().download(link)
    if not os.path.exists(link):
        raise UserWarning("html: '%s' is neither URL or data!" % link)
    with open(link) as source_file:
        return source_file.read()
Given a string containing a xx:xx:xx:xx:xx:xx address, return as a byte sequence.
Args:
addr (str): Bluetooth address in xx:xx:xx:xx:xx:xx format.
reverse (bool): True if the byte ordering should be reversed in the output.
Returns:
A bytearray containing the converted address. | def fmt_addr_raw(addr, reverse=True):
addr = addr.replace(':', '')
raw_addr = [int(addr[i:i+2], 16) for i in range(0, len(addr), 2)]
if reverse:
raw_addr.reverse()
# for Python 2, this needs to be a string instead of a bytearray
if sys.version_info[0] == 2:
return str(bytearray... | 1,127,598 |
Return a pretty-printed (hex style) version of a binary string.
Args:
raw (bytes): any sequence of bytes
reverse (bool): True if output should be in reverse order.
Returns:
def pp_hex(raw, reverse=True):
    """Return a pretty-printed (hex style) version of a binary string.

    Args:
        raw (bytes): Any sequence of bytes.
        reverse (bool): True if output should be in reverse byte order.

    Returns:
        str: Hex string corresponding to the input byte sequence.
    """
    octets = ['{:02x}'.format(value) for value in bytearray(raw)]
    if reverse:
        octets.reverse()
    return ''.join(octets)
Check whether a specified user is able to do a specified workshift.
Parameters:
workshift_profile is the workshift profile for a user
shift is a weekly recurring workshift
Returns:
True if the user has enough free time between the shift's start time
and end time to do the shi... | def is_available(workshift_profile, shift):
if shift.week_long:
return True
start_time = (
shift.start_time
if shift.start_time is not None
else time(hour=0)
)
end_time = (
shift.end_time
if shift.end_time is not None
else time(hour=23, minut... | 1,127,614 |
Validate timestamp specified by request.
See `validate.request` for additional info.
Args:
stamp: str. Time request was made as ISO 8601 timestamp.
tolerance: int. Number of seconds request remains valid from timestamp.
Returns
bool: True if valid, False otherwise. | def timestamp(stamp, tolerance=150):
try:
tolerance = datetime.timedelta(0, tolerance)
timestamp_low = dateutil.parser.parse(stamp)
timestamp_high = timestamp_low + tolerance
now = datetime.datetime.now(timestamp_low.tzinfo)
except ValueError:
return False
retur... | 1,127,740 |
Validate URL specified by SignatureCertChainUrl.
See `validate.request` for additional info.
Args:
url: str. SignatureCertChainUrl header value sent by request.
Returns:
bool: True if valid, False otherwise. | def signature_cert_chain_url(url):
r = urlparse(url)
if not r.scheme.lower() == 'https':
warnings.warn('Certificate URL scheme is invalid.')
return False
if not r.hostname.lower() == 's3.amazonaws.com':
warnings.warn('Certificate URL hostname is invalid.')
return False
... | 1,127,741 |
Retrieve and parse PEM-encoded X.509 certificate chain.
See `validate.request` for additional info.
Args:
url: str. SignatureCertChainUrl header value sent by request.
Returns:
list or bool: If url is valid, returns the certificate chain as a list
of cryptography.hazmat.backen... | def retrieve(url):
try:
pem_data = urlopen(url).read()
except (ValueError, HTTPError):
warnings.warn('Certificate URL is invalid.')
return False
if sys.version >= '3':
try:
pem_data = pem_data.decode()
except(UnicodeDecodeError):
warnings... | 1,127,742 |
Parse PEM-encoded X.509 certificate chain.
Args:
pem_data: str. PEM file retrieved from SignatureCertChainUrl.
Returns:
list or bool: If url is valid, returns the certificate chain as a list
of cryptography.hazmat.backends.openssl.x509._Certificate
certificates where ce... | def _parse_pem_data(pem_data):
sep = '-----BEGIN CERTIFICATE-----'
cert_chain = [six.b(sep + s) for s in pem_data.split(sep)[1:]]
certs = []
load_cert = x509.load_pem_x509_certificate
for cert in cert_chain:
try:
certs.append(load_cert(cert, default_backend()))
excep... | 1,127,743 |
Validate PEM-encoded X.509 certificate chain.
See `validate.request` for additional info.
Args:
certs: list. The certificate chain as a list of
cryptography.hazmat.backends.openssl.x509._Certificate certificates.
See `validate.retrieve` to create certs obj.
Returns:
... | def cert_chain(certs):
if len(certs) < 2:
warnings.warn('Certificate chain contains < 3 certificates.')
return False
cert = certs[0]
today = datetime.datetime.today()
if not today > cert.not_valid_before:
warnings.warn('Certificate Not Before date is invalid.')
retu... | 1,127,744 |
Validate data request signature.
See `validate.request` for additional info.
Args:
cert: cryptography.hazmat.backends.openssl.x509._Certificate. The Amazon
signing certificate.
sig: str. Signature header value sent by request.
body: str. HTTPS request body.
Returns:
... | def signature(cert, sig, body):
body = six.b(body)
sig = base64.decodestring(sig)
padder = padding.PKCS1v15()
public_key = cert.public_key()
try:
public_key.verify(sig, body, padder, hashes.SHA1())
return True
except InvalidSignature:
warnings.warn('Signature verifi... | 1,127,745 |
Validate request application id matches true application id.
Verifying the Application ID matches: https://goo.gl/qAdqe4.
Args:
app_id: str. Request application_id.
Returns:
def application_id(self, app_id):
    """Validate that the request application id matches the true one.

    Verifying the Application ID matches: https://goo.gl/qAdqe4.

    Args:
        app_id: str. Request application_id.

    Returns:
        bool: True if valid, False otherwise.
    """
    if self.app_id == app_id:
        return True
    warnings.warn('Application ID is invalid.')
    return False
Shortcut method to return ``Some`` or :py:data:`NONE` based on ``val``.
Args:
val: Some value.
Returns:
``Some(val)`` if the ``val`` is not None, otherwise :py:data:`NONE`.
Examples:
>>> Option.maybe(0)
Some(0)
def maybe(cls, val: Optional[T]) -> 'Option[T]':
    """Shortcut returning ``Some`` or :py:data:`NONE` based on ``val``.

    Args:
        val: Some value.

    Returns:
        ``Some(val)`` if ``val`` is not None, otherwise :py:data:`NONE`.

    Examples:
        >>> Option.maybe(0)
        Some(0)
        >>> Option.maybe(None)
        NONE
    """
    if val is None:
        return cast('Option[T]', NONE)
    return cls.Some(val)
Returns the contained value or computes it from ``callback``.
Args:
callback: The the default callback.
Returns:
The contained value if the :py:class:`Option` is ``Some``,
otherwise ``callback()``.
Examples:
def unwrap_or_else(self, callback: Callable[[], U]) -> Union[T, U]:
    """Return the contained value or compute one from ``callback``.

    Args:
        callback: The default callback.

    Returns:
        The contained value if the :py:class:`Option` is ``Some``,
        otherwise the result of ``callback()``.
    """
    if self._is_some:
        return self._val
    return callback()
Applies the ``callback`` with the contained value as its argument or
returns :py:data:`NONE`.
Args:
callback: The callback to apply to the contained value.
Returns:
The ``callback`` result wrapped in an :class:`Option` if the
def map(self, callback: Callable[[T], U]) -> 'Option[U]':
    """Apply ``callback`` to the contained value, or return :py:data:`NONE`.

    Args:
        callback: The callback to apply to the contained value.

    Returns:
        The ``callback`` result wrapped in an :class:`Option` if the
        contained value is ``Some``, otherwise :py:data:`NONE`.
    """
    if not self._is_some:
        return cast('Option[U]', NONE)
    return self._type.Some(callback(self._val))
This function will add a file handler to a log with the provided level.
Args:
lvl (int): The severity level of messages printed to the file with
the file handler, default = 1. | def addFileHandler(self,filename='', dr='',lvl=1):
fname = self.name
if filename != '':
fname = filename
if '.' not in fname:
fname+='.log'
fh = logging.FileHandler(os.path.join(dr,fname))
fh.setLevel(lvl)
frmtString = '%(asctime)s - %(nam... | 1,128,059 |
This function will add a stream handler to a log with the provided level.
Args:
lvl (int): The severity level of messages printed to the screen with
the stream handler, default = 20. | def addStreamHandler(self,lvl=20):
sh = logging.StreamHandler(sys.stdout)
sh.setLevel(lvl)
sFrmt = logging.Formatter('%(message)s')
if False:
#Another format example
sFrmt = logging.Formatter('%(name)s - %(levelname)s - %(message)s')
sh.setFormatt... | 1,128,060 |
Log all Key=value for every key in a dictionary.
Args:
def logDict(self,d):
    """Log ``key = value`` for every key in a dictionary.

    The formatted listing is sent to the file handlers only.

    Args:
        d (dictionary): A standard python dictionary.
    """
    divider = "-" * 78
    body = "\n" + divider + "\n" + " " * 20 + "dictionary provided contains:\n" + divider + "\n"
    for key in sorted(d):
        body += key + " = " + repr(d[key]) + "\n"
    self.fileonly(body + divider + "\n")
Creates a new object MFTHeader from a binary stream. The binary
stream can be represented by a byte string, bytearray or a memoryview of the
bytearray.
Args:
binary_view (memoryview of bytearray) - A binary stream with the
information of the attribute
Return... | def create_from_binary(cls, ignore_signature_check, binary_view):
sig, fx_offset, fx_count, lsn, seq_number, hard_link_count, first_attr_offset, \
usage_flags, entry_len, alloc_len, base_record, next_attr_id, record_n = \
cls._REPR.unpack(binary_view[:cls._REPR.size])
baad ... | 1,128,358 |
Loads all the attributes of an entry.
Once executed, all the attributes should have been loaded in the
attribute *attrs* instance attribute.
Args:
mft_config (:obj:`MFTConfig`) - An instance of MFTConfig, as this tells
how the library will interpret data.
... | def _load_attributes(self, mft_config, attrs_view):
offset = 0
load_attrs = mft_config.attribute_load_list
while (attrs_view[offset:offset+4] != b'\xff\xff\xff\xff'):
attr_type, attr_len, non_resident = _get_attr_info(attrs_view[offset:])
if attr_type in load_at... | 1,128,371 |
Merge two entries.
Allow the merging of two MFTEntries copying the attributes to the correct
place and the datastreams.
Args:
source_entry (:obj:`MFTEntry`) - Source entry where the data will be
copied from | def merge_entries(self, source_entry):
#TODO should we change this to an overloaded iadd?
#TODO I really don't like this. We are spending cycles to load things that are going to be discarted. Check another way.
#copy the attributes
for list_attr in source_entry.attrs.values():
... | 1,128,372 |
Loads the new excel format files. Old format files will automatically get loaded as well.
Args:
file_name: The name of the local file, or the holder for the
extension type when the file_contents are supplied.
file_contents: The file-like object holding contents of file_name.
def get_data_xlsx(file_name, file_contents=None, on_demand=False):
    """Load new excel format (xlsx) files; old format files load as well.

    Args:
        file_name: The name of the local file, or the holder for the
            extension type when the file_contents are supplied.
        file_contents: The file-like object holding contents of file_name.
        on_demand: Whether loading should be deferred where possible.

    Returns:
        Whatever ``get_data_xls`` returns for the same arguments.
    """
    # The xls loader transparently handles both old and new formats,
    # so simply delegate to it.
    return get_data_xls(
        file_name,
        file_contents=file_contents,
        on_demand=on_demand,
    )
Loads the old excel format files. New format files will automatically
get loaded as well.
Args:
file_name: The name of the local file, or the holder for the
extension type when the file_contents are supplied.
file_contents: The file-like object holding contents of file_name.
... | def get_data_xls(file_name, file_contents=None, on_demand=False):
def tuple_to_iso_date(tuple_date):
(y,m,d, hh,mm,ss) = tuple_date
non_zero = lambda n: n!=0
date = "%04d-%02d-%02d" % (y,m,d) if list(filter(non_zero, (y,m,d))) else ''
time = "T%02d:%02... | 1,128,426 |
Loads xml excel format files.
Args:
file_name: The name of the local file, or the holder for the
extension type when the file_contents are supplied.
file_contents: The file-like object holding contents of file_name.
If left as None, then file_name is directly loaded.
... | def get_data_excel_xml(file_name, file_contents=None, on_demand=False):
# NOTE this method is inefficient and uses code that's not of the highest quality
if file_contents:
xml_file = BytesIO(file_contents)
else:
xml_file = file_name
book = xmlparse.ParseExcelXMLFile(xml_file)
ro... | 1,128,427 |
Gets good old csv data from a file.
Args:
file_name: The name of the local file, or the holder for the
extension type when the file_contents are supplied.
encoding: Loads the file with the specified cell encoding.
file_contents: The file-like object holding contents of file_name... | def get_data_csv(file_name, encoding='utf-8', file_contents=None, on_demand=False):
def yield_csv(csv_contents, csv_file):
try:
for line in csv_contents:
yield line
finally:
try:
csv_file.close()
except:
pass
... | 1,128,428 |
Writes 2D tables to file.
Args:
data: 2D list of tables/worksheets.
file_name: Name of the output file (determines type).
worksheet_names: A list of worksheet names (optional). | def write(data, file_name, worksheet_names=None):
if re.search(XML_EXT_REGEX, file_name):
return write_xml(data, file_name, worksheet_names=worksheet_names)
elif re.search(XLSX_EXT_REGEX, file_name):
return write_xlsx(data, file_name, worksheet_names=worksheet_names)
elif re.search(XLS_... | 1,128,429 |
Writes out to old excel format.
Args:
data: 2D list of tables/worksheets.
file_name: Name of the output file.
worksheet_names: A list of worksheet names (optional). | def write_xls(data, file_name, worksheet_names=None):
workbook = xlwt.Workbook()
for sheet_index, sheet_data in enumerate(data):
if worksheet_names and sheet_index < len(worksheet_names) and worksheet_names[sheet_index]:
name = worksheet_names[sheet_index]
else:
name... | 1,128,430 |
Writes out to csv format.
Args:
data: 2D list of tables/worksheets.
file_name: Name of the output file. | def write_csv(data, file_name, encoding='utf-8'):
name_extension = len(data) > 1
root, ext = os.path.splitext(file_name)
for i, sheet in enumerate(data):
fname = file_name if not name_extension else root+"_"+str(i)+ext
with open(fname, 'wb') as date_file:
csv_file = csv.wri... | 1,128,431 |
Registers a list of Rml definitions objects
Args:
-----
def_list: list of objects defining the rml definitions | def register_defs(self, def_list, **kwargs):
for item in def_list:
if isinstance(item, tuple):
self.register_rml_def(*item, **kwargs)
elif isinstance(item, dict):
cp_kwargs = kwargs.copy()
item.update(kwargs)
self.r... | 1,129,162 |
Registers the rml file locations for easy access
Args:
-----
location_type: ['package_all',
'package_file',
'directory',
'filepath']
location: The correlated location string based on the location... | def register_rml_def(self,
location_type,
location,
filename=None,
**kwargs):
if location_type == 'directory':
self.register_directory(location, **kwargs)
elif location_type == 'filep... | 1,129,163 |
Registers the filepath for an rml mapping
Args:
-----
filepath: the path the rml file | def register_rml(self, filepath, **kwargs):
name = os.path.split(filepath)[-1]
if name in self.rml_maps and self.rml_maps[name] != filepath:
raise Exception("RML name already registered. Filenames must be "
"unique.",
(self.rml... | 1,129,164 |
Instantiates a RmlProcessor and registers it in the manager
Args:
-----
name: the name to register the processor
mappings: the list RML mapping definitions to use
processor_type: the name of the RML processor to use | def make_processor(self, name, mappings, processor_type, **kwargs):
from .processor import Processor
if self.processors.get(name):
raise LookupError("processor has already been created")
if isinstance(mappings, list):
mappings = [self.get_rml(item) for item in ma... | 1,129,166 |
Initialize :class:`RstToPdf` class.
Args:
style_path (str): Path to the style for the PDF.
header (str, default None): Header which will be rendered to each page.
footer (str, default FOOTER): Footer, which will be rendered to each
page. See :attr:`FOOTER` for details.
R... | def _init_pdf(style_path, header=None, footer=FOOTER):
return RstToPdf(
language="cs",
font_path=[
"/usr/share/fonts",
"/usr/share/fonts/truetype/",
'.',
'/usr/local/lib/python2.7/dist-packages/rst2pdf/fonts'
],
stylesheets=[
... | 1,129,305 |
Generate code, which makes sure that `tag_name` has enough items.
Args:
tag_name (str): Name of the container.
index (int): Index of the item you want to obtain from container.
notfoundmsg (str): Raise :class:`.UserWarning` with debug data and
following message.
... | def _required_idiom(tag_name, index, notfoundmsg):
cond = ""
if index > 0:
cond = " or len(el) - 1 < %d" % index
tag_name = str(tag_name)
output = IND + "if not el%s:\n" % cond
output += IND + IND + "raise UserWarning(\n"
output += IND + IND + IND + "%s +\n" % repr(notfoundmsg.str... | 1,129,520 |
Generate unittests for all of the generated code.
Args:
config (dict): Original configuration dictionary. See
:mod:`~harvester.autoparser.conf_reader` for details.
Returns:
str: Python code. | def _unittest_template(config):
output = "def test_parsers():\n"
links = dict(map(lambda x: (x["link"], x["vars"]), config))
for link in links.keys():
output += IND + "# Test parsers against %s\n" % link
output += IND + "html = handle_encodnig(\n"
output += IND + IND + "_get_s... | 1,129,525 |
Generate parser for all `paths`.
Args:
config (dict): Original configuration dictionary used to get matches
for unittests. See
:mod:`~harvester.autoparser.conf_reader` for details.
paths (dict): Output from :func:`.select_best_paths`.
Returns:
... | def generate_parsers(config, paths):
output =
# add source of neighbour picking functions from utils.py
output += inspect.getsource(conf_reader._get_source) + "\n\n"
output += inspect.getsource(utils._get_encoding) + "\n\n"
output += inspect.getsource(utils.handle_encodnig) + "\n\n"
output... | 1,129,526 |
Return common root of the two vectors.
Args:
vec1 (list/tuple): First vector.
vec2 (list/tuple): Second vector.
Usage example::
>>> common_vector_root([1, 2, 3, 4, 5], [1, 2, 8, 9, 0])
[1, 2]
Returns:
def common_vector_root(vec1, vec2):
    """Return common root (shared prefix) of the two vectors.

    Args:
        vec1 (list/tuple): First vector.
        vec2 (list/tuple): Second vector.

    Usage example::
        >>> common_vector_root([1, 2, 3, 4, 5], [1, 2, 8, 9, 0])
        [1, 2]

    Returns:
        list: Common part of two vectors or blank list.
    """
    shared = []
    for left, right in zip(vec1, vec2):
        if left != right:
            break
        shared.append(left)
    return shared
Find root which is common for all `elements`.
Args:
elements (list): List of double-linked HTMLElement objects.
Returns:
list: Vector of HTMLElement containing path to common root. | def find_common_root(elements):
if not elements:
raise UserWarning("Can't find common root - no elements suplied.")
root_path = el_to_path_vector(elements.pop())
for el in elements:
el_path = el_to_path_vector(el)
root_path = common_vector_root(root_path, el_path)
if... | 1,129,557 |
Return list of all dirs and files inside given dir.
Also can filter contents to return only dirs or files.
Args:
- dir_name: Which directory we need to scan (relative)
- get_dirs: Return dirs list
- get_files: Return files list
- hide_ignored: Exclude files and dirs with initial underscore | def listdir(dir_name, get_dirs=None, get_files=None, hide_ignored=False):
if get_dirs is None and get_files is None:
get_dirs = True
get_files = True
source_dir = os.path.join(settings.BASE_DIR, 'app', dir_name)
dirs = []
for dir_or_file_name in os.listdir(source_dir):
pa... | 1,129,628 |
Object initialization
Args:
key: String name of an attributes key that represents the unique identify of the request
def __init__(self, key, attributes):
    """Object initialization.

    Args:
        key: String name of the attribute key that represents the unique
            identity of the request.
        attributes: Dictionary whose keys match the string values of the
            request attribute names and whose values correspond to the
            request attribute values.
    """
    self.attributes = attributes
    self.key = key
Convert communication namedtuple to this class.
Args:
pub (obj): :class:`.Archive` instance which will be converted.
Returns:
def from_comm(cls, pub):
    """Convert a communication namedtuple to this class.

    Args:
        pub (obj): :class:`.Archive` instance which will be converted.

    Returns:
        obj: :class:`DBArchive` instance.
    """
    # Persist the payload only when binary data is actually present.
    filename = cls._save_to_unique_filename(pub) if pub.b64_data else None
    return cls(
        isbn=pub.isbn,
        uuid=pub.uuid,
        aleph_id=pub.aleph_id,
        dir_pointer=filename
    )
Fetch a commit.
Args:
profile
A profile generated from ``simplygithub.authentication.profile``.
Such profiles tell this module (i) the ``repo`` to connect to,
and (ii) the ``token`` to connect with.
sha
The SHA of the commit to fetch.
Returns:
def get_commit(profile, sha):
    """Fetch a commit.

    Args:
        profile: A profile generated from ``simplygithub.authentication.profile``.
            Such profiles tell this module (i) the ``repo`` to connect to,
            and (ii) the ``token`` to connect with.
        sha: The SHA of the commit to fetch.

    Returns:
        The prepared commit data returned by the API.
    """
    endpoint = "/commits/" + sha
    response = api.get_request(profile, endpoint)
    return prepare(response)
Constructor.
Args:
name (str): Name of the periodical.
sub_trees (list): List of other trees.
sub_publications (list): List of sub-publication UUID's.
aleph_id (str): ID used in aleph.
issn (str): ISSN given to the periodical.
is_public (b... | def __init__(self, *args, **kwargs):
super(self.__class__, self).__init__(*args, **kwargs)
# type checks
if not self.name.strip():
raise ValueError(".name property must be set!")
if type(self.sub_trees) not in [list, tuple]:
raise ValueError(".sub_trees ... | 1,129,704 |
Returns a specific option specific in a config file
Arguments:
option_name -- Name of the option (example host_name)
section_name -- Which section of the config (default: name)
examples:
>>> get_option("some option", default="default result")
'default result' | def get_option(option_name, section_name="main", default=_sentinel, cfg_file=cfg_file):
defaults = get_defaults()
# As a quality issue, we strictly disallow looking up an option that does not have a default
# value specified in the code
#if option_name not in defaults.get(section_name, {}) and def... | 1,129,745 |
Write a new nago.ini config file from the defaults.
Arguments:
cfg_file -- File that is written to like /etc/nago/nago.ini
def generate_configfile(cfg_file, defaults=defaults):
    """Write a new nago.ini config file from the defaults.

    Args:
        cfg_file -- File that is written to, like /etc/nago/nago.ini.
        defaults -- Dictionary with default values to use, mapping
            section name -> {option: value}.
    """
    # Make sure the parent directory exists, then create/truncate the file.
    _mkdir_for_config(cfg_file=cfg_file)
    with open(cfg_file, 'w') as config_file:
        config_file.write('')
    # Persist every default option, section by section.
    for section, options in defaults.items():
        set_option(section, cfg_file=cfg_file, **options)
Convert a nested dictionary from one convention to another.
Args:
d (dict): dictionary (nested or not) to be converted.
convert_function (func): function that takes the string in one
convention and returns it in the other one.
Returns:
Dictionary with the new keys. | def morph_dict(d, convert_function):
# Attribution: https://stackoverflow.com/a/33668421/633213
new = {}
for k, v in six.iteritems(d):
new_v = v
if isinstance(v, dict):
new_v = morph_dict(v, convert_function)
elif isinstance(v, list):
new_v = list()
... | 1,129,823 |
Summary:
Initiate configuration menu to customize metal runtime options.
Console script ```keyconfig``` invokes this option_configure directly
in debug mode to display the contents of the local config file (if exists)
Args:
:path (str): full path to default local configuration file l... | def option_configure(debug=False, path=None):
if CONFIG_SCRIPT in sys.argv[0]:
debug = True # set debug mode if invoked from CONFIG_SCRIPT
if path is None:
path = local_config['PROJECT']['CONFIG_PATH']
if debug:
if os.path.isfile(path):
debug_mode('local_config fi... | 1,129,828 |
Format a long string into a block of newline seperated text.
Arguments:
See iter_format_block(). | def format(
self, text=None,
width=60, chars=False, fill=False, newlines=False,
prepend=None, append=None, strip_first=False, strip_last=False,
lstrip=False):
# Basic usage of iter_format_block(), for convenience.
return '\n'.join(
sel... | 1,129,841 |
Returns numerator / denominator, but instead of a ZeroDivisionError:
0 / 0 = 0.
x / 0 = float('inf')
This is not mathematically correct, but often practically OK.
Args:
numerator (float or int)
denominator (float or int)
Returns:
(float)
Raises:
def div(numerator, denominator):
    """Return numerator / denominator, but instead of a ZeroDivisionError:

        0 / 0 = 0.
        x / 0 = float('inf')

    This is not mathematically correct, but often practically OK.

    Args:
        numerator (float or int)
        denominator (float or int)

    Returns:
        (float)

    Raises:
        -
    """
    try:
        return numerator / denominator
    except ZeroDivisionError:
        # Division only raises when denominator is 0, so the two special
        # cases are exhaustive (the original trailing else was unreachable).
        return 0. if numerator == 0 else float('inf')
reduces a multiline string to a single line of text.
args:
def reduce_multiline(string):
    """Reduce a multiline string to a single line of text.

    Args:
        string: the text to reduce; non-strings are converted with str().

    Returns:
        str: all non-blank lines stripped and joined with single spaces.
    """
    text = str(string)
    pieces = []
    for line in text.split("\n"):
        stripped = line.strip()
        if stripped:
            pieces.append(stripped)
    return " ".join(pieces)
Takes a list or multline text string and formats it.
* multiline text strings get converted to a single line
* list entries are joined by a carriage return
* params are passed into the sting with a python format call
args:
text: list or string to format
params: argments for ... | def format_multiline(text, params={}, **kwargs):
def format_kwargs(text, params={}, **kwargs):
if params:
if isinstance(params, dict):
kwargs.update(params)
else:
kwargs = params
try:
return text.format(**kwargs)
exce... | 1,130,123 |
Takes a string and formats it to a max width seperated by carriage
returns
args:
max_width: the max with for a line
kwargs:
indent: the number of spaces to add to the start of each line
prepend: text to add to the start of each line | def format_max_width(text, max_width=None, **kwargs):
ind = ''
if kwargs.get("indent"):
ind = ''.ljust(kwargs['indent'], ' ')
prepend = ind + kwargs.get("prepend", "")
if not max_width:
return "{}{}".format(prepend, text)
len_pre = len(kwargs.get("prepend", "")) + kwargs.get("... | 1,130,124 |
Sets up the ArgumentParser.
Args:
argv: an array of arguments | def setup(argv):
parser = argparse.ArgumentParser(
description='Compute Jekyl- and prose-aware wordcounts',
epilog='Accepted filetypes: plaintext, markdown, markdown (Jekyll)')
parser.add_argument('-S', '--split-hyphens', action='store_true',
dest='split_hyphens',
... | 1,130,208 |
Processes data provided to print a count object, or update a file.
Args:
args: an ArgumentParser object returned by setup() | def prose_wc(args):
if args.file is None:
return 1
if args.split_hyphens:
INTERSTITIAL_PUNCTUATION.append(re.compile(r'-'))
content = args.file.read().decode('utf-8')
filename = args.file.name
body = strip_frontmatter(content)
parsed = markdown_to_text(body)
result = wc(... | 1,130,209 |
Converts markdown to text.
Args:
body: markdown (or plaintext, or maybe HTML) input
Returns:
Plaintext with all tags and frills removed | def markdown_to_text(body):
# Turn our input into HTML
md = markdown.markdown(body, extensions=[
'markdown.extensions.extra'
])
# Safely parse HTML so that we don't have to parse it ourselves
soup = BeautifulSoup(md, 'html.parser')
# Return just the text of the parsed HTML
ret... | 1,130,210 |
Count the words, characters, and paragraphs in a string.
Args:
contents: the original string to count
filename (optional): the filename as provided to the CLI
parsed (optional): a parsed string, expected to be plaintext only
is_jekyll: whether the original contents were from a Jekyl... | def wc(filename, contents, parsed=None, is_jekyll=False):
if is_jekyll:
fmt = 'jekyll'
else:
fmt = 'md/txt'
body = parsed.strip() if parsed else contents.strip()
# Strip the body down to just words
words = re.sub(r'\s+', ' ', body, re.MULTILINE)
for punctuation in INTERSTIT... | 1,130,211 |
Updates a Jekyll file to contain the counts form an object
This just converts the results to YAML and adds to the Jekyll frontmatter.
Args:
filename: the Jekyll file to update
result: the results object from `wc`
content: the contents of the original file
indent: the indentatio... | def update_file(filename, result, content, indent):
# Split the file into frontmatter and content
parts = re.split('---+', content, 2)
# Load the frontmatter into an object
frontmatter = yaml.safe_load(parts[1])
# Add the counts entry in the results object to the frontmatter
frontmatter['... | 1,130,212 |
Add a new log entry to the nago log.
Arguments:
level - Arbitrary string, levels should be syslog style (debug,log,info,warning,error)
def log(message, level="info"):
    """Add a new log entry to the nago log.

    Args:
        message - Arbitrary string, the message that is to be logged.
        level - Arbitrary string; levels should be syslog style
            (debug, log, info, warning, error).
    """
    _log_entries.append({
        'level': level,
        'message': message,
        'timestamp': time.time(),
    })
Decorate other functions with this one to allow access
Arguments:
nago_access -- Type of access required to call this function
By default only master is allowed to make that call
nago_name -- What name this function will have to remote api
Default is... | def nago_access(access_required="master", name=None):
def real_decorator(func):
func.nago_access = access_required
func.nago_name = name or func.__name__
@wraps(func)
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
return wrapper
return real_de... | 1,130,214 |
Establishes an API client for one-way communication
connection with an API Server
Arguments:
- connection (xbahn.connection.Connection)
Keyword Arguments:
- client_class (xbahn.api.Client): if supplied use this class to instantiate
the client object. If omitted will use xba... | def api_client(connection, client_class=xbahn.api.Client):
return client_class(
link=xbahn.connection.link.Link(
# use the connection receive messages (server responses)
receive=connection,
# use the connection to send messages (initiate requests to server)
... | 1,130,764 |
Establishes an API Server on the supplied connection
Arguments:
- connection (xbahn.connection.Connection)
- server_class (xbahn.api.Server)
Returns:
- server_class: server instance | def api_server(connection, server_class):
# run api server on connection
return server_class(
link=xbahn.connection.link.Link(
# use the connection to receive messages
receive=connection,
# use the connection to respond to received messages
respond=c... | 1,130,766 |
Send an echo request to a nago host.
Arguments:
token_or_hostname -- The remote node to ping
If node is not provided, simply return pong
You can use the special nodenames "server" or "master" | def ping(token_or_hostname=None):
if not token_or_hostname:
return "Pong!"
node = nago.core.get_node(token_or_hostname)
if not node and token_or_hostname in ('master', 'server'):
token_or_hostname = nago.settings.get_option('server')
node = nago.core.get_node(token_or_hostname)
... | 1,130,773 |
Scan `path` for viruses using ``clamd`` or ``clamscan`` (depends on
:attr:`settings.USE_CLAMD`).
Args:
path (str): Relative or absolute path of file/directory you need to
scan.
Returns:
dict: ``{filename: ("FOUND", "virus type")}`` or blank dict.
Raises:
Val... | def scan_file(path):
path = os.path.abspath(path)
if settings.USE_CLAMD:
return clamd.scan_file(path)
else:
return clamscan.scan_file(path) | 1,130,931 |
Save `b64_data` to temporary file and scan it for viruses.
Args:
filename (str): Name of the file - used as basename for tmp file.
b64_data (str): Content of the file encoded in base64.
Returns:
dict: ``{filename: ("FOUND", "virus type")}`` or blank dict. | def save_and_scan(filename, b64_data):
with NTFile(suffix="_"+os.path.basename(filename), mode="wb") as ifile:
ifile.write(
b64decode(b64_data)
)
ifile.flush()
os.chmod(ifile.name, 0755)
return scan_file(ifile.name) | 1,130,932 |
returns a dictionary of rdfclasses based on a lowercase search
args:
value: the value to search by | def find(value):
value = str(value).lower()
rtn_dict = RegistryDictionary()
for attr in dir(MODULE.rdfclass):
if value in attr.lower():
try:
item = getattr(MODULE.rdfclass, attr)
if issubclass(item, RdfClassBase):
... | 1,131,184 |
Creates a list of the class hierarchy
Args:
-----
class_name: name of the current class
bases: list/tuple of bases for the current class | def list_hierarchy(class_name, bases):
class_list = [Uri(class_name)]
for base in bases:
if base.__name__ not in IGNORE_CLASSES:
class_list.append(Uri(base.__name__))
return list([i for i in set(class_list)]) | 1,131,186 |
Reads through the class defs and gets the related es class
definitions
Args:
-----
class_defs: RdfDataset of class definitions | def es_get_class_defs(cls_def, cls_name):
rtn_dict = {key: value for key, value in cls_def.items() \
if key.startswith("kds_es")}
for key in rtn_dict:
del cls_def[key]
return rtn_dict | 1,131,187 |
Returns the es_defs with the instantiated rml_processor
Args:
-----
es_defs: the rdf_class elasticsearch definitions
cls_name: the name of the tied class | def get_rml_processors(es_defs):
proc_defs = es_defs.get("kds_esRmlProcessor", [])
if proc_defs:
new_defs = []
for proc in proc_defs:
params = proc['kds_rmlProcessorParams'][0]
proc_kwargs = {}
if params.get("kds_rtn_format"):
proc_kwargs[... | 1,131,188 |
adds a property and its value to the class instance
args:
pred: the predicate/property to add
obj: the value/object to add
obj_method: *** No longer used. | def add_property(self, pred, obj):
pred = Uri(pred)
try:
self[pred].append(obj)
# except AttributeError:
# new_list = [self[pred]]
# new_list.append(obj)
# self[pred] = new_list
except KeyError:
try:
new... | 1,131,197 |
converts the class to a json compatible python dictionary
Args:
uri_format('sparql_uri','pyuri'): The format that uri values will
be returned
Returns:
dict: a json compatible python dictionary | def conv_json(self, uri_format="sparql_uri", add_ids=False):
def convert_item(ivalue):
nvalue = ivalue
if isinstance(ivalue, BaseRdfDataType):
if ivalue.type == 'uri':
if ivalue.startswith("pyuri") and uri_format == "pyuri":
... | 1,131,198 |
Returns the es mapping for the class
args:
-----
base_class: The root class being indexed
role: the role states how the class should be mapped depending
upon whether it is used as a subject or an object. options
are es_Nested or rdf_class | def es_mapping(cls, base_class=None, role='rdf_class', **kwargs):
def _prop_filter(prop, value, **kwargs):
try:
use_prop = len(set(value.owl_inverseOf) - parent_props) > 0
except AttributeError:
use_prop = True
# if not ... | 1,131,199 |
Returns the es mapping for the class
args:
-----
base_class: The root class being indexed
role: the role states how the class should be mapped depending
upon whether it is used as a subject or an object. options
are es_Nested or rdf_class | def es_indexers(cls, base_class=None, role='rdf_class', **kwargs):
def _prop_filter(prop, value, **kwargs):
try:
use_prop = len(set(value.owl_inverseOf) - parent_props) > 0
except AttributeError:
use_prop = True
if prop ... | 1,131,200 |
Returns a JSON object of the class for insertion into es
args:
role: the role states how the class data should be returned
depending upon whether it is used as a subject or an object.
options are kds_esNested or rdf_class
remove_empty: True removes em... | def es_json(self, role='rdf_class', remove_empty=True, **kwargs):
def test_idx_status(cls_inst, **kwargs):
if kwargs.get("force") == True:
return False
idx_time = cls_inst.get("kds_esIndexTime", [None])[0]
mod_time = cls_inst.get("dcterm_... | 1,131,202 |
returns the rml mapping output for specified mapping
Args:
-----
rml_def: The name of the mapping or a dictionary definition | def get_rml(self, rml_def, **kwargs):
if isinstance(rml_def, str):
rml_procs = self.es_defs.get("kds_esRmlProcessor", [])
for item in rml_procs:
if item['name'] == rml_def:
rml_def = item
break
proc_kwargs = {rml_de... | 1,131,203 |
sets the subject value for the class instance
Args:
subject(dict, Uri, str): the subject for the class instance | def _set_subject(self, subject):
# if not subject:
# self.subject =
def test_uri(value):
# .__wrapped__
if not isinstance(value, (Uri, BlankNode)):
try:
if value.startswith("_:"):
return... | 1,131,205 |
Creates an instance of the cache entry.
Args:
key: the unique key used to identify and locate the value.
value: the cached value.
timeout: time to live for the object in milliseconds | def __init__(self, key, value, timeout):
self.key = key
self.value = value
self.expiration = time.clock() * 1000 + timeout | 1,131,593 |
Changes the cached value and updates creation time.
Args:
value: the new cached value.
timeout: time to live for the object in milliseconds
Returns: None | def set_value(self, value, timeout):
self.value = value
self.expiration = time.clock() * 1000 + timeout | 1,131,594 |
Create the poller. At least one of the optional parameters must be
provided.
Parameters:
-----------
interrupt_handle : HANDLE (int), optional
If provided, the program will generate a Ctrl+C event when this
handle is signaled.
parent_handle : HANDLE (int... | def __init__(self, interrupt_handle=None, parent_handle=None):
assert(interrupt_handle or parent_handle)
super(ParentPollerWindows, self).__init__()
if ctypes is None:
raise ImportError("ParentPollerWindows requires ctypes")
self.daemon = True
self.interrupt_... | 1,131,669 |
creates a namespace if it does not exist
args:
name: the name of the namespace
ignore_errors(bool): Will ignore if a namespace already exists or
there is an error creating the namespace
returns:
True if created
False if not c... | def create_namespace(self, name, ignore_errors=False):
if not self.has_namespace(name):
self.namespaces[name] = ConjunctiveGraph()
return True
elif ignore_errors:
return True
else:
raise RuntimeError("namespace '%s' already exists"... | 1,131,823 |
deletes a namespace
args:
name: the name of the namespace
ignore_errors(bool): Will ignore if a namespace does not exist or
there is an error deleting the namespace
returns:
True if deleted
False if not deleted
... | def delete_namespace(self, name, ignore_errors=False):
if self.has_namespace(name):
del self.namespaces[name]
return True
elif ignore_errors:
return True
else:
raise RuntimeError("namespace '%s' does not exist" % name) | 1,131,824 |
runs a sparql query and returns the results
args:
sparql: the sparql query to run
namespace: the namespace to run the sparql query against
mode: ['get'(default), 'update'] the type of sparql query
rtn_format: ['json'(default), 'xml'] for... | def query(self,
sparql,
mode="get",
namespace=None,
rtn_format="json",
**kwargs):
if kwargs.get("debug"):
log.setLevel(logging.DEBUG)
conn = self.conn
if namespace and namespace != self.namespace... | 1,131,826 |
runs a sparql update query and returns the results
args:
sparql: the sparql query to run
namespace: the namespace to run the sparql query against | def update_query(self, sparql, namespace=None, **kwargs):
return self.query(sparql, "update", namespace, **kwargs) | 1,131,827 |
loads data via file stream from python to triplestore
Args:
data: The data or filepath to load
datatype(['ttl', 'xml', 'rdf']): the type of data to load
namespace: the namespace to use
graph: the graph to load the data to.
is_file(False): If true python ... | def load_data(self,
data,
datatype="ttl",
namespace=None,
graph=None,
is_file=False,
**kwargs):
if kwargs.get('debug'):
log.setLevel(logging.DEBUG)
time_start = date... | 1,131,828 |
Uploads data to the Blazegraph Triplestore that is stored in files
in a local directory
args:
file_path: full path to the file
namespace: the Blazegraph namespace to load the data
graph: uri of the graph to load the data. Default is None | def load_local_file(self, file_path, namespace=None, graph=None, **kwargs):
return self.load_data(file_path,
namespace=namespace,
graph=graph,
is_file=True,
**kwargs) | 1,131,830 |
Will delete and recreate specified namespace
args:
namespace(str): Namespace to reset
params(dict): params used to reset the namespace | def reset_namespace(self, namespace=None, params=None):
namespace = pick(namespace, self.namespace)
params = pick(params, self.namespace_params)
log.warning(" Reseting namespace '%s' at host: %s",
namespace,
self.url)
try:
se... | 1,131,831 |
Generates a string with random characters. If no charset is specified, only
letters and digits are used.
Args:
length (int) length of the returned string
charset (string) list of characters to choose from
Returns:
(str) with random characters from charset
Raises:
- | def random_string(length=8, charset=None):
if length < 1:
raise ValueError('Length must be > 0')
if not charset:
charset = string.letters + string.digits
return ''.join(random.choice(charset) for unused in xrange(length)) | 1,131,984 |
Extracts a dict from a string.
Args:
str_in (string) that contains python dict
Returns:
(dict) or None if no valid dict was found
Raises:
- | def str2dict(str_in):
dict_out = safe_eval(str_in)
if not isinstance(dict_out, dict):
dict_out = None
return dict_out | 1,131,985 |
Extracts a tuple from a string.
Args:
str_in (string) that contains python tuple
Returns:
(dict) or None if no valid tuple was found
Raises:
- | def str2tuple(str_in):
tuple_out = safe_eval(str_in)
if not isinstance(tuple_out, tuple):
tuple_out = None
return tuple_out | 1,131,986 |
Extracts the keys from a string that represents a dict and returns them
sorted by key.
Args:
str_in (string) that contains python dict
Returns:
(list) with keys or None if no valid dict was found
Raises:
- | def str2dict_keys(str_in):
tmp_dict = str2dict(str_in)
if tmp_dict is None:
return None
return sorted([k for k in tmp_dict]) | 1,131,987 |
Extracts the values from a string that represents a dict and returns them
sorted by key.
Args:
str_in (string) that contains python dict
Returns:
(list) with values or None if no valid dict was found
Raises:
- | def str2dict_values(str_in):
tmp_dict = str2dict(str_in)
if tmp_dict is None:
return None
return [tmp_dict[key] for key in sorted(k for k in tmp_dict)] | 1,131,988 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.