docstring stringlengths 52 499 | function stringlengths 67 35.2k | __index_level_0__ int64 52.6k 1.16M |
|---|---|---|
def _oai_to_xml(marc_oai):  # TODO: move this to MARC XML parser?
    """
    Convert OAI to MARC XML.

    Args:
        marc_oai (str): String with either OAI or MARC XML.

    Returns:
        str: String with MARC XML.
    """
    # MARCXMLRecord accepts both flavours; forcing `oai_marc` off makes it
    # serialize back as plain MARC XML.
    converted = MARCXMLRecord(marc_oai)
    converted.oai_marc = False
    return converted.to_XML()
Add proper XML namespace to the `marc_xml` record.
Args:
marc_xml (str): String representation of the XML record.
Returns:
str: XML with namespace. | def _add_namespace(marc_xml):
dom = marc_xml
if isinstance(dom, basestring):
dom = dhtmlparser.parseString(marc_xml)
root = dom.find("root")
if root:
root[0].params = {}
for record in dom.find("record"):
record.params = {}
collections = dom.find("collection")
... | 1,075,468 |
def _read_content_or_path(content_or_path):
    """
    If `content_or_path` contains ``\\n``, return it unchanged. Otherwise
    assume it is a path and read the file at that path.

    Args:
        content_or_path (str): Content or path to the file.

    Returns:
        str: Content.

    Raises:
        IOError: when the file is not found.
    """
    # A newline anywhere in the (stripped) input means it cannot be a
    # filename, so it is treated as literal content.
    if "\n" in content_or_path.strip():
        return content_or_path

    if os.path.exists(content_or_path):
        with open(content_or_path) as file_handle:
            return file_handle.read()

    raise IOError("File '%s' doesn't exists!" % content_or_path)
Read MARC XML or OAI file, convert, add namespace and return XML in
required format with all necessities.
Args:
xml (str): Filename or XML string. Don't use ``\\n`` in case of
filename.
Returns:
obj: Required XML parsed with ``lxml.etree``. | def _read_marcxml(xml):
# read file, if `xml` is valid file path
marc_xml = _read_content_or_path(xml)
# process input file - convert it from possible OAI to MARC XML and add
# required XML namespaces
marc_xml = _oai_to_xml(marc_xml)
marc_xml = _add_namespace(marc_xml)
file_obj = Str... | 1,075,470 |
def _read_template(template):
    """
    Read XSLT template.

    Args:
        template (str): Filename or XML string. Don't use ``\\n`` in case of
            filename.

    Returns:
        obj: Required XML parsed with ``lxml.etree``.
    """
    # `template` may be either inline XML or a path; the helper resolves it.
    content = _read_content_or_path(template)
    return ET.parse(StringIO.StringIO(content))
def xslt_transformation(xml, template):
    """
    Transform `xml` using XSLT `template`.

    Args:
        xml (str): Filename or XML string. Don't use ``\\n`` in case of
            filename.
        template (str): Filename or XML string. Don't use ``\\n`` in case of
            filename.

    Returns:
        str: Transformed `xml` as string.
    """
    # Build the XSLT callable once, then apply it to the parsed MARC XML.
    xslt = ET.XSLT(_read_template(template))
    transformed = xslt(_read_marcxml(xml))
    return ET.tostring(transformed, pretty_print=True, encoding="utf-8")
Loads a single fixture.
Args:
* fixture_id (str): the id of the fixture
* head2head (int, optional): load the previous n fixture of the two teams
Returns:
* :obj: json: the fixture-json | def get_fixture(self, fixture_id, head2head=None):
filters = []
if head2head is not None and int(head2head) > 0:
self.logger.debug(f'Getting fixture {fixture_id}. head2head is {head2head}.')
filters.append(self.__createFilter('head2head', head2head))
else:
... | 1,075,850 |
def get_players(self, team):
    """
    Loads the players of a team.

    Args:
        * team (:obj: json): a team in json format obtained from the service.

    Returns:
        * :obj: json: the players of the team
    """
    tid = self.__get_team_id(team)
    self.logger.debug(f'Getting players of team {tid}.')
    # Delegate the actual HTTP call to the shared request helper.
    return self._request('teams', tid, 'players')
def get_substring_idxs(substr, string):
    """
    Return a list of indexes of substr. If substr not found, list is
    empty.

    Arguments:
        substr (str): Substring to match.
        string (str): String to match in.

    Returns:
        list of int: Start indices of substr.
    """
    # BUGFIX: `substr` is documented as a literal substring, but it was
    # previously passed to re.finditer() unescaped, so regex metacharacters
    # (".", "(", "*", ...) were interpreted as patterns — producing wrong
    # matches or raising re.error. Escape it to force literal matching.
    return [match.start() for match in re.finditer(re.escape(substr), string)]
def truncate(string, maxchar):
    """
    Truncate a string to a maximum number of characters.

    If the string is longer than maxchar, then remove excess
    characters and append an ellipses.

    Arguments:
        string (str): String to truncate.
        maxchar (int): Maximum length of string in characters. Must be >= 4.

    Returns:
        str: Truncated string, at most `maxchar` characters long.
    """
    # An ellipsis needs three characters, so anything below 4 is unusable.
    if maxchar < 4:
        raise TruncateError("Maxchar must be > 3")
    return string if len(string) <= maxchar else string[:maxchar - 3] + "..."
def __init__(self, client, base_path):
    """
    Initialise the class.

    Args:
        client (:class:`consulate.Consul`): A :class:`consulate.Consul` instance.
        base_path (str): the base path to use in Consul.
    """
    self._client = client
    self._base_path = base_path
    # Trailing '' forces a trailing slash on the queue prefix.
    self._queue_path = posixpath.join(base_path, 'queue', '')
    self._counter_path = posixpath.join(self._queue_path, 'counter')
    # Make sure the backing Consul keys exist before first use.
    self._ensure_counter()
    self._ensure_queue()
def put(self, value, priority=100):
    """
    Put a task into the queue.

    Args:
        value (str): Task data.
        priority (int): An optional priority as an integer with at most 3 digits.
            Lower values signify higher priority.
    """
    # Zero-pad the priority so lexicographic key ordering in Consul matches
    # numeric priority ordering.
    key_name = '{}{:03d}_{}'.format(self.TASK_PREFIX, priority, self._counter)
    full_path = posixpath.join(self._queue_path, key_name)
    self._client.kv[full_path] = value
def create_table(self, name, schema):
    """
    Create a new table.

    If the table already exists, nothing happens.

    Example:
        >>> db.create_table("foo", (("id", "integer primary key"),
                                    ("value", "text")))

    Arguments:
        name (str): The name of the table to create.
        schema (sequence of tuples): Column (name, type) definitions.
    """
    # NOTE(review): table/column names are interpolated directly into the SQL
    # (identifiers cannot be bound as parameters) — do not pass untrusted
    # input here.
    column_defs = ",".join(" ".join(column) for column in schema)
    self.execute("CREATE TABLE IF NOT EXISTS {name} ({columns})"
                 .format(name=name, columns=column_defs))
Create a new table with same schema as the source.
If the named table already exists, nothing happens.
Arguments:
name (str): The name of the table to create.
src (str): The name of the source table to duplicate.
Raises:
sql.OperationalError: If source ta... | def create_table_from(self, name, src):
# Lookup the command which was used to create the "src" table.
query = self.execute("SELECT sql FROM sqlite_master WHERE "
"type='table' and name=?", (src,))
try:
cmd = query.fetchone()[0]
except Ty... | 1,076,384 |
def copy_table(self, src, dst):
    """
    Create a carbon copy of the source table.

    Arguments:
        src (str): The name of the table to copy.
        dst (str): The name of the target duplicate table.

    Raises:
        sql.OperationalError: If source table does not exist.
    """
    # Recreate the schema of `src` under the new name first.
    self.create_table_from(dst, src)
    # Then bulk-copy every row across and persist the change.
    insert_stmt = "INSERT INTO {dst} SELECT * FROM {src}".format(dst=dst, src=src)
    self.execute(insert_stmt)
    self.commit()
Pick frame info from current caller's `frame`.
Args:
* frame: :type:`frame` instance, use :func:`inspect.currentframe`.
* parent: whether to get outer frame (caller) traceback info, :data:`False` by default.
Returns:
:class:`inspect.Trackback` instance from :data:`frame` or its parent ... | def traceback(frame, parent=False):
# Traceback(filename='<stdin>', lineno=1, function='<module>', code_context=None, index=None)
if parent is True:
# frame itself will always be placed @ the first index of its outerframes.
outers = inspect.getouterframes(frame)
traceback = (len(out... | 1,076,393 |
Create the real absolute path for the given path.
Add supports for userdir & / supports.
Args:
* path: pathname to use for realpath.
Returns:
Platform independent real absolute path. | def realpath(path):
if path == '~':
return userdir
if path == '/':
return sysroot
if path.startswith('/'):
return os.path.abspath(path)
if path.startswith('~/'):
return os.path.expanduser(path)
if path.startswith('./'):
return os.path.abspath(os.path.... | 1,076,428 |
Find absolute file/folder paths with the given ``re`` pattern.
Args:
* pattern: search pattern, support both string (exact match) and `re` pattern.
* path: root path to start searching, default is current working directory.
* recursive: whether to recursively find the matched items from `pa... | def find(pattern, path=os.path.curdir, recursive=False):
root = realpath(path)
Finder = lambda item: regex.is_regex(pattern) \
and pattern.match(item) or (pattern == item)
if recursive:
for base, dirs, files in os.walk(root, topdown=True):
for segment in iterto... | 1,076,429 |
def copy(self, dest):
    """
    Copy item to the given `dest` path.

    Args:
        * dest: destination path to copy.
    """
    # Regular files get a metadata-preserving copy; anything else is assumed
    # to be a directory and copied recursively (symlinks dereferenced).
    if os.path.isfile(self.path):
        shutil.copy2(self.path, dest)
        return
    shutil.copytree(self.path, dest, symlinks=False, ignore=None)
def release(self, force=False):
    """
    Release lock.

    To release a lock, we must already own the lock.

    Arguments:
        force (bool, optional): If true, ignore any existing lock owner.

    Raises:
        UnableToReleaseLockError: If the lock is claimed by another
            process (not raised if force option is used).
    """
    # Nothing to do when no lock exists.
    if not self.islocked:
        return
    if not (self.owned_by_self or force):
        # Someone else holds the lock and we were not told to force it.
        raise UnableToReleaseLockError(self)
    os.remove(self.path)
Read the contents of a LockFile.
Arguments:
path (str): Path to lockfile.
Returns:
Tuple(int, datetime): The integer PID of the lock owner, and the
date the lock was required. If the lock is not claimed, both
values are None. | def read(path):
if fs.exists(path):
with open(path) as infile:
components = infile.read().split()
pid = int(components[0])
date = datetime.date.fromtimestamp(float(components[1]))
return pid, date
else:
return N... | 1,076,492 |
def write(path, pid, timestamp):
    """
    Write the contents of a LockFile.

    Arguments:
        path (str): Path to lockfile.
        pid (int): The integer process ID.
        timestamp (datetime): The time the lock was acquired.
    """
    # Single space-separated line, matching what read() expects back.
    with open(path, "w") as lockfile:
        lockfile.write("{} {}\n".format(pid, timestamp))
Create an image from a terminated host (with auto_delete_boot_disk=False)
Args:
name: The name of the image | def terminate_and_create_image(name):
node = _host_node()
operation = _gcp().instances().delete(project=DEFAULT_PROJECT, zone=DEFAULT_ZONE,
instance=node['real_name']).execute()
while True:
status = get_zone_operation_status(operation=operation)
... | 1,076,702 |
Load a png or jpeg image into a bitmap buffer.
Args:
buf (Buffer): Buffer to load
request_components (int): If you want to force number of components
Returns:
A tuple containing:
- Bitmap buffer
- width of bitmap
- height of bitmap
- number of componen... | def load_image(buf, request_components=0):
x = ffi.new('int*')
y = ffi.new('int*')
n = ffi.new('int*')
cbuf = ffi.from_buffer(buf)
bitmap = lib.stbi_load_from_memory(
ffi.cast('unsigned char*', cbuf), len(buf), x, y, n,
request_components
)
pybuffer = ffi.buffer(bitma... | 1,076,803 |
Resize an image
Args:
buf (Buffer): Buffer coming from `load_image`
width (int): Width of `buf`
height (int): Height of `buf`
num_channels (int): Number of channels in `buf` (RGBA=4)
new_width (int): Desired width
new_height (int): Desired height
Returns:
... | def resize_image(buf, width, height, num_channels, new_width, new_height):
new_size = new_width * new_height * num_channels
input_pixels = ffi.from_buffer(buf)
output_pixels = ffi.new('unsigned char[]', new_size)
result = lib.stbir_resize_uint8(
ffi.cast('unsigned char*', input_pixels), wi... | 1,076,804 |
Finalise a plot.
Display or show the plot, then close it.
Arguments:
output (str, optional): Path to save figure to. If not given,
show plot.
figsize ((float, float), optional): Figure size in inches.
**kwargs: Any additional arguments to pass to
plt.savefig(). Onl... | def finalise(output=None, figsize=None, tight=True, **kwargs):
import matplotlib.pyplot as plt
# Set figure size.
if figsize is not None:
plt.gcf().set_size_inches(*figsize)
# Set plot layout.
if tight:
plt.tight_layout()
if output is None:
plt.show()
else:
... | 1,076,913 |
r'''
Create one or more cloud servers
Args:
* provider (str): Cloud provider, e.g. ec2, digitalocean
* count (int) =1: Number of instances
* name (str) =None: Name of server(s)
* \**kwargs: Provider-specific flags | def create(provider, count=1, name=None, **kwargs):
r
count = int(count)
provider = provider_by_name(provider)
options = provider.create_server_defaults
options.update(kwargs)
names = [name] * count
provider.validate_create_options(**options)
return provider.create_servers(count, names, ... | 1,077,421 |
def ssh(cmd=''):
    """
    SSH into the server(s) (sequentially if more than one)

    Args:
        cmd (str) ='': Command to run on the server
    """
    # Build the full command up front; warn_only keeps fabric from aborting
    # on a non-zero exit status.
    ssh_command = 'ssh -A -o StrictHostKeyChecking=no -i "%s" %s@%s "%s"' % (
        env.key_filename, env.user, env.host, cmd)
    with settings(warn_only=True):
        local(ssh_command)
Connection object builder.
Args:
virtualhost (str): selected virtualhost in rabbitmq
Returns:
pika.ConnectionParameters: object filled by `constants` from
:class:`edeposit.amqp.settings`. | def getConParams(virtualhost):
return pika.ConnectionParameters(
host=settings.RABBITMQ_HOST,
port=int(settings.RABBITMQ_PORT),
virtual_host=virtualhost,
credentials=pika.PlainCredentials(
settings.RABBITMQ_USER_NAME,
settings.RABBITMQ_USER_PASSWORD
... | 1,077,435 |
def get_sendback(self, uuid, key):
    """
    Return function for sending progress messages back to original caller.

    Args:
        uuid (str): UUID of the received message.
        key (str): Routing key.

    Returns:
        fn reference: Reference to function which takes only one data \
                      argument.
    """
    # The closure captures uuid/key so callers only have to supply data.
    def _progress_callback(data):
        serialized = serializers.serialize(data)
        self.sendResponse(serialized, uuid, key)

    return _progress_callback
Callback called when exception was raised.
This method serializes the exception and sends it over AMQP back
to caller.
Args:
e (obj): Instance of the exception.
uuid (str): UUID of the message that caused the exception to raise.
routing_key (str): Which rout... | def process_exception(self, e, uuid, routing_key, body, tb=None):
# get informations about message
msg = e.message if hasattr(e, "message") else str(e)
exception_type = str(e.__class__)
exception_name = str(e.__class__.__name__)
print "Sending exception %s: %s for UUID ... | 1,077,440 |
SSH into a running container, using the host as a jump host. This requires
the container to have a running sshd process.
Args:
* container: Container name or ID
* cmd='': Command to run in the container
* user='root': SSH username
* password='root': SSH password | def ssh(container, cmd='', user='root', password='root'):
ip = get_ip(container)
ssh_cmd = 'sshpass -p \'%s\' ssh -A -t -o StrictHostKeyChecking=no \'%s\'@%s' % (password, user, ip)
local('ssh -A -t -o StrictHostKeyChecking=no -i "%s" %s@%s %s %s' % (
env.key_filename, env.user, env.host, ssh_c... | 1,078,563 |
Prepare a vanilla server by installing docker, curl, and sshpass. If a file called ``dot_dockercfg``
exists in the current working directory, it is uploaded as ``~/.dockercfg``.
Args:
* docker_mount=None: Partition that will be mounted as /var/lib/docker | def setup(docker_mount=None, force=False):
if not is_ubuntu() and not is_boot2docker():
raise Exception('Head In The Clouds Docker is only supported on Ubuntu')
# a bit hacky
if os.path.exists('dot_dockercfg') and not fabric.contrib.files.exists('~/.dockercfg'):
put('dot_dockercfg', '... | 1,078,566 |
Kill a container
Args:
* container: Container name or ID
* rm=True: Remove the container or not | def kill(container, rm=True):
container = get_container(container)
if not container:
raise Exception('No such container: %s' % container)
unbind_all(container['ip']) # legacy, only here for backwards compatibility
sudo('docker kill %s' % container['name'])
if rm:
sudo('docker r... | 1,078,569 |
Set up an SSH tunnel into the container, using the host as a gateway host.
Args:
* container: Container name or ID
* local_port: Local port
* remote_port=None: Port on the Docker container (defaults to local_port)
* gateway_port=None: Port on the gateway host (defaults to remote_por... | def tunnel(container, local_port, remote_port=None, gateway_port=None):
if remote_port is None:
remote_port = local_port
if gateway_port is None:
gateway_port = remote_port
remote_host = get_ip(container)
command = % {
'key_filename': env.key_filename,
'local_port... | 1,078,570 |
Insert `tag` before `before` tag if present. If not, insert it into `root`.
Args:
tag (obj): HTMLElement instance.
before (obj): HTMLElement instance.
root (obj): HTMLElement instance. | def insert_tag(tag, before, root):
if not before:
root.childs.append(tag)
tag.parent = root
return
if type(before) in [tuple, list]:
before = first(before)
# check that `before` is double linked
if not hasattr(before, "parent"):
raise ValueError("Input must... | 1,078,959 |
Transform content in all `tags` using result of `content_transformer(tag)`
call.
Args:
tags (obj/list): HTMLElement instance, or list of HTMLElement
instances.
content_transformer (function): Function which is called as
``content_... | def transform_content(tags, content_transformer):
if type(tags) not in [tuple, list]:
tags = [tags]
for tag in tags:
new_child = dhtmlparser.HTMLElement(content_transformer(tag))
# don't forget to add parent if the list is double-linked
if hasattr(tag, "parent"):
... | 1,078,960 |
def double_linked_dom(str_or_dom):
    """
    Create double linked DOM from input.

    In case of string, parse it, make it double-linked. In case of DOM, just
    make it double-linked.

    Args:
        str_or_dom (str/HTMLelement): String or HTMLelement instance.

    Returns:
        obj: HTMLelement with parsed, double-linked content from `str_or_dom`.
    """
    # Parse only when we were handed raw markup rather than a DOM object.
    if isinstance(str_or_dom, dhtmlparser.HTMLElement):
        dom = str_or_dom
    else:
        dom = dhtmlparser.parseString(str_or_dom)

    dhtmlparser.makeDoubleLinked(dom)
    return dom
def substitute_globals(config_dict):
    """
    Set global variables to values defined in `config_dict`.

    Args:
        config_dict (dict): dictionary with data, which are used to set \
                            `globals`.

    Note:
        `config_dict` have to be dictionary, or it is ignored. Also all
        variables, that are not already in globals, or whose values are of
        a type not listed in ``_ALLOWED``, are skipped.
    """
    constants = get_all_constants()

    # Silently ignore anything that is not a dict (documented behaviour).
    # isinstance() also accepts dict subclasses (e.g. OrderedDict), which the
    # previous exact `type(...) != dict` check wrongly rejected.
    if not isinstance(config_dict, dict):
        return

    for key, value in config_dict.items():
        # Only already-known constants with an allowed type may be replaced.
        if key in constants and type(value) in _ALLOWED:
            globals()[key] = value
Instantiate a directory checksum cache.
Arguments:
path (str): Path to persistent cache store.
hash (str, optional): Hash algorithm to use, e.g. 'md5', 'sha1'. | def __init__(self, path, hash='sha1'):
self.path = fs.path(path)
self.hash = hash
db = sqlite3.connect(self.path)
c = db.cursor()
c.execute()
db.commit()
db.close() | 1,079,275 |
Compute the hash of a directory.
Arguments:
path: Directory.
**dirhash_opts: Additional options to checksumdir.dirhash().
Returns:
str: Checksum of directory. | def dirhash(self, path, **dirhash_opts):
path = fs.path(path)
last_modified = time.ctime(max(
max(os.path.getmtime(os.path.join(root, file)) for file in files)
for root,_,files in os.walk(path)))
db = sqlite3.connect(self.path)
c = db.cursor()
c.... | 1,079,277 |
def encode_request(request_line, **headers):
    """
    Creates the data for a SSDP request.

    Args:
        request_line (string): The request line for the request (e.g.
            ``"M-SEARCH * HTTP/1.1"``).
        headers (dict of string -> string): Dictionary of header name - header
            value pairs to present in the request.

    Returns:
        bytes: The encoded request.
    """
    # Request line first, then "Name: value" header lines, then the blank
    # line that terminates an HTTP-style message.
    parts = [request_line]
    for name, value in headers.items():
        parts.append('%s: %s' % (name, value))
    return ('\r\n'.join(parts) + '\r\n\r\n').encode('utf-8')
Decodes the data from a SSDP response.
Args:
data (bytes): The encoded response.
Returns:
dict of string -> string: Case-insensitive dictionary of header name to
header value pairs extracted from the response. | def decode_response(data):
res = CaseInsensitiveDict()
for dataline in data.decode('utf-8').splitlines()[1:]:
dataline = dataline.strip()
if not dataline:
continue
line_parts = dataline.split(':', 1)
# This is to deal with headers with no value.
if len(li... | 1,079,780 |
def request_via_socket(sock, search_target):
    """
    Send an SSDP search request via the provided socket.

    Args:
        sock: A socket suitable for use to send a broadcast message - preferably
            one created by :py:func:`make_socket`.
        search_target (string): A :term:`resource type` target to search for.
    """
    headers = {
        'HOST': MCAST_IP_PORT,
        'MAN': '"ssdp:discover"',
        # MX: maximum wait time in seconds advertised to responders.
        'MX': '3',
        'ST': search_target,
    }
    message = encode_request('M-SEARCH * HTTP/1.1', **headers)
    sock.sendto(message, (MCAST_IP, MCAST_PORT))
Yield SSDP search responses and advertisements from the provided socket.
Args:
sock: A socket suitable for use to send a broadcast message - preferably
one created by :py:func:`make_socket`.
timeout (int / float): Overall time in seconds for how long to wait for
before no lo... | def responses_from_socket(sock, timeout=10):
now = time.time()
give_up_by = now + timeout
while now < give_up_by:
try:
data = sock.recv(1024)
except socket.timeout:
now = time.time()
continue
# We handle either search responses or announceme... | 1,079,782 |
Add `xmlns` and `ID` attributes to ``<mods:mods>`` tag.
Args:
dom (HTMLElement): DOM containing whole document.
volume_counter (int, default 0): ID of volume. | def add_missing_xml_attributes(dom, volume_counter=0):
mods_tag = get_mods_tag(dom)
if mods_tag:
params = mods_tag.params
# add missing attributes
params["ID"] = "MODS_VOLUME_%04d" % (volume_counter + 1)
params["xmlns:mods"] = "http://www.loc.gov/mods/v3"
params["x... | 1,079,833 |
Fix bugs in `mods` produced by XSLT template.
Args:
marc_xml (str): Original Aleph record.
mods (str): XML string generated by XSLT template.
uuid (str): UUID of the package.
counter (int): Number of record, is added to XML headers.
url (str): URL of the publication (public ... | def postprocess_monograph(marc_xml, mods, uuid, counter, url):
dom = double_linked_dom(mods)
if not isinstance(marc_xml, MARCXMLRecord):
marc_xml = MARCXMLRecord(marc_xml)
add_missing_xml_attributes(dom, counter)
fix_invalid_type_parameter(dom)
if uuid:
add_uuid(dom, uuid)
... | 1,079,842 |
Split a string by ``sep`` and yield chunks
Args:
s (str-type): string to split
sep (str-type): delimiter to split by
Yields:
generator of strings: chunks of string s | def itersplit(s, sep=None):
if not s:
yield s
return
exp = re.compile(r'\s+' if sep is None else re.escape(sep))
pos = 0
while True:
m = exp.search(s, pos)
if not m:
if pos < len(s) or sep is not None:
yield s[pos:]
break
... | 1,079,863 |
Itersplit a string into a (named, if specified) tuple.
Args:
str_ (str): string to split
fsep (str): field separator (delimiter to split by)
revtuple (object): namedtuple (or class with a ``._fields`` attr)
(optional)
fields (list of str): field names (if revtuple is not... | def itersplit_to_fields(str_,
fsep=DEFAULT_FSEP,
revtuple=None,
fields=[],
preparse=None):
if preparse:
str_ = preparse(str_)
_fields = itersplit(str_, fsep)
if revtuple is not None:
try:
... | 1,079,864 |
Search for repositories with a stack and ``os.listdir``
Args:
where (str): path to search from
Yields:
Repository subclass instance | def listdir_find_repos(where):
stack = deque([(convert_path(where), '')])
while stack:
where, prefix = stack.pop()
try:
for name in sorted(os.listdir(where), reverse=True):
fn = os.path.join(where, name)
if name in REPO_PREFIXES:
... | 1,079,866 |
Search for repositories with GNU find
Args:
where (str): path to search from
ignore_error (bool): if False, raise Exception when the returncode is
not zero.
Yields:
Repository subclass instance | def find_find_repos(where, ignore_error=True):
log.debug(('REPO_REGEX', REPO_REGEX))
FIND_REPO_REGEXCMD = ("-regex", '.*(%s)$' % REPO_REGEX)
if os.uname()[0] == 'Darwin':
cmd = ("find",
'-E',
'-L', # dereference symlinks
where,
FIND_R... | 1,079,867 |
Search for repositories and deduplicate based on ``repo.fpath``
Args:
where (str): path to search from
Yields:
Repository subclass | def find_unique_repos(where):
repos = Dict()
path_uuids = Dict()
log.debug("find_unique_repos(%r)" % where)
for repo in find_find_repos(where):
# log.debug(repo)
repo2 = (hasattr(repo, 'search_upwards')
and repo.search_upwards(upwards=path_uuids))
if repo2:
... | 1,079,868 |
Do a repository report: call the report function for each Repository
Args:
repos (iterable): iterable of Repository instances
report (string): report name
output (writeable): output stream to print to
Yields:
Repository subclass | def do_repo_report(repos, report='full', output=sys.stdout, *args, **kwargs):
for i, repo in enumerate(repos):
log.debug(str((i, next(repo.origin_report()))))
try:
if repo is not None:
reportfunc = REPORT_TYPES.get(report)
if reportfunc is None:
... | 1,079,869 |
Generate a thg-reporegistry.xml file from a list of repos and print
to output
Args:
repos (iterable): iterable of Repository subclass instances
output (writeable): output stream to which THG XML will be printed | def do_tortoisehg_report(repos, output):
import operator
import xml.etree.ElementTree as ET
root = ET.Element('reporegistry')
item = ET.SubElement(root, 'treeitem')
group = ET.SubElement(item, 'group', attrib=Dict(name='groupname'))
def fullname_to_shortname(fullname):
s... | 1,079,870 |
def __init__(self, fpath):
    """
    Create a new Repository instance

    Args:
        fpath (str): path (relative or absolute) to repository
    """
    # Normalise to an absolute path so later path comparisons are stable.
    self.fpath = os.path.abspath(fpath)
    # Symlinks discovered for this repository are collected here.
    self.symlinks = []
Run a command with the current working directory set to self.fpath
Args:
cmd (str or tuple): cmdstring or listlike
Keyword Arguments:
ignore_error (bool): if False, raise an Exception if p.returncode is
not 0
cwd (str): current working dir to run cmd... | def sh(self, cmd, ignore_error=False, cwd=None, shell=False, **kwargs):
kwargs.update({
'shell': shell,
'cwd': cwd or self.fpath,
'stderr': subprocess.STDOUT,
'stdout': subprocess.PIPE,
'ignore_error': ignore_error})
log.debug((('cmd',... | 1,079,887 |
Convert a URL to local mercurial URL schemes
Args:
url (str): URL to map to local mercurial URL schemes
example::
# schemes.gh = git://github.com/
>> remote_url = git://github.com/westurner/dotfiles'
>> to_hg_scheme_url(remote_url)
<< gh://w... | def to_hg_scheme_url(cls, url):
regexes = cls._get_url_scheme_regexes()
for scheme_key, pattern, regex in regexes:
match = regex.match(url)
if match is not None:
groups = match.groups()
if len(groups) == 2:
return u''.j... | 1,079,894 |
def _logmessage_transform(cls, s, by=2):
    """
    Preprocess/cleanup a bzr log message before parsing

    Args:
        s (str): log message string
        by (int): cutoff threshold for log message length

    Returns:
        str: preprocessed log message string
    """
    # Too short to carry the leading field indent: just trim newlines.
    if len(s) < by:
        return s.strip('\n')
    # Drop the first `by` characters (indent), then trim newlines.
    return s[by:].strip('\n')
Parse bazaar log file format
Args:
r (str): bzr revision identifier
Yields:
dict: dict of (attr, value) pairs
::
$ bzr log -l1
------------------------------------------------------------
revno: 1
committer: ubuntu <ubun... | def _parselog(self, r):
def __parselog(entry):
bufname = None
buf = deque()
print(entry)
if entry == ['']:
return
for l in itersplit(entry, '\n'):
if not l:
continue
... | 1,079,904 |
def deep_update(d, u):
    """
    Deeply updates a dictionary. List values are concatenated.

    Args:
        d (dict): First dictionary which will be updated
        u (dict): Second dictionary use to extend the first one

    Returns:
        dict: The merge dictionary
    """
    for key, new_val in u.items():
        if isinstance(new_val, Mapping):
            # Recurse into nested mappings, creating the target dict on demand.
            d[key] = deep_update(d.get(key, {}), new_val)
        elif isinstance(new_val, list):
            # Concatenate, skipping elements already present in the target.
            current = d.get(key, [])
            d[key] = current + [item for item in new_val if item not in current]
        else:
            d[key] = new_val
    return d
def update(dst, src):
    """
    Recursively update values in dst from src.

    Unlike the builtin dict.update() function, this method will decend into
    nested dicts, updating all nested values.

    Arguments:
        dst (dict): Destination dict.
        src (dict): Source dict.

    Returns:
        dict: dst updated with entries from src.
    """
    for key, value in src.items():
        if isinstance(value, Mapping):
            # Merge nested mappings instead of overwriting them wholesale.
            dst[key] = update(dst.get(key, {}), value)
        else:
            dst[key] = value
    return dst
def dict_values(src):
    """
    Recursively get values in dict.

    Unlike the builtin dict.values() function, this method will descend into
    nested dicts, returning all nested values.

    Arguments:
        src (dict): Source dict.

    Returns:
        list: List of values.
    """
    for value in src.values():
        if isinstance(value, dict):
            # Descend into nested dicts, yielding only their leaf values.
            for nested_value in dict_values(value):
                yield nested_value
        else:
            yield value
Create servers and containers as required to meet the configuration
specified in _name_.
Args:
* name: The name of the yaml config file (you can omit the .yml extension for convenience)
Example:
fab ensemble.up:wordpress | def up(name, debug=False):
if debug:
env.ensemble_debug = True
filenames_to_try = [
name,
'%s.yml' % name,
'%s.yaml' % name,
]
for filename in filenames_to_try:
if os.path.exists(filename):
with open(filename, 'r') as f:
config ... | 1,080,263 |
Stop a profiling timer.
Arguments:
name (str): The name of the timer to stop. If no name is given, stop
the global anonymous timer.
Returns:
bool: Whether or not profiling is enabled.
Raises:
KeyError: If the named timer does not exist. | def stop(name, file=sys.stderr):
if is_enabled():
elapsed = (time() - __TIMERS[name])
if elapsed > 60:
elapsed_str = '{:.1f} m'.format(elapsed / 60)
elif elapsed > 1:
elapsed_str = '{:.1f} s'.format(elapsed)
else:
elapsed_str = '{:.1f} ms'.for... | 1,080,265 |
def execute(command, cwd=os.path.curdir, **options):
    """
    Run the system command with optional options.

    Args:
        * command: system command.
        * cwd: current working directory.
        * verbose: direct options for :func:`subprocess.Popen`.

    Returns:
        Opened process, standard output & error.
    """
    # shlex handles quoted arguments in the command string correctly.
    argv = shlex.split(command)
    process = subprocess.Popen(argv, cwd=cwd, **options)
    stdout, stderr = process.communicate()
    return process, stdout, stderr
def comparator(objective):
    """
    Higher order function creating a compare function for objectives.

    Args:
        objective (cipy.algorithms.core.Objective): The objective to create a
            compare for.

    Returns:
        callable: Function accepting two objectives to compare.
    """
    if isinstance(objective, Minimum):
        # Minimisation: "better" means strictly smaller.
        return lambda lhs, rhs: lhs < rhs
    # Otherwise assume maximisation: "better" means strictly larger.
    return lambda lhs, rhs: lhs > rhs
def parse(code, filename='<unknown>', mode='exec', tree=None):
    """
    Parse the source into an AST node with PyPosAST.
    Enhance nodes with positions

    Arguments:
        code -- code text

    Keyword Arguments:
        filename -- code path
        mode -- execution mode (exec, eval, single)
        tree -- current tree, if it was optimized
    """
    # Visitor does the parsing/position-annotation; we only need its tree.
    return Visitor(code, filename, mode, tree=tree).tree
def get_nodes(code, desired_type, path="__main__", mode="exec", tree=None):
    """
    Find all nodes of a given type

    Arguments:
        code -- code text
        desired_type -- ast Node or tuple

    Keyword Arguments:
        path -- code path
        mode -- execution mode (exec, eval, single)
        tree -- current tree, if it was optimized
    """
    parsed = parse(code, path, mode, tree)
    return _GetVisitor(parsed, desired_type).result
def __init__(self, *args, allow_comments=False, directory=None, **kwargs):
    """
    Constructor. Also see Entry.__init__.

    Args:
        allow_comments (bool): Whether to allow comments. Default False.
        directory (str): Optional. If the page should live in a subdirectory
            instead of at the web root, specify it here instead of making it
            part of the slug.
    """
    super().__init__(*args, **kwargs)
    self.dir = directory
    self.allow_comments = allow_comments
def __init__(self, *args, pubdate=None, excerpt=None, tags=None, allow_comments=True, **kwargs):
    """
    Constructor. Also see Entry.__init__.

    Args:
        pubdate (datetime): When the post was published.
        excerpt (str): An excerpt of the post body.
        tags (list): A list of Tag objects associated with the post.
        allow_comments (bool): Whether to allow comments. Default True.
    """
    super().__init__(*args, **kwargs)
    # Derive an excerpt from the body when none (or an empty one) was given.
    self.excerpt = excerpt or _get_excerpt(self.body)
    self.pubdate = pubdate
    self.tags = tags or []
    self.allow_comments = allow_comments
Get corresponding text in the code
Arguments:
lines -- code splitted by linebreak
node -- PyPosAST enhanced node
Keyword Arguments:
lstrip -- During extraction, strip lines with this arg (default="")
ljoin -- During extraction, join lines with this arg (default="\n")
strip -- After extra... | def extract_code(lines, node, lstrip="", ljoin="\n", strip=""):
first_line, first_col = node.first_line - 1, node.first_col
last_line, last_col = node.last_line - 1, node.last_col
if first_line == last_line:
return lines[first_line][first_col:last_col].strip(strip)
result = []
# Add fi... | 1,081,125 |
Parse HTML from text into array filled with tags end text.
Source code is a little bit unintuitive, because it is a state-machine parser.
For better understanding, look at http://bit.ly/1rXRcJj
Example::
>>> dhtmlparser._raw_split('<html><tag params="true"></html>')
['<html>', '<tag params="t... | def _raw_split(itxt):
echr = ""
buff = ["", "", "", ""]
content = ""
array = []
next_state = 0
inside_tag = False
escaped = False
COMMENT_START = ["-", "!", "<"]
COMMENT_END = ["-", "-"]
gc.disable()
for c in itxt:
# content
if next_state == StateEnum.... | 1,081,208 |
Go through `istack` and search endtag. Element at first index is considered
as opening tag.
Args:
istack (list): List of :class:`.HTMLElement` objects.
Returns:
int: Index of end tag or 0 if not found. | def _indexOfEndTag(istack):
if len(istack) <= 0:
return 0
if not istack[0].isOpeningTag():
return 0
cnt = 0
opener = istack[0]
for index, el in enumerate(istack[1:]):
if el.isOpeningTag() and \
el.getTagName().lower() == opener.getTagName().lower():
... | 1,081,209 |
Recursively go through element array and create DOM.
Args:
istack (list): List of :class:`.HTMLElement` objects.
Returns:
list: DOM tree as list. | def _parseDOM(istack):
ostack = []
end_tag_index = 0
def neither_nonpair_or_end_or_comment(el):
return not (el.isNonPairTag() or el.isEndTag() or el.isComment())
index = 0
while index < len(istack):
el = istack[index]
# check if this is pair tag
end_tag_index ... | 1,081,210 |
Standard output from `dhtmlparser` is single-linked tree. This will make it
double-linked.
Args:
dom (obj): :class:`.HTMLElement` instance.
parent (obj, default None): Don't use this, it is used in recursive
def makeDoubleLinked(dom, parent=None):
    """Turn the parser's single-linked tree into a double-linked one.

    Each node gets a ``parent`` attribute pointing at its container; the
    root's parent is ``None``.

    Args:
        dom (obj): :class:`.HTMLElement` instance.
        parent (obj, default None): Used internally by the recursion;
            callers should leave it as ``None``.
    """
    dom.parent = parent
    # The recursive call assigns each child's parent, so no explicit
    # per-child assignment is needed here.
    for node in dom.childs:
        makeDoubleLinked(node, dom)
Remove all tags from `dom` and obtain plaintext representation.
Args:
dom (str, obj, array): str, HTMLElement instance or array of elements.
Returns:
str: Plain string without tags. | def removeTags(dom):
# python 2 / 3 shill
try:
string_type = basestring
except NameError:
string_type = str
# initialize stack with proper value (based on dom parameter)
element_stack = None
if type(dom) in [list, tuple]:
element_stack = dom
elif isinstance(dom,... | 1,081,213 |
Register the extension with the application.
Args:
app (flask.Flask): The application to register with. | def init_app(self, app):
# NOTE(review): NavigationRule is presumably a url_rule_class subclass that
# records navbar metadata per route -- confirm against its definition.
# Binding copilot=self lets every rule created on this app reach back here.
app.url_rule_class = partial(NavigationRule, copilot=self)
# Register inject_context as a Flask context processor; whatever mapping it
# returns is merged into every template render context.
app.context_processor(self.inject_context)
Register a navbar entry with the copilot.
Args:
navbar_kwargs (dict): Arguments passed to the
:class:`NavbarEntry` instance. | def register_entry(self, navbar_kwargs):
# Add a new rule for each level in the path.
path = navbar_kwargs.pop('path')
# If a single object is used rather than an iterable (including
# a single string), wrap it before using.
if not hasattr(path, '__iter__') or isinstance... | 1,081,235 |
Parameters
----------
args:
context: str or None
style: str or None
palette: str or None
kwargs:
- reset
Raises
------
ValueError: | def use(*args, context=None, style=None, palette=None, **kwargs):
if kwargs.get('reset', False):
styles = ['default', ]
else:
styles = []
styles.extend(list(args))
styles.append(collect(context=context, style=style, palette=palette, **kwargs))
# apply mpls styles
return mpl... | 1,081,294 |
Parameters
----------
args:
context: str or None
style: str or None
palette: str or None
kwargs:
- reset
Raises
------
def temp(*args, context=None, style=None, palette=None, **kwargs):
    """Build a temporary matplotlib style context manager.

    Parameters
    ----------
    args:
    context: str or None
    style: str or None
    palette: str or None
    kwargs:
        - reset

    Raises
    ------
    ValueError:
    """
    # Caller-supplied style names first, then the settings collected from
    # context/style/palette; matplotlib applies them in order.
    style_list = [*args, collect(context=context, style=style, palette=palette, **kwargs)]
    return mpl.style.context(style_list, after_reset=kwargs.get('reset', False))
Make a HTML link out of an URL.
Args:
title (str): Text to show for the link.
url (str): URL the link will point to.
blank (bool): If True, appends target=_blank, noopener and noreferrer to
def make_link(title, url, blank=False):
    """Build an HTML anchor element for the given URL.

    Args:
        title (str): Text to show for the link.
        url (str): URL the link will point to.
        blank (bool): If True, appends target=_blank, noopener and
            noreferrer to the <a> element. Defaults to False.
    """
    extra = ' target="_blank" rel="noopener noreferrer"' if blank else ''
    return '<a href="{0}"{1}>{2}</a>'.format(url, extra, title)
Constructor.
Args:
root_path (str): Full path to the directory which contains the posts,
pages, templates etc. directories.
root_url (str): The root URL of your website.
site_title (str): The title of your website.
site_desc (str): A subtitle or description of your website. | def __init__(self, root_path, root_url, site_title, site_desc=None):
self.root_path = root_path
self.root_url = root_url
self.site_title = site_title
self.site_desc = site_desc
self.cm = russell.content.ContentManager(root_url) #pylint: disable=invalid-name
self.pages = self.cm.pages
self.posts = self... | 1,081,302 |
Get the URL of an asset. If asset hashes are added and one exists for
the path, it will be appended as a query string.
Args:
def get_asset_url(self, path):
    """Return the public URL of an asset.

    If an asset hash is registered for `path`, it is appended as a query
    string (cache busting).

    Args:
        path (str): Path to the file, relative to your "assets" directory.
    """
    asset_url = self.root_url + '/assets/' + path
    suffix = '?' + self.asset_hash[path] if path in self.asset_hash else ''
    return asset_url + suffix
Get all the posts added to the blog.
Args:
num (int): Optional. If provided, only return N posts (sorted by date,
most recent first).
tag (Tag): Optional. If provided, only return posts that have a
specific tag.
private (bool): By default (if False), private posts are not included.
def get_posts(self, num=None, tag=None, private=False):
    """Return the blog's posts, optionally filtered and truncated.

    Args:
        num (int): Optional. If provided, only return N posts.
        tag (Tag): Optional. If provided, only return posts that carry
            this tag.
        private (bool): By default (if False), private posts are
            excluded. If True, they are included as well.
    """
    selected = self.posts
    if not private:
        selected = [entry for entry in selected if entry.public]
    if tag:
        selected = [entry for entry in selected if tag in entry.tags]
    return selected[:num] if num else selected
Generate an XML sitemap.
Args:
path (str): The name of the file to write to.
https (bool): If True, links inside the sitemap with relative scheme
(e.g. example.com/something) will be set to HTTPS. If False (the
default), they will be set to plain HTTP. | def generate_sitemap(self, path='sitemap.xml', https=False):
# Build the sitemap XML via the russell.sitemap helper, then persist it
# through write_file (which also creates missing parent directories).
sitemap = russell.sitemap.generate_sitemap(self, https=https)
self.write_file(path, sitemap) | 1,081,317
Write a file of any type to the destination path. Useful for files like
robots.txt, manifest.json, and so on.
Args:
path (str): The name of the file to write to.
def write_file(self, path, contents):
    """Write a file of any type to the destination path. Useful for files
    like robots.txt, manifest.json, and so on.

    Args:
        path (str): The name of the file to write to, relative to the
            dist directory.
        contents (str or bytes): The contents to write.
    """
    path = self._get_dist_path(path)
    # exist_ok avoids the race between an isdir() check and makedirs();
    # the guard avoids makedirs('') raising when the path has no
    # directory component.
    directory = os.path.dirname(path)
    if directory:
        os.makedirs(directory, exist_ok=True)
    # Pick binary vs. text mode based on the payload type.
    mode = 'wb' if isinstance(contents, bytes) else 'w'
    with open(path, mode) as file:
        file.write(contents)
Returns a formatted string about the status, useful for logging.
args:
r - takes requests.models.Response | def request_status(r, detailed=False):
base_string = "HTTP {r.request.method} {r.request.url}: {r.status_code}"
if r.status_code in range(200,99):
string = base_string
if detailed is True:
string += " - {r.json()}"
else:
string += " - 👍"
return string.format(r=r)
else:
string = base_string
r... | 1,081,371 |
Adding retries to requests.get with exponential backoff.
Args:
url (str): The URL to fetch
max_backoff (int): The number of seconds to sleep at maximums
verbose (bool): Whether to print exceptions.
Returns:
Response: For successful requests return requests' response. `None` oth... | def get(url, max_backoff=32, verbose=False, **kwargs):
sleep_seconds = 1
while sleep_seconds <= max_backoff:
try:
# you may overwrite `timeout` via `kwargs`
response = requests.get(url, **{**{'timeout': 30}, **kwargs})
# for 4xx, return instantly, no hope of succ... | 1,081,544 |
Unescape `quote` in string `inp`.
Example usage::
>> unescape('hello \\"')
'hello "'
Args:
inp (str): String in which `quote` will be unescaped.
quote (char, default "): Specify which character will be unescaped.
Returns:
str: Unescaped string. | def unescape(inp, quote='"'):
if len(inp) < 2:
return inp
output = ""
unesc = False
for act in inp:
if act == quote and unesc:
output = output[:-1]
output += act
if act == "\\":
unesc = not unesc
else:
unesc = False
... | 1,082,169 |
Escape `quote` in string `inp`.
Example usage::
>>> escape('hello "')
'hello \\"'
>>> escape('hello \\"')
'hello \\\\"'
Args:
inp (str): String in which `quote` will be escaped.
quote (char, default "): Specify which character will be escaped.
Returns:
def escape(inp, quote='"'):
    """Escape every occurrence of `quote` in `inp` with a backslash.

    Args:
        inp (str): String in which `quote` will be escaped.
        quote (char, default "): Specify which character will be escaped.

    Returns:
        str: Escaped string.
    """
    # str.replace does the whole pass in C instead of a per-character
    # Python loop with quadratic string concatenation.
    return inp.replace(quote, '\\' + quote)
Re-orders a miz file into a folder (flattened)
Args:
miz_file_path: source miz file
target_dir: folder to flatten the content into
skip_options_file: do not re-order option file | def reorder(miz_file_path: typing.Union[str, Path],
target_dir: typing.Union[str, Path],
skip_options_file: bool,
):
miz_file_path = Path(miz_file_path).absolute()
if not miz_file_path.exists():
raise FileNotFoundError(miz_file_path)
... | 1,082,342 |
Flattens a MIZ file into the temp dir
Args:
overwrite: allow overwriting exiting files | def unzip(self, overwrite: bool = False):
if self.zip_content and not overwrite:
raise FileExistsError(str(self.temp_dir))
LOGGER.debug('unzipping miz to temp dir')
try:
with ZipFile(str(self.miz_path)) as zip_file:
LOGGER.debug('reading info... | 1,082,347 |
Return a JSON serializable type for ``o``.
Args:
obj (:py:class:`object`): the object to be serialized.
Raises:
:py:class:`AttributeError`:
when ``o`` is not a Python object.
Returns:
(dict): JSON serializable type for the given object. | def as_object(obj):
LOGGER.debug('as_object(%s)', obj)
if isinstance(obj, datetime.date):
return as_date(obj)
elif hasattr(obj, '__dict__'):
# populate dict with visible attributes
out = {k: obj.__dict__[k] for k in obj.__dict__ if not k.startswith('_')}
# populate d... | 1,082,497 |
Return the RFC3339 UTC string representation of the given date and time.
Args:
dat (:py:class:`datetime.date`): the object/type to be serialized.
Raises:
TypeError:
when ``o`` is not an instance of ``datetime.date``.
Returns:
def as_date(dat):
    """Return the RFC3339 UTC string representation of a date.

    Args:
        dat (:py:class:`datetime.date`): the object/type to be serialized.

    Raises:
        TypeError: when `dat` is not an instance of ``datetime.date``.

    Returns:
        (str) JSON serializable representation of the given date.
    """
    LOGGER.debug('as_date(%s)', dat)
    # Convert the date to a POSIX timestamp (UTC) before formatting.
    timestamp = calendar.timegm(dat.timetuple())
    return strict_rfc3339.timestamp_to_rfc3339_utcoffset(timestamp)
Create `endtags` to elements which looks like openers, but doesn't have
proper :attr:`HTMLElement.endtag`.
Args:
childs (list): List of childs (:class:`HTMLElement` obj) - typically
from :attr:`HTMLElement.childs` property.
Returns:
list: List of closed elements. | def _closeElements(childs, HTMLElement):
out = []
# close all unclosed pair tags
for e in childs:
if not e.isTag():
out.append(e)
continue
if not e.isNonPairTag() and not e.isEndTag() and not e.isComment() \
and e.endtag is None:
e.childs... | 1,082,910 |
Read password from external file and return it as a string. The file should
contain just a single line. This prevents hard-coding the password anywhere in
this script. IMPORTANT! The password is stored as plain text! Do NOT use with
your personal account!
Args:
def read_passwd_file(pass_file):
    """Read a password from an external file and return it as a string.

    The file should contain just a single line. This prevents hard-coding
    the password anywhere in this script. IMPORTANT! The password is
    stored as plain text! Do NOT use with your personal account!

    Args:
        pass_file (str): /path/to/pass_file
    """
    with open(pass_file) as handle:
        return handle.read().strip()
A Puush Account can be instantiated either with API key or
e-mail and password.
Parameters:
* api_key_or_email: API key if it's the only argument, e-mail if
password parameter is present.
* password (optional): The password for the Puush account if
... | def __init__(self, api_key_or_email, password=None):
# E-mail and password authentication
if password is not None:
email = api_key_or_email
self.is_premium, self._api_key, _, _ = auth(email, password)
# Direct API key authentication
else:
api_... | 1,083,105 |
Upload a file to the Puush account.
Parameters:
* f: The file. Either a path to a file or a file-like object. | def upload(self, f):
if hasattr(f, 'read'):
needs_closing = False
else:
f = open(f, 'rb')
needs_closing = True
# The Puush server can't handle non-ASCII filenames.
# The official Puush desktop app actually substitutes ? for
# non-ISO-8... | 1,083,108 |
Delete a file.
Parameters:
def delete(self, id):
    """Delete a file from the Puush account.

    Parameters:
        * id: The Puush ID of the file to delete.
    """
    response = self._api_request('del', data={'i': id})[0]
    # The API signals failure with a leading '-1' field.
    if response[0] == '-1':
        raise PuushError("File deletion failed.")
Get the 100x100 thumbnail of a file. Return the raw PNG data.
Parameters:
def thumbnail(self, id):
    """Fetch the 100x100 thumbnail of a file and return the raw PNG data.

    Parameters:
        * id: The Puush ID of the file to get the thumbnail of.
    """
    raw = self._raw_api_request('thumb', data={'i': id})
    # An empty/falsy response means the server could not produce a thumb.
    if not raw:
        raise PuushError("Getting thumbnail failed.")
    return raw
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.