docstring stringlengths 52 499 | function stringlengths 67 35.2k | __index_level_0__ int64 52.6k 1.16M |
|---|---|---|
Generate file in defined format representing the report of pipeline(s).
Args:
store (Store): report data.
report_format (str): currently "html" is supported only.
path (str): path where to write the report to. Missing sub folders will be created. | def generate(store, report_format, path):
success = False
if report_format in ['html']:
rendered_content = {
'html': generate_html
}[report_format](store)
if not os.path.isdir(path):
os.makedirs(path)
if rendered_content is not None:
# w... | 829,938 |
Find rule for given condition.
Args:
condition (str): Python condition as string.
Returns:
str, list, function: found rule name, list of AST tokens for condition
and verification function. | def find_rule(condition):
final_condition = re.sub('{{.*}}', '42', condition)
ast_tokens = Condition.get_tokens(final_condition)
ast_compressed_tokens = Condition.compress_tokens(ast_tokens)
name = 'undefined'
function = lambda tokens: False
if len(ast_compress... | 829,947 |
def get_creator_by_name(name):
    """Get creator function by name.

    Args:
        name (str): name of the creator function.

    Returns:
        function: creator function registered under ``name``.

    Raises:
        KeyError: when no creator is registered under ``name``.
    """
    creators = {
        'docker(container)': Container.creator,
        'shell': Bash.creator,
        'docker(image)': Image.creator,
        'python': Script.creator,
        'packer': Packer.creator,
        'ansible(simple)': Ansible.creator,
    }
    return creators[name]
def get_merged_env(self, include_os=False):
    """Copy and merge environment variables.

    Args:
        include_os (bool): when True, also include the process
            environment variables (default: False).

    Returns:
        dict: environment variables as defined in the pipeline,
            optionally merged on top of the system environment.
    """
    merged = dict(os.environ) if include_os else {}
    # Later levels override earlier ones; exactly levels 0..2 are read.
    for level in (0, 1, 2):
        merged.update(dict(self.pipeline.data.env_list[level]))
    return merged
def __handle_variable(self, shell_entry, output):
    """Save shell output under the configured variable name.

    Args:
        shell_entry (dict): shell based configuration (shell, docker
            container or Python); may carry a 'variable' key.
        output (list): list of strings representing output of last shell.
    """
    if 'variable' not in shell_entry:
        # Nothing configured; discard the output.
        return
    name = shell_entry['variable']
    self.pipeline.variables[name] = "\n".join(output)
Adding all files from given path to the object.
Args:
path (str): valid, existing directory | def add_path(self, path, path_filter=None):
for root, _, files in os.walk(path):
for filename in files:
full_path_and_filename = os.path.join(root, filename)
if path_filter is None or path_filter(full_path_and_filename):
relative_path_and_... | 830,032 |
Saving stored files at a given path (relative paths are added).
Args:
path (str): root path where to save the files. | def save(self, path):
for relative_path_and_filename, content in self.files.items():
full_path_and_filename = os.path.join(path, relative_path_and_filename)
full_path = os.path.dirname(full_path_and_filename)
if not os.path.isdir(full_path):
os.maked... | 830,033 |
def from_json(data):
    """Convert JSON into an in-memory file storage.

    Args:
        data (str): valid JSON mapping path/filenames to the base64
            encoding of the file content.

    Returns:
        InMemoryFiles: in-memory file storage.
    """
    storage = InMemoryFiles()
    storage.files = json.loads(data)
    return storage
def acquire(self, blocking=True, timeout=-1):
    """Acquire the underlying :attr:`lock`.

    Args:
        blocking (bool): See :meth:`threading.Lock.acquire`
        timeout (float): See :meth:`threading.Lock.acquire`

    Returns:
        bool: :obj:`True` if the lock was acquired, otherwise :obj:`False`
    """
    return self.lock.acquire(blocking, timeout)
def __call__(self, *args, **kwargs):
    """Trigger and discard all stored :class:`waiters <AioEventWaiter>`.

    Calls :meth:`AioEventWaiter.trigger` on every instance stored in
    :attr:`waiters`, then empties :attr:`waiters` — all under :attr:`lock`.

    Args:
        *args: Positional arguments passed to :meth:`AioEventWaiter.trigger`
        **kwargs: Keyword arguments passed to :meth:`AioEventWaiter.trigger`
    """
    with self.lock:
        pending = self.waiters
        for item in pending:
            item.trigger(*args, **kwargs)
        pending.clear()
def add_method(self, loop, callback):
    """Register a coroutine function with its event loop.

    Args:
        loop: The :class:`event loop <asyncio.BaseEventLoop>` instance
            on which to schedule callbacks.
        callback: The :term:`coroutine function` to add.
    """
    func, instance = get_method_vars(callback)
    # Key by function plus instance identity so bound methods of
    # different instances are tracked separately.
    key = (func, id(instance))
    self[key] = instance
    self.event_loop_map[key] = loop
def __call__(self, *args, **kwargs):
    """Schedule every stored coroutine callback.

    Args:
        *args: Positional arguments to pass to callbacks.
        **kwargs: Keyword arguments to pass to callbacks.
    """
    for event_loop, method in self.iter_methods():
        self.submit_coroutine(method(*args, **kwargs), event_loop)
Returns a map Layer.
Arguments:
queryset -- QuerySet for Layer
Keyword args:
stylename -- str name of style to apply | def layer(self, queryset, stylename=None):
cls = RasterLayer if hasattr(queryset, 'image') else VectorLayer
layer = cls(queryset, style=stylename)
try:
style = self.map.find_style(layer.stylename)
except KeyError:
self.map.append_style(layer.stylename, la... | 831,133 |
def zoom_bbox(self, bbox):
    """Zoom the map to a geometry's extent.

    Arguments:
    bbox -- OGRGeometry polygon to zoom map extent
    """
    try:
        bbox.transform(self.map.srs)
    except gdal.GDALException:
        # Reprojection failed; leave the current map extent untouched.
        return
    self.map.zoom_to_box(mapnik.Box2d(*bbox.extent))
def agg_dims(arr, stat):
    """Return an array with higher dimensions aggregated by a stat fn.

    Arguments:
    arr -- ndarray
    stat -- numpy or numpy.ma function name as str to call
    """
    if arr.ndim > 2:
        # Flatten everything past the first axis and aggregate per row.
        arr = arr.reshape(arr.shape[0], -1)
        axis = 1
    else:
        # 1D/2D input: aggregate over the whole array.
        axis = None
    # Masked arrays need the numpy.ma variants of the stat functions.
    lib = np.ma if hasattr(arr, 'mask') else np
    agg_fn = getattr(lib, stat)
    return agg_fn(arr, axis)
Returns a GeoQuerySet intersecting a tile boundary.
Arguments:
bbox -- tile extent as geometry
Keyword args:
z -- tile zoom level used as basis for geometry simplification
format -- vector tile format as str (pbf, geojson)
clip -- clip geometries to tile boundary as bool... | def tile(self, bbox, z=0, format=None, clip=True):
# Tile grid uses 3857, but GeoJSON coordinates should be in 4326.
tile_srid = 3857
bbox = getattr(bbox, 'geos', bbox)
clone = filter_geometry(self, intersects=bbox)
field = clone.geo_field
srid = field.srid
... | 834,959 |
Returns list of ndarrays averaged to a given number of periods.
Arguments:
periods -- desired number of periods as int | def aggregate_periods(self, periods):
try:
fieldname = self.raster_field.name
except TypeError:
raise exceptions.FieldDoesNotExist('Raster field not found')
arrays = self.arrays(fieldname)
arr = arrays[0]
if len(arrays) > 1:
if getattr... | 834,961 |
Returns a new RasterQuerySet with subsetted/summarized ndarrays.
Arguments:
geom -- geometry for masking or spatial subsetting
Keyword args:
stat -- any numpy summary stat method as str (min/max/mean/etc) | def summarize(self, geom, stat=None):
if not hasattr(geom, 'num_coords'):
raise TypeError('Need OGR or GEOS geometry, %s found' % type(geom))
clone = self._clone()
for obj in clone:
arr = obj.array(geom)
if arr is not None:
if stat:
... | 834,964 |
Generate a number in the range [0, num_buckets).
Args:
key (int): The key to hash.
num_buckets (int): Number of buckets to use.
Returns:
The bucket number `key` computes to.
Raises:
ValueError: If `num_buckets` is not a positive number. | def py_hash(key, num_buckets):
b, j = -1, 0
if num_buckets < 1:
raise ValueError('num_buckets must be a positive number')
while j < num_buckets:
b = int(j)
key = ((key * long(2862933555777941757)) + 1) & 0xffffffffffffffff
j = float(b + 1) * (float(1 << 31) / float((ke... | 836,141 |
Returns a Feature or FeatureCollection.
Arguments:
data -- Sequence or Mapping of Feature-like or FeatureCollection-like data | def as_feature(data):
if not isinstance(data, (Feature, FeatureCollection)):
if is_featurelike(data):
data = Feature(**data)
elif has_features(data):
data = FeatureCollection(**data)
elif isinstance(data, collections.Sequence):
data = FeatureCollectio... | 836,183 |
def env_or_default(var, default=None):
    """Get environment variable or provide default.

    Args:
        var (str): environment variable to search for.
        default (Optional[str]): value to return when ``var`` is unset.

    Returns:
        str: the variable's value, or ``default`` when the variable
            is not set.
    """
    # os.environ.get performs the lookup-with-fallback in one step,
    # avoiding the LBYL double lookup (`in` followed by `[]`).
    return os.environ.get(var, default)
def kms_encrypt(value, key, aws_config=None):
    """Encrypt a value with a KMS key.

    Args:
        value (str): value to encrypt.
        key (str): KMS key id or alias.
        aws_config (Optional[dict]): keyword arguments passed straight
            into ``boto3.session.Session`` (e.g. aws_access_key_id,
            aws_secret_access_key, region_name).

    Returns:
        The base64-encoded ciphertext blob.
    """
    aws_config = aws_config or {}
    aws = boto3.session.Session(**aws_config)
    client = aws.client('kms')
    enc_res = client.encrypt(KeyId=key,
                             Plaintext=value)
    # NOTE(review): ``n`` is presumably a bytes->native-str helper defined
    # elsewhere in this module (b64encode returns bytes) — confirm.
    return n(b64encode(enc_res['CiphertextBlob']))
async def send_command(self, command):
    """Send the given command to the server.

    Args:
        command (str): Command to send to the server.

    Raises:
        ConnectionResetError: If the connection with the server is lost.
        (Shouldn't it raise BrokenPipeError too ?)
    """
    payload = "{}\r\n".format(command).encode("ascii", errors="backslashreplace")
    self.write(payload)
    # Drain so the command does not sit in the write buffer.
    await self.drain()
def get_page_content(id):
    """Return the XHTML body of a Confluence page.

    Parameters:
    - id: id of a Confluence page.
    """
    response = _api.rest("/" + str(id) + "?expand=body.storage")
    page = _json.loads(response)
    return page["body"]["storage"]["value"]
def get_page_name(id):
    """Return the title of a Confluence page given its id.

    Parameters:
    - id: id of a Confluence page.
    """
    raw = _api.rest("/" + str(id) + "?expand=body.storage")
    return _json.loads(raw)["title"]
def get_page_id(name, space):
    """Return the id of a page based on its name and space.

    Parameters:
    - name: name of a Confluence page.
    - space: space the Confluence page is in.

    Returns:
        The page id, or the string "Page not found!" when no page matches.
    """
    data = _json.loads(_api.rest("?title=" + name.replace(" ", "%20") + "&"
                                 "spaceKey=" + space + "&expand=history"))
    try:
        return data["results"][0]["id"]
    except (IndexError, KeyError):
        # The previous bare ``except:`` swallowed every exception (even
        # KeyboardInterrupt); only a missing/empty result set means
        # "not found".
        return "Page not found!"
Create a page in Confluence.
Parameters:
- name: name of the Confluence page to create.
- parent_id: ID of the intended parent of the page.
- space: key of the space where the page will be created.
- content: XHTML content to be written to the page.
Notes: the page id can be obtained by getting ... | def create_page(name, parent_id, space, content):
data = {}
data["type"] = "page"
data["title"] = name
data["ancestors"] = [{"id": str(parent_id)}]
data["space"] = {"key": space}
data["body"] = {"storage": {"value": content, "representation": "storage"}}
return _api.rest("/", "POST", _j... | 836,752 |
Update a page in Confluence.
Parameters:
- id: ID of the page you wish to edit.
- name: name you would like to give to the page (usually the same name).
- space: space where the page lives.
- content: XHTML content to be written to the page.
Notes: it is required to try an initial update to find... | def edit_page(id, name, space, content):
data = {}
data["id"] = str(id)
data["type"] = "page"
data["title"] = name
data["space"] = {"key": space}
data["body"] = {"storage": {"value": content, "representation": "storage"}}
data["version"] = {"number": 1}
response = _api.rest("/" + s... | 836,753 |
def delete_page_full(id):
    """Delete a Confluence page together with all of its descendants.

    Parameters:
    - id: id of a Confluence page.

    Notes:
    - Getting a 204 error is expected! It means the page can no longer
      be found.
    """
    child_data = _json.loads(get_page_children(id))
    # Depth-first: remove each child subtree before the page itself.
    for child in child_data["results"]:
        delete_page_full(child["id"])
    return delete_page(id)
Sends the given command to the server.
Args:
*args: Command and arguments to be sent to the server.
Raises:
ConnectionResetError: If the connection with the server is
unexpectedely lost.
SMTPCommandFailedError: If the command fails.
Returns:... | async def do_cmd(self, *args, success=None):
if success is None:
success = (250,)
cmd = " ".join(args)
await self.writer.send_command(cmd)
code, message = await self.reader.read_reply()
if code not in success:
raise SMTPCommandFailedError(code,... | 836,759 |
Configure the device.
This method applies configuration to the device.
Args:
configlet (text): The configuration template.
plane (text): sdr or admin
attributes (dict): The dictionary of attributes used in template.
Returns:
A string with commit... | def config(self, configlet=None, plane='sdr', **attributes):
begin = time.time()
label = self._chain.target_device.config(configlet, plane, **attributes)
elapsed = time.time() - begin
if label:
self.emit_message("Configuration change last {:.0f}s. Label: {}".format(e... | 836,871 |
Rollback the configuration.
This method rolls back the configuration on the device.
Args:
label (text): The configuration label ID
plane: (text): sdr or admin
Returns:
A string with commit label or None | def rollback(self, label=None, plane='sdr'):
begin = time.time()
rb_label = self._chain.target_device.rollback(label=label, plane=plane)
elapsed = time.time() - begin
if label:
self.emit_message("Configuration rollback last {:.0f}s. Label: {}".format(elapsed, rb_labe... | 836,872 |
Discover the device details.
This method discover several device attributes.
Args:
logfile (file): Optional file descriptor for session logging. The file must be open for write.
The session is logged only if ``log_session=True`` was passed to the constructor.
... | def discovery(self, logfile=None, tracefile=None):
self._enable_logging(logfile=logfile, tracefile=tracefile)
self.log("'discovery' method is deprecated. Please 'connect' with force_discovery=True.")
self.log("Device discovery process started")
self.connect(logfile=logfile, for... | 836,873 |
def __init__(self, code, message=None, command=None):
    """Initialize a new SMTPCommandFailedError.

    Args:
        code (int): Error code returned by the SMTP server.
        message (str): Exception message, ideally providing help for
            the user.
        command (str): Command sent to the server that originated the
            error.
    """
    self.code = code
    self.command = command
    super().__init__(message)
def __init__(self, message=None, host=None):
    """Initialize the GeneralError object.

    Args:
        message (str): Custom message to be passed to the exception.
            NOTE(review): the original docstring claims that ``None``
            falls back to the class ``__doc__``, but this initializer
            stores ``None`` unchanged — presumably a subclass or
            ``__str__`` implements the fallback; confirm.
        host (str): Optional host identifier used to enhance the
            exception message.
    """
    self.message = message
    # Normalize to a string (or None when no host was given).
    self.hostname = str(host) if host else None
The main conversion process.
Args:
- string -- str, context to be converted
- lineno -- dict<int: int>, line number to actual offset mapping
Returns:
- str -- converted string | def convert(string, lineno):
def parse(string):
try:
return parso.parse(string, error_recovery=False,
version=os.getenv('F2FORMAT_VERSION', PARSO_VERSION[-1]))
except parso.ParserSyntaxError as error:
message = '%s: <%s: %r> from %r' % (err... | 837,517 |
Wrapper works for conversion.
Args:
- filename -- str, file to be converted | def f2format(filename):
print('Now converting %r...' % filename)
# fetch encoding
encoding = os.getenv('F2FORMAT_ENCODING', LOCALE_ENCODING)
lineno = dict() # line number -> file offset
content = list() # file content
with open(filename, 'r', encoding=encoding) as file:
lin... | 837,518 |
Log events to the console.
Args:
status (bool, Optional, Default=True)
whether logging to console should be turned on(True) or off(False)
level (string, Optional, Default=None) :
level of logging; whichever level is chosen all higher levels
will be logged.
... | def log_to_console(status=True, level=None):
if status:
if level is not None:
LOGGER.setLevel(level)
console_handler = logging.StreamHandler()
# create formatter
formatter = logging.Formatter('%(levelname)s-%(name)s: %(message)s')
# add formatter to handler... | 837,963 |
Log events to a file.
Args:
status (bool, Optional, Default=True)
whether logging to file should be turned on(True) or off(False)
filename (string, Optional, Default=None) :
path of file to log to
level (string, Optional, Default=None) :
level of logging;... | def log_to_file(status=True, filename=DEFAULT_LOG_FILE, level=None):
if status:
if level is not None:
LOGGER.setLevel(level)
try:
os.mkdir(os.path.dirname(filename))
except OSError:
pass
file_handler = logging.FileHandler(filename)
... | 837,964 |
Returns a builder with stemmers for all languages added to it.
Args:
languages (list): A list of supported languages. | def get_nltk_builder(languages):
all_stemmers = []
all_stopwords_filters = []
all_word_characters = set()
for language in languages:
if language == "en":
# use Lunr's defaults
all_stemmers.append(lunr.stemmer.stemmer)
all_stopwords_filters.append(stop_wo... | 839,373 |
def boundary_transform(seq, force_edges=True):
    """Wrap all word transitions with a boundary token character (``\\x00``).

    If desired (with ``force_edges`` set to ``True``), this inserts the
    boundary character at the beginning and end of the string.

    Arguments:
    - `seq`: iterable of characters to transform
    - `force_edges`: also mark the edges of the sequence (default True)

    Yields:
        Characters with duplicate boundary tokens collapsed.
    """
    gen = boundary_words(seq)
    if force_edges:
        gen = boundary_edges(gen)
    # ``yield from`` delegates to the generator directly instead of the
    # non-idiomatic per-character re-yield loop.
    yield from remove_duplicates(gen)
Wraps all word transitions with a boundary token character (\x00).
Arguments:
- `seq`: | def boundary_words(seq):
in_word = None
for char in seq:
if char == '\x00' and in_word is not None:
in_word = not in_word
elif char in WHITESPACE_CHARS:
if in_word is not None and in_word:
yield '\x00'
in_word = False
else:
... | 839,654 |
def remove_duplicates(seq):
    """Collapse runs of boundary token characters (``\\x00``).

    Yields each character of ``seq``, emitting at most one boundary
    token per consecutive run of them.

    Arguments:
    - `seq`: character iterable to filter
    """
    prev_was_boundary = False
    for ch in seq:
        is_boundary = (ch == '\x00')
        if is_boundary and prev_was_boundary:
            # Skip repeated boundary tokens.
            continue
        prev_was_boundary = is_boundary
        yield ch
Performs search and replace on the given input string `seq` using
the values stored in this trie. This method uses a O(n**2)
chart-parsing algorithm to find the optimal way of replacing
matches in the input.
Arguments:
- `seq`: | def replace(self, seq):
# #1: seq must be stored in a container with a len() function
seq = list(seq)
# chart is a (n-1) X (n) table
# chart[0] represents all matches of length (0+1) = 1
# chart[n-1] represents all matches/rewrites of length (n-1+1) = n
# chart[0... | 839,668 |
Greedily matches strings in ``seq``, and replaces them with their
node values.
Arguments:
- `seq`: an iterable of characters to perform search-and-replace on | def greedy_replace(self, seq):
if not self._suffix_links_set:
self._set_suffix_links()
# start at the root
current = self.root
buffered = ''
outstr = ''
for char in seq:
while char not in current:
if current.has_dict_suffix... | 839,669 |
def search(self, query_string):
    """Perform a search against the index using lunr query syntax.

    Results are sorted by score, most relevant first. For more
    programmatic querying use `lunr.Index.query`.

    Args:
        query_string (str): A string to parse into a Query.
    """
    query = self.create_query()
    # TODO: should QueryParser be a method of query? should it return one?
    QueryParser(query_string, query).parse()
    return self.query(query)
Convenience method to create a Query with the Index's fields.
Args:
fields (iterable, optional): The fields to include in the Query,
defaults to the Index's `all_fields`.
Returns:
Query: With the specified fields or all the fields in the Index. | def create_query(self, fields=None):
if fields is None:
return Query(self.fields)
non_contained_fields = set(fields) - set(self.fields)
if non_contained_fields:
raise BaseLunrException(
"Fields {} are not part of the index", non_contained_fields
... | 839,795 |
def _get_with_fallback(config, section, option, fallback):
    """Get a configuration value, using fallback for missing values.

    Parameters:
    config -- the configparser to try to extract the option value from.
    section -- the section to extract value from.
    option -- the name of the option to extract value from.
    fallback -- fallback value to return if no value was found.
    """
    if config.has_section(section) and config.has_option(section, option):
        return config.get(section, option)
    return fallback
Actually execute the program.
Calling this method can be done from tests to simulate executing the
application from command line.
Parameters:
options -- `optionparser` from config file.
exit_codeword -- an optional exit_message that will shut down Rewind. Used
for testin... | def run(options, exit_codeword=None):
QUERY_ENDP_OPT = 'query-bind-endpoint'
STREAM_ENDP_OPT = 'streaming-bind-endpoint'
ZMQ_NTHREADS = "zmq-nthreads"
if not options.has_section(config.DEFAULT_SECTION):
msg = "Missing default section, `{0}`."
fmsg = msg.format(config.DEFAULT_SECTIO... | 839,838 |
Entry point for Rewind.
Parses input and calls run() for the real work.
Parameters:
argv -- sys.argv arguments. Can be set for testing purposes.
returns -- the proposed exit code for the program. | def main(argv=None):
parser = argparse.ArgumentParser(
description='Event storage and event proxy.',
usage='%(prog)s <configfile>'
)
parser.add_argument('--exit-codeword', metavar="MSG", dest="exit_message",
default=None, help="An incoming message that makes"
... | 839,839 |
Inserts or updates an existing index within the vector.
Args:
- insert_index (int): The index at which the element should be
inserted.
- val (int|float): The value to be inserted into the vector.
- fn (callable, optional): An optional callable taking two
... | def upsert(self, insert_index, val, fn=None):
fn = fn or (lambda current, passed: passed)
self._magnitude = 0
position = self.position_for_index(insert_index)
if position < len(self.elements) and self.elements[position] == insert_index:
self.elements[position + 1] = ... | 839,900 |
def from_config(config, **options):
    """Instantiate a `LogEventStore` from config.

    Parameters:
    config    -- the configuration file options read from file(s).
    **options -- various options given to the specific event store;
                 only 'path' is expected here.
    """
    required = ('path',)
    # Validates that exactly the expected options were supplied.
    rconfig.check_config_options("LogEventStore", required, tuple(), options)
    return LogEventStore(options['path'])
Instantiate an `RotatedEventStore` from config.
Parameters:
_config -- the configuration file options read from file(s).
**options -- various options given to the specific event store. Shall
not be used with this event store. Warning will be logged
f... | def from_config(config, **options):
expected_args = ('prefix', 'realclass')
for arg in expected_args:
if arg not in options:
msg = "Required option missing: {0}"
raise rconfig.ConfigurationError(msg.format(arg))
# Not logging unrecognized opti... | 840,008 |
def _construct_filename(self, batchno):
    """Construct the path of a rotated database file.

    Parameters:
    batchno -- batch number for the rotated database.

    Returns the constructed path as a string.
    """
    filename = "{0}.{1}".format(self.prefix, batchno)
    return os.path.join(self.dirpath, filename)
Find the batch number that contains a certain event.
Parameters:
uuid -- the event uuid to search for.
returns -- a batch number, or None if not found. | def _find_batch_containing_event(self, uuid):
if self.estore.key_exists(uuid):
# Reusing already opened DB if possible
return self.batchno
else:
for batchno in range(self.batchno - 1, -1, -1):
# Iterating backwards here because we are more lik... | 840,013 |
Construct a persisted event store that is stored on disk.
Parameters:
events_per_batch -- number of events stored in a batch before rotating
the files. Defaults to 25000. That number is
arbitrary and should probably be configures so that
... | def __init__(self, events_per_batch=25000):
assert isinstance(events_per_batch, int), \
"Events per batch must be integer."
assert events_per_batch > 0, "Events per batch must be positive"
self.events_per_batch = events_per_batch
self.count = 0
self.stores = ... | 840,015 |
def get_language_stemmer(language):
    """Retrieve the SnowballStemmer for a particular language.

    Args:
        language (str): ISO-639-1 code of the language.
    """
    # Imported lazily so nltk remains an optional dependency.
    from lunr.languages import SUPPORTED_LANGUAGES
    from nltk.stem.snowball import SnowballStemmer
    nltk_language = SUPPORTED_LANGUAGES[language]
    return SnowballStemmer(nltk_language)
def nltk_stemmer(stemmer, token, i=None, tokens=None):
    """Wrap an NLTK SnowballStemmer for use in a lunr pipeline.

    Args:
        stemmer (SnowballStemmer): Stemmer instance performing the
            stemming.
        token (lunr.Token): The token to stem.
        i (int): The index of the token in a set (unused, kept for the
            pipeline callback signature).
        tokens (list): The token set (unused, kept for the pipeline
            callback signature).
    """
    def apply_stem(token_str, metadata=None):
        # metadata is accepted to satisfy Token.update's callback shape.
        return stemmer.stem(token_str)

    return token.update(apply_stem)
Ses the logging level of the script based on command line options.
Arguments:
- `verbose`:
- `quiet`: | def set_log_level(verbose, quiet):
if quiet:
verbose = -1
if verbose < 0:
verbose = logging.CRITICAL
elif verbose == 0:
verbose = logging.WARNING
elif verbose == 1:
verbose = logging.INFO
elif 1 < verbose:
verbose = logging.DEBUG
LOGGER.setLevel(verbo... | 840,077 |
Automatically detects the pattern file format, and determines
whether the Aho-Corasick string matching should pay attention to
word boundaries or not.
Arguments:
- `pattern_filename`:
- `encoding`:
- `on_word_boundaries`: | def detect_pattern_format(pattern_filename, encoding, on_word_boundaries):
tsv = True
boundaries = on_word_boundaries
with open_file(pattern_filename) as input_file:
for line in input_file:
line = line.decode(encoding)
if line.count('\t') != 1:
tsv = Fals... | 840,078 |
Process escaped characters in ``sval``.
Arguments:
- `sval`: | def sub_escapes(sval):
sval = sval.replace('\\a', '\a')
sval = sval.replace('\\b', '\x00')
sval = sval.replace('\\f', '\f')
sval = sval.replace('\\n', '\n')
sval = sval.replace('\\r', '\r')
sval = sval.replace('\\t', '\t')
sval = sval.replace('\\v', '\v')
sval = sval.replace('\\\\',... | 840,079 |
Constructs a finite state machine for performing string rewriting.
Arguments:
- `pattern_filename`:
- `pattern_format`:
- `encoding`:
- `on_word_boundaries`: | def build_trie(pattern_filename, pattern_format, encoding, on_word_boundaries):
boundaries = on_word_boundaries
if pattern_format == 'auto' or not on_word_boundaries:
tsv, boundaries = detect_pattern_format(pattern_filename, encoding,
on_word_boundari... | 840,080 |
Rewrites a string using the given trie object.
Arguments:
- `sval`:
- `trie`:
- `boundaries`:
- `slow`: | def rewrite_str_with_trie(sval, trie, boundaries = False, slow = False):
if boundaries:
sval = fsed.ahocorasick.boundary_transform(sval)
if slow:
sval = trie.replace(sval)
else:
sval = trie.greedy_replace(sval)
if boundaries:
sval = ''.join(fsed.ahocorasick.boundary_... | 840,082 |
Add/replace FITS key
Add/replace the key keyname with value keyval of type keytype in filename.
Parameters:
----------
keyname : str
FITS Keyword name.
keyval : str
FITS keyword value.
keytype: str
FITS keyword type: int, float, str or bool.
filaname : str
F... | def add_key_val(keyname, keyval, keytype, filename, extnum):
funtype = {'int': int, 'float': float, 'str': str, 'bool': bool}
if keytype not in funtype:
raise ValueError('Undefined keyword type: ', keytype)
with fits.open(filename, "update") as hdulist:
hdulist[extnum].header[keyname] ... | 840,260 |
Normalize ``value`` to an Inspire category.
Args:
value(str): an Inspire category to properly case, or an arXiv category
to translate to the corresponding Inspire category.
Returns:
str: ``None`` if ``value`` is not a non-empty string,
otherwise the corresponding Inspir... | def classify_field(value):
if not (isinstance(value, six.string_types) and value):
return
schema = load_schema('elements/inspire_field')
inspire_categories = schema['properties']['term']['enum']
for inspire_category in inspire_categories:
if value.upper() == inspire_category.upper... | 840,463 |
def load_schema(schema_name, resolved=False):
    """Load the given schema from wherever it's installed.

    Args:
        schema_name (str): Name of the schema to load, for example
            'authors'.
        resolved (bool): If True, return the resolved schema, that is
            with all the $refs replaced by their targets.

    Returns:
        dict: the schema with the given name.
    """
    # json.load streams straight from the file object; the previous
    # dead ``schema_data = ''`` pre-initialization and the
    # ``json.loads(fd.read())`` round-trip were removed.
    with open(get_schema_path(schema_name, resolved)) as schema_fd:
        return json.load(schema_fd)
def _load_schema_for_record(data, schema=None):
    """Load the schema for a given record.

    Args:
        data (dict): record data.
        schema (Union[dict, str]): schema to validate against; when
            ``None`` it is taken from the record's ``$schema`` key.

    Returns:
        dict: the loaded schema.

    Raises:
        SchemaKeyNotFound: if ``schema`` is ``None`` and the record has
            no ``$schema`` key.
    """
    if schema is None:
        if '$schema' not in data:
            raise SchemaKeyNotFound(data=data)
        schema = data['$schema']
    # A string schema is a name to be resolved; a dict is used as-is.
    if isinstance(schema, six.string_types):
        return load_schema(schema_name=schema)
    return schema
Normalize collaboration string.
Args:
collaboration: a string containing collaboration(s) or None
Returns:
list: List of extracted and normalized collaborations
Examples:
>>> from inspire_schemas.utils import normalize_collaboration
>>> normalize_collaboration('for the CMS... | def normalize_collaboration(collaboration):
if not collaboration:
return []
collaboration = collaboration.strip()
if collaboration.startswith('(') and collaboration.endswith(')'):
collaboration = collaboration[1:-1]
collaborations = _RE_AND.split(collaboration)
collaborations ... | 840,472 |
Get the license abbreviation from an URL.
Args:
url(str): canonical url of the license.
Returns:
str: the corresponding license abbreviation.
Raises:
ValueError: when the url is not recognized | def get_license_from_url(url):
if not url:
return
split_url = urlsplit(url, scheme='http')
if split_url.netloc.lower() == 'creativecommons.org':
if 'publicdomain' in split_url.path:
match = _RE_PUBLIC_DOMAIN_URL.match(split_url.path)
if match is None:
... | 840,473 |
Convert back a ``publication_info`` value from the new format to the old.
Does the inverse transformation of :func:`convert_old_publication_info_to_new`,
to be used whenever we are sending back records from Labs to Legacy.
Args:
publication_infos: a ``publication_info`` in the new format.
Ret... | def convert_new_publication_info_to_old(publication_infos):
def _needs_a_hidden_pubnote(journal_title, journal_volume):
return (
journal_title in _JOURNALS_THAT_NEED_A_HIDDEN_PUBNOTE and
journal_volume in _JOURNALS_THAT_NEED_A_HIDDEN_PUBNOTE[journal_title]
)
result ... | 840,475 |
Add an affiliation.
Args:
value (string): affiliation value
curated_relation (bool): is relation curated
record (dict): affiliation JSON reference | def add_affiliation(self, value, curated_relation=None, record=None):
if value:
affiliation = {
'value': value
}
if record:
affiliation['record'] = record
if curated_relation is not None:
affiliation['curate... | 840,497 |
Set a unique ID.
If a UID of a given schema already exists in a record it will
be overwritten, otherwise it will be appended to the record.
Args:
uid (string): unique identifier.
schema (Optional[string]): schema of the unique identifier. If
``None``, th... | def set_uid(self, uid, schema=None):
try:
uid, schema = author_id_normalize_and_schema(uid, schema)
except UnknownUIDSchema:
# Explicit schema wasn't provided, and the UID is too little
# to figure out the schema of it, this however doesn't mean
#... | 840,498 |
Return the page range or the article id of a publication_info entry.
Args:
publication_info(dict): a publication_info field entry of a record
separator(basestring): optional page range symbol, defaults to a single dash
Returns:
string: the page range or the article ... | def get_page_artid_for_publication_info(publication_info, separator):
if 'artid' in publication_info:
return publication_info['artid']
elif 'page_start' in publication_info and 'page_end' in publication_info:
page_start = publication_info['page_start']
page_... | 840,691 |
Return the page range or the article id of a record.
Args:
separator(basestring): optional page range symbol, defaults to a single dash
Returns:
string: the page range or the article id of the record.
Examples:
>>> record = {
def get_page_artid(self, separator='-'):
    """Return the page range or the article id of the record.

    Args:
        separator (basestring): optional page range symbol, defaults to a
            single dash.

    Returns:
        string: the page range or the article id of the record.
    """
    # Only the first publication_info entry is considered.
    first_publication_info = get_value(
        self.record, 'publication_info[0]', default={})
    return LiteratureReader.get_page_artid_for_publication_info(
        first_publication_info, separator)
Add a keyword.
Args:
keyword(str): keyword to add.
schema(str): schema to which the keyword belongs.
def add_keyword(self, keyword, schema=None, source=None):
    """Add a keyword to the record.

    Args:
        keyword (str): keyword to add.
        schema (str): schema to which the keyword belongs.
        source (str): source for the keyword.
    """
    entry = self._sourced_dict(source, value=keyword)
    if schema is not None:
        entry['schema'] = schema
    self._append_to('keywords', entry)
Add a figure.
Args:
key (string): document key
url (string): document url
Keyword Args:
caption (string): simple description
label (string):
material (string):
original_url (string): original url
filename (string): curr... | def add_figure(self, key, url, **kwargs):
figure = self._check_metadata_for_file(key=key, url=url, **kwargs)
for dict_key in (
'caption',
'label',
'material',
'filename',
'url',
'original_url',
):
if kw... | 840,786 |
Adds document to record
Args:
key (string): document key
url (string): document url
Keyword Args:
description (string): simple description
fulltext (bool): mark if this is a full text
hidden (bool): is document should be hidden
mate... | def add_document(self, key, url, **kwargs):
document = self._check_metadata_for_file(key=key, url=url, **kwargs)
for dict_key in (
'description',
'fulltext',
'hidden',
'material',
'original_url',
'url',
'filena... | 840,787 |
Exception raised when a UID is not matching provided schema.
Args:
schema (string): given schema
def __init__(self, schema, uid):
    """Exception raised when a UID does not match the provided schema.

    Args:
        schema (string): given schema.
        uid (string): UID which conflicts with the schema.
    """
    template = 'UID "{}" is not of the schema "{}".'
    super(SchemaUIDConflict, self).__init__(template.format(uid, schema))
Exception raised when a schema of a UID is unknown.
Args:
def __init__(self, uid):
    """Exception raised when the schema of a UID is unknown.

    Args:
        uid (string): given UID.
    """
    template = 'Schema of UID "{}" is unrecognized.'
    super(UnknownUIDSchema, self).__init__(template.format(uid))
Function to produce a general 2D plot.
Args:
x (list): x points.
y (list): y points.
filename (str): Filename of the output image.
title (str): Title of the plot. Default is '' (no title).
x_label (str): x-axis label.
y_label (str): y-axis label. | def gnuplot_2d(x, y, filename, title='', x_label='', y_label=''):
_, ext = os.path.splitext(filename)
if ext != '.png':
filename += '.png'
gnuplot_cmds = \
scr = _GnuplotScriptTemp(gnuplot_cmds)
data = _GnuplotDataTemp(x, y)
args_dict = {
'filename': filename,
... | 841,204 |
Function to produce a general 3D plot from a 2D matrix.
Args:
z_matrix (list): 2D matrix.
filename (str): Filename of the output image.
title (str): Title of the plot. Default is '' (no title).
x_label (str): x-axis label.
y_label (str): y-axis label. | def gnuplot_3d_matrix(z_matrix, filename, title='', x_label='', y_label=''):
_, ext = os.path.splitext(filename)
if ext != '.png':
filename += '.png'
gnuplot_cmds = \
scr = _GnuplotScriptTemp(gnuplot_cmds)
data = _GnuplotDataZMatrixTemp(z_matrix)
args_dict = {
'filena... | 841,205 |
Append the ``element`` to the ``field`` of the record.
This method is smart: it does nothing if ``element`` is empty and
creates ``field`` if it does not exit yet.
Args:
:param field: the name of the field of the record to append to
:type field: string
def _append_to(self, field, element):
    """Append ``element`` to the ``field`` list of the record.

    Does nothing if ``element`` is empty (as defined by the module-level
    ``EMPTIES``), and creates ``field`` if it does not exist yet.

    Args:
        field (string): name of the record field to append to.
        element: value to append; ignored when it is in ``EMPTIES``.
    """
    if element not in EMPTIES:
        # setdefault both creates the list if missing and returns it,
        # so the original second lookup via .get() was redundant.
        self.obj.setdefault(field, []).append(element)
Add name variant.
Args:
:param name: name variant for the current author.
def add_name_variant(self, name):
    """Add a name variant.

    Args:
        name (string): name variant for the current author.
    """
    self._ensure_field('name', {})
    variants = self.obj['name'].setdefault('name_variants', [])
    variants.append(name)
Add native name.
Args:
:param name: native name for the current author.
def add_native_name(self, name):
    """Add a native name.

    Args:
        name (string): native name for the current author.
    """
    self._ensure_field('name', {})
    native_names = self.obj['name'].setdefault('native_names', [])
    native_names.append(name)
Add previous name.
Args:
:param name: previous name for the current author.
def add_previous_name(self, name):
    """Add a previous name.

    Args:
        name (string): previous name for the current author.
    """
    self._ensure_field('name', {})
    previous_names = self.obj['name'].setdefault('previous_names', [])
    previous_names.append(name)
Add email address.
Args:
:param email: email of the author.
:type email: string
:param hidden: if email is public or not.
:type hidden: boolean | def add_email_address(self, email, hidden=None):
existing_emails = get_value(self.obj, 'email_addresses', [])
found_email = next(
(existing_email for existing_email in existing_emails if existing_email.get('value') == email),
None
)
if found_email is None... | 841,371 |
Add a personal website.
Args:
:param url: url to the person's website.
:type url: string
:param description: short description of the website.
def add_url(self, url, description=None):
    """Add a personal website.

    Args:
        url (string): url to the person's website.
        description (string): short description of the website.
    """
    # Use a fresh name instead of shadowing the ``url`` parameter.
    entry = {'value': url}
    if description:
        entry['description'] = description
    self._append_to('urls', entry)
Add a private note.
Args:
:param comment: comment about the author.
:type comment: string
:param source: the source of the comment.
def add_private_note(self, note, source=None):
    """Add a private note.

    Args:
        note (string): comment about the author.
        source (string): the source of the comment.
    """
    entry = {'value': note}
    if source:
        entry['source'] = source
    self._append_to('_private_notes', entry)
Add artid, start, end pages to publication info of a reference.
Args:
page_start(Optional[string]): value for the field page_start
page_end(Optional[string]): value for the field page_end
artid(Optional[string]): value for the field artid
Raises:
ValueEr... | def set_page_artid(self, page_start=None, page_end=None, artid=None):
if page_end and not page_start:
raise ValueError('End_page provided without start_page')
self._ensure_reference_field('publication_info', {})
publication_info = self.obj['reference']['publication_info']
... | 841,934 |
Reload a specific NApp or all Napps.
Args:
napp (list): NApp list to be reload.
Raises:
requests.HTTPError: When there's a server error. | def reload_napps(self, napps=None):
if napps is None:
napps = []
api = self._config.get('kytos', 'api')
endpoint = os.path.join(api, 'api', 'kytos', 'core', 'reload',
'all')
response = self.make_request(endpoint)
... | 842,500 |
Send an user_dict to NApps server using POST request.
Args:
user_dict(dict): Dictionary with user attributes.
Returns:
def register(self, user_dict):
    """Send an user_dict to the NApps server using a POST request.

    Args:
        user_dict (dict): Dictionary with user attributes.

    Returns:
        string: the response of the NApps server.
    """
    # Trailing '' keeps the endpoint terminated with a slash.
    users_endpoint = os.path.join(self._config.get('napps', 'api'), 'users', '')
    response = self.make_request(users_endpoint, method='POST', json=user_dict)
    return response.content.decode('utf-8')
Enable a list of NApps.
Args:
def enable_napps(cls, napps):
    """Enable a list of NApps.

    Args:
        napps (list): List of NApps.
    """
    manager = NAppsManager()
    for napp in napps:
        # Point the shared manager at this NApp before enabling it.
        manager.set_napp(*napp)
        LOG.info('NApp %s:', manager.napp_id)
        cls.enable_napp(manager)
The group index with respect to wavelength.
Args:
wavelength (float, list, None): The wavelength(s) the group
index will be evaluated at.
Returns:
def ng(self, wavelength):
    """The group index with respect to wavelength.

    Args:
        wavelength (float, list, None): The wavelength(s) the group
            index will be evaluated at.

    Returns:
        float, list: The group index at the target wavelength(s).
    """
    # n_g = n - lambda * dn/dlambda; wavelength is converted from nm to m.
    refractive_index = self.n(wavelength)
    wavelength_m = wavelength * 1.e-9
    return refractive_index - wavelength_m * self.nDer1(wavelength)
The group velocity dispersion (GVD) with respect to wavelength.
Args:
wavelength (float, list, None): The wavelength(s) the GVD will
be evaluated at.
Returns:
def gvd(self, wavelength):
    """The group velocity dispersion (GVD) with respect to wavelength.

    Args:
        wavelength (float, list, None): The wavelength(s) the GVD will
            be evaluated at.

    Returns:
        float, list: The GVD at the target wavelength(s).
    """
    # GVD = lambda^3 / (2*pi*c^2) * d^2n/dlambda^2, wavelength in m.
    wavelength_m_cubed = (wavelength * 1.e-9) ** 3.
    prefactor = wavelength_m_cubed / (2. * spc.pi * spc.c ** 2.)
    return prefactor * self.nDer2(wavelength)
Helpful function to evaluate Cauchy equations.
Args:
wavelength (float, list, None): The wavelength(s) the
Cauchy equation will be evaluated at.
coefficients (list): A list of the coefficients of
the Cauchy equation.
Returns:
float, l... | def _cauchy_equation(wavelength, coefficients):
n = 0.
for i, c in enumerate(coefficients):
exponent = 2*i
n += c / wavelength**exponent
return n | 842,659 |
Checks if the specified model instance matches the class model.
By default this method will raise a `ValueError` if the model is not of
expected type.
Args:
model (Model) : The instance to be type checked
raise_error (bool) : Flag to specify whether to raise error on
... | def _isinstance(self, model, raise_error=True):
rv = isinstance(model, self.__model__)
if not rv and raise_error:
raise ValueError('%s is not of type %s' % (model, self.__model__))
return rv | 842,727 |
Returns a preprocessed dictionary of parameters.
Use this to filter the kwargs passed to `new`, `create`,
`build` methods.
Args:
**kwargs: a dictionary of parameters | def _preprocess_params(cls, kwargs):
# kwargs.pop('csrf_token', None)
for attr, val in kwargs.items():
if cls.is_the_primary_key(attr) and cls._prevent_primary_key_initialization_:
del kwargs[attr]
continue
if val == "":
# ... | 842,728 |
Updates an instance.
Args:
**kwargs : Arbitrary keyword arguments. Column names are
keywords and their new values are the values.
Examples:
>>> customer.update(email="newemail@x.com", name="new") | def update(self, **kwargs):
kwargs = self._preprocess_params(kwargs)
kwargs = self.preprocess_kwargs_before_update(kwargs)
for key, value in kwargs.iteritems():
cls = type(self)
if not hasattr(cls, key) or isinstance(getattr(cls, key), property):
... | 842,730 |
Same as SQLAlchemy's filter_by. Additionally this accepts
two special keyword arguments `limit` and `reverse` for limiting
the results and reversing the order respectively.
Args:
**kwargs: filter parameters
Examples:
def filter_by(cls, **kwargs):
    """Same as SQLAlchemy's ``filter_by`` with two extra keywords.

    ``limit`` caps the number of results and ``reverse`` reverses the
    ordering (by descending primary key).

    Args:
        **kwargs: filter parameters, plus optional ``limit`` and ``reverse``.
    """
    # Pop the special keywords so they are not passed to SQLAlchemy.
    limit = kwargs.pop('limit', None)
    reverse = kwargs.pop('reverse', False)
    query = cls.query.filter_by(**kwargs)
    if reverse:
        query = query.order_by(cls.id.desc())
    return query.limit(limit) if limit else query
Adds a model instance to session and commits the
transaction.
Args:
model: The instance to add.
Examples:
>>> customer = Customer.new(name="hari", email="hari@gmail.com")
>>> Customer.add(customer)
hari@gmail.com | def add(cls, model, commit=True):
if not isinstance(model, cls):
raise ValueError('%s is not of type %s' % (model, cls))
cls.session.add(model)
try:
if commit:
cls.session.commit()
return model
except:
cls.session.r... | 842,735 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.