| _id (string, 2-7 chars) | title (string, 1-88 chars) | partition (string, 3 classes) | text (string, 31-13.1k chars) | language (string, 1 class) | meta_information (dict) |
|---|---|---|---|---|---|
q258100 | BaseWindow.clear_values | validation | def clear_values(self, red=0.0, green=0.0, blue=0.0, alpha=0.0, depth=1.0):
"""
Sets the clear values for the window buffer.
Args:
red (float): red component
green (float): green component
blue (float): blue component
| python | {
"resource": ""
} |
q258101 | BaseWindow.keyboard_event | validation | def keyboard_event(self, key, action, modifier):
"""
Handles the standard keyboard events such as camera movements,
taking a screenshot, closing the window etc.
Can be overriden add new keyboard events. Ensure this method
is also called if you want to keep the standard features.
Arguments:
key: The key that was pressed or released
action: The key action. Can be `ACTION_PRESS` or `ACTION_RELEASE`
modifier: Modifiers such as holding shift or ctrl
"""
# The well-known standard key for quick exit
if key == self.keys.ESCAPE:
self.close()
return
# Toggle pause time
if key == self.keys.SPACE and action == self.keys.ACTION_PRESS:
self.timer.toggle_pause()
# Camera movement
# Right
if key == self.keys.D:
if action == self.keys.ACTION_PRESS:
self.sys_camera.move_right(True)
elif action == self.keys.ACTION_RELEASE:
self.sys_camera.move_right(False)
# Left
elif key == self.keys.A:
if action == self.keys.ACTION_PRESS:
self.sys_camera.move_left(True)
elif action == self.keys.ACTION_RELEASE:
self.sys_camera.move_left(False)
# Forward
elif key == self.keys.W:
if action == self.keys.ACTION_PRESS:
self.sys_camera.move_forward(True)
elif action == self.keys.ACTION_RELEASE:
self.sys_camera.move_forward(False)
# Backwards
elif key == self.keys.S:
if action == self.keys.ACTION_PRESS:
self.sys_camera.move_backward(True)
elif action == self.keys.ACTION_RELEASE:
self.sys_camera.move_backward(False)
# UP
elif key == self.keys.Q:
if action == self.keys.ACTION_PRESS:
self.sys_camera.move_down(True)
| python | {
"resource": ""
} |
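The docstring above asks subclasses to call the base implementation so the standard bindings survive; a minimal sketch of such an override (the `R` binding and `on_reload` handler are illustrative, not part of the library):

```python
class MyWindow(BaseWindow):
    def keyboard_event(self, key, action, modifier):
        # Keep the standard exit/pause/camera handling from BaseWindow
        super().keyboard_event(key, action, modifier)
        # Hypothetical extra binding: R triggers a resource reload
        if key == self.keys.R and action == self.keys.ACTION_PRESS:
            self.on_reload()

    def on_reload(self):
        print("reload requested")
```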
q258102 | BaseWindow.cursor_event | validation | def cursor_event(self, x, y, dx, dy):
"""
The standard mouse movement event method.
Can be overridden to add new functionality.
By default this feeds the system camera with new values.
| python | {
"resource": ""
} |
q258103 | BaseWindow.set_default_viewport | validation | def set_default_viewport(self):
"""
Calculates the viewport based on the configured aspect ratio in settings.
Will add black borders if the window does not match the viewport.
"""
# The expected height with the current viewport width
expected_height = int(self.buffer_width / self.aspect_ratio)
# How | python | {
"resource": ""
} |
q258104 | Timer.start | validation | def start(self):
"""Start the timer"""
self.music.start()
if | python | {
"resource": ""
} |
q258105 | Timer.toggle_pause | validation | def toggle_pause(self):
"""Toggle pause mode"""
self.controller.playing | python | {
"resource": ""
} |
q258106 | SceneLoader.supports_file | validation | def supports_file(cls, meta):
"""Check if the loader has a supported file extension"""
path = Path(meta.path)
for ext in cls.file_extensions:
| python | {
"resource": ""
} |
q258107 | Tracks.get | validation | def get(self, name) -> Track:
"""
Get or create a Track object.
:param name: Name of the track
:return: Track object
"""
name = name.lower()
track = self.track_map.get(name)
if | python | {
"resource": ""
} |
q258108 | find_commands | validation | def find_commands(command_dir: str) -> List[str]:
"""
Get all command names in a folder
:return: List of command names
| python | {
"resource": ""
} |
q258109 | Settings.update | validation | def update(self, **kwargs):
"""Override settings values"""
for | python | {
"resource": ""
} |
q258110 | Settings.add_program_dir | validation | def add_program_dir(self, directory):
"""Hack in program directory"""
dirs = list(self.PROGRAM_DIRS)
| python | {
"resource": ""
} |
q258111 | Settings.add_texture_dir | validation | def add_texture_dir(self, directory):
"""Hack in texture directory"""
dirs = list(self.TEXTURE_DIRS)
| python | {
"resource": ""
} |
q258112 | Settings.add_data_dir | validation | def add_data_dir(self, directory):
"""Hack in a data directory"""
dirs = list(self.DATA_DIRS)
| python | {
"resource": ""
} |
q258113 | VAO.render | validation | def render(self, program: moderngl.Program, mode=None, vertices=-1, first=0, instances=1):
"""
Render the VAO.
Args:
program: The ``moderngl.Program``
Keyword Args:
mode: Override the draw mode (``TRIANGLES`` etc)
vertices (int): The number of vertices to transform
first (int): The | python | {
"resource": ""
} |
q258114 | VAO.transform | validation | def transform(self, program: moderngl.Program, buffer: moderngl.Buffer,
mode=None, vertices=-1, first=0, instances=1):
"""
Transform vertices. Stores the output in a single buffer.
Args:
program: The ``moderngl.Program``
buffer: The ``moderngl.buffer`` to store the output
Keyword Args:
mode: Draw mode (for example ``moderngl.POINTS``)
vertices (int): The number of vertices to transform
first (int): The index of the first vertex to start | python | {
"resource": ""
} |
q258115 | VAO.index_buffer | validation | def index_buffer(self, buffer, index_element_size=4):
"""
Set the index buffer for this VAO
Args:
buffer: ``moderngl.Buffer``, ``numpy.array`` or ``bytes``
Keyword Args:
index_element_size (int): Byte size of each element. 1, 2 or 4
"""
if type(buffer) not in (moderngl.Buffer, numpy.ndarray, bytes):
raise VAOError("buffer parameter must be a moderngl.Buffer, numpy.ndarray or bytes instance") | python | {
"resource": ""
} |
q258116 | VAO.instance | validation | def instance(self, program: moderngl.Program) -> moderngl.VertexArray:
"""
Obtain the ``moderngl.VertexArray`` instance for the program.
The instance is only created once and cached internally.
Returns: ``moderngl.VertexArray`` instance
"""
vao = self.vaos.get(program.glo)
if vao:
return vao
program_attributes = [name for name, attr in program._members.items() if isinstance(attr, moderngl.Attribute)]
# Make sure all attributes are covered
for attrib_name in program_attributes:
# Ignore built in attributes for now
if attrib_name.startswith('gl_'):
continue
# Do we have a buffer mapping to this attribute?
if not sum(buffer.has_attribute(attrib_name) for buffer in self.buffers):
raise VAOError("VAO {} doesn't have attribute {} for program {}".format(
self.name, attrib_name, program.name))
vao_content = []
# Pick out the attributes we can actually map
for buffer in self.buffers:
content = buffer.content(program_attributes)
if content:
| python | {
"resource": ""
} |
q258117 | VAO.release | validation | def release(self, buffer=True):
"""
Destroy the vao object
Keyword Args:
buffer (bool): also release buffers
"""
for key, vao in self.vaos.items():
vao.release()
if | python | {
"resource": ""
} |
q258118 | MeshProgram.draw | validation | def draw(self, mesh, projection_matrix=None, view_matrix=None, camera_matrix=None, time=0):
"""
Draw code for the mesh. Should be overridden.
:param projection_matrix: projection_matrix (bytes)
:param view_matrix: view_matrix (bytes)
:param camera_matrix: camera_matrix (bytes)
:param time: The | python | {
"resource": ""
} |
q258119 | parse_package_string | validation | def parse_package_string(path):
"""
Parse the effect package string.
Can contain the package python path or path to effect class in an effect package.
Examples::
# Path to effect package
examples.cubes
# Path to effect class
examples.cubes.Cubes
Args:
path: python path to effect package. May also include effect class | python | {
"resource": ""
} |
q258120 | EffectRegistry.get_dirs | validation | def get_dirs(self) -> List[str]:
"""
Get all effect directories for registered effects.
"""
| python | {
"resource": ""
} |
q258121 | EffectRegistry.get_effect_resources | validation | def get_effect_resources(self) -> List[Any]:
"""
Get all resources registered in effect packages.
These are typically | python | {
"resource": ""
} |
q258122 | EffectRegistry.add_package | validation | def add_package(self, name):
"""
Registers a single package
:param name: (str) The effect package to add
"""
name, cls_name = parse_package_string(name)
if name in self.package_map:
return
package = EffectPackage(name)
| python | {
"resource": ""
} |
q258123 | EffectRegistry.get_package | validation | def get_package(self, name) -> 'EffectPackage':
"""
Get a package by python path. Can also contain path to an effect.
Args:
name (str): Path to effect package or effect
Returns:
| python | {
"resource": ""
} |
q258124 | EffectRegistry.find_effect_class | validation | def find_effect_class(self, path) -> Type[Effect]:
"""
Find an effect class by class name or full python path to class
Args:
path (str): effect class name or full python path to effect class
Returns:
Effect class
Raises:
EffectError if no class is found
| python | {
"resource": ""
} |
q258125 | EffectPackage.runnable_effects | validation | def runnable_effects(self) -> List[Type[Effect]]:
"""Returns the runnable effect in the package"""
| python | {
"resource": ""
} |
q258126 | EffectPackage.load_package | validation | def load_package(self):
"""FInd the effect package"""
try:
self.package = importlib.import_module(self.name)
except ModuleNotFoundError:
| python | {
"resource": ""
} |
q258127 | EffectPackage.load_effects_classes | validation | def load_effects_classes(self):
"""Iterate the module attributes picking out effects"""
self.effect_classes = []
for _, cls in inspect.getmembers(self.effect_module):
if inspect.isclass(cls):
if cls == Effect:
continue
if issubclass(cls, Effect):
| python | {
"resource": ""
} |
q258128 | EffectPackage.load_resource_module | validation | def load_resource_module(self):
"""Fetch the resource list"""
# Attempt to load the dependencies module
try:
name = '{}.{}'.format(self.name, 'dependencies')
self.dependencies_module = importlib.import_module(name)
except ModuleNotFoundError as err:
raise EffectError(
(
"Effect package '{}' has no 'dependencies' module or the module has errors. "
"Forwarded error from importlib: {}"
).format(self.name, err))
# Fetch the resource descriptions
try:
self.resources = getattr(self.dependencies_module, 'resources')
| python | {
"resource": ""
} |
q258129 | Timeline.draw | validation | def draw(self, time, frametime, target):
"""
Fetch track value for every runnable effect.
If the value is > 0.5 we draw it.
"""
for effect in self.effects:
value | python | {
"resource": ""
} |
q258130 | Loader.load | validation | def load(self):
"""Load a 2d texture"""
self._open_image()
components, data = image_data(self.image)
texture = self.ctx.texture(
self.image.size,
components,
| python | {
"resource": ""
} |
q258131 | ProgramShaders.from_single | validation | def from_single(cls, meta: ProgramDescription, source: str):
"""Initialize a single glsl string containing all shaders"""
instance = cls(meta)
instance.vertex_source = ShaderSource(
VERTEX_SHADER,
meta.path or meta.vertex_shader,
source
)
if GEOMETRY_SHADER in source:
instance.geometry_source = ShaderSource(
GEOMETRY_SHADER,
meta.path or meta.geometry_shader,
source,
)
if FRAGMENT_SHADER in source:
instance.fragment_source = ShaderSource(
FRAGMENT_SHADER,
meta.path or meta.fragment_shader,
source,
)
if TESS_CONTROL_SHADER in source:
instance.tess_control_source = ShaderSource(
| python | {
"resource": ""
} |
q258132 | ProgramShaders.from_separate | validation | def from_separate(cls, meta: ProgramDescription, vertex_source, geometry_source=None, fragment_source=None,
tess_control_source=None, tess_evaluation_source=None):
"""Initialize multiple shader strings"""
instance = cls(meta)
instance.vertex_source = ShaderSource(
VERTEX_SHADER,
meta.path or meta.vertex_shader,
vertex_source,
)
if geometry_source:
instance.geometry_source = ShaderSource(
GEOMETRY_SHADER,
meta.path or meta.geometry_shader,
geometry_source,
)
if fragment_source:
instance.fragment_source = ShaderSource(
FRAGMENT_SHADER,
meta.path or meta.fragment_shader,
fragment_source,
)
if tess_control_source:
| python | {
"resource": ""
} |
q258133 | ShaderSource.print | validation | def print(self):
"""Print the shader lines"""
print("---[ START {} ]---".format(self.name))
for i, line in enumerate(self.lines):
| python | {
"resource": ""
} |
q258134 | BaseProject.load | validation | def load(self):
"""
Loads this project instance
"""
self.create_effect_classes()
self._add_resource_descriptions_to_pools(self.create_external_resources())
self._add_resource_descriptions_to_pools(self.create_resources())
for meta, resource in resources.textures.load_pool():
self._textures[meta.label] = resource
for meta, resource in resources.programs.load_pool():
| python | {
"resource": ""
} |
q258135 | BaseProject._add_resource_descriptions_to_pools | validation | def _add_resource_descriptions_to_pools(self, meta_list):
"""
Takes a list of resource descriptions, adding them to the resource
pools they belong to and scheduling them for loading.
"""
if not meta_list:
| python | {
"resource": ""
} |
q258136 | BaseProject.reload_programs | validation | def reload_programs(self):
"""
Reload all shader programs with the reloadable flag set
"""
print("Reloading programs:")
for name, program in self._programs.items():
if getattr(program, 'program', None):
| python | {
"resource": ""
} |
q258137 | image_data | validation | def image_data(image):
"""Get components and bytes for an image"""
# NOTE: We might want to check the actual image.mode
# | python | {
"resource": ""
} |
q258138 | BaseLoader._find_last_of | validation | def _find_last_of(self, path, finders):
"""Find the last occurance of the file in finders"""
found_path = None
| python | {
"resource": ""
} |
q258139 | Command.initial_sanity_check | validation | def initial_sanity_check(self):
"""Checks if we can create the project"""
# Check for python module collision
self.try_import(self.project_name)
# Is the name a valid identifier?
self.validate_name(self.project_name)
# Make sure we don't mess with existing directories
if os.path.exists(self.project_name):
| python | {
"resource": ""
} |
q258140 | Command.create_entrypoint | validation | def create_entrypoint(self):
"""Write manage.py in the current directory"""
with open(os.path.join(self.template_dir, 'manage.py'), 'r') as fd:
data = fd.read().format(project_name=self.project_name)
| python | {
"resource": ""
} |
q258141 | Command.get_template_dir | validation | def get_template_dir(self):
"""Returns the absolute path to template directory"""
directory = os.path.dirname(os.path.abspath(__file__))
directory | python | {
"resource": ""
} |
q258142 | Programs.resolve_loader | validation | def resolve_loader(self, meta: ProgramDescription):
"""
Resolve program loader
"""
if not meta.loader:
meta.loader = 'single' if meta.path else 'separate'
for loader_cls in self._loaders:
if loader_cls.name == meta.loader:
meta.loader_cls = loader_cls
break
else:
| python | {
"resource": ""
} |
q258143 | ac_encode | validation | def ac_encode(text, probs):
"""Encode a text using arithmetic coding with the provided probabilities.
This is a wrapper for :py:meth:`Arithmetic.encode`.
Parameters
----------
text : str
A string to encode
probs : dict
A probability statistics dictionary generated by
:py:meth:`Arithmetic.train`
Returns
-------
tuple
| python | {
"resource": ""
} |
q258144 | Arithmetic.train | validation | def train(self, text):
r"""Generate a probability dict from the provided text.
Text to 0-order probability statistics as a dict
Parameters
----------
text : str
The text data over which to calculate probability statistics. This
must not contain the NUL (0x00) character because that is used to
indicate the end of data.
Example
-------
>>> ac = Arithmetic()
>>> ac.train('the quick brown fox jumped over the lazy dog')
>>> ac.get_probs()
{' ': (Fraction(0, 1), Fraction(8, 45)),
'o': (Fraction(8, 45), Fraction(4, 15)),
'e': (Fraction(4, 15), Fraction(16, 45)),
'u': (Fraction(16, 45), Fraction(2, 5)),
't': (Fraction(2, 5), Fraction(4, 9)),
'r': (Fraction(4, 9), Fraction(22, 45)),
'h': (Fraction(22, 45), Fraction(8, 15)),
'd': (Fraction(8, 15), Fraction(26, 45)),
'z': (Fraction(26, 45), Fraction(3, 5)),
'y': (Fraction(3, 5), Fraction(28, 45)),
'x': (Fraction(28, 45), Fraction(29, 45)),
'w': (Fraction(29, 45), Fraction(2, 3)),
'v': (Fraction(2, 3), Fraction(31, 45)),
'q': (Fraction(31, 45), Fraction(32, 45)),
'p': (Fraction(32, 45), Fraction(11, 15)),
'n': (Fraction(11, 15), Fraction(34, 45)),
'm': (Fraction(34, 45), Fraction(7, 9)),
'l': (Fraction(7, 9), Fraction(4, 5)),
'k': (Fraction(4, 5), Fraction(37, 45)),
| python | {
"resource": ""
} |
q258145 | Arithmetic.encode | validation | def encode(self, text):
"""Encode a text using arithmetic coding.
Text and the 0-order probability statistics -> longval, nbits
The encoded number is Fraction(longval, 2**nbits)
Parameters
----------
text : str
A string to encode
Returns
-------
tuple
The arithmetically coded text
Example
-------
>>> ac = Arithmetic('the quick brown fox jumped over the lazy dog')
>>> ac.encode('align')
(16720586181, 34)
"""
text = text_type(text)
if '\x00' in text:
text = text.replace('\x00', ' ')
minval = Fraction(0)
maxval = Fraction(1)
for char in text + '\x00':
prob_range = self._probs[char]
delta = maxval - minval
maxval = minval + prob_range[1] * delta
minval = minval + prob_range[0] * delta
# I tried without the /2 just to check. Doesn't work.
# Keep scaling up until the error range is >= 1. | python | {
"resource": ""
} |
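The loop above narrows a `[0, 1)` interval by each successive symbol's probability range; distilled into a self-contained sketch (assuming `probs` maps each character, including the NUL terminator, to its `(low, high)` cumulative range):

```python
from fractions import Fraction

def narrow_interval(text, probs):
    minval, maxval = Fraction(0), Fraction(1)
    for char in text + '\x00':  # NUL marks end of data, as in train()
        low, high = probs[char]
        delta = maxval - minval
        maxval = minval + high * delta  # shrink the upper bound first
        minval = minval + low * delta
    # Any fraction in [minval, maxval) uniquely encodes `text`
    return minval, maxval
```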
q258146 | NGramCorpus.corpus_importer | validation | def corpus_importer(self, corpus, n_val=1, bos='_START_', eos='_END_'):
r"""Fill in self.ngcorpus from a Corpus argument.
Parameters
----------
corpus : Corpus
The Corpus from which to initialize the n-gram corpus
n_val : int
Maximum n value for n-grams
bos : str
String to insert as an indicator of beginning of sentence
eos : str
String to insert as an indicator of end of sentence
Raises
------
TypeError
Corpus argument of the Corpus class required.
Example
-------
>>> tqbf = 'The quick brown fox jumped over the lazy dog.\n'
>>> tqbf += 'And then it slept.\n And the dog ran off.'
>>> ngcorp = NGramCorpus()
>>> ngcorp.corpus_importer(Corpus(tqbf))
"""
if not corpus or not isinstance(corpus, Corpus):
raise TypeError('Corpus argument of the Corpus class required.')
sentences = corpus.sents()
for sent in sentences:
ngs = Counter(sent)
| python | {
"resource": ""
} |
q258147 | NGramCorpus.get_count | validation | def get_count(self, ngram, corpus=None):
r"""Get the count of an n-gram in the corpus.
Parameters
----------
ngram : str
The n-gram to retrieve the count of from the n-gram corpus
corpus : Corpus
The corpus
Returns
-------
int
The n-gram count
Examples
--------
>>> tqbf = 'The quick brown fox jumped over the lazy dog.\n'
>>> tqbf += 'And then it slept.\n And the dog ran off.'
>>> ngcorp = NGramCorpus(Corpus(tqbf))
| python | {
"resource": ""
} |
q258148 | NGramCorpus._add_to_ngcorpus | validation | def _add_to_ngcorpus(self, corpus, words, count):
"""Build up a corpus entry recursively.
Parameters
----------
corpus : Corpus
The corpus
words : [str]
Words | python | {
"resource": ""
} |
q258149 | NGramCorpus.gng_importer | validation | def gng_importer(self, corpus_file):
"""Fill in self.ngcorpus from a Google NGram corpus file.
Parameters
----------
corpus_file : file
The Google NGram file from which to initialize the n-gram corpus
"""
with c_open(corpus_file, 'r', encoding='utf-8') as gng:
| python | {
"resource": ""
} |
q258150 | NGramCorpus.tf | validation | def tf(self, term):
r"""Return term frequency.
Parameters
----------
term : str
The term for which to calculate tf
Returns
-------
float
The term frequency (tf)
Raises
------
ValueError
tf can only calculate the frequency of individual words
Examples
--------
>>> tqbf = 'The quick brown fox jumped over the lazy dog.\n'
>>> tqbf += 'And then it slept.\n And the dog ran off.'
>>> ngcorp = NGramCorpus(Corpus(tqbf))
>>> NGramCorpus(Corpus(tqbf)).tf('the')
1.3010299956639813
| python | {
"resource": ""
} |
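The docstring's example value, 1.3010299956639813 for a term occurring twice, is consistent with the common log-scaled term frequency `1 + log10(count)`; a sketch under that assumption:

```python
from math import log10

def log_tf(count):
    """Log-scaled term frequency: 1 + log10(count); 0.0 for absent terms."""
    return 1 + log10(count) if count > 0 else 0.0

print(log_tf(2))  # 1.3010299956639813, matching the docstring example
```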
q258151 | BWT.encode | validation | def encode(self, word, terminator='\0'):
r"""Return the Burrows-Wheeler transformed form of a word.
Parameters
----------
word : str
The word to transform using BWT
terminator : str
A character added to signal the end of the string
Returns
-------
str
Word encoded by BWT
Raises
------
ValueError
Specified terminator absent from code.
Examples
--------
>>> bwt = BWT()
>>> bwt.encode('align')
'n\x00ilag'
>>> bwt.encode('banana')
'annb\x00aa'
>>> bwt.encode('banana', '@')
'annb@aa'
"""
if word:
if terminator in word:
raise ValueError(
| python | {
"resource": ""
} |
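A minimal reference implementation of the transform described above, via sorted cyclic rotations, reproduces the docstring examples (a sketch, not the library's exact code with its error handling):

```python
def bwt_encode(word, terminator='\0'):
    word += terminator
    # All cyclic rotations of the terminated word, sorted lexicographically
    rotations = sorted(word[i:] + word[:i] for i in range(len(word)))
    # The BWT is the last column of the sorted rotation table
    return ''.join(rot[-1] for rot in rotations)

assert bwt_encode('banana') == 'annb\x00aa'
assert bwt_encode('banana', '@') == 'annb@aa'
```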
q258152 | BWT.decode | validation | def decode(self, code, terminator='\0'):
r"""Return a word decoded from BWT form.
Parameters
----------
code : str
The word to transform from BWT form
terminator : str
A character added to signal the end of the string
Returns
-------
str
Word decoded by BWT
Raises
------
ValueError
Specified terminator absent from code.
Examples
--------
>>> bwt = BWT()
>>> bwt.decode('n\x00ilag')
'align'
>>> bwt.decode('annb\x00aa')
'banana'
>>> bwt.decode('annb@aa', '@')
'banana'
"""
if code:
if terminator not in code:
raise ValueError(
'Specified terminator, {}, absent from code.'.format(
| python | {
"resource": ""
} |
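The inverse direction can be sketched with the classic (quadratic) table reconstruction: repeatedly prepend the code as a column and re-sort until the rotation table is rebuilt, then read off the row ending in the terminator:

```python
def bwt_decode(code, terminator='\0'):
    table = [''] * len(code)
    for _ in range(len(code)):
        # Prepend the BWT column to every row, then restore sorted order
        table = sorted(code[i] + table[i] for i in range(len(code)))
    # The original word is the row that ends with the terminator
    row = next(r for r in table if r.endswith(terminator))
    return row[:-1]

assert bwt_decode('annb\x00aa') == 'banana'
assert bwt_decode('annb@aa', '@') == 'banana'
```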
q258153 | Indel.dist_abs | validation | def dist_abs(self, src, tar):
"""Return the indel distance between two strings.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
Returns
-------
int
Indel distance
Examples
--------
>>> cmp = Indel()
>>> cmp.dist_abs('cat', 'hat')
2
| python | {
"resource": ""
} |
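The indel distance has a compact identity with the longest common subsequence, `len(src) + len(tar) - 2 * |LCS|`; a self-contained sketch using that identity (not the library's actual implementation):

```python
def indel_distance(src, tar):
    m, n = len(src), len(tar)
    lcs = [[0] * (n + 1) for _ in range(m + 1)]
    for i, sc in enumerate(src):
        for j, tc in enumerate(tar):
            lcs[i + 1][j + 1] = (
                lcs[i][j] + 1 if sc == tc
                else max(lcs[i][j + 1], lcs[i + 1][j])
            )
    # Every character outside the LCS costs one insert or one delete
    return m + n - 2 * lcs[m][n]

assert indel_distance('cat', 'hat') == 2
```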
q258154 | Indel.dist | validation | def dist(self, src, tar):
"""Return the normalized indel distance between two strings.
This is equivalent to normalized Levenshtein distance when only
inserts and deletes are possible.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
Returns
-------
float
Normalized indel distance
Examples
--------
>>> cmp = | python | {
"resource": ""
} |
q258155 | _Distance.sim | validation | def sim(self, src, tar, *args, **kwargs):
"""Return similarity.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
*args
Variable length argument list.
| python | {
"resource": ""
} |
q258156 | _Distance.dist_abs | validation | def dist_abs(self, src, tar, *args, **kwargs):
"""Return absolute distance.
Parameters
----------
src : str
Source string for comparison
tar : str
| python | {
"resource": ""
} |
q258157 | dist_baystat | validation | def dist_baystat(src, tar, min_ss_len=None, left_ext=None, right_ext=None):
"""Return the Baystat distance.
This is a wrapper for :py:meth:`Baystat.dist`.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
min_ss_len : int
Minimum substring length to be considered
left_ext : int
Left-side extension length
right_ext : int
Right-side extension length
Returns
-------
float
The Baystat distance
Examples
| python | {
"resource": ""
} |
q258158 | dist_tversky | validation | def dist_tversky(src, tar, qval=2, alpha=1, beta=1, bias=None):
"""Return the Tversky distance between two strings.
This is a wrapper for :py:meth:`Tversky.dist`.
Parameters
----------
src : str
Source string (or QGrams/Counter objects) for comparison
tar : str
Target string (or QGrams/Counter objects) for comparison
qval : int
The length of each q-gram; 0 for non-q-gram version
alpha : float
Tversky index parameter as described above
beta : float
Tversky index parameter as described above
bias : float
The symmetric Tversky index bias parameter
Returns | python | {
"resource": ""
} |
q258159 | LCSseq.lcsseq | validation | def lcsseq(self, src, tar):
"""Return the longest common subsequence of two strings.
Based on the dynamic programming algorithm from
http://rosettacode.org/wiki/Longest_common_subsequence
:cite:`rosettacode:2018b`. This is licensed GFDL 1.2.
Modifications include:
conversion to a numpy array in place of a list of lists
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
Returns
-------
str
The longest common subsequence
Examples
--------
>>> sseq = LCSseq()
>>> sseq.lcsseq('cat', 'hat')
'at'
>>> sseq.lcsseq('Niall', 'Neil')
'Nil'
>>> sseq.lcsseq('aluminum', 'Catalan')
'aln'
>>> sseq.lcsseq('ATCG', 'TAGC')
'AC'
"""
lengths = np_zeros((len(src) + 1, len(tar) + 1), dtype=np_int)
# row 0 and column 0 are initialized to 0 already
for i, src_char in enumerate(src):
for j, tar_char in enumerate(tar):
| python | {
"resource": ""
} |
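The truncated cell above fills a `lengths` table and then backtracks through it; a compact, runnable sketch of that standard recurrence and backtrack:

```python
import numpy as np

def lcsseq(src, tar):
    lengths = np.zeros((len(src) + 1, len(tar) + 1), dtype=int)
    for i, sc in enumerate(src):
        for j, tc in enumerate(tar):
            if sc == tc:
                lengths[i + 1, j + 1] = lengths[i, j] + 1
            else:
                lengths[i + 1, j + 1] = max(lengths[i + 1, j], lengths[i, j + 1])
    # Walk back from the bottom-right corner, collecting matched characters
    result, i, j = '', len(src), len(tar)
    while i and j:
        if lengths[i, j] == lengths[i - 1, j]:
            i -= 1
        elif lengths[i, j] == lengths[i, j - 1]:
            j -= 1
        else:
            result = src[i - 1] + result
            i, j = i - 1, j - 1
    return result

assert lcsseq('cat', 'hat') == 'at'
assert lcsseq('Niall', 'Neil') == 'Nil'
```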
q258160 | LCSseq.sim | validation | def sim(self, src, tar):
r"""Return the longest common subsequence similarity of two strings.
Longest common subsequence similarity (:math:`sim_{LCSseq}`).
This employs the LCSseq function to derive a similarity metric:
:math:`sim_{LCSseq}(s,t) = \frac{|LCSseq(s,t)|}{max(|s|, |t|)}`
Parameters
----------
src : str
Source string for comparison
| python | {
"resource": ""
} |
q258161 | Prefix.sim | validation | def sim(self, src, tar):
"""Return the prefix similarity of two strings.
Prefix similarity is the ratio of the length of the longest prefix
shared by both terms to the length of the shorter term.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
Returns
-------
float
Prefix similarity
Examples
--------
>>> cmp = Prefix()
>>> cmp.sim('cat', 'hat')
0.0
>>> cmp.sim('Niall', 'Neil')
0.25
>>> cmp.sim('aluminum', 'Catalan')
0.0
>>> cmp.sim('ATCG', 'TAGC')
0.0
| python | {
"resource": ""
} |
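A minimal sketch of the ratio described above (the empty-string convention here is an assumption, not necessarily the library's):

```python
def prefix_sim(src, tar):
    if src == tar:
        return 1.0
    if not src or not tar:
        return 0.0
    shorter, longer = sorted((src, tar), key=len)
    match = 0
    for a, b in zip(shorter, longer):
        if a != b:
            break
        match += 1
    # Matching leading characters relative to the shorter term
    return match / len(shorter)

assert prefix_sim('Niall', 'Neil') == 0.25
assert prefix_sim('cat', 'hat') == 0.0
```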
q258162 | Corpus.docs_of_words | validation | def docs_of_words(self):
r"""Return the docs in the corpus, with sentences flattened.
Each list within the corpus represents all the words of that document.
Thus the sentence level of lists has been flattened.
Returns
-------
[[str]]
The docs in the corpus as a list of list of strs
Example
-------
>>> tqbf = 'The quick brown fox jumped over the lazy dog.\n'
>>> tqbf += 'And then it slept.\n And the dog ran off.'
>>> corp = Corpus(tqbf)
>>> corp.docs_of_words()
| python | {
"resource": ""
} |
q258163 | Corpus.raw | validation | def raw(self):
r"""Return the raw corpus.
This is reconstructed by joining sub-components with the corpus' split
characters
Returns
-------
str
The raw corpus
Example
-------
>>> tqbf = 'The quick brown fox jumped over the lazy dog.\n'
>>> tqbf += 'And then it slept.\n And the dog ran off.'
>>> corp = Corpus(tqbf)
>>> print(corp.raw())
The quick brown fox jumped over the lazy dog.
And then it slept.
And the dog ran off.
| python | {
"resource": ""
} |
q258164 | Corpus.idf | validation | def idf(self, term, transform=None):
r"""Calculate the Inverse Document Frequency of a term in the corpus.
Parameters
----------
term : str
The term to calculate the IDF of
transform : function
A function to apply to each document term before checking for the
presence of term
Returns
-------
float
The IDF
Examples
--------
>>> tqbf = 'The quick brown fox jumped over the lazy dog.\n\n'
>>> tqbf += 'And then it slept.\n\n And the dog ran off.'
>>> | python | {
"resource": ""
} |
q258165 | PaiceHusk.stem | validation | def stem(self, word):
"""Return Paice-Husk stem.
Parameters
----------
word : str
The word to stem
Returns
-------
str
Word stem
Examples
--------
>>> stmr = PaiceHusk()
>>> stmr.stem('assumption')
'assum'
>>> stmr.stem('verifiable')
'ver'
>>> stmr.stem('fancies')
'fant'
>>> stmr.stem('fanciful')
'fancy'
>>> stmr.stem('torment')
'tor'
"""
terminate = False
intact = True
while not terminate:
for n in range(6, 0, -1):
| python | {
"resource": ""
} |
q258166 | BeiderMorse._language | validation | def _language(self, name, name_mode):
"""Return the best guess language ID for the word and language choices.
Parameters
----------
name : str
The term to guess the language of
name_mode : str
The name mode of the algorithm: ``gen`` (default),
``ash`` (Ashkenazi), or ``sep`` (Sephardic)
Returns
-------
int
Language ID
"""
name = name.strip().lower()
rules = BMDATA[name_mode]['language_rules']
all_langs = (
sum(_LANG_DICT[_] for _ in BMDATA[name_mode]['languages']) - 1
)
| python | {
"resource": ""
} |
q258167 | BeiderMorse._redo_language | validation | def _redo_language(
self, term, name_mode, rules, final_rules1, final_rules2, concat
):
"""Reassess the language of the terms and call the phonetic encoder.
Uses a split multi-word term.
Parameters
----------
term : str
The term to encode via Beider-Morse
name_mode : str
The name mode of the algorithm: ``gen`` (default),
``ash`` (Ashkenazi), or ``sep`` (Sephardic)
| python | {
"resource": ""
} |
q258168 | BeiderMorse._apply_final_rules | validation | def _apply_final_rules(self, phonetic, final_rules, language_arg, strip):
"""Apply a set of final rules to the phonetic encoding.
Parameters
----------
phonetic : str
The term to which to apply the final rules
final_rules : tuple
The set of final phonetic transform regexps
language_arg : int
An integer representing the target language of the phonetic
encoding
strip : bool
Flag to indicate whether to normalize the language attributes
Returns
-------
str
A Beider-Morse phonetic code
"""
# optimization to save time
if not final_rules:
return phonetic
# expand the result
phonetic = self._expand_alternates(phonetic)
phonetic_array = phonetic.split('|')
for k in range(len(phonetic_array)):
phonetic = phonetic_array[k]
phonetic2 = ''
phoneticx = self._normalize_lang_attrs(phonetic, True)
i = 0
while i < len(phonetic):
found = False
if phonetic[i] == '[': # skip over language attribute
attrib_start = i
i += 1
while True:
if phonetic[i] == ']':
i += 1
phonetic2 += phonetic[attrib_start:i]
break
i += 1
continue
for rule in final_rules:
pattern = rule[_PATTERN_POS]
pattern_length = len(pattern)
lcontext = rule[_LCONTEXT_POS]
rcontext = rule[_RCONTEXT_POS]
right = '^' + rcontext
left = lcontext + '$'
# check to see if next sequence in phonetic matches the
# string in the rule
if (pattern_length > len(phoneticx) - i) or phoneticx[
i : i + pattern_length
] != pattern:
continue
# check that right context is satisfied
if rcontext != '':
if not search(right, phoneticx[i + pattern_length :]):
continue
# check that left context is satisfied
if lcontext != '':
| python | {
"resource": ""
} |
q258169 | BeiderMorse._expand_alternates | validation | def _expand_alternates(self, phonetic):
"""Expand phonetic alternates separated by |s.
Parameters
----------
phonetic : str
A Beider-Morse phonetic encoding
Returns
-------
str
A Beider-Morse phonetic code
"""
alt_start = phonetic.find('(')
if alt_start == -1:
return self._normalize_lang_attrs(phonetic, False)
prefix = phonetic[:alt_start]
alt_start += 1 # get past the (
alt_end = phonetic.find(')', alt_start)
alt_string = phonetic[alt_start:alt_end]
alt_end += 1 # get past the )
| python | {
"resource": ""
} |
q258170 | BeiderMorse._pnums_with_leading_space | validation | def _pnums_with_leading_space(self, phonetic):
"""Join prefixes & suffixes in cases of alternate phonetic values.
Parameters
----------
phonetic : str
A Beider-Morse phonetic encoding
Returns
-------
str
A Beider-Morse phonetic code
"""
alt_start = phonetic.find('(')
if alt_start == -1:
return ' ' + self._phonetic_number(phonetic)
prefix = phonetic[:alt_start]
alt_start += 1 # get past the (
alt_end = phonetic.find(')', alt_start)
| python | {
"resource": ""
} |
q258171 | BeiderMorse._phonetic_numbers | validation | def _phonetic_numbers(self, phonetic):
"""Prepare & join phonetic numbers.
Split phonetic value on '-', run through _pnums_with_leading_space,
and join with ' '
Parameters
----------
phonetic : str
A Beider-Morse phonetic encoding
Returns
-------
str
A Beider-Morse phonetic code
"""
| python | {
"resource": ""
} |
q258172 | BeiderMorse._remove_dupes | validation | def _remove_dupes(self, phonetic):
"""Remove duplicates from a phonetic encoding list.
Parameters
----------
phonetic : str
A Beider-Morse phonetic encoding
Returns
-------
str
A Beider-Morse phonetic code
"""
alt_string = phonetic
alt_array = alt_string.split('|')
result = '|'
| python | {
"resource": ""
} |
q258173 | BeiderMorse._normalize_lang_attrs | validation | def _normalize_lang_attrs(self, text, strip):
"""Remove embedded bracketed attributes.
This (potentially) bitwise-ands bracketed attributes together and adds
to the end.
This is applied to a single alternative at a time -- not to a
parenthesized list.
It removes all embedded bracketed attributes, logically-ands them
together, and places them at the end.
However if strip is true, this can indeed remove embedded bracketed
attributes from a parenthesized list.
Parameters
----------
text : str
A Beider-Morse phonetic encoding (in progress)
strip : bool
Remove the bracketed attributes (and throw away)
Returns
-------
str
A Beider-Morse phonetic code
Raises
------
ValueError
No closing square bracket
"""
uninitialized = -1 # all 1's
attrib = uninitialized
while '[' in text:
| python | {
"resource": ""
} |
q258174 | BeiderMorse._apply_rule_if_compat | validation | def _apply_rule_if_compat(self, phonetic, target, language_arg):
"""Apply a phonetic regex if compatible.
Tests for compatible language rules. To do so, it applies the rule,
expands the results, and detects alternatives with incompatible
attributes. Each alternative with incompatible attributes is then
dropped and the compatible ones are kept. If no compatible
alternatives remain, return False; otherwise return the compatible
alternatives.
Parameters
----------
phonetic : str
The Beider-Morse phonetic encoding (so far)
target : str
A proposed addition to the phonetic encoding
language_arg : int
An integer representing the target language of the phonetic
encoding
Returns
-------
str
A candidate encoding
"""
candidate = phonetic + target
if '[' not in candidate: # no attributes so we need test no further
return candidate
# expand the result, converting incompatible attributes to [0]
candidate = self._expand_alternates(candidate)
| python | {
"resource": ""
} |
q258175 | BeiderMorse._language_index_from_code | validation | def _language_index_from_code(self, code, name_mode):
"""Return the index value for a language code.
This returns l_any if more than one code is specified or the code is
out of bounds.
Parameters
----------
code : int
The language code to interpret
name_mode : str
The name mode of the algorithm: ``gen`` (default),
``ash`` (Ashkenazi), or ``sep`` (Sephardic)
Returns
-------
int
Language code | python | {
"resource": ""
} |
q258176 | dist_strcmp95 | validation | def dist_strcmp95(src, tar, long_strings=False):
"""Return the strcmp95 distance between two strings.
This is a wrapper for :py:meth:`Strcmp95.dist`.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
long_strings : bool
Set to True to increase the probability of a match when the number of
matched characters is large. This option allows for a little more
tolerance when the strings are large. It is not an appropriate test
when comparing fixed length fields such as phone and social security
numbers.
Returns
-------
float
| python | {
"resource": ""
} |
q258177 | NRL.encode | validation | def encode(self, word):
"""Return the Naval Research Laboratory phonetic encoding of a word.
Parameters
----------
word : str
The word to transform
Returns
-------
str
The NRL phonetic encoding
Examples
--------
>>> pe = NRL()
>>> pe.encode('the')
'DHAX'
>>> pe.encode('round')
'rAWnd'
>>> pe.encode('quick')
'kwIHk'
>>> pe.encode('eaten')
'IYtEHn'
>>> pe.encode('Smith')
'smIHTH'
>>> pe.encode('Larsen')
'lAArsEHn'
"""
def _to_regex(pattern, left_match=True):
new_pattern = ''
replacements = {
'#': '[AEIOU]+',
':': '[BCDFGHJKLMNPQRSTVWXYZ]*',
'^': '[BCDFGHJKLMNPQRSTVWXYZ]',
'.': '[BDVGJLMNTWZ]',
'%': '(ER|E|ES|ED|ING|ELY)',
'+': '[EIY]',
' ': '^',
}
for char in pattern:
new_pattern += (
replacements[char] if char in replacements else char
)
if left_match:
new_pattern += '$'
if '^' not in pattern:
new_pattern = '^.*' + new_pattern
else:
new_pattern = '^' + new_pattern.replace('^', '$')
if '$' not in new_pattern:
new_pattern += '.*$'
return new_pattern
word = word.upper()
pron = ''
| python | {
"resource": ""
} |
q258178 | LCSstr.lcsstr | validation | def lcsstr(self, src, tar):
"""Return the longest common substring of two strings.
Longest common substring (LCSstr).
Based on the code from
https://en.wikibooks.org/wiki/Algorithm_Implementation/Strings/Longest_common_substring
:cite:`Wikibooks:2018`.
This is licensed Creative Commons: Attribution-ShareAlike 3.0.
Modifications include:
- conversion to a numpy array in place of a list of lists
- conversion to Python 2/3-safe range from xrange via six
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
Returns
-------
str
The longest common substring
Examples
--------
>>> sstr = LCSstr()
>>> sstr.lcsstr('cat', 'hat')
'at'
>>> sstr.lcsstr('Niall', 'Neil')
'N'
>>> sstr.lcsstr('aluminum', 'Catalan')
| python | {
"resource": ""
} |
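The substring variant differs from LCSseq in one detail: the DP cell resets to zero on mismatch, so only contiguous runs accumulate. A runnable sketch:

```python
import numpy as np

def lcsstr(src, tar):
    lengths = np.zeros((len(src) + 1, len(tar) + 1), dtype=int)
    longest, end = 0, 0
    for i, sc in enumerate(src, start=1):
        for j, tc in enumerate(tar, start=1):
            if sc == tc:
                lengths[i, j] = lengths[i - 1, j - 1] + 1
                if lengths[i, j] > longest:
                    longest, end = int(lengths[i, j]), i
            # On mismatch the cell stays 0: runs must be contiguous
    return src[end - longest:end]

assert lcsstr('cat', 'hat') == 'at'
assert lcsstr('Niall', 'Neil') == 'N'
```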
q258179 | LCSstr.sim | validation | def sim(self, src, tar):
r"""Return the longest common substring similarity of two strings.
Longest common substring similarity (:math:`sim_{LCSstr}`).
This employs the LCS function to derive a similarity metric:
:math:`sim_{LCSstr}(s,t) = \frac{|LCSstr(s,t)|}{max(|s|, |t|)}`
Parameters
----------
src : str
Source string for comparison
| python | {
"resource": ""
} |
q258180 | needleman_wunsch | validation | def needleman_wunsch(src, tar, gap_cost=1, sim_func=sim_ident):
"""Return the Needleman-Wunsch score of two strings.
This is a wrapper for :py:meth:`NeedlemanWunsch.dist_abs`.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
gap_cost : float
The cost of an alignment gap (1 by default)
sim_func : function
A function that returns the similarity of two characters (identity
similarity by default)
Returns
-------
float
Needleman-Wunsch score
| python | {
"resource": ""
} |
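A minimal global-alignment scorer in the spirit of the wrapper above, assuming unit gap cost and identity similarity (the library version also accepts a custom similarity function, per the parameters listed):

```python
import numpy as np

def nw_score(src, tar, gap_cost=1, sim=lambda a, b: 1 if a == b else 0):
    d = np.zeros((len(src) + 1, len(tar) + 1))
    d[:, 0] = -gap_cost * np.arange(len(src) + 1)  # leading gaps in tar
    d[0, :] = -gap_cost * np.arange(len(tar) + 1)  # leading gaps in src
    for i in range(1, len(src) + 1):
        for j in range(1, len(tar) + 1):
            d[i, j] = max(
                d[i - 1, j - 1] + sim(src[i - 1], tar[j - 1]),  # (mis)match
                d[i - 1, j] - gap_cost,                         # gap in tar
                d[i, j - 1] - gap_cost,                         # gap in src
            )
    return d[-1, -1]

print(nw_score('cat', 'hat'))  # 2.0: two matches, one mismatch, no gaps
```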
q258181 | NeedlemanWunsch.sim_matrix | validation | def sim_matrix(
src,
tar,
mat=None,
mismatch_cost=0,
match_cost=1,
symmetric=True,
alphabet=None,
):
"""Return the matrix similarity of two strings.
With the default parameters, this is identical to sim_ident.
It is possible for sim_matrix to return values outside of the range
:math:`[0, 1]`, if values outside that range are present in mat,
mismatch_cost, or match_cost.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
mat : dict
A dict mapping tuples to costs; the tuples are (src, tar) pairs of
symbols from the alphabet parameter
mismatch_cost : float
The value returned if (src, tar) is absent from mat when src does
not equal tar
match_cost : float
The value returned if (src, tar) is absent from mat when src equals
tar
symmetric : bool
True if the cost of src not matching tar is identical to the cost
of tar not matching src; in this case, the values in mat need only
contain (src, tar) or (tar, src), not both
alphabet : str
A collection of tokens from which src and tar are drawn; if this is
defined a ValueError is raised if either tar or src is not found in
alphabet
Returns
-------
float
Matrix similarity
Raises
------
ValueError
src value not in alphabet
ValueError
tar value not in alphabet
| python | {
"resource": ""
} |
q258182 | PhoneticSpanish.encode | validation | def encode(self, word, max_length=-1):
"""Return the PhoneticSpanish coding of word.
Parameters
----------
word : str
The word to transform
max_length : int
The length of the code returned (defaults to unlimited)
Returns
-------
str
The PhoneticSpanish code
Examples
--------
>>> pe = PhoneticSpanish()
>>> pe.encode('Perez')
'094'
>>> pe.encode('Martinez')
'69364'
>>> pe.encode('Gutierrez')
'83994'
>>> pe.encode('Santiago')
'4638'
>>> pe.encode('Nicolás')
| python | {
"resource": ""
} |
q258183 | NCDbwtrle.dist | validation | def dist(self, src, tar):
"""Return the NCD between two strings using BWT plus RLE.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
Returns
-------
float
Compression distance
Examples
--------
>>> cmp = NCDbwtrle()
>>> cmp.dist('cat', 'hat')
0.75
>>> cmp.dist('Niall', 'Neil')
0.8333333333333334
>>> cmp.dist('aluminum', 'Catalan')
1.0
>>> cmp.dist('ATCG', 'TAGC')
0.8
"""
| python | {
"resource": ""
} |
q258184 | ConfusionTable.to_tuple | validation | def to_tuple(self):
"""Cast to tuple.
Returns
-------
tuple
The confusion table as a 4-tuple (tp, tn, fp, fn)
Example
-------
>>> ct = ConfusionTable(120, 60, 20, | python | {
"resource": ""
} |
q258185 | ConfusionTable.to_dict | validation | def to_dict(self):
"""Cast to dict.
Returns
-------
dict
The confusion table as a dict
Example
-------
>>> ct = ConfusionTable(120, 60, 20, 30)
>>> import pprint
>>> pprint.pprint(ct.to_dict())
| python | {
"resource": ""
} |
q258186 | ConfusionTable.population | validation | def population(self):
"""Return population, N.
Returns
-------
int
The population (N) of the confusion table
Example
-------
>>> ct = ConfusionTable(120, 60, 20, 30)
| python | {
"resource": ""
} |
q258187 | ConfusionTable.precision | validation | def precision(self):
r"""Return precision.
Precision is defined as :math:`\frac{tp}{tp + fp}`
AKA positive predictive value (PPV)
Cf. https://en.wikipedia.org/wiki/Precision_and_recall
Cf. https://en.wikipedia.org/wiki/Information_retrieval#Precision
Returns
-------
float
The precision of the confusion table
Example
-------
| python | {
"resource": ""
} |
q258188 | ConfusionTable.precision_gain | validation | def precision_gain(self):
r"""Return gain in precision.
The gain in precision is defined as:
:math:`G(precision) = \frac{precision}{random~ precision}`
Cf. https://en.wikipedia.org/wiki/Gain_(information_retrieval)
Returns
-------
float
The gain in precision of the confusion table
Example
-------
>>> ct = ConfusionTable(120, 60, 20, 30)
>>> ct.precision_gain() | python | {
"resource": ""
} |
q258189 | ConfusionTable.recall | validation | def recall(self):
r"""Return recall.
Recall is defined as :math:`\frac{tp}{tp + fn}`
AKA sensitivity
AKA true positive rate (TPR)
Cf. https://en.wikipedia.org/wiki/Precision_and_recall
Cf. https://en.wikipedia.org/wiki/Sensitivity_(test)
Cf. https://en.wikipedia.org/wiki/Information_retrieval#Recall
Returns
-------
float
The recall of the confusion table
Example
| python | {
"resource": ""
} |
q258190 | ConfusionTable.specificity | validation | def specificity(self):
r"""Return specificity.
Specificity is defined as :math:`\frac{tn}{tn + fp}`
AKA true negative rate (TNR)
Cf. https://en.wikipedia.org/wiki/Specificity_(tests)
Returns
| python | {
"resource": ""
} |
q258191 | ConfusionTable.fallout | validation | def fallout(self):
r"""Return fall-out.
Fall-out is defined as :math:`\frac{fp}{fp + tn}`
AKA false positive rate (FPR)
Cf. https://en.wikipedia.org/wiki/Information_retrieval#Fall-out
Returns
-------
float
The fall-out of the confusion table
Example
-------
>>> ct = ConfusionTable(120, 60, 20, | python | {
"resource": ""
} |
q258192 | ConfusionTable.accuracy | validation | def accuracy(self):
r"""Return accuracy.
Accuracy is defined as :math:`\frac{tp + tn}{population}`
Cf. https://en.wikipedia.org/wiki/Accuracy
Returns
-------
float
| python | {
"resource": ""
} |
q258193 | ConfusionTable.accuracy_gain | validation | def accuracy_gain(self):
r"""Return gain in accuracy.
The gain in accuracy is defined as:
:math:`G(accuracy) = \frac{accuracy}{random~ accuracy}`
Cf. https://en.wikipedia.org/wiki/Gain_(information_retrieval)
Returns
-------
float
The gain in accuracy of the confusion table
Example
-------
>>> ct = ConfusionTable(120, 60, 20, 30)
>>> ct.accuracy_gain()
1.4325259515570934
"""
| python | {
"resource": ""
} |
q258194 | ConfusionTable.pr_lmean | validation | def pr_lmean(self):
r"""Return logarithmic mean of precision & recall.
The logarithmic mean is:
0 if either precision or recall is 0,
the precision if they are equal,
otherwise :math:`\frac{precision - recall}
{ln(precision) - ln(recall)}`
Cf. https://en.wikipedia.org/wiki/Logarithmic_mean
Returns
-------
float
The logarithmic mean of the confusion table's precision & recall
Example
-------
>>> ct = ConfusionTable(120, 60, | python | {
"resource": ""
} |
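A direct transcription of the piecewise definition in the docstring, fed by the precision and recall formulas given earlier in this table:

```python
from math import log

def pr_lmean(precision, recall):
    if not precision or not recall:
        return 0.0
    if precision == recall:
        return precision
    return (precision - recall) / (log(precision) - log(recall))

# For the recurring example table (tp=120, tn=60, fp=20, fn=30):
print(pr_lmean(120 / 140, 120 / 150))  # ~0.8282
```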
q258195 | CLEFGerman.stem | validation | def stem(self, word):
"""Return CLEF German stem.
Parameters
----------
word : str
The word to stem
Returns
-------
str
Word stem
Examples
--------
>>> stmr = CLEFGerman()
>>> stmr.stem('lesen')
'lese'
>>> stmr.stem('graues')
'grau'
>>> stmr.stem('buchstabieren')
'buchstabier'
"""
# lowercase, normalize, and compose
word = normalize('NFC', text_type(word.lower()))
# remove umlauts
word = word.translate(self._umlauts)
| python | {
"resource": ""
} |
q258196 | Sift4Simplest.dist_abs | validation | def dist_abs(self, src, tar, max_offset=5):
"""Return the "simplest" Sift4 distance between two terms.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
max_offset : int
The number of characters to search for matching letters
Returns
-------
int
The Sift4 distance according to the simplest formula
Examples
--------
>>> cmp = Sift4Simplest()
>>> cmp.dist_abs('cat', 'hat')
1
>>> cmp.dist_abs('Niall', 'Neil')
2
>>> cmp.dist_abs('Colin', 'Cuilen')
3
>>> cmp.dist_abs('ATCG', 'TAGC')
2
"""
if not src:
return len(tar)
if not tar:
return len(src)
src_len = len(src)
tar_len = len(tar)
src_cur = 0
tar_cur = 0
lcss = 0
local_cs = 0
while (src_cur < src_len) and (tar_cur < tar_len):
if src[src_cur] == tar[tar_cur]:
local_cs += 1
else:
lcss += local_cs
local_cs = 0
if src_cur != tar_cur:
src_cur = tar_cur = max(src_cur, tar_cur)
for i in range(max_offset):
if not (
(src_cur + i < src_len) or (tar_cur | python | {
"resource": ""
} |
q258197 | sim_typo | validation | def sim_typo(
src, tar, metric='euclidean', cost=(1, 1, 0.5, 0.5), layout='QWERTY'
):
"""Return the normalized typo similarity between two strings.
This is a wrapper for :py:meth:`Typo.sim`.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
metric : str
Supported values include: ``euclidean``, ``manhattan``,
``log-euclidean``, and ``log-manhattan``
cost : tuple
A 4-tuple representing the cost of the four possible edits: inserts,
deletes, substitutions, and shift, respectively (by default:
(1, 1, 0.5, 0.5)) The substitution & shift costs should be
significantly less than the cost of an insertion & deletion unless a
log metric is used.
| python | {
"resource": ""
} |
q258198 | manhattan | validation | def manhattan(src, tar, qval=2, normalized=False, alphabet=None):
"""Return the Manhattan distance between two strings.
This is a wrapper for :py:meth:`Manhattan.dist_abs`.
Parameters
----------
src : str
Source string (or QGrams/Counter objects) for comparison
tar : str
Target string (or QGrams/Counter objects) for comparison
qval : int
The length of each q-gram; 0 for non-q-gram version
normalized : bool
Normalizes to [0, 1] if True
alphabet : collection or int
The values or size of the alphabet
| python | {
"resource": ""
} |
q258199 | dist_manhattan | validation | def dist_manhattan(src, tar, qval=2, alphabet=None):
"""Return the normalized Manhattan distance between two strings.
This is a wrapper for :py:meth:`Manhattan.dist`.
Parameters
----------
src : str
Source string (or QGrams/Counter objects) for comparison
tar : str
Target string (or QGrams/Counter objects) for comparison
qval : int
The length of each q-gram; 0 for non-q-gram version
alphabet : collection or int
The values or size of the alphabet
Returns
-------
float
The normalized Manhattan | python | {
"resource": ""
} |
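A sketch of the q-gram Manhattan distance these wrappers expose: the L1 distance between the two terms' q-gram multisets. The `$`/`#` padding convention here is an assumption about the q-gram construction, not a verbatim copy of the library's:

```python
from collections import Counter

def qgram_manhattan(src, tar, qval=2):
    def qgrams(s):
        # Pad so leading/trailing characters form full q-grams
        s = '$' * (qval - 1) + s + '#' * (qval - 1)
        return Counter(s[i:i + qval] for i in range(len(s) - qval + 1))
    a, b = qgrams(src), qgrams(tar)
    return sum(abs(a[g] - b[g]) for g in set(a) | set(b))

print(qgram_manhattan('cat', 'hat'))  # 4: '$c','ca' vs '$h','ha' differ
```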