_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q258100
BaseWindow.clear_values
validation
def clear_values(self, red=0.0, green=0.0, blue=0.0, alpha=0.0, depth=1.0):
    """
    Sets the clear values for the window buffer.

    Args:
        red (float): red component
        green (float): green component
        blue (float): blue component
        alpha (float): alpha component
        depth (float): depth value
    """
    self.clear_color = (red, green, blue, alpha)
    self.clear_depth = depth
python
{ "resource": "" }
q258101
BaseWindow.keyboard_event
validation
def keyboard_event(self, key, action, modifier):
    """
    Handles the standard keyboard events such as camera movements,
    taking a screenshot, closing the window etc.

    Can be overridden to add new keyboard events. Ensure this method
    is also called if you want to keep the standard features.

    Arguments:
        key: The key that was pressed or released
        action: The key action. Can be `ACTION_PRESS` or `ACTION_RELEASE`
        modifier: Modifiers such as holding shift or ctrl
    """
    # The well-known standard key for quick exit
    if key == self.keys.ESCAPE:
        self.close()
        return

    # Toggle pause time
    if key == self.keys.SPACE and action == self.keys.ACTION_PRESS:
        self.timer.toggle_pause()

    # Camera movement
    # Right
    if key == self.keys.D:
        if action == self.keys.ACTION_PRESS:
            self.sys_camera.move_right(True)
        elif action == self.keys.ACTION_RELEASE:
            self.sys_camera.move_right(False)
    # Left
    elif key == self.keys.A:
        if action == self.keys.ACTION_PRESS:
            self.sys_camera.move_left(True)
        elif action == self.keys.ACTION_RELEASE:
            self.sys_camera.move_left(False)
    # Forward
    elif key == self.keys.W:
        if action == self.keys.ACTION_PRESS:
            self.sys_camera.move_forward(True)
        if action == self.keys.ACTION_RELEASE:
            self.sys_camera.move_forward(False)
    # Backwards
    elif key == self.keys.S:
        if action == self.keys.ACTION_PRESS:
            self.sys_camera.move_backward(True)
        if action == self.keys.ACTION_RELEASE:
            self.sys_camera.move_backward(False)
    # NOTE(review): Q calls move_down and E calls move_up; the original
    # comments labeled Q as "UP" and E as "Down" — confirm the intended
    # key-to-direction mapping.
    elif key == self.keys.Q:
        if action == self.keys.ACTION_PRESS:
            self.sys_camera.move_down(True)
        if action == self.keys.ACTION_RELEASE:
            self.sys_camera.move_down(False)
    elif key == self.keys.E:
        if action == self.keys.ACTION_PRESS:
            self.sys_camera.move_up(True)
        if action == self.keys.ACTION_RELEASE:
            self.sys_camera.move_up(False)

    # Screenshots
    if key == self.keys.X and action == self.keys.ACTION_PRESS:
        screenshot.create()

    # Reload shader programs
    if key == self.keys.R and action == self.keys.ACTION_PRESS:
        project.instance.reload_programs()

    # Seek 10 seconds forward / backward in the timer
    if key == self.keys.RIGHT and action == self.keys.ACTION_PRESS:
        self.timer.set_time(self.timer.get_time() + 10.0)

    if key == self.keys.LEFT and action == self.keys.ACTION_PRESS:
        self.timer.set_time(self.timer.get_time() - 10.0)

    # Forward the event to the timeline
    self.timeline.key_event(key, action, modifier)
python
{ "resource": "" }
q258102
BaseWindow.cursor_event
validation
def cursor_event(self, x, y, dx, dy):
    """
    The standard mouse movement event method.
    Can be overridden to add new functionality.
    By default this feeds the system camera with new values.

    Args:
        x: The current mouse x position
        y: The current mouse y position
        dx: Delta x position (x position difference from the previous event)
        dy: Delta y position (y position difference from the previous event)
    """
    # Only the absolute position is forwarded; the deltas are unused here
    self.sys_camera.rot_state(x, y)
python
{ "resource": "" }
q258103
BaseWindow.set_default_viewport
validation
def set_default_viewport(self):
    """
    Apply a viewport matching the aspect ratio configured in settings.

    Letterboxes the frame (black borders) when the window size does not
    match the configured aspect ratio.
    """
    width = self.buffer_width
    # Height the frame should have at this width for the configured ratio
    fitted_height = int(width / self.aspect_ratio)
    # Total vertical padding; split evenly above and below
    padding = self.buffer_height - fitted_height
    self.fbo.viewport = (0, padding // 2, width, fitted_height)
python
{ "resource": "" }
q258104
Timer.start
validation
def start(self):
    """Start the timer: always start the music, and start rocket
    playback unless configured to begin paused."""
    self.music.start()
    if self.start_paused:
        return
    self.rocket.start()
python
{ "resource": "" }
q258105
Timer.toggle_pause
validation
def toggle_pause(self):
    """Flip the controller's playing state and toggle the music pause."""
    playing = self.controller.playing
    self.controller.playing = not playing
    self.music.toggle_pause()
python
{ "resource": "" }
q258106
SceneLoader.supports_file
validation
def supports_file(cls, meta):
    """Check if the loader supports the file based on its extension(s).

    Compares each registered extension list against the leading
    suffixes of ``meta.path``.
    """
    suffixes = Path(meta.path).suffixes
    return any(suffixes[:len(ext)] == ext for ext in cls.file_extensions)
python
{ "resource": "" }
q258107
Tracks.get
validation
def get(self, name) -> Track:
    """
    Get or create a Track object.

    :param name: Name of the track (case-insensitive; stored lowercased)
    :return: Track object
    """
    name = name.lower()
    track = self.track_map.get(name)
    if not track:
        track = Track(name)
        # BUG FIX: was ``self.tacks.append(track)`` (typo), which raised
        # AttributeError the first time a new track was created.
        self.tracks.append(track)
        self.track_map[name] = track
    return track
python
{ "resource": "" }
q258108
find_commands
validation
def find_commands(command_dir: str) -> List[str]:
    """
    Get all command names in a folder.

    Packages and modules whose names start with an underscore are skipped.

    :return: List of command names
    """
    if not command_dir:
        return []

    names = []
    for _, name, is_pkg in pkgutil.iter_modules([command_dir]):
        if is_pkg or name.startswith('_'):
            continue
        names.append(name)
    return names
python
{ "resource": "" }
q258109
Settings.update
validation
def update(self, **kwargs):
    """Override settings values with the supplied keyword arguments."""
    for key in kwargs:
        setattr(self, key, kwargs[key])
python
{ "resource": "" }
q258110
Settings.add_program_dir
validation
def add_program_dir(self, directory):
    """Hack in program directory"""
    # Rebuild as a list so tuples (or other sequences) are accepted
    self.PROGRAM_DIRS = list(self.PROGRAM_DIRS) + [directory]
python
{ "resource": "" }
q258111
Settings.add_texture_dir
validation
def add_texture_dir(self, directory):
    """Hack in texture directory"""
    # Rebuild as a list so tuples (or other sequences) are accepted
    self.TEXTURE_DIRS = list(self.TEXTURE_DIRS) + [directory]
python
{ "resource": "" }
q258112
Settings.add_data_dir
validation
def add_data_dir(self, directory):
    """Hack in a data directory"""
    # Rebuild as a list so tuples (or other sequences) are accepted
    self.DATA_DIRS = list(self.DATA_DIRS) + [directory]
python
{ "resource": "" }
q258113
VAO.render
validation
def render(self, program: moderngl.Program, mode=None, vertices=-1, first=0, instances=1):
    """
    Render the VAO with the given program.

    Args:
        program: The ``moderngl.Program``

    Keyword Args:
        mode: Override the draw mode (``TRIANGLES`` etc)
        vertices (int): The number of vertices to transform
        first (int): The index of the first vertex to start with
        instances (int): The number of instances
    """
    # Fall back to the VAO's own draw mode when no override was given
    draw_mode = self.mode if mode is None else mode
    self.instance(program).render(draw_mode, vertices=vertices, first=first, instances=instances)
python
{ "resource": "" }
q258114
VAO.transform
validation
def transform(self, program: moderngl.Program, buffer: moderngl.Buffer,
              mode=None, vertices=-1, first=0, instances=1):
    """
    Transform vertices, storing the output in a single buffer.

    Args:
        program: The ``moderngl.Program``
        buffer: The ``moderngl.Buffer`` to store the output

    Keyword Args:
        mode: Draw mode (for example ``moderngl.POINTS``)
        vertices (int): The number of vertices to transform
        first (int): The index of the first vertex to start with
        instances (int): The number of instances
    """
    # Fall back to the VAO's own draw mode when no override was given
    run_mode = self.mode if mode is None else mode
    self.instance(program).transform(buffer, mode=run_mode, vertices=vertices, first=first, instances=instances)
python
{ "resource": "" }
q258115
VAO.index_buffer
validation
def index_buffer(self, buffer, index_element_size=4):
    """
    Set the index buffer for this VAO.

    Args:
        buffer: ``moderngl.Buffer``, ``numpy.array`` or ``bytes``

    Keyword Args:
        index_element_size (int): Byte size of each element. 1, 2 or 4

    Raises:
        VAOError: If the buffer is not one of the supported types
    """
    # IMPROVEMENT: isinstance instead of an exact-type membership test,
    # so subclasses of the supported types are also accepted.
    if not isinstance(buffer, (moderngl.Buffer, numpy.ndarray, bytes)):
        raise VAOError("buffer parameter must be a moderngl.Buffer, numpy.ndarray or bytes instance")

    # Normalize arrays and raw bytes into a moderngl buffer
    if isinstance(buffer, numpy.ndarray):
        buffer = self.ctx.buffer(buffer.tobytes())

    if isinstance(buffer, bytes):
        buffer = self.ctx.buffer(data=buffer)

    self._index_buffer = buffer
    self._index_element_size = index_element_size
python
{ "resource": "" }
q258116
VAO.instance
validation
def instance(self, program: moderngl.Program) -> moderngl.VertexArray:
    """
    Obtain the ``moderngl.VertexArray`` instance for the program.
    The instance is only created once and cached internally.

    Returns: ``moderngl.VertexArray`` instance
    """
    # Cache key is the program's OpenGL object id
    vao = self.vaos.get(program.glo)
    if vao:
        return vao

    # All attribute names declared by the program
    program_attributes = [name for name, attr in program._members.items() if isinstance(attr, moderngl.Attribute)]

    # Make sure all attributes are covered
    for attrib_name in program_attributes:
        # Ignore built in attributes for now
        if attrib_name.startswith('gl_'):
            continue

        # Do we have a buffer mapping to this attribute?
        if not sum(buffer.has_attribute(attrib_name) for buffer in self.buffers):
            raise VAOError("VAO {} doesn't have attribute {} for program {}".format(
                self.name, attrib_name, program.name))

    vao_content = []

    # Pick out the attributes we can actually map
    # NOTE(review): buffer.content() appears to remove the names it maps
    # from program_attributes in place; otherwise the leftover check
    # below would always raise for any non-gl_ attribute — confirm
    # against the buffer info class.
    for buffer in self.buffers:
        content = buffer.content(program_attributes)
        if content:
            vao_content.append(content)

    # Any attribute left is not accounted for
    if program_attributes:
        for attrib_name in program_attributes:
            if attrib_name.startswith('gl_'):
                continue

            raise VAOError("Did not find a buffer mapping for {}".format([n for n in program_attributes]))

    # Create the vao, with or without an index (element) buffer
    if self._index_buffer:
        vao = context.ctx().vertex_array(program, vao_content,
                                         self._index_buffer, self._index_element_size)
    else:
        vao = context.ctx().vertex_array(program, vao_content)
    self.vaos[program.glo] = vao

    return vao
python
{ "resource": "" }
q258117
VAO.release
validation
def release(self, buffer=True):
    """
    Destroy the vao object.

    Keyword Args:
        buffer (bool): also release buffers
    """
    # BUG FIX: iterating a dict directly yields only its keys (program
    # glo ids), so ``for key, vao in self.vaos`` raised TypeError.
    for key, vao in self.vaos.items():
        vao.release()

    if buffer:
        for buff in self.buffers:
            buff.buffer.release()

        if self._index_buffer:
            self._index_buffer.release()
python
{ "resource": "" }
q258118
MeshProgram.draw
validation
def draw(self, mesh, projection_matrix=None, view_matrix=None, camera_matrix=None, time=0):
    """
    Draw code for the mesh. Should be overridden.

    :param mesh: The mesh to draw (its ``vao`` is rendered with this program)
    :param projection_matrix: projection_matrix (bytes)
    :param view_matrix: view_matrix (bytes)
    :param camera_matrix: camera_matrix (bytes; unused in this default implementation)
    :param time: The current time (unused in this default implementation)
    """
    self.program["m_proj"].write(projection_matrix)
    self.program["m_mv"].write(view_matrix)
    mesh.vao.render(self.program)
python
{ "resource": "" }
q258119
parse_package_string
validation
def parse_package_string(path):
    """
    Parse the effect package string.
    Can contain the package python path or path to effect class in an effect package.

    Examples::

        # Path to effect package
        examples.cubes

        # Path to effect class
        examples.cubes.Cubes

    Args:
        path: python path to effect package. May also include effect class name.

    Returns:
        tuple: (package_path, effect_class)
    """
    package_path, _, tail = path.rpartition('.')
    # A capitalized final segment is taken to be an effect class name
    if tail[0].isupper():
        return package_path, tail
    return path, ""
python
{ "resource": "" }
q258120
EffectRegistry.get_dirs
validation
def get_dirs(self) -> List[str]:
    """
    Get all effect directories for registered effects.

    NOTE(review): this is a generator despite the ``List[str]``
    annotation; callers receive an iterator of ``<package>/resources``
    paths.
    """
    for package in self.packages:
        yield os.path.join(package.path, 'resources')
python
{ "resource": "" }
q258121
EffectRegistry.get_effect_resources
validation
def get_effect_resources(self) -> List[Any]:
    """
    Get all resources registered in effect packages.
    These are typically located in ``resources.py``
    """
    return [resource for package in self.packages for resource in package.resources]
python
{ "resource": "" }
q258122
EffectRegistry.add_package
validation
def add_package(self, name):
    """
    Registers a single package

    :param name: (str) The effect package to add
    """
    # Strip any trailing effect class name; only the package path matters
    # here (cls_name is intentionally unused)
    name, cls_name = parse_package_string(name)

    # Already registered?
    if name in self.package_map:
        return

    package = EffectPackage(name)
    package.load()

    self.packages.append(package)
    self.package_map[package.name] = package

    # Load effect package dependencies
    # NOTE(review): 'polulate' looks like a typo'd method name
    # ('populate'); confirm against the method actually defined on this
    # class before renaming.
    self.polulate(package.effect_packages)
python
{ "resource": "" }
q258123
EffectRegistry.get_package
validation
def get_package(self, name) -> 'EffectPackage':
    """
    Get a package by python path. Can also contain path to an effect.

    Args:
        name (str): Path to effect package or effect

    Returns:
        The requested EffectPackage

    Raises:
        EffectError when no package is found
    """
    package_name, _ = parse_package_string(name)

    if package_name not in self.package_map:
        raise EffectError("No package '{}' registered".format(package_name))

    return self.package_map[package_name]
python
{ "resource": "" }
q258124
EffectRegistry.find_effect_class
validation
def find_effect_class(self, path) -> Type[Effect]:
    """
    Find an effect class by class name or full python path to class

    Args:
        path (str): effect class name or full python path to effect class

    Returns:
        Effect class

    Raises:
        EffectError if no class is found
    """
    package_name, class_name = parse_package_string(path)

    # Fully qualified path: look only in that package (raises on miss)
    if package_name:
        package = self.get_package(package_name)
        return package.find_effect_class(class_name, raise_for_error=True)

    # Bare class name: search every registered package
    for package in self.packages:
        effect_cls = package.find_effect_class(class_name)
        if effect_cls:
            return effect_cls

    raise EffectError("No effect class '{}' found in any packages".format(class_name))
python
{ "resource": "" }
q258125
EffectPackage.runnable_effects
validation
def runnable_effects(self) -> List[Type[Effect]]:
    """Returns the runnable effects in the package"""
    return list(filter(lambda cls: cls.runnable, self.effect_classes))
python
{ "resource": "" }
q258126
EffectPackage.load_package
validation
def load_package(self):
    """Find and import the effect package module.

    Raises:
        ModuleNotFoundError: when the package cannot be imported
    """
    try:
        self.package = importlib.import_module(self.name)
    except ModuleNotFoundError as err:
        # IMPROVEMENT: chain the original error so a missing dependency
        # *inside* the package is distinguishable from the package itself
        # being absent.
        raise ModuleNotFoundError("Effect package '{}' not found.".format(self.name)) from err
python
{ "resource": "" }
q258127
EffectPackage.load_effects_classes
validation
def load_effects_classes(self):
    """Iterate the module attributes picking out effects"""
    self.effect_classes = []

    for _, cls in inspect.getmembers(self.effect_module):
        if inspect.isclass(cls):
            # Skip the Effect base class itself
            if cls == Effect:
                continue

            if issubclass(cls, Effect):
                self.effect_classes.append(cls)
                self.effect_class_map[cls.__name__] = cls
                # Tag each effect class with its fully qualified name
                cls._name = "{}.{}".format(self.effect_module_name, cls.__name__)
python
{ "resource": "" }
q258128
EffectPackage.load_resource_module
validation
def load_resource_module(self):
    """Load the package's ``dependencies`` module and validate its
    ``resources`` and ``effect_packages`` attributes.

    Raises:
        EffectError: when the module is missing, has errors, or its
        attributes are missing or of the wrong type
    """
    # Attempt to load the dependencies module
    try:
        name = '{}.{}'.format(self.name, 'dependencies')
        self.dependencies_module = importlib.import_module(name)
    except ModuleNotFoundError as err:
        raise EffectError(
            (
                "Effect package '{}' has no 'dependencies' module or the module has errors. "
                "Forwarded error from importlib: {}"
            ).format(self.name, err))

    # Fetch the resource descriptions
    try:
        self.resources = getattr(self.dependencies_module, 'resources')
    except AttributeError:
        raise EffectError("Effect dependencies module '{}' has no 'resources' attribute".format(name))

    if not isinstance(self.resources, list):
        raise EffectError(
            "Effect dependencies module '{}': 'resources' is of type {} instead of a list".format(
                name, type(self.resources)))

    # Fetch the effect class list
    try:
        self.effect_packages = getattr(self.dependencies_module, 'effect_packages')
    except AttributeError:
        # BUG FIX: message previously read "has 'effect_packages' attribute"
        # (missing the word "no")
        raise EffectError("Effect dependencies module '{}' has no 'effect_packages' attribute".format(name))

    if not isinstance(self.effect_packages, list):
        # BUG FIX: previously reported type(self.effects) — a different
        # (possibly undefined) attribute — instead of the one validated here
        raise EffectError(
            "Effect dependencies module '{}': 'effect_packages' is of type {} instead of a list".format(
                name, type(self.effect_packages)))
python
{ "resource": "" }
q258129
Timeline.draw
validation
def draw(self, time, frametime, target):
    """
    Fetch the rocket track value for every runnable effect and draw
    each effect whose value exceeds 0.5.
    """
    for effect in self.effects:
        if effect.rocket_timeline_track.time_value(time) > 0.5:
            effect.draw(time, frametime, target)
python
{ "resource": "" }
q258130
Loader.load
validation
def load(self):
    """Load a 2d texture.

    Returns:
        The texture created via ``self.ctx.texture`` with the resource
        meta attached under ``extra['meta']``.
    """
    self._open_image()

    components, data = image_data(self.image)
    texture = self.ctx.texture(
        self.image.size,
        components,
        data,
    )
    # Keep the resource description reachable from the texture object
    texture.extra = {'meta': self.meta}

    if self.meta.mipmap:
        texture.build_mipmaps()

    self._close_image()

    return texture
python
{ "resource": "" }
q258131
ProgramShaders.from_single
validation
def from_single(cls, meta: ProgramDescription, source: str):
    """Initialize a single glsl string containing all shaders.

    The vertex shader is always created; the other stages are only
    created when their define name occurs in the source.
    NOTE(review): the ``X in source`` checks are plain substring tests
    on the stage constants — confirm those constants are the expected
    preprocessor defines.
    """
    instance = cls(meta)
    instance.vertex_source = ShaderSource(
        VERTEX_SHADER,
        meta.path or meta.vertex_shader,
        source
    )

    if GEOMETRY_SHADER in source:
        instance.geometry_source = ShaderSource(
            GEOMETRY_SHADER,
            meta.path or meta.geometry_shader,
            source,
        )

    if FRAGMENT_SHADER in source:
        instance.fragment_source = ShaderSource(
            FRAGMENT_SHADER,
            meta.path or meta.fragment_shader,
            source,
        )

    if TESS_CONTROL_SHADER in source:
        instance.tess_control_source = ShaderSource(
            TESS_CONTROL_SHADER,
            meta.path or meta.tess_control_shader,
            source,
        )

    if TESS_EVALUATION_SHADER in source:
        instance.tess_evaluation_source = ShaderSource(
            TESS_EVALUATION_SHADER,
            meta.path or meta.tess_evaluation_shader,
            source,
        )

    return instance
python
{ "resource": "" }
q258132
ProgramShaders.from_separate
validation
def from_separate(cls, meta: ProgramDescription, vertex_source, geometry_source=None, fragment_source=None,
                  tess_control_source=None, tess_evaluation_source=None):
    """Initialize multiple shader strings.

    Args:
        meta: The program description
        vertex_source: vertex shader source string

    Keyword Args:
        geometry_source: geometry shader source string
        fragment_source: fragment shader source string
        tess_control_source: tessellation control shader source string
        tess_evaluation_source: tessellation evaluation shader source string
    """
    instance = cls(meta)
    instance.vertex_source = ShaderSource(
        VERTEX_SHADER,
        meta.path or meta.vertex_shader,
        vertex_source,
    )

    if geometry_source:
        instance.geometry_source = ShaderSource(
            GEOMETRY_SHADER,
            meta.path or meta.geometry_shader,
            geometry_source,
        )

    if fragment_source:
        instance.fragment_source = ShaderSource(
            FRAGMENT_SHADER,
            meta.path or meta.fragment_shader,
            fragment_source,
        )

    if tess_control_source:
        instance.tess_control_source = ShaderSource(
            TESS_CONTROL_SHADER,
            meta.path or meta.tess_control_shader,
            tess_control_source,
        )

    if tess_evaluation_source:
        instance.tess_evaluation_source = ShaderSource(
            TESS_EVALUATION_SHADER,
            # BUG FIX: previously fell back to meta.tess_control_shader
            # (copy-paste error), mislabeling the evaluation shader path.
            meta.path or meta.tess_evaluation_shader,
            tess_evaluation_source,
        )

    return instance
python
{ "resource": "" }
q258133
ShaderSource.print
validation
def print(self): """Print the shader lines""" print("---[ START {} ]---".format(self.name)) for i, line in enumerate(self.lines): print("{}: {}".format(str(i).zfill(3), line)) print("---[ END {} ]---".format(self.name))
python
{ "resource": "" }
q258134
BaseProject.load
validation
def load(self):
    """
    Loads this project instance: create effect classes, schedule
    resource descriptions, load every resource pool, then create
    effect instances and run the post-load hook.
    """
    self.create_effect_classes()

    self._add_resource_descriptions_to_pools(self.create_external_resources())
    self._add_resource_descriptions_to_pools(self.create_resources())

    # Load each resource pool, keying results by the meta label
    for meta, resource in resources.textures.load_pool():
        self._textures[meta.label] = resource

    for meta, resource in resources.programs.load_pool():
        self._programs[meta.label] = resource

    for meta, resource in resources.scenes.load_pool():
        self._scenes[meta.label] = resource

    for meta, resource in resources.data.load_pool():
        self._data[meta.label] = resource

    self.create_effect_instances()
    self.post_load()
python
{ "resource": "" }
q258135
BaseProject._add_resource_descriptions_to_pools
validation
def _add_resource_descriptions_to_pools(self, meta_list): """ Takes a list of resource descriptions adding them to the resource pool they belong to scheduling them for loading. """ if not meta_list: return for meta in meta_list: getattr(resources, meta.resource_type).add(meta)
python
{ "resource": "" }
q258136
BaseProject.reload_programs
validation
def reload_programs(self):
    """
    Reload all shader programs with the reloadable flag set
    """
    print("Reloading programs:")
    for name, program in self._programs.items():
        # Only entries that actually hold a compiled program are reloaded
        if not getattr(program, 'program', None):
            continue
        print(" - {}".format(program.meta.label))
        program.program = resources.programs.load(program.meta)
python
{ "resource": "" }
q258137
image_data
validation
def image_data(image):
    """Return (component count, raw bytes) for an image.

    NOTE: the image mode is not inspected or converted; the data is
    passed through as-is.
    """
    raw = image.tobytes()
    width, height = image.size
    # Components derived from total byte length per pixel
    return len(raw) // (width * height), raw
python
{ "resource": "" }
q258138
BaseLoader._find_last_of
validation
def _find_last_of(self, path, finders): """Find the last occurance of the file in finders""" found_path = None for finder in finders: result = finder.find(path) if result: found_path = result return found_path
python
{ "resource": "" }
q258139
Command.initial_sanity_check
validation
def initial_sanity_check(self): """Checks if we can create the project""" # Check for python module collision self.try_import(self.project_name) # Is the name a valid identifier? self.validate_name(self.project_name) # Make sure we don't mess with existing directories if os.path.exists(self.project_name): print("Directory {} already exist. Aborting.".format(self.project_name)) return False if os.path.exists('manage.py'): print("A manage.py file already exist in the current directory. Aborting.") return False return True
python
{ "resource": "" }
q258140
Command.create_entrypoint
validation
def create_entrypoint(self):
    """Write manage.py in the current directory.

    Reads the template, substitutes the project name via str.format,
    writes the result and marks it executable.
    """
    with open(os.path.join(self.template_dir, 'manage.py'), 'r') as fd:
        data = fd.read().format(project_name=self.project_name)

    with open('manage.py', 'w') as fd:
        fd.write(data)

    # NOTE(review): 0o777 is world-writable; 0o755 is the conventional
    # mode for an executable script — confirm before tightening.
    os.chmod('manage.py', 0o777)
python
{ "resource": "" }
q258141
Command.get_template_dir
validation
def get_template_dir(self):
    """Returns the absolute path to template directory"""
    here = os.path.dirname(os.path.abspath(__file__))
    # Two levels up from this module, then into 'project_template'
    package_root = os.path.dirname(os.path.dirname(here))
    return os.path.join(package_root, 'project_template')
python
{ "resource": "" }
q258142
Programs.resolve_loader
validation
def resolve_loader(self, meta: ProgramDescription):
    """
    Resolve program loader
    """
    # Default loader: 'single' when a combined source path is given,
    # otherwise 'separate'
    if not meta.loader:
        meta.loader = 'single' if meta.path else 'separate'

    match = next((cls for cls in self._loaders if cls.name == meta.loader), None)
    if match is None:
        raise ImproperlyConfigured(
            (
                "Program {} has no loader class registered."
                "Check PROGRAM_LOADERS or PROGRAM_DIRS"
            ).format(meta.path)
        )
    meta.loader_cls = match
python
{ "resource": "" }
q258143
ac_encode
validation
def ac_encode(text, probs):
    """Encode a text using arithmetic coding with the provided probabilities.

    This is a wrapper for :py:meth:`Arithmetic.encode`.

    Parameters
    ----------
    text : str
        A string to encode
    probs : dict
        A probability statistics dictionary generated by
        :py:meth:`Arithmetic.train`

    Returns
    -------
    tuple
        The arithmetically coded text
    """
    encoder = Arithmetic()
    encoder.set_probs(probs)
    return encoder.encode(text)
python
{ "resource": "" }
q258144
Arithmetic.train
validation
def train(self, text):
    r"""Generate 0-order probability statistics from the provided text.

    Each character is assigned a half-open ``(low, high)`` Fraction
    range; ranges are allocated most-frequent first (ties broken by
    reverse character order). The NUL character (0x00) marks the end of
    data, so any NUL in the input is replaced by a space and NUL is
    given a count of one.

    Parameters
    ----------
    text : str
        The text data over which to calculate probability statistics.
    """
    text = text_type(text)
    if '\x00' in text:
        text = text.replace('\x00', ' ')

    counts = Counter(text)
    counts['\x00'] = 1
    total_letters = sum(counts.values())

    self._probs = {}
    low = Fraction(0)
    cumulative = 0
    for char, count in sorted(
        counts.items(), key=lambda kv: (kv[1], kv[0]), reverse=True
    ):
        cumulative += count
        high = Fraction(cumulative, total_letters)
        self._probs[char] = (low, high)
        low = high
python
{ "resource": "" }
q258145
Arithmetic.encode
validation
def encode(self, text):
    """Encode a text using arithmetic coding.

    Text plus the trained 0-order probability statistics are reduced to
    ``(longval, nbits)``; the encoded number is
    ``Fraction(longval, 2**nbits)``. NULs in the input are replaced by
    spaces and a NUL terminator marks end of data.

    Parameters
    ----------
    text : str
        A string to encode

    Returns
    -------
    tuple
        The arithmetically coded text
    """
    text = text_type(text)
    if '\x00' in text:
        text = text.replace('\x00', ' ')

    # Narrow the [minval, maxval) interval through each character's
    # probability range, terminator included.
    minval = Fraction(0)
    maxval = Fraction(1)
    for char in text + '\x00':
        low, high = self._probs[char]
        span = maxval - minval
        maxval = minval + high * span
        minval = minval + low * span

    # Scale up until the (halved) interval width reaches 1: that gives
    # the minimum number of bits needed to resolve down to the
    # end-of-data character.
    delta = (maxval - minval) / 2
    nbits = long(0)
    while delta < 1:
        nbits += 1
        delta *= 2

    # This condition shouldn't ever be true
    if nbits == 0:  # pragma: no cover
        return 0, 0

    # Midpoint scaled by 2**nbits (the 2**(nbits-1) factor applies the /2);
    # the integer division truncation is deliberate.
    avg = (maxval + minval) * 2 ** (nbits - 1)
    return avg.numerator // avg.denominator, nbits
python
{ "resource": "" }
q258146
NGramCorpus.corpus_importer
validation
def corpus_importer(self, corpus, n_val=1, bos='_START_', eos='_END_'):
    r"""Fill in self.ngcorpus from a Corpus argument.

    Parameters
    ----------
    corpus : Corpus
        The Corpus from which to initialize the n-gram corpus
    n_val : int
        Maximum n value for n-grams
    bos : str
        String to insert as an indicator of beginning of sentence
    eos : str
        String to insert as an indicator of end of sentence

    Raises
    ------
    TypeError
        Corpus argument of the Corpus class required.
    """
    if not corpus or not isinstance(corpus, Corpus):
        raise TypeError('Corpus argument of the Corpus class required.')

    for sent in corpus.sents():
        # Unigram counts
        for word, count in Counter(sent).items():
            self._add_to_ngcorpus(self.ngcorpus, [word], count)

        if n_val > 1:
            # Pad with sentence boundary markers before counting n-grams
            padded = sent
            if bos and bos != '':
                padded = [bos] + padded
            if eos and eos != '':
                padded = padded + [eos]
            for order in range(2, n_val + 1):
                for start in range(len(padded) - order + 1):
                    self._add_to_ngcorpus(
                        self.ngcorpus, padded[start:start + order], 1
                    )
python
{ "resource": "" }
q258147
NGramCorpus.get_count
validation
def get_count(self, ngram, corpus=None):
    r"""Return the count of an n-gram in the corpus.

    Parameters
    ----------
    ngram : str
        The n-gram to retrieve the count of from the n-gram corpus
    corpus : Corpus
        The corpus (defaults to ``self.ngcorpus``)

    Returns
    -------
    int
        The n-gram count (0 when the n-gram is absent)
    """
    if not corpus:
        corpus = self.ngcorpus

    # Leaf node: the count lives under the None key
    if not ngram:
        return corpus[None]

    # Strings are split into word lists; lists/tuples pass through
    if isinstance(ngram, (text_type, str)):
        ngram = text_type(ngram).split()

    # Walk the trie one word at a time; unseen prefixes count as zero
    head = ngram[0]
    if head in corpus:
        return self.get_count(ngram[1:], corpus[head])
    return 0
python
{ "resource": "" }
q258148
NGramCorpus._add_to_ngcorpus
validation
def _add_to_ngcorpus(self, corpus, words, count): """Build up a corpus entry recursively. Parameters ---------- corpus : Corpus The corpus words : [str] Words to add to the corpus count : int Count of words """ if words[0] not in corpus: corpus[words[0]] = Counter() if len(words) == 1: corpus[words[0]][None] += count else: self._add_to_ngcorpus(corpus[words[0]], words[1:], count)
python
{ "resource": "" }
q258149
NGramCorpus.gng_importer
validation
def gng_importer(self, corpus_file):
    """Fill in self.ngcorpus from a Google NGram corpus file.

    Parameters
    ----------
    corpus_file : file
        The Google NGram file from which to initialize the n-gram corpus
    """
    with c_open(corpus_file, 'r', encoding='utf-8') as gng:
        for line in gng:
            fields = line.rstrip().split('\t')
            # fields[0] holds the n-gram, fields[2] its match count
            self._add_to_ngcorpus(self.ngcorpus, fields[0].split(), int(fields[2]))
python
{ "resource": "" }
q258150
NGramCorpus.tf
validation
def tf(self, term):
    r"""Return the term frequency of *term*.

    Computed as ``1 + log10(count)``, or 0.0 when the term is absent.

    Parameters
    ----------
    term : str
        The term for which to calculate tf

    Returns
    -------
    float
        The term frequency (tf)

    Raises
    ------
    ValueError
        tf can only calculate the frequency of individual words
    """
    if ' ' in term:
        raise ValueError(
            'tf can only calculate the term frequency of individual words'
        )
    count = self.get_count(term)
    return 1 + log10(count) if count else 0.0
python
{ "resource": "" }
q258151
BWT.encode
validation
def encode(self, word, terminator='\0'):
    r"""Return the Burrows-Wheeler transformed form of a word.

    Parameters
    ----------
    word : str
        The word to transform using BWT
    terminator : str
        A character added to signal the end of the string

    Returns
    -------
    str
        Word encoded by BWT

    Raises
    ------
    ValueError
        Specified terminator already in word.
    """
    if not word:
        return terminator

    if terminator in word:
        raise ValueError(
            'Specified terminator, {}, already in word.'.format(
                terminator if terminator != '\0' else '\\0'
            )
        )

    word += terminator
    # Sort all rotations; the transform is the final column
    rotations = sorted(word[i:] + word[:i] for i in range(len(word)))
    return ''.join(rotation[-1] for rotation in rotations)
python
{ "resource": "" }
q258152
BWT.decode
validation
def decode(self, code, terminator='\0'):
    r"""Return a word decoded from BWT form.

    Parameters
    ----------
    code : str
        The word to transform from BWT form
    terminator : str
        A character added to signal the end of the string

    Returns
    -------
    str
        Word decoded by BWT

    Raises
    ------
    ValueError
        Specified terminator absent from code.
    """
    if not code:
        return ''

    if terminator not in code:
        raise ValueError(
            'Specified terminator, {}, absent from code.'.format(
                terminator if terminator != '\0' else '\\0'
            )
        )

    # Rebuild the sorted rotation table by repeatedly prepending the
    # BWT column and re-sorting
    table = [''] * len(code)
    for _ in range(len(code)):
        table = sorted(code[j] + table[j] for j in range(len(code)))

    # The row ending in the terminator is the original word
    decoded = [row for row in table if row[-1] == terminator][0]
    return decoded.rstrip(terminator)
python
{ "resource": "" }
q258153
Indel.dist_abs
validation
def dist_abs(self, src, tar):
    """Return the indel distance between two strings.

    Parameters
    ----------
    src : str
        Source string for comparison
    tar : str
        Target string for comparison

    Returns
    -------
    int
        Indel distance

    Examples
    --------
    >>> cmp = Indel()
    >>> cmp.dist_abs('cat', 'hat')
    2
    >>> cmp.dist_abs('Niall', 'Neil')
    3
    """
    # Substitutions and transpositions are priced out (cost 9999), leaving
    # the Levenshtein computation to count inserts & deletes only.
    edit_costs = (1, 1, 9999, 9999)
    return self._lev.dist_abs(src, tar, mode='lev', cost=edit_costs)
python
{ "resource": "" }
q258154
Indel.dist
validation
def dist(self, src, tar):
    """Return the normalized indel distance between two strings.

    Equivalent to normalized Levenshtein distance when only inserts and
    deletes are permitted.

    Parameters
    ----------
    src : str
        Source string for comparison
    tar : str
        Target string for comparison

    Returns
    -------
    float
        Normalized indel distance

    Examples
    --------
    >>> cmp = Indel()
    >>> cmp.dist('ATCG', 'TAGC')
    0.5
    """
    # Identical strings need no normalization (and avoid 0/0 on empties).
    if src == tar:
        return 0.0
    combined_len = len(src) + len(tar)
    return self.dist_abs(src, tar) / combined_len
python
{ "resource": "" }
q258155
_Distance.sim
validation
def sim(self, src, tar, *args, **kwargs):
    """Return similarity.

    Defined as the complement of the (normalized) distance.

    Parameters
    ----------
    src : str
        Source string for comparison
    tar : str
        Target string for comparison
    *args
        Variable length argument list.
    **kwargs
        Arbitrary keyword arguments.

    Returns
    -------
    float
        Similarity
    """
    distance = self.dist(src, tar, *args, **kwargs)
    return 1.0 - distance
python
{ "resource": "" }
q258156
_Distance.dist_abs
validation
def dist_abs(self, src, tar, *args, **kwargs):
    """Return absolute distance.

    By default this simply delegates to the normalized distance; subclasses
    with a meaningful absolute measure override it.

    Parameters
    ----------
    src : str
        Source string for comparison
    tar : str
        Target string for comparison
    *args
        Variable length argument list.
    **kwargs
        Arbitrary keyword arguments.

    Returns
    -------
    int
        Absolute distance
    """
    distance = self.dist(src, tar, *args, **kwargs)
    return distance
python
{ "resource": "" }
q258157
dist_baystat
validation
def dist_baystat(src, tar, min_ss_len=None, left_ext=None, right_ext=None):
    """Return the Baystat distance.

    This is a wrapper for :py:meth:`Baystat.dist`.

    Parameters
    ----------
    src : str
        Source string for comparison
    tar : str
        Target string for comparison
    min_ss_len : int
        Minimum substring length to be considered
    left_ext : int
        Left-side extension length
    right_ext : int
        Right-side extension length

    Returns
    -------
    float
        The Baystat distance

    Examples
    --------
    >>> dist_baystat('Niall', 'Neil')
    0.6
    """
    measure = Baystat()
    return measure.dist(src, tar, min_ss_len, left_ext, right_ext)
python
{ "resource": "" }
q258158
dist_tversky
validation
def dist_tversky(src, tar, qval=2, alpha=1, beta=1, bias=None):
    """Return the Tversky distance between two strings.

    This is a wrapper for :py:meth:`Tversky.dist`.

    Parameters
    ----------
    src : str
        Source string (or QGrams/Counter objects) for comparison
    tar : str
        Target string (or QGrams/Counter objects) for comparison
    qval : int
        The length of each q-gram; 0 for non-q-gram version
    alpha : float
        Tversky index parameter
    beta : float
        Tversky index parameter
    bias : float
        The symmetric Tversky index bias parameter

    Returns
    -------
    float
        Tversky distance

    Examples
    --------
    >>> dist_tversky('cat', 'hat')
    0.6666666666666667
    """
    measure = Tversky()
    return measure.dist(src, tar, qval, alpha, beta, bias)
python
{ "resource": "" }
q258159
LCSseq.lcsseq
validation
def lcsseq(self, src, tar):
    """Return the longest common subsequence of two strings.

    Based on the dynamic programming algorithm from
    http://rosettacode.org/wiki/Longest_common_subsequence
    :cite:`rosettacode:2018b`. This is licensed GFDL 1.2.

    Modifications include:
        conversion to a numpy array in place of a list of lists

    Parameters
    ----------
    src : str
        Source string for comparison
    tar : str
        Target string for comparison

    Returns
    -------
    str
        The longest common subsequence

    Examples
    --------
    >>> sseq = LCSseq()
    >>> sseq.lcsseq('cat', 'hat')
    'at'
    >>> sseq.lcsseq('Niall', 'Neil')
    'Nil'
    >>> sseq.lcsseq('aluminum', 'Catalan')
    'aln'
    >>> sseq.lcsseq('ATCG', 'TAGC')
    'AC'
    """
    # dtype=int: the previous dtype, np_int (an alias of numpy.int), was
    # deprecated in NumPy 1.20 and removed in 1.24; the builtin int maps to
    # the same default integer dtype.
    lengths = np_zeros((len(src) + 1, len(tar) + 1), dtype=int)

    # row 0 and column 0 are initialized to 0 already
    for i, src_char in enumerate(src):
        for j, tar_char in enumerate(tar):
            if src_char == tar_char:
                lengths[i + 1, j + 1] = lengths[i, j] + 1
            else:
                lengths[i + 1, j + 1] = max(
                    lengths[i + 1, j], lengths[i, j + 1]
                )

    # read the subsequence out from the matrix, walking back from the corner
    result = ''
    i, j = len(src), len(tar)
    while i != 0 and j != 0:
        if lengths[i, j] == lengths[i - 1, j]:
            i -= 1
        elif lengths[i, j] == lengths[i, j - 1]:
            j -= 1
        else:
            # characters match here; prepend and step diagonally
            result = src[i - 1] + result
            i -= 1
            j -= 1
    return result
python
{ "resource": "" }
q258160
LCSseq.sim
validation
def sim(self, src, tar):
    r"""Return the longest common subsequence similarity of two strings.

    :math:`sim_{LCSseq}(s,t) = \frac{|LCSseq(s,t)|}{max(|s|, |t|)}`

    Parameters
    ----------
    src : str
        Source string for comparison
    tar : str
        Target string for comparison

    Returns
    -------
    float
        LCSseq similarity

    Examples
    --------
    >>> sseq = LCSseq()
    >>> sseq.sim('cat', 'hat')
    0.6666666666666666
    >>> sseq.sim('Niall', 'Neil')
    0.6
    """
    # Shortcut the trivial cases before running the full DP.
    if src == tar:
        return 1.0
    if not src or not tar:
        return 0.0
    subsequence = self.lcsseq(src, tar)
    return len(subsequence) / max(len(src), len(tar))
python
{ "resource": "" }
q258161
Prefix.sim
validation
def sim(self, src, tar):
    """Return the prefix similarity of two strings.

    The ratio of the length of the longest shared prefix to the length of
    the shorter term.

    Parameters
    ----------
    src : str
        Source string for comparison
    tar : str
        Target string for comparison

    Returns
    -------
    float
        Prefix similarity

    Examples
    --------
    >>> cmp = Prefix()
    >>> cmp.sim('cat', 'hat')
    0.0
    >>> cmp.sim('Niall', 'Neil')
    0.25
    """
    if src == tar:
        return 1.0
    if not src or not tar:
        return 0.0
    # Count matching leading characters up to the shorter string's length.
    limit = min(len(src), len(tar))
    matched = 0
    while matched < limit and src[matched] == tar[matched]:
        matched += 1
    return matched / limit
python
{ "resource": "" }
q258162
Corpus.docs_of_words
validation
def docs_of_words(self):
    r"""Return the docs in the corpus with sentence boundaries flattened.

    Each inner list holds all the words of one document, with the
    sentence level of nesting removed.

    Returns
    -------
    [[str]]
        The docs in the corpus as a list of lists of strs

    Example
    -------
    >>> tqbf = 'The quick brown fox jumped over the lazy dog.\n'
    >>> tqbf += 'And then it slept.\n And the dog ran off.'
    >>> corp = Corpus(tqbf)
    >>> len(corp.docs_of_words())
    1
    """
    flattened = []
    for doc in self.corpus:
        words = []
        for sentence in doc:
            words.extend(sentence)
        flattened.append(words)
    return flattened
python
{ "resource": "" }
q258163
Corpus.raw
validation
def raw(self):
    r"""Return the raw corpus.

    Reconstructed by joining words with spaces, sentences with the
    corpus' sentence split character, and documents with its document
    split character.

    Returns
    -------
    str
        The raw corpus

    Example
    -------
    >>> tqbf = 'The quick brown fox jumped over the lazy dog.\n'
    >>> tqbf += 'And then it slept.\n And the dog ran off.'
    >>> corp = Corpus(tqbf)
    >>> len(corp.raw())
    85
    """
    docs = [
        self.sent_split.join(' '.join(sentence) for sentence in doc)
        for doc in self.corpus
    ]
    return self.doc_split.join(docs)
python
{ "resource": "" }
q258164
Corpus.idf
validation
def idf(self, term, transform=None):
    r"""Calculate the Inverse Document Frequency of a term in the corpus.

    Parameters
    ----------
    term : str
        The term to calculate the IDF of
    transform : function
        A function applied to each document term before checking for the
        presence of ``term``

    Returns
    -------
    float
        The IDF (``inf`` if the term appears in no document)

    Examples
    --------
    >>> tqbf = 'The quick brown fox jumped over the lazy dog.\n\n'
    >>> tqbf += 'And then it slept.\n\n And the dog ran off.'
    >>> corp = Corpus(tqbf)
    >>> round(corp.idf('dog'), 10)
    0.4771212547
    """
    docs = self.docs_of_words()
    matches = 0
    for doc in docs:
        vocabulary = set(doc)
        if transform:
            vocabulary = {transform(word) for word in vocabulary}
        if term in vocabulary:
            matches += 1
    if not matches:
        # Term absent from every document.
        return float('inf')
    return log10(len(docs) / matches)
python
{ "resource": "" }
q258165
PaiceHusk.stem
validation
def stem(self, word):
    """Return Paice-Husk stem.

    Parameters
    ----------
    word : str
        The word to stem

    Returns
    -------
    str
        Word stem

    Examples
    --------
    >>> stmr = PaiceHusk()
    >>> stmr.stem('assumption')
    'assum'
    >>> stmr.stem('verifiable')
    'ver'
    >>> stmr.stem('fancies')
    'fant'
    >>> stmr.stem('fanciful')
    'fancy'
    >>> stmr.stem('torment')
    'tor'
    """
    # terminate: set by a rule to stop all further stemming.
    # intact: True while the word is still unmodified; some rules only
    # apply to intact words.
    terminate = False
    intact = True
    while not terminate:
        # Try suffix lengths from longest (6) to shortest (1); the rule
        # table is keyed first by ending length, then by the ending itself.
        # NOTE(review): assumes self._rule_table maps length -> {suffix:
        # rule(s)} and _apply_rule returns (word, accept, intact,
        # terminate) -- confirm against the class definition.
        for n in range(6, 0, -1):
            if word[-n:] in self._rule_table[n]:
                accept = False
                if len(self._rule_table[n][word[-n:]]) < 4:
                    # Multiple candidate rules for this ending; apply each
                    # in order until one is accepted.
                    for rule in self._rule_table[n][word[-n:]]:
                        (
                            word,
                            accept,
                            intact,
                            terminate,
                        ) = self._apply_rule(word, rule, intact, terminate)
                        if accept:
                            break
                else:
                    # A single rule tuple for this ending.
                    rule = self._rule_table[n][word[-n:]]
                    (word, accept, intact, terminate) = self._apply_rule(
                        word, rule, intact, terminate
                    )
                if accept:
                    # A rule fired: restart the suffix scan on the new word.
                    break
        else:
            # No rule matched at any length: stemming is complete.
            break

    return word
python
{ "resource": "" }
q258166
BeiderMorse._language
validation
def _language(self, name, name_mode):
    """Return the best guess language ID for the word and language choices.

    Parameters
    ----------
    name : str
        The term to guess the language of
    name_mode : str
        The name mode of the algorithm: ``gen`` (default),
        ``ash`` (Ashkenazi), or ``sep`` (Sephardic)

    Returns
    -------
    int
        Language ID (a bitfield of candidate languages)
    """
    name = name.strip().lower()

    # Bitmask with one bit set per language supported in this name mode.
    all_langs = (
        sum(_LANG_DICT[lang] for lang in BMDATA[name_mode]['languages']) - 1
    )
    remaining = all_langs

    # Each rule either restricts the candidates to its languages (accept)
    # or excludes them (reject, via the complement mod the full mask).
    for pattern, languages, accept in BMDATA[name_mode]['language_rules']:
        if search(pattern, name) is None:
            continue
        if accept:
            remaining &= languages
        else:
            remaining &= (~languages) % (all_langs + 1)

    if remaining == L_NONE:
        remaining = L_ANY
    return remaining
python
{ "resource": "" }
q258167
BeiderMorse._redo_language
validation
def _redo_language( self, term, name_mode, rules, final_rules1, final_rules2, concat ): """Reassess the language of the terms and call the phonetic encoder. Uses a split multi-word term. Parameters ---------- term : str The term to encode via Beider-Morse name_mode : str The name mode of the algorithm: ``gen`` (default), ``ash`` (Ashkenazi), or ``sep`` (Sephardic) rules : tuple The set of initial phonetic transform regexps final_rules1 : tuple The common set of final phonetic transform regexps final_rules2 : tuple The specific set of final phonetic transform regexps concat : bool A flag to indicate concatenation Returns ------- str A Beider-Morse phonetic code """ language_arg = self._language(term, name_mode) return self._phonetic( term, name_mode, rules, final_rules1, final_rules2, language_arg, concat, )
python
{ "resource": "" }
q258168
BeiderMorse._apply_final_rules
validation
def _apply_final_rules(self, phonetic, final_rules, language_arg, strip):
    """Apply a set of final rules to the phonetic encoding.

    Parameters
    ----------
    phonetic : str
        The term to which to apply the final rules
    final_rules : tuple
        The set of final phonetic transform regexps
    language_arg : int
        An integer representing the target language of the phonetic
        encoding
    strip : bool
        Flag to indicate whether to normalize the language attributes

    Returns
    -------
    str
        A Beider-Morse phonetic code
    """
    # optimization to save time
    if not final_rules:
        return phonetic

    # expand the result
    phonetic = self._expand_alternates(phonetic)
    phonetic_array = phonetic.split('|')

    # Each alternative is rewritten independently, then recombined below.
    for k in range(len(phonetic_array)):
        phonetic = phonetic_array[k]
        phonetic2 = ''
        # phoneticx: the same alternative with language attributes stripped,
        # used for pattern/context matching; phonetic keeps the attributes.
        phoneticx = self._normalize_lang_attrs(phonetic, True)

        i = 0
        while i < len(phonetic):
            found = False

            if phonetic[i] == '[':  # skip over language attribute
                attrib_start = i
                i += 1
                while True:
                    if phonetic[i] == ']':
                        i += 1
                        phonetic2 += phonetic[attrib_start:i]
                        break
                    i += 1
                continue

            # NOTE(review): assumes each rule is a sequence indexed by the
            # module-level _PATTERN_POS/_LCONTEXT_POS/_RCONTEXT_POS/
            # _PHONETIC_POS constants -- confirm against the rule tables.
            for rule in final_rules:
                pattern = rule[_PATTERN_POS]
                pattern_length = len(pattern)
                lcontext = rule[_LCONTEXT_POS]
                rcontext = rule[_RCONTEXT_POS]

                right = '^' + rcontext
                left = lcontext + '$'

                # check to see if next sequence in phonetic matches the
                # string in the rule
                if (pattern_length > len(phoneticx) - i) or phoneticx[
                    i : i + pattern_length
                ] != pattern:
                    continue

                # check that right context is satisfied
                if rcontext != '':
                    if not search(right, phoneticx[i + pattern_length :]):
                        continue

                # check that left context is satisfied
                if lcontext != '':
                    if not search(left, phoneticx[:i]):
                        continue

                # check for incompatible attributes
                candidate = self._apply_rule_if_compat(
                    phonetic2, rule[_PHONETIC_POS], language_arg
                )
                # The below condition shouldn't ever be false
                if candidate is not None:  # pragma: no branch
                    phonetic2 = candidate
                    found = True
                    break

            if not found:
                # character in name for which there is no substitution in
                # the table
                phonetic2 += phonetic[i]
                pattern_length = 1
            # advance past the consumed pattern (or the single copied char)
            i += pattern_length

        phonetic_array[k] = self._expand_alternates(phonetic2)

    phonetic = '|'.join(phonetic_array)
    if strip:
        phonetic = self._normalize_lang_attrs(phonetic, True)

    # Multiple alternatives are deduplicated and parenthesized.
    if '|' in phonetic:
        phonetic = '(' + self._remove_dupes(phonetic) + ')'

    return phonetic
python
{ "resource": "" }
q258169
BeiderMorse._expand_alternates
validation
def _expand_alternates(self, phonetic): """Expand phonetic alternates separated by |s. Parameters ---------- phonetic : str A Beider-Morse phonetic encoding Returns ------- str A Beider-Morse phonetic code """ alt_start = phonetic.find('(') if alt_start == -1: return self._normalize_lang_attrs(phonetic, False) prefix = phonetic[:alt_start] alt_start += 1 # get past the ( alt_end = phonetic.find(')', alt_start) alt_string = phonetic[alt_start:alt_end] alt_end += 1 # get past the ) suffix = phonetic[alt_end:] alt_array = alt_string.split('|') result = '' for i in range(len(alt_array)): alt = alt_array[i] alternate = self._expand_alternates(prefix + alt + suffix) if alternate != '' and alternate != '[0]': if result != '': result += '|' result += alternate return result
python
{ "resource": "" }
q258170
BeiderMorse._pnums_with_leading_space
validation
def _pnums_with_leading_space(self, phonetic): """Join prefixes & suffixes in cases of alternate phonetic values. Parameters ---------- phonetic : str A Beider-Morse phonetic encoding Returns ------- str A Beider-Morse phonetic code """ alt_start = phonetic.find('(') if alt_start == -1: return ' ' + self._phonetic_number(phonetic) prefix = phonetic[:alt_start] alt_start += 1 # get past the ( alt_end = phonetic.find(')', alt_start) alt_string = phonetic[alt_start:alt_end] alt_end += 1 # get past the ) suffix = phonetic[alt_end:] alt_array = alt_string.split('|') result = '' for alt in alt_array: result += self._pnums_with_leading_space(prefix + alt + suffix) return result
python
{ "resource": "" }
q258171
BeiderMorse._phonetic_numbers
validation
def _phonetic_numbers(self, phonetic): """Prepare & join phonetic numbers. Split phonetic value on '-', run through _pnums_with_leading_space, and join with ' ' Parameters ---------- phonetic : str A Beider-Morse phonetic encoding Returns ------- str A Beider-Morse phonetic code """ phonetic_array = phonetic.split('-') # for names with spaces in them result = ' '.join( [self._pnums_with_leading_space(i)[1:] for i in phonetic_array] ) return result
python
{ "resource": "" }
q258172
BeiderMorse._remove_dupes
validation
def _remove_dupes(self, phonetic): """Remove duplicates from a phonetic encoding list. Parameters ---------- phonetic : str A Beider-Morse phonetic encoding Returns ------- str A Beider-Morse phonetic code """ alt_string = phonetic alt_array = alt_string.split('|') result = '|' for i in range(len(alt_array)): alt = alt_array[i] if alt and '|' + alt + '|' not in result: result += alt + '|' return result[1:-1]
python
{ "resource": "" }
q258173
BeiderMorse._normalize_lang_attrs
validation
def _normalize_lang_attrs(self, text, strip): """Remove embedded bracketed attributes. This (potentially) bitwise-ands bracketed attributes together and adds to the end. This is applied to a single alternative at a time -- not to a parenthesized list. It removes all embedded bracketed attributes, logically-ands them together, and places them at the end. However if strip is true, this can indeed remove embedded bracketed attributes from a parenthesized list. Parameters ---------- text : str A Beider-Morse phonetic encoding (in progress) strip : bool Remove the bracketed attributes (and throw away) Returns ------- str A Beider-Morse phonetic code Raises ------ ValueError No closing square bracket """ uninitialized = -1 # all 1's attrib = uninitialized while '[' in text: bracket_start = text.find('[') bracket_end = text.find(']', bracket_start) if bracket_end == -1: raise ValueError( 'No closing square bracket: text=(' + text + ') strip=(' + text_type(strip) + ')' ) attrib &= int(text[bracket_start + 1 : bracket_end]) text = text[:bracket_start] + text[bracket_end + 1 :] if attrib == uninitialized or strip: return text elif attrib == 0: # means that the attributes were incompatible and there is no # alternative here return '[0]' return text + '[' + str(attrib) + ']'
python
{ "resource": "" }
q258174
BeiderMorse._apply_rule_if_compat
validation
def _apply_rule_if_compat(self, phonetic, target, language_arg): """Apply a phonetic regex if compatible. tests for compatible language rules to do so, apply the rule, expand the results, and detect alternatives with incompatible attributes then drop each alternative that has incompatible attributes and keep those that are compatible if there are no compatible alternatives left, return false otherwise return the compatible alternatives apply the rule Parameters ---------- phonetic : str The Beider-Morse phonetic encoding (so far) target : str A proposed addition to the phonetic encoding language_arg : int An integer representing the target language of the phonetic encoding Returns ------- str A candidate encoding """ candidate = phonetic + target if '[' not in candidate: # no attributes so we need test no further return candidate # expand the result, converting incompatible attributes to [0] candidate = self._expand_alternates(candidate) candidate_array = candidate.split('|') # drop each alternative that has incompatible attributes candidate = '' found = False for i in range(len(candidate_array)): this_candidate = candidate_array[i] if language_arg != 1: this_candidate = self._normalize_lang_attrs( this_candidate + '[' + str(language_arg) + ']', False ) if this_candidate != '[0]': found = True if candidate: candidate += '|' candidate += this_candidate # return false if no compatible alternatives remain if not found: return None # return the result of applying the rule if '|' in candidate: candidate = '(' + candidate + ')' return candidate
python
{ "resource": "" }
q258175
BeiderMorse._language_index_from_code
validation
def _language_index_from_code(self, code, name_mode):
    """Return the index value for a language code.

    Returns L_ANY if more than one language bit is set or the code is out
    of bounds for the given name mode.

    Parameters
    ----------
    code : int
        The language code to interpret
    name_mode : str
        The name mode of the algorithm: ``gen`` (default),
        ``ash`` (Ashkenazi), or ``sep`` (Sephardic)

    Returns
    -------
    int
        Language code index
    """
    max_code = sum(
        _LANG_DICT[lang] for lang in BMDATA[name_mode]['languages']
    )
    if not 1 <= code <= max_code:
        # code out of range
        return L_ANY
    if code & (code - 1):
        # more than one language bit set; use any
        return L_ANY
    return code
python
{ "resource": "" }
q258176
dist_strcmp95
validation
def dist_strcmp95(src, tar, long_strings=False):
    """Return the strcmp95 distance between two strings.

    This is a wrapper for :py:meth:`Strcmp95.dist`.

    Parameters
    ----------
    src : str
        Source string for comparison
    tar : str
        Target string for comparison
    long_strings : bool
        Set to True to increase the probability of a match when the number
        of matched characters is large. Not appropriate for fixed-length
        fields such as phone and social security numbers.

    Returns
    -------
    float
        Strcmp95 distance

    Examples
    --------
    >>> round(dist_strcmp95('cat', 'hat'), 12)
    0.222222222222
    """
    measure = Strcmp95()
    return measure.dist(src, tar, long_strings)
python
{ "resource": "" }
q258177
NRL.encode
validation
def encode(self, word):
    """Return the Naval Research Laboratory phonetic encoding of a word.

    Parameters
    ----------
    word : str
        The word to transform

    Returns
    -------
    str
        The NRL phonetic encoding

    Examples
    --------
    >>> pe = NRL()
    >>> pe.encode('the')
    'DHAX'
    >>> pe.encode('round')
    'rAWnd'
    >>> pe.encode('quick')
    'kwIHk'
    >>> pe.encode('eaten')
    'IYtEHn'
    >>> pe.encode('Smith')
    'smIHTH'
    >>> pe.encode('Larsen')
    'lAArsEHn'
    """

    def _to_regex(pattern, left_match=True):
        # Translate NRL rule-context shorthand into a Python regex:
        #   '#' one or more vowels, ':' zero or more consonants,
        #   '^' one consonant, '.' a voiced consonant,
        #   '%' a common suffix, '+' a front vowel, ' ' a word boundary.
        new_pattern = ''
        replacements = {
            '#': '[AEIOU]+',
            ':': '[BCDFGHJKLMNPQRSTVWXYZ]*',
            '^': '[BCDFGHJKLMNPQRSTVWXYZ]',
            '.': '[BDVGJLMNTWZ]',
            '%': '(ER|E|ES|ED|ING|ELY)',
            '+': '[EIY]',
            ' ': '^',
        }
        for char in pattern:
            new_pattern += (
                replacements[char] if char in replacements else char
            )

        if left_match:
            # Left contexts must end where the current position begins.
            new_pattern += '$'
            if '^' not in pattern:
                new_pattern = '^.*' + new_pattern
        else:
            # Right contexts are anchored at the current position.
            new_pattern = '^' + new_pattern.replace('^', '$')
            if '$' not in new_pattern:
                new_pattern += '.*$'

        return new_pattern

    word = word.upper()

    pron = ''
    pos = 0
    while pos < len(word):
        left_orig = word[:pos]
        right_orig = word[pos:]
        # Rules are bucketed by their first letter; ' ' holds the defaults.
        # NOTE(review): assumes self._rules maps a letter (or ' ') to a
        # sequence of (left, match, right, out) tuples -- confirm against
        # the class definition.
        first = word[pos] if word[pos] in self._rules else ' '
        for rule in self._rules[first]:
            left, match, right, out = rule
            if right_orig.startswith(match):
                # l_pattern/r_pattern are only built when needed; the
                # short-circuit below guarantees they are defined when read.
                if left:
                    l_pattern = _to_regex(left, left_match=True)
                if right:
                    r_pattern = _to_regex(right, left_match=False)
                if (not left or re_match(l_pattern, left_orig)) and (
                    not right
                    or re_match(r_pattern, right_orig[len(match) :])
                ):
                    pron += out
                    pos += len(match)
                    break
        else:
            # No rule fired: copy the character through unchanged.
            pron += word[pos]
            pos += 1

    return pron
python
{ "resource": "" }
q258178
LCSstr.lcsstr
validation
def lcsstr(self, src, tar):
    """Return the longest common substring of two strings.

    Longest common substring (LCSstr).

    Based on the code from
    https://en.wikibooks.org/wiki/Algorithm_Implementation/Strings/Longest_common_substring
    :cite:`Wikibooks:2018`.
    This is licensed Creative Commons: Attribution-ShareAlike 3.0.

    Modifications include:

        - conversion to a numpy array in place of a list of lists

    Parameters
    ----------
    src : str
        Source string for comparison
    tar : str
        Target string for comparison

    Returns
    -------
    str
        The longest common substring

    Examples
    --------
    >>> sstr = LCSstr()
    >>> sstr.lcsstr('cat', 'hat')
    'at'
    >>> sstr.lcsstr('Niall', 'Neil')
    'N'
    >>> sstr.lcsstr('aluminum', 'Catalan')
    'al'
    >>> sstr.lcsstr('ATCG', 'TAGC')
    'A'
    """
    # dtype=int: the previous dtype, np_int (an alias of numpy.int), was
    # deprecated in NumPy 1.20 and removed in 1.24; the builtin int maps to
    # the same default integer dtype.
    lengths = np_zeros((len(src) + 1, len(tar) + 1), dtype=int)
    longest, i_longest = 0, 0
    for i in range(1, len(src) + 1):
        for j in range(1, len(tar) + 1):
            if src[i - 1] == tar[j - 1]:
                # Extend the run of matching characters ending at (i, j).
                lengths[i, j] = lengths[i - 1, j - 1] + 1
                if lengths[i, j] > longest:
                    longest = lengths[i, j]
                    i_longest = i
            else:
                lengths[i, j] = 0
    return src[i_longest - longest : i_longest]
python
{ "resource": "" }
q258179
LCSstr.sim
validation
def sim(self, src, tar):
    r"""Return the longest common substring similarity of two strings.

    :math:`sim_{LCSstr}(s,t) = \frac{|LCSstr(s,t)|}{max(|s|, |t|)}`

    Parameters
    ----------
    src : str
        Source string for comparison
    tar : str
        Target string for comparison

    Returns
    -------
    float
        LCSstr similarity

    Examples
    --------
    >>> sstr = LCSstr()
    >>> sstr.sim('cat', 'hat')
    0.6666666666666666
    >>> sstr.sim('Niall', 'Neil')
    0.2
    >>> sstr.sim('aluminum', 'Catalan')
    0.25
    >>> sstr.sim('ATCG', 'TAGC')
    0.25
    """
    # Shortcut the trivial cases before running the full DP.
    if src == tar:
        return 1.0
    if not src or not tar:
        return 0.0
    substring = self.lcsstr(src, tar)
    return len(substring) / max(len(src), len(tar))
python
{ "resource": "" }
q258180
needleman_wunsch
validation
def needleman_wunsch(src, tar, gap_cost=1, sim_func=sim_ident):
    """Return the Needleman-Wunsch score of two strings.

    This is a wrapper for :py:meth:`NeedlemanWunsch.dist_abs`.

    Parameters
    ----------
    src : str
        Source string for comparison
    tar : str
        Target string for comparison
    gap_cost : float
        The cost of an alignment gap (1 by default)
    sim_func : function
        A function that returns the similarity of two characters (identity
        similarity by default)

    Returns
    -------
    float
        Needleman-Wunsch score

    Examples
    --------
    >>> needleman_wunsch('cat', 'hat')
    2.0
    """
    measure = NeedlemanWunsch()
    return measure.dist_abs(src, tar, gap_cost, sim_func)
python
{ "resource": "" }
q258181
NeedlemanWunsch.sim_matrix
validation
def sim_matrix(
    src,
    tar,
    mat=None,
    mismatch_cost=0,
    match_cost=1,
    symmetric=True,
    alphabet=None,
):
    """Return the matrix similarity of two strings.

    With the default parameters, this is identical to sim_ident.
    Values outside :math:`[0, 1]` may be returned if such values appear
    in ``mat``, ``mismatch_cost``, or ``match_cost``.

    Parameters
    ----------
    src : str
        Source string for comparison
    tar : str
        Target string for comparison
    mat : dict
        A dict mapping (src, tar) tuples to costs
    mismatch_cost : float
        Returned if (src, tar) is absent from mat and src != tar
    match_cost : float
        Returned if (src, tar) is absent from mat and src == tar
    symmetric : bool
        If True, (tar, src) entries in mat also cover (src, tar)
    alphabet : str
        If given, src and tar must consist of its tokens

    Returns
    -------
    float
        Matrix similarity

    Raises
    ------
    ValueError
        src value not in alphabet
    ValueError
        tar value not in alphabet

    Examples
    --------
    >>> NeedlemanWunsch.sim_matrix('cat', 'hat')
    0
    >>> NeedlemanWunsch.sim_matrix('hat', 'hat')
    1
    """
    if alphabet:
        allowed = tuple(alphabet)
        for ch in src:
            if ch not in allowed:
                raise ValueError('src value not in alphabet')
        for ch in tar:
            if ch not in allowed:
                raise ValueError('tar value not in alphabet')

    if src == tar:
        # Explicit (src, src) entries in the matrix take precedence.
        if mat and (src, src) in mat:
            return mat[(src, src)]
        return match_cost

    if mat:
        if (src, tar) in mat:
            return mat[(src, tar)]
        if symmetric and (tar, src) in mat:
            return mat[(tar, src)]
    return mismatch_cost
python
{ "resource": "" }
q258182
PhoneticSpanish.encode
validation
def encode(self, word, max_length=-1):
    """Return the PhoneticSpanish coding of word.

    Parameters
    ----------
    word : str
        The word to transform
    max_length : int
        The length of the code returned (defaults to unlimited)

    Returns
    -------
    str
        The PhoneticSpanish code

    Examples
    --------
    >>> pe = PhoneticSpanish()
    >>> pe.encode('Perez')
    '094'
    >>> pe.encode('Martinez')
    '69364'
    >>> pe.encode('Gutierrez')
    '83994'
    >>> pe.encode('Santiago')
    '4638'
    >>> pe.encode('Nicolás')
    '6454'
    """
    # uppercase, normalize, and decompose, filter to A-Z minus vowels & W
    word = unicode_normalize('NFKD', text_type(word.upper()))
    word = ''.join(c for c in word if c in self._uc_set)

    # merge repeated Ls; repeated Rs are deliberately kept distinct (the
    # doctest 'Gutierrez' -> '83994' depends on RR encoding as '99').
    # The former `word.replace('R', 'R')` here was a no-op and was removed.
    word = word.replace('LL', 'L')

    # apply the Soundex algorithm
    sdx = word.translate(self._trans)

    # right-pad with zeros and truncate when a fixed length is requested
    if max_length > 0:
        sdx = (sdx + ('0' * max_length))[:max_length]

    return sdx
python
{ "resource": "" }
q258183
NCDbwtrle.dist
validation
def dist(self, src, tar):
    """Return the NCD between two strings using BWT plus RLE.

    Normalized compression distance computed from the lengths of the
    BWT+RLE compressions of each string and of both concatenations.

    Parameters
    ----------
    src : str
        Source string for comparison
    tar : str
        Target string for comparison

    Returns
    -------
    float
        Compression distance

    Examples
    --------
    >>> cmp = NCDbwtrle()
    >>> cmp.dist('cat', 'hat')
    0.75
    >>> cmp.dist('Niall', 'Neil')
    0.8333333333333334
    """
    if src == tar:
        return 0.0

    def _compress(text):
        # BWT groups repeated characters so RLE can shrink them.
        return self._rle.encode(self._bwt.encode(text))

    src_len = len(_compress(src))
    tar_len = len(_compress(tar))
    # Concatenation order affects BWT output, so take the better of both.
    joint_len = min(len(_compress(src + tar)), len(_compress(tar + src)))

    return (joint_len - min(src_len, tar_len)) / max(src_len, tar_len)
python
{ "resource": "" }
q258184
ConfusionTable.to_tuple
validation
def to_tuple(self):
    """Cast to tuple.

    Returns
    -------
    tuple
        The confusion table as a 4-tuple (tp, tn, fp, fn)

    Example
    -------
    >>> ct = ConfusionTable(120, 60, 20, 30)
    >>> ct.to_tuple()
    (120, 60, 20, 30)
    """
    fields = (self._tp, self._tn, self._fp, self._fn)
    return fields
python
{ "resource": "" }
q258185
ConfusionTable.to_dict
validation
def to_dict(self):
    """Cast to dict.

    Returns
    -------
    dict
        The confusion table as a dict with keys 'tp', 'tn', 'fp', 'fn'

    Example
    -------
    >>> ct = ConfusionTable(120, 60, 20, 30)
    >>> import pprint
    >>> pprint.pprint(ct.to_dict())
    {'fn': 30, 'fp': 20, 'tn': 60, 'tp': 120}
    """
    return dict(tp=self._tp, tn=self._tn, fp=self._fp, fn=self._fn)
python
{ "resource": "" }
q258186
ConfusionTable.population
validation
def population(self):
    """Return population, N.

    The total of all four cells of the confusion table.

    Returns
    -------
    int
        The population (N) of the confusion table

    Example
    -------
    >>> ct = ConfusionTable(120, 60, 20, 30)
    >>> ct.population()
    230
    """
    return sum((self._tp, self._tn, self._fp, self._fn))
python
{ "resource": "" }
q258187
ConfusionTable.precision
validation
def precision(self):
    r"""Return precision.

    Precision is defined as :math:`\frac{tp}{tp + fp}`

    AKA positive predictive value (PPV)

    Cf. https://en.wikipedia.org/wiki/Precision_and_recall

    Returns
    -------
    float
        The precision of the confusion table (NaN when undefined)

    Example
    -------
    >>> ct = ConfusionTable(120, 60, 20, 30)
    >>> ct.precision()
    0.8571428571428571
    """
    predicted_pos = self._tp + self._fp
    if predicted_pos == 0:
        # No positive predictions at all: precision is undefined.
        return float('NaN')
    return self._tp / predicted_pos
python
{ "resource": "" }
q258188
ConfusionTable.precision_gain
validation
def precision_gain(self):
    r"""Return gain in precision.

    Defined as:
    :math:`G(precision) = \frac{precision}{random~ precision}`

    Cf. https://en.wikipedia.org/wiki/Gain_(information_retrieval)

    Returns
    -------
    float
        The gain in precision of the confusion table (NaN when undefined)

    Example
    -------
    >>> ct = ConfusionTable(120, 60, 20, 30)
    >>> ct.precision_gain()
    1.3142857142857143
    """
    if self.population() == 0:
        return float('NaN')
    # Baseline: the precision a random classifier would achieve.
    baseline = self.cond_pos_pop() / self.population()
    return self.precision() / baseline
python
{ "resource": "" }
q258189
ConfusionTable.recall
validation
def recall(self):
    r"""Return recall.

    Recall is defined as :math:`\frac{tp}{tp + fn}`

    AKA sensitivity

    AKA true positive rate (TPR)

    Cf. https://en.wikipedia.org/wiki/Precision_and_recall

    Cf. https://en.wikipedia.org/wiki/Sensitivity_(test)

    Cf. https://en.wikipedia.org/wiki/Information_retrieval#Recall

    Returns
    -------
    float
        The recall of the confusion table

    Example
    -------
    >>> ct = ConfusionTable(120, 60, 20, 30)
    >>> ct.recall()
    0.8

    """
    actual_pos = self._tp + self._fn
    # Undefined when there are no condition-positive cases.
    if actual_pos == 0:
        return float('NaN')
    return self._tp / actual_pos
python
{ "resource": "" }
q258190
ConfusionTable.specificity
validation
def specificity(self):
    r"""Return specificity.

    Specificity is defined as :math:`\frac{tn}{tn + fp}`

    AKA true negative rate (TNR)

    Cf. https://en.wikipedia.org/wiki/Specificity_(tests)

    Returns
    -------
    float
        The specificity of the confusion table

    Example
    -------
    >>> ct = ConfusionTable(120, 60, 20, 30)
    >>> ct.specificity()
    0.75

    """
    actual_neg = self._tn + self._fp
    # Undefined when there are no condition-negative cases.
    if actual_neg == 0:
        return float('NaN')
    return self._tn / actual_neg
python
{ "resource": "" }
q258191
ConfusionTable.fallout
validation
def fallout(self):
    r"""Return fall-out.

    Fall-out is defined as :math:`\frac{fp}{fp + tn}`

    AKA false positive rate (FPR)

    Cf. https://en.wikipedia.org/wiki/Information_retrieval#Fall-out

    Returns
    -------
    float
        The fall-out of the confusion table

    Example
    -------
    >>> ct = ConfusionTable(120, 60, 20, 30)
    >>> ct.fallout()
    0.25

    """
    actual_neg = self._fp + self._tn
    # Undefined when there are no condition-negative cases.
    if actual_neg == 0:
        return float('NaN')
    return self._fp / actual_neg
python
{ "resource": "" }
q258192
ConfusionTable.accuracy
validation
def accuracy(self):
    r"""Return accuracy.

    Accuracy is defined as :math:`\frac{tp + tn}{population}`

    Cf. https://en.wikipedia.org/wiki/Accuracy

    Returns
    -------
    float
        The accuracy of the confusion table

    Example
    -------
    >>> ct = ConfusionTable(120, 60, 20, 30)
    >>> ct.accuracy()
    0.782608695652174

    """
    total = self.population()
    # Undefined for an empty table.
    if total == 0:
        return float('NaN')
    return (self._tp + self._tn) / total
python
{ "resource": "" }
q258193
ConfusionTable.accuracy_gain
validation
def accuracy_gain(self):
    r"""Return gain in accuracy.

    The gain in accuracy is defined as:
    :math:`G(accuracy) = \frac{accuracy}{random~ accuracy}`

    Cf. https://en.wikipedia.org/wiki/Gain_(information_retrieval)

    Returns
    -------
    float
        The gain in accuracy of the confusion table

    Example
    -------
    >>> ct = ConfusionTable(120, 60, 20, 30)
    >>> ct.accuracy_gain()
    1.4325259515570934

    """
    total = self.population()
    # Undefined for an empty table.
    if total == 0:
        return float('NaN')
    # Random accuracy: probability that a random guess matching the class
    # priors agrees with a random instance.
    baseline = (self.cond_pos_pop() / total) ** 2 + (
        self.cond_neg_pop() / total
    ) ** 2
    return self.accuracy() / baseline
python
{ "resource": "" }
q258194
ConfusionTable.pr_lmean
validation
def pr_lmean(self):
    r"""Return logarithmic mean of precision & recall.

    The logarithmic mean is:
    0 if either precision or recall is 0,
    the precision if they are equal,
    otherwise :math:`\frac{precision - recall}
    {ln(precision) - ln(recall)}`

    Cf. https://en.wikipedia.org/wiki/Logarithmic_mean

    Returns
    -------
    float
        The logarithmic mean of the confusion table's precision & recall

    Example
    -------
    >>> ct = ConfusionTable(120, 60, 20, 30)
    >>> ct.pr_lmean()
    0.8282429171492667

    """
    prec = self.precision()
    rec = self.recall()
    # Degenerate cases: a zero component collapses the mean to 0; equal
    # components make the mean equal to either of them.
    if not prec or not rec:
        return 0.0
    if prec == rec:
        return prec
    return (prec - rec) / (math.log(prec) - math.log(rec))
python
{ "resource": "" }
q258195
CLEFGerman.stem
validation
def stem(self, word):
    """Return CLEF German stem.

    Parameters
    ----------
    word : str
        The word to stem

    Returns
    -------
    str
        Word stem

    Examples
    --------
    >>> stmr = CLEFGerman()
    >>> stmr.stem('lesen')
    'lese'
    >>> stmr.stem('graues')
    'grau'
    >>> stmr.stem('buchstabieren')
    'buchstabier'

    """
    # Lowercase, then NFC-compose so umlauts are single code points.
    word = normalize('NFC', text_type(word.lower()))
    # Strip umlauts via the precomputed translation table.
    word = word.translate(self._umlauts)

    # Plural/suffix removal: longest applicable suffix wins, and each
    # rule requires a minimum stem length (len - 1 thresholds below).
    wlen = len(word) - 1
    if wlen > 5 and word[-3:] == 'nen':
        return word[:-3]
    if wlen > 4 and word[-2:] in {'en', 'se', 'es', 'er'}:
        return word[:-2]
    if wlen > 3 and word[-1] in {'e', 'n', 'r', 's'}:
        return word[:-1]
    return word
python
{ "resource": "" }
q258196
Sift4Simplest.dist_abs
validation
def dist_abs(self, src, tar, max_offset=5):
    """Return the "simplest" Sift4 distance between two terms.

    Parameters
    ----------
    src : str
        Source string for comparison
    tar : str
        Target string for comparison
    max_offset : int
        The number of characters to search for matching letters

    Returns
    -------
    int
        The Sift4 distance according to the simplest formula

    Examples
    --------
    >>> cmp = Sift4Simplest()
    >>> cmp.dist_abs('cat', 'hat')
    1
    >>> cmp.dist_abs('Niall', 'Neil')
    2
    >>> cmp.dist_abs('Colin', 'Cuilen')
    3
    >>> cmp.dist_abs('ATCG', 'TAGC')
    2

    """
    # An empty side reduces the distance to the other side's length.
    if not src:
        return len(tar)
    if not tar:
        return len(src)

    len_s = len(src)
    len_t = len(tar)

    i = 0  # cursor into src
    j = 0  # cursor into tar
    lcs_total = 0  # banked length of completed matching runs
    run = 0  # length of the current matching run

    while i < len_s and j < len_t:
        if src[i] == tar[j]:
            run += 1
        else:
            # The run ended: bank it and try to re-sync the cursors by
            # scanning ahead up to max_offset characters on either side.
            lcs_total += run
            run = 0
            if i != j:
                i = j = max(i, j)
            for offset in range(max_offset):
                if not (i + offset < len_s or j + offset < len_t):
                    break
                if i + offset < len_s and src[i + offset] == tar[j]:
                    i += offset
                    run += 1
                    break
                if j + offset < len_t and src[i] == tar[j + offset]:
                    j += offset
                    run += 1
                    break
        i += 1
        j += 1

    lcs_total += run
    return round(max(len_s, len_t) - lcs_total)
python
{ "resource": "" }
q258197
sim_typo
validation
def sim_typo(
    src, tar, metric='euclidean', cost=(1, 1, 0.5, 0.5), layout='QWERTY'
):
    """Return the normalized typo similarity between two strings.

    This is a wrapper for :py:meth:`Typo.sim`.

    Parameters
    ----------
    src : str
        Source string for comparison
    tar : str
        Target string for comparison
    metric : str
        Supported values include: ``euclidean``, ``manhattan``,
        ``log-euclidean``, and ``log-manhattan``
    cost : tuple
        A 4-tuple representing the cost of the four possible edits: inserts,
        deletes, substitutions, and shift, respectively (by default:
        (1, 1, 0.5, 0.5)) The substitution & shift costs should be
        significantly less than the cost of an insertion & deletion unless a
        log metric is used.
    layout : str
        Name of the keyboard layout to use (Currently supported: ``QWERTY``,
        ``Dvorak``, ``AZERTY``, ``QWERTZ``)

    Returns
    -------
    float
        Normalized typo similarity

    Examples
    --------
    >>> round(sim_typo('cat', 'hat'), 12)
    0.472953716914
    >>> round(sim_typo('Niall', 'Neil'), 12)
    0.434971857071
    >>> round(sim_typo('Colin', 'Cuilen'), 12)
    0.430964390437
    >>> sim_typo('ATCG', 'TAGC')
    0.375

    """
    # Delegate to the class-based implementation.
    measure = Typo()
    return measure.sim(src, tar, metric, cost, layout)
python
{ "resource": "" }
q258198
manhattan
validation
def manhattan(src, tar, qval=2, normalized=False, alphabet=None):
    """Return the Manhattan distance between two strings.

    This is a wrapper for :py:meth:`Manhattan.dist_abs`.

    Parameters
    ----------
    src : str
        Source string (or QGrams/Counter objects) for comparison
    tar : str
        Target string (or QGrams/Counter objects) for comparison
    qval : int
        The length of each q-gram; 0 for non-q-gram version
    normalized : bool
        Normalizes to [0, 1] if True
    alphabet : collection or int
        The values or size of the alphabet

    Returns
    -------
    float
        The Manhattan distance

    Examples
    --------
    >>> manhattan('cat', 'hat')
    4.0
    >>> manhattan('Niall', 'Neil')
    7.0
    >>> manhattan('Colin', 'Cuilen')
    9.0
    >>> manhattan('ATCG', 'TAGC')
    10.0

    """
    # Delegate to the class-based implementation.
    measure = Manhattan()
    return measure.dist_abs(src, tar, qval, normalized, alphabet)
python
{ "resource": "" }
q258199
dist_manhattan
validation
def dist_manhattan(src, tar, qval=2, alphabet=None):
    """Return the normalized Manhattan distance between two strings.

    This is a wrapper for :py:meth:`Manhattan.dist`.

    Parameters
    ----------
    src : str
        Source string (or QGrams/Counter objects) for comparison
    tar : str
        Target string (or QGrams/Counter objects) for comparison
    qval : int
        The length of each q-gram; 0 for non-q-gram version
    alphabet : collection or int
        The values or size of the alphabet

    Returns
    -------
    float
        The normalized Manhattan distance

    Examples
    --------
    >>> dist_manhattan('cat', 'hat')
    0.5
    >>> round(dist_manhattan('Niall', 'Neil'), 12)
    0.636363636364
    >>> round(dist_manhattan('Colin', 'Cuilen'), 12)
    0.692307692308
    >>> dist_manhattan('ATCG', 'TAGC')
    1.0

    """
    # Delegate to the class-based implementation.
    measure = Manhattan()
    return measure.dist(src, tar, qval, alphabet)
python
{ "resource": "" }