Dataset schema (field: type, observed range):

repository_name: string (316 distinct values)
func_path_in_repository: string, length 6 to 223
func_name: string, length 1 to 134
language: string (1 distinct value)
func_code_string: string, length 57 to 65.5k
func_documentation_string: string, length 1 to 46.3k
split_name: string (1 distinct value)
func_code_url: string, length 91 to 315
called_functions: list, length 1 to 156
enclosing_scope: string, length 2 to 1.48M
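A minimal sketch of how one might consume records like those below, assuming the dump has been exported as JSON Lines with the fields above (the filename here is hypothetical, not part of the dataset):

    import json

    # "code_search_sample.jsonl" is an assumed export name, not given by this dump.
    with open("code_search_sample.jsonl") as f:
        for line in f:
            record = json.loads(line)
            # Each record pairs a function with its docstring and surrounding context.
            print(record["repository_name"], record["func_name"])
            print(record["func_code_url"])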
ppb/pursuedpybear
ppb/scenes.py
BaseScene.get
python
def get(self, *, kind: Type=None, tag: Hashable=None, **kwargs) -> Iterator:
    return self.game_objects.get(kind=kind, tag=tag, **kwargs)
Get an iterator of GameObjects by kind or tag.

kind: Any type. Pass to get a subset of contained GameObjects with the given type.
tag: Any Hashable object. Pass to get a subset of contained GameObjects with the given tag.

Pass both kind and tag to get objects that are both that type and that tag.

Examples:
    scene.get(kind=MyGameObject)
    scene.get(tag="red")
    scene.get(kind=MyGameObject, tag="red")
train
https://github.com/ppb/pursuedpybear/blob/db3bfaaf86d14b4d1bb9e0b24cc8dc63f29c2191/ppb/scenes.py#L171-L190
null
class BaseScene(Scene, EventMixin): # Background color, in RGB, each channel is 0-255 background_color: Sequence[int] = (0, 0, 100) container_class: Type = GameObjectCollection def __init__(self, engine, *, set_up: Callable=None, pixel_ratio: Number=64, **kwargs): super().__init__(engine) for k, v in kwargs.items(): setattr(self, k, v) self.game_objects = self.container_class() self.main_camera = Camera(pixel_ratio=pixel_ratio) if set_up is not None: set_up(self) def __contains__(self, item: Hashable) -> bool: return item in self.game_objects def __iter__(self) -> Iterator: return (x for x in self.game_objects) @property def kinds(self): return self.game_objects.kinds @property def tags(self): return self.game_objects.tags @property def main_camera(self) -> Camera: return next(self.game_objects.get(tag="main_camera")) @main_camera.setter def main_camera(self, value: Camera): for camera in self.game_objects.get(tag="main_camera"): self.game_objects.remove(camera) self.game_objects.add(value, tags=["main_camera"]) def change(self) -> Tuple[bool, dict]: """ Default case, override in subclass as necessary. """ next = self.next self.next = None if self.next or not self.running: message = "The Scene.change interface is deprecated. Use the events commands instead." warn(message, DeprecationWarning) return self.running, {"scene_class": next} def add(self, game_object: Hashable, tags: Iterable=())-> None: """ Add a game_object to the scene. game_object: Any GameObject object. The item to be added. tags: An iterable of Hashable objects. Values that can be used to retrieve a group containing the game_object. Examples: scene.add(MyGameObject()) scene.add(MyGameObject(), tags=("red", "blue") """ self.game_objects.add(game_object, tags) def remove(self, game_object: Hashable) -> None: """ Remove the given object from the scene. game_object: A game object. Example: scene.remove(my_game_object) """ self.game_objects.remove(game_object)
ppb/pursuedpybear
setup.py
requirements
python
def requirements(section=None):
    if section is None:
        filename = "requirements.txt"
    else:
        filename = f"requirements-{section}.txt"

    with open(filename) as file:
        return [line.strip() for line in file]
Helper for loading dependencies from requirements files.
train
https://github.com/ppb/pursuedpybear/blob/db3bfaaf86d14b4d1bb9e0b24cc8dc63f29c2191/setup.py#L5-L13
null
#!/usr/bin/env python3

from setuptools import setup

# See setup.cfg for the actual configuration.
setup(
    install_requires=requirements(),
    tests_require=requirements('tests'),
)
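A usage note for the requirements helper above, derived directly from its code: the optional section name selects a suffixed requirements file.

    requirements()         # reads "requirements.txt"
    requirements('tests')  # reads "requirements-tests.txt"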
iKevinY/EulerPy
EulerPy/utils.py
problem_glob
python
def problem_glob(extension='.py'):
    filenames = glob.glob('*[0-9][0-9][0-9]*{}'.format(extension))
    return [ProblemFile(file) for file in filenames]
Returns ProblemFile objects for all valid problem files
train
https://github.com/iKevinY/EulerPy/blob/739c1c67fa7b32af9140ca51e4b4a07733e057a6/EulerPy/utils.py#L12-L15
null
# -*- coding: utf-8 -*- from __future__ import unicode_literals import sys import glob import math from EulerPy.problem import ProblemFile # Use the resource module instead of time.clock() if possible (on Unix) try: import resource except ImportError: import time def clock(): """ Under Windows, system CPU time can't be measured. Return time.clock() as user time and None as system time. """ return time.clock(), None else: def clock(): """ Returns a tuple (t_user, t_system) since the start of the process. This is done via a call to resource.getrusage, so it avoids the wraparound problems in time.clock(). """ return resource.getrusage(resource.RUSAGE_CHILDREN)[:2] def human_time(timespan, precision=3): """Formats the timespan in a human readable format""" if timespan >= 60.0: # Format time greater than one minute in a human-readable format # Idea from http://snipplr.com/view/5713/ def _format_long_time(time): suffixes = ('d', 'h', 'm', 's') lengths = (24*60*60, 60*60, 60, 1) for suffix, length in zip(suffixes, lengths): value = int(time / length) if value > 0: time %= length yield '%i%s' % (value, suffix) if time < 1: break return ' '.join(_format_long_time(timespan)) else: units = ['s', 'ms', 'us', 'ns'] # Attempt to replace 'us' with 'µs' if UTF-8 encoding has been set if hasattr(sys.stdout, 'encoding') and sys.stdout.encoding == 'UTF-8': try: units[2] = b'\xc2\xb5s'.decode('utf-8') except UnicodeEncodeError: pass scale = [1.0, 1e3, 1e6, 1e9] if timespan > 0.0: # Determine scale of timespan (s = 0, ms = 1, µs = 2, ns = 3) order = min(-int(math.floor(math.log10(timespan)) // 3), 3) else: order = 3 return '%.*g %s' % (precision, timespan * scale[order], units[order]) def format_time(start, end): """Returns string with relevant time information formatted properly""" try: cpu_usr = end[0] - start[0] cpu_sys = end[1] - start[1] except TypeError: # `clock()[1] == None` so subtraction results in a TypeError return 'Time elapsed: {}'.format(human_time(cpu_usr)) else: times = (human_time(x) for x in (cpu_usr, cpu_sys, cpu_usr + cpu_sys)) return 'Time elapsed: user: {}, sys: {}, total: {}'.format(*times)
iKevinY/EulerPy
EulerPy/utils.py
human_time
python
def human_time(timespan, precision=3):
    if timespan >= 60.0:
        # Format time greater than one minute in a human-readable format
        # Idea from http://snipplr.com/view/5713/
        def _format_long_time(time):
            suffixes = ('d', 'h', 'm', 's')
            lengths = (24*60*60, 60*60, 60, 1)

            for suffix, length in zip(suffixes, lengths):
                value = int(time / length)

                if value > 0:
                    time %= length
                    yield '%i%s' % (value, suffix)

                if time < 1:
                    break

        return ' '.join(_format_long_time(timespan))

    else:
        units = ['s', 'ms', 'us', 'ns']

        # Attempt to replace 'us' with 'µs' if UTF-8 encoding has been set
        if hasattr(sys.stdout, 'encoding') and sys.stdout.encoding == 'UTF-8':
            try:
                units[2] = b'\xc2\xb5s'.decode('utf-8')
            except UnicodeEncodeError:
                pass

        scale = [1.0, 1e3, 1e6, 1e9]

        if timespan > 0.0:
            # Determine scale of timespan (s = 0, ms = 1, µs = 2, ns = 3)
            order = min(-int(math.floor(math.log10(timespan)) // 3), 3)
        else:
            order = 3

        return '%.*g %s' % (precision, timespan * scale[order], units[order])
Formats the timespan in a human readable format
train
https://github.com/iKevinY/EulerPy/blob/739c1c67fa7b32af9140ca51e4b4a07733e057a6/EulerPy/utils.py#L41-L81
[ "def _format_long_time(time):\n suffixes = ('d', 'h', 'm', 's')\n lengths = (24*60*60, 60*60, 60, 1)\n\n for suffix, length in zip(suffixes, lengths):\n value = int(time / length)\n\n if value > 0:\n time %= length\n yield '%i%s' % (value, suffix)\n\n if time < 1:\n break\n" ]
# -*- coding: utf-8 -*- from __future__ import unicode_literals import sys import glob import math from EulerPy.problem import ProblemFile def problem_glob(extension='.py'): """Returns ProblemFile objects for all valid problem files""" filenames = glob.glob('*[0-9][0-9][0-9]*{}'.format(extension)) return [ProblemFile(file) for file in filenames] # Use the resource module instead of time.clock() if possible (on Unix) try: import resource except ImportError: import time def clock(): """ Under Windows, system CPU time can't be measured. Return time.clock() as user time and None as system time. """ return time.clock(), None else: def clock(): """ Returns a tuple (t_user, t_system) since the start of the process. This is done via a call to resource.getrusage, so it avoids the wraparound problems in time.clock(). """ return resource.getrusage(resource.RUSAGE_CHILDREN)[:2] def format_time(start, end): """Returns string with relevant time information formatted properly""" try: cpu_usr = end[0] - start[0] cpu_sys = end[1] - start[1] except TypeError: # `clock()[1] == None` so subtraction results in a TypeError return 'Time elapsed: {}'.format(human_time(cpu_usr)) else: times = (human_time(x) for x in (cpu_usr, cpu_sys, cpu_usr + cpu_sys)) return 'Time elapsed: user: {}, sys: {}, total: {}'.format(*times)
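A few illustrative calls of human_time from the record above, with return values worked out by hand from its code (the 'us' unit becomes 'µs' only when stdout reports UTF-8 encoding):

    human_time(75.5)     # '1m 15s'  (>= 60s path; zero-valued day/hour parts are skipped)
    human_time(1.5)      # '1.5 s'   (order 0: seconds scale)
    human_time(0.00042)  # '420 us'  (order 2: microseconds scale)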
iKevinY/EulerPy
EulerPy/utils.py
format_time
python
def format_time(start, end):
    try:
        cpu_usr = end[0] - start[0]
        cpu_sys = end[1] - start[1]
    except TypeError:
        # `clock()[1] == None` so subtraction results in a TypeError
        return 'Time elapsed: {}'.format(human_time(cpu_usr))
    else:
        times = (human_time(x) for x in (cpu_usr, cpu_sys, cpu_usr + cpu_sys))
        return 'Time elapsed: user: {}, sys: {}, total: {}'.format(*times)
Returns string with relevant time information formatted properly
train
https://github.com/iKevinY/EulerPy/blob/739c1c67fa7b32af9140ca51e4b4a07733e057a6/EulerPy/utils.py#L84-L96
[ "def human_time(timespan, precision=3):\n \"\"\"Formats the timespan in a human readable format\"\"\"\n\n if timespan >= 60.0:\n # Format time greater than one minute in a human-readable format\n # Idea from http://snipplr.com/view/5713/\n def _format_long_time(time):\n suffixes = ('d', 'h', 'm', 's')\n lengths = (24*60*60, 60*60, 60, 1)\n\n for suffix, length in zip(suffixes, lengths):\n value = int(time / length)\n\n if value > 0:\n time %= length\n yield '%i%s' % (value, suffix)\n\n if time < 1:\n break\n\n return ' '.join(_format_long_time(timespan))\n\n else:\n units = ['s', 'ms', 'us', 'ns']\n\n # Attempt to replace 'us' with 'µs' if UTF-8 encoding has been set\n if hasattr(sys.stdout, 'encoding') and sys.stdout.encoding == 'UTF-8':\n try:\n units[2] = b'\\xc2\\xb5s'.decode('utf-8')\n except UnicodeEncodeError:\n pass\n\n scale = [1.0, 1e3, 1e6, 1e9]\n\n if timespan > 0.0:\n # Determine scale of timespan (s = 0, ms = 1, µs = 2, ns = 3)\n order = min(-int(math.floor(math.log10(timespan)) // 3), 3)\n else:\n order = 3\n\n return '%.*g %s' % (precision, timespan * scale[order], units[order])\n" ]
# -*- coding: utf-8 -*- from __future__ import unicode_literals import sys import glob import math from EulerPy.problem import ProblemFile def problem_glob(extension='.py'): """Returns ProblemFile objects for all valid problem files""" filenames = glob.glob('*[0-9][0-9][0-9]*{}'.format(extension)) return [ProblemFile(file) for file in filenames] # Use the resource module instead of time.clock() if possible (on Unix) try: import resource except ImportError: import time def clock(): """ Under Windows, system CPU time can't be measured. Return time.clock() as user time and None as system time. """ return time.clock(), None else: def clock(): """ Returns a tuple (t_user, t_system) since the start of the process. This is done via a call to resource.getrusage, so it avoids the wraparound problems in time.clock(). """ return resource.getrusage(resource.RUSAGE_CHILDREN)[:2] def human_time(timespan, precision=3): """Formats the timespan in a human readable format""" if timespan >= 60.0: # Format time greater than one minute in a human-readable format # Idea from http://snipplr.com/view/5713/ def _format_long_time(time): suffixes = ('d', 'h', 'm', 's') lengths = (24*60*60, 60*60, 60, 1) for suffix, length in zip(suffixes, lengths): value = int(time / length) if value > 0: time %= length yield '%i%s' % (value, suffix) if time < 1: break return ' '.join(_format_long_time(timespan)) else: units = ['s', 'ms', 'us', 'ns'] # Attempt to replace 'us' with 'µs' if UTF-8 encoding has been set if hasattr(sys.stdout, 'encoding') and sys.stdout.encoding == 'UTF-8': try: units[2] = b'\xc2\xb5s'.decode('utf-8') except UnicodeEncodeError: pass scale = [1.0, 1e3, 1e6, 1e9] if timespan > 0.0: # Determine scale of timespan (s = 0, ms = 1, µs = 2, ns = 3) order = min(-int(math.floor(math.log10(timespan)) // 3), 3) else: order = 3 return '%.*g %s' % (precision, timespan * scale[order], units[order])
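The two return shapes of format_time above, assuming (user, system) tuples as produced by clock(); the timings are illustrative values only:

    format_time((0.0, 0.0), (1.5, 0.2))    # 'Time elapsed: user: 1.5 s, sys: 200 ms, total: 1.7 s'
    format_time((0.0, None), (1.5, None))  # 'Time elapsed: 1.5 s'  (system time unavailable)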
iKevinY/EulerPy
EulerPy/problem.py
Problem.filename
python
def filename(self, prefix='', suffix='', extension='.py'):
    return BASE_NAME.format(prefix, self.num, suffix, extension)
Returns filename padded with leading zeros
train
https://github.com/iKevinY/EulerPy/blob/739c1c67fa7b32af9140ca51e4b4a07733e057a6/EulerPy/problem.py#L25-L27
null
class Problem(object): """Represents a Project Euler problem of a given problem number""" def __init__(self, problem_number): self.num = problem_number @property def glob(self): """Returns a sorted glob of files belonging to a given problem""" file_glob = glob.glob(BASE_NAME.format('*', self.num, '*', '.*')) # Sort globbed files by tuple (filename, extension) return sorted(file_glob, key=lambda f: os.path.splitext(f)) @property def file(self): """Returns a ProblemFile instance of the first matching file""" return ProblemFile(self.glob[0]) if self.glob else None @property def resources(self): """Returns a list of resources related to the problem (or None)""" with open(os.path.join(EULER_DATA, 'resources.json')) as data_file: data = json.load(data_file) problem_num = str(self.num) if problem_num in data: files = data[problem_num] # Ensure a list of files is returned return files if isinstance(files, list) else [files] else: return None def copy_resources(self): """Copies the relevant resources to a resources subdirectory""" if not os.path.isdir('resources'): os.mkdir('resources') resource_dir = os.path.join(os.getcwd(), 'resources', '') copied_resources = [] for resource in self.resources: src = os.path.join(EULER_DATA, 'resources', resource) if os.path.isfile(src): shutil.copy(src, resource_dir) copied_resources.append(resource) if copied_resources: copied = ', '.join(copied_resources) path = os.path.relpath(resource_dir, os.pardir) msg = "Copied {} to {}.".format(copied, path) click.secho(msg, fg='green') @property def solution(self): """Returns the answer to a given problem""" num = self.num solution_file = os.path.join(EULER_DATA, 'solutions.txt') solution_line = linecache.getline(solution_file, num) try: answer = solution_line.split('. ')[1].strip() except IndexError: answer = None if answer: return answer else: msg = 'Answer for problem %i not found in solutions.txt.' % num click.secho(msg, fg='red') click.echo('If you have an answer, consider submitting a pull ' 'request to EulerPy on GitHub.') sys.exit(1) @property def text(self): """Parses problems.txt and returns problem text""" def _problem_iter(problem_num): problem_file = os.path.join(EULER_DATA, 'problems.txt') with open(problem_file) as f: is_problem = False last_line = '' for line in f: if line.strip() == 'Problem %i' % problem_num: is_problem = True if is_problem: if line == last_line == '\n': break else: yield line[:-1] last_line = line problem_lines = [line for line in _problem_iter(self.num)] if problem_lines: # First three lines are the problem number, the divider line, # and a newline, so don't include them in the returned string. # Also, strip the final newline. return '\n'.join(problem_lines[3:-1]) else: msg = 'Problem %i not found in problems.txt.' % self.num click.secho(msg, fg='red') click.echo('If this problem exists on Project Euler, consider ' 'submitting a pull request to EulerPy on GitHub.') sys.exit(1)
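BASE_NAME is defined elsewhere in EulerPy/problem.py and is not shown in this record. Judging from the "leading zeros" docstring and the '*[0-9][0-9][0-9]*' glob pattern used nearby, it is plausibly a format string along these lines; this is an assumption, not the verified definition:

    # Hypothetical reconstruction of BASE_NAME; not taken from the source.
    BASE_NAME = '{}{:03d}{}{}'
    BASE_NAME.format('', 7, '', '.py')                  # '007.py'
    BASE_NAME.format('euler_', 7, '-skipped', '.py')    # 'euler_007-skipped.py'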
iKevinY/EulerPy
EulerPy/problem.py
Problem.glob
python
def glob(self):
    file_glob = glob.glob(BASE_NAME.format('*', self.num, '*', '.*'))

    # Sort globbed files by tuple (filename, extension)
    return sorted(file_glob, key=lambda f: os.path.splitext(f))
Returns a sorted glob of files belonging to a given problem
train
https://github.com/iKevinY/EulerPy/blob/739c1c67fa7b32af9140ca51e4b4a07733e057a6/EulerPy/problem.py#L30-L35
null
class Problem(object): """Represents a Project Euler problem of a given problem number""" def __init__(self, problem_number): self.num = problem_number def filename(self, prefix='', suffix='', extension='.py'): """Returns filename padded with leading zeros""" return BASE_NAME.format(prefix, self.num, suffix, extension) @property @property def file(self): """Returns a ProblemFile instance of the first matching file""" return ProblemFile(self.glob[0]) if self.glob else None @property def resources(self): """Returns a list of resources related to the problem (or None)""" with open(os.path.join(EULER_DATA, 'resources.json')) as data_file: data = json.load(data_file) problem_num = str(self.num) if problem_num in data: files = data[problem_num] # Ensure a list of files is returned return files if isinstance(files, list) else [files] else: return None def copy_resources(self): """Copies the relevant resources to a resources subdirectory""" if not os.path.isdir('resources'): os.mkdir('resources') resource_dir = os.path.join(os.getcwd(), 'resources', '') copied_resources = [] for resource in self.resources: src = os.path.join(EULER_DATA, 'resources', resource) if os.path.isfile(src): shutil.copy(src, resource_dir) copied_resources.append(resource) if copied_resources: copied = ', '.join(copied_resources) path = os.path.relpath(resource_dir, os.pardir) msg = "Copied {} to {}.".format(copied, path) click.secho(msg, fg='green') @property def solution(self): """Returns the answer to a given problem""" num = self.num solution_file = os.path.join(EULER_DATA, 'solutions.txt') solution_line = linecache.getline(solution_file, num) try: answer = solution_line.split('. ')[1].strip() except IndexError: answer = None if answer: return answer else: msg = 'Answer for problem %i not found in solutions.txt.' % num click.secho(msg, fg='red') click.echo('If you have an answer, consider submitting a pull ' 'request to EulerPy on GitHub.') sys.exit(1) @property def text(self): """Parses problems.txt and returns problem text""" def _problem_iter(problem_num): problem_file = os.path.join(EULER_DATA, 'problems.txt') with open(problem_file) as f: is_problem = False last_line = '' for line in f: if line.strip() == 'Problem %i' % problem_num: is_problem = True if is_problem: if line == last_line == '\n': break else: yield line[:-1] last_line = line problem_lines = [line for line in _problem_iter(self.num)] if problem_lines: # First three lines are the problem number, the divider line, # and a newline, so don't include them in the returned string. # Also, strip the final newline. return '\n'.join(problem_lines[3:-1]) else: msg = 'Problem %i not found in problems.txt.' % self.num click.secho(msg, fg='red') click.echo('If this problem exists on Project Euler, consider ' 'submitting a pull request to EulerPy on GitHub.') sys.exit(1)
iKevinY/EulerPy
EulerPy/problem.py
Problem.resources
python
def resources(self):
    with open(os.path.join(EULER_DATA, 'resources.json')) as data_file:
        data = json.load(data_file)

    problem_num = str(self.num)

    if problem_num in data:
        files = data[problem_num]
        # Ensure a list of files is returned
        return files if isinstance(files, list) else [files]
    else:
        return None
Returns a list of resources related to the problem (or None)
train
https://github.com/iKevinY/EulerPy/blob/739c1c67fa7b32af9140ca51e4b4a07733e057a6/EulerPy/problem.py#L43-L56
null
class Problem(object): """Represents a Project Euler problem of a given problem number""" def __init__(self, problem_number): self.num = problem_number def filename(self, prefix='', suffix='', extension='.py'): """Returns filename padded with leading zeros""" return BASE_NAME.format(prefix, self.num, suffix, extension) @property def glob(self): """Returns a sorted glob of files belonging to a given problem""" file_glob = glob.glob(BASE_NAME.format('*', self.num, '*', '.*')) # Sort globbed files by tuple (filename, extension) return sorted(file_glob, key=lambda f: os.path.splitext(f)) @property def file(self): """Returns a ProblemFile instance of the first matching file""" return ProblemFile(self.glob[0]) if self.glob else None @property def copy_resources(self): """Copies the relevant resources to a resources subdirectory""" if not os.path.isdir('resources'): os.mkdir('resources') resource_dir = os.path.join(os.getcwd(), 'resources', '') copied_resources = [] for resource in self.resources: src = os.path.join(EULER_DATA, 'resources', resource) if os.path.isfile(src): shutil.copy(src, resource_dir) copied_resources.append(resource) if copied_resources: copied = ', '.join(copied_resources) path = os.path.relpath(resource_dir, os.pardir) msg = "Copied {} to {}.".format(copied, path) click.secho(msg, fg='green') @property def solution(self): """Returns the answer to a given problem""" num = self.num solution_file = os.path.join(EULER_DATA, 'solutions.txt') solution_line = linecache.getline(solution_file, num) try: answer = solution_line.split('. ')[1].strip() except IndexError: answer = None if answer: return answer else: msg = 'Answer for problem %i not found in solutions.txt.' % num click.secho(msg, fg='red') click.echo('If you have an answer, consider submitting a pull ' 'request to EulerPy on GitHub.') sys.exit(1) @property def text(self): """Parses problems.txt and returns problem text""" def _problem_iter(problem_num): problem_file = os.path.join(EULER_DATA, 'problems.txt') with open(problem_file) as f: is_problem = False last_line = '' for line in f: if line.strip() == 'Problem %i' % problem_num: is_problem = True if is_problem: if line == last_line == '\n': break else: yield line[:-1] last_line = line problem_lines = [line for line in _problem_iter(self.num)] if problem_lines: # First three lines are the problem number, the divider line, # and a newline, so don't include them in the returned string. # Also, strip the final newline. return '\n'.join(problem_lines[3:-1]) else: msg = 'Problem %i not found in problems.txt.' % self.num click.secho(msg, fg='red') click.echo('If this problem exists on Project Euler, consider ' 'submitting a pull request to EulerPy on GitHub.') sys.exit(1)
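The shape of resources.json is implied by the lookup in resources above: keys are problem numbers as strings, and a value is either a single filename or a list of filenames. The entries shown here are hypothetical:

    # Hypothetical resources.json contents matching the access pattern above.
    data = {
        "22": "names.txt",                   # single file, wrapped into ["names.txt"]
        "59": ["cipher.txt", "hints.txt"],   # already a list, returned as-is
    }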
iKevinY/EulerPy
EulerPy/problem.py
Problem.copy_resources
python
def copy_resources(self):
    if not os.path.isdir('resources'):
        os.mkdir('resources')

    resource_dir = os.path.join(os.getcwd(), 'resources', '')
    copied_resources = []

    for resource in self.resources:
        src = os.path.join(EULER_DATA, 'resources', resource)
        if os.path.isfile(src):
            shutil.copy(src, resource_dir)
            copied_resources.append(resource)

    if copied_resources:
        copied = ', '.join(copied_resources)
        path = os.path.relpath(resource_dir, os.pardir)
        msg = "Copied {} to {}.".format(copied, path)

        click.secho(msg, fg='green')
Copies the relevant resources to a resources subdirectory
train
https://github.com/iKevinY/EulerPy/blob/739c1c67fa7b32af9140ca51e4b4a07733e057a6/EulerPy/problem.py#L58-L77
null
class Problem(object): """Represents a Project Euler problem of a given problem number""" def __init__(self, problem_number): self.num = problem_number def filename(self, prefix='', suffix='', extension='.py'): """Returns filename padded with leading zeros""" return BASE_NAME.format(prefix, self.num, suffix, extension) @property def glob(self): """Returns a sorted glob of files belonging to a given problem""" file_glob = glob.glob(BASE_NAME.format('*', self.num, '*', '.*')) # Sort globbed files by tuple (filename, extension) return sorted(file_glob, key=lambda f: os.path.splitext(f)) @property def file(self): """Returns a ProblemFile instance of the first matching file""" return ProblemFile(self.glob[0]) if self.glob else None @property def resources(self): """Returns a list of resources related to the problem (or None)""" with open(os.path.join(EULER_DATA, 'resources.json')) as data_file: data = json.load(data_file) problem_num = str(self.num) if problem_num in data: files = data[problem_num] # Ensure a list of files is returned return files if isinstance(files, list) else [files] else: return None @property def solution(self): """Returns the answer to a given problem""" num = self.num solution_file = os.path.join(EULER_DATA, 'solutions.txt') solution_line = linecache.getline(solution_file, num) try: answer = solution_line.split('. ')[1].strip() except IndexError: answer = None if answer: return answer else: msg = 'Answer for problem %i not found in solutions.txt.' % num click.secho(msg, fg='red') click.echo('If you have an answer, consider submitting a pull ' 'request to EulerPy on GitHub.') sys.exit(1) @property def text(self): """Parses problems.txt and returns problem text""" def _problem_iter(problem_num): problem_file = os.path.join(EULER_DATA, 'problems.txt') with open(problem_file) as f: is_problem = False last_line = '' for line in f: if line.strip() == 'Problem %i' % problem_num: is_problem = True if is_problem: if line == last_line == '\n': break else: yield line[:-1] last_line = line problem_lines = [line for line in _problem_iter(self.num)] if problem_lines: # First three lines are the problem number, the divider line, # and a newline, so don't include them in the returned string. # Also, strip the final newline. return '\n'.join(problem_lines[3:-1]) else: msg = 'Problem %i not found in problems.txt.' % self.num click.secho(msg, fg='red') click.echo('If this problem exists on Project Euler, consider ' 'submitting a pull request to EulerPy on GitHub.') sys.exit(1)
iKevinY/EulerPy
EulerPy/problem.py
Problem.solution
python
def solution(self):
    num = self.num

    solution_file = os.path.join(EULER_DATA, 'solutions.txt')
    solution_line = linecache.getline(solution_file, num)

    try:
        answer = solution_line.split('. ')[1].strip()
    except IndexError:
        answer = None

    if answer:
        return answer
    else:
        msg = 'Answer for problem %i not found in solutions.txt.' % num
        click.secho(msg, fg='red')
        click.echo('If you have an answer, consider submitting a pull '
                   'request to EulerPy on GitHub.')
        sys.exit(1)
Returns the answer to a given problem
train
https://github.com/iKevinY/EulerPy/blob/739c1c67fa7b32af9140ca51e4b4a07733e057a6/EulerPy/problem.py#L80-L99
null
class Problem(object): """Represents a Project Euler problem of a given problem number""" def __init__(self, problem_number): self.num = problem_number def filename(self, prefix='', suffix='', extension='.py'): """Returns filename padded with leading zeros""" return BASE_NAME.format(prefix, self.num, suffix, extension) @property def glob(self): """Returns a sorted glob of files belonging to a given problem""" file_glob = glob.glob(BASE_NAME.format('*', self.num, '*', '.*')) # Sort globbed files by tuple (filename, extension) return sorted(file_glob, key=lambda f: os.path.splitext(f)) @property def file(self): """Returns a ProblemFile instance of the first matching file""" return ProblemFile(self.glob[0]) if self.glob else None @property def resources(self): """Returns a list of resources related to the problem (or None)""" with open(os.path.join(EULER_DATA, 'resources.json')) as data_file: data = json.load(data_file) problem_num = str(self.num) if problem_num in data: files = data[problem_num] # Ensure a list of files is returned return files if isinstance(files, list) else [files] else: return None def copy_resources(self): """Copies the relevant resources to a resources subdirectory""" if not os.path.isdir('resources'): os.mkdir('resources') resource_dir = os.path.join(os.getcwd(), 'resources', '') copied_resources = [] for resource in self.resources: src = os.path.join(EULER_DATA, 'resources', resource) if os.path.isfile(src): shutil.copy(src, resource_dir) copied_resources.append(resource) if copied_resources: copied = ', '.join(copied_resources) path = os.path.relpath(resource_dir, os.pardir) msg = "Copied {} to {}.".format(copied, path) click.secho(msg, fg='green') @property @property def text(self): """Parses problems.txt and returns problem text""" def _problem_iter(problem_num): problem_file = os.path.join(EULER_DATA, 'problems.txt') with open(problem_file) as f: is_problem = False last_line = '' for line in f: if line.strip() == 'Problem %i' % problem_num: is_problem = True if is_problem: if line == last_line == '\n': break else: yield line[:-1] last_line = line problem_lines = [line for line in _problem_iter(self.num)] if problem_lines: # First three lines are the problem number, the divider line, # and a newline, so don't include them in the returned string. # Also, strip the final newline. return '\n'.join(problem_lines[3:-1]) else: msg = 'Problem %i not found in problems.txt.' % self.num click.secho(msg, fg='red') click.echo('If this problem exists on Project Euler, consider ' 'submitting a pull request to EulerPy on GitHub.') sys.exit(1)
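solutions.txt is read positionally with linecache.getline(path, num) in the solution property above, so line N must hold problem N's answer in the form 'N. answer'. The value below is a placeholder, not a real solution:

    line = "42. 1234567\n"                 # illustrative; not a real solutions.txt entry
    answer = line.split('. ')[1].strip()   # -> '1234567'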
iKevinY/EulerPy
EulerPy/problem.py
Problem.text
python
def text(self):
    def _problem_iter(problem_num):
        problem_file = os.path.join(EULER_DATA, 'problems.txt')

        with open(problem_file) as f:
            is_problem = False
            last_line = ''

            for line in f:
                if line.strip() == 'Problem %i' % problem_num:
                    is_problem = True

                if is_problem:
                    if line == last_line == '\n':
                        break
                    else:
                        yield line[:-1]
                        last_line = line

    problem_lines = [line for line in _problem_iter(self.num)]

    if problem_lines:
        # First three lines are the problem number, the divider line,
        # and a newline, so don't include them in the returned string.
        # Also, strip the final newline.
        return '\n'.join(problem_lines[3:-1])
    else:
        msg = 'Problem %i not found in problems.txt.' % self.num
        click.secho(msg, fg='red')
        click.echo('If this problem exists on Project Euler, consider '
                   'submitting a pull request to EulerPy on GitHub.')
        sys.exit(1)
Parses problems.txt and returns problem text
train
https://github.com/iKevinY/EulerPy/blob/739c1c67fa7b32af9140ca51e4b4a07733e057a6/EulerPy/problem.py#L102-L134
[ "def _problem_iter(problem_num):\n problem_file = os.path.join(EULER_DATA, 'problems.txt')\n\n with open(problem_file) as f:\n is_problem = False\n last_line = ''\n\n for line in f:\n if line.strip() == 'Problem %i' % problem_num:\n is_problem = True\n\n if is_problem:\n if line == last_line == '\\n':\n break\n else:\n yield line[:-1]\n last_line = line\n" ]
class Problem(object): """Represents a Project Euler problem of a given problem number""" def __init__(self, problem_number): self.num = problem_number def filename(self, prefix='', suffix='', extension='.py'): """Returns filename padded with leading zeros""" return BASE_NAME.format(prefix, self.num, suffix, extension) @property def glob(self): """Returns a sorted glob of files belonging to a given problem""" file_glob = glob.glob(BASE_NAME.format('*', self.num, '*', '.*')) # Sort globbed files by tuple (filename, extension) return sorted(file_glob, key=lambda f: os.path.splitext(f)) @property def file(self): """Returns a ProblemFile instance of the first matching file""" return ProblemFile(self.glob[0]) if self.glob else None @property def resources(self): """Returns a list of resources related to the problem (or None)""" with open(os.path.join(EULER_DATA, 'resources.json')) as data_file: data = json.load(data_file) problem_num = str(self.num) if problem_num in data: files = data[problem_num] # Ensure a list of files is returned return files if isinstance(files, list) else [files] else: return None def copy_resources(self): """Copies the relevant resources to a resources subdirectory""" if not os.path.isdir('resources'): os.mkdir('resources') resource_dir = os.path.join(os.getcwd(), 'resources', '') copied_resources = [] for resource in self.resources: src = os.path.join(EULER_DATA, 'resources', resource) if os.path.isfile(src): shutil.copy(src, resource_dir) copied_resources.append(resource) if copied_resources: copied = ', '.join(copied_resources) path = os.path.relpath(resource_dir, os.pardir) msg = "Copied {} to {}.".format(copied, path) click.secho(msg, fg='green') @property def solution(self): """Returns the answer to a given problem""" num = self.num solution_file = os.path.join(EULER_DATA, 'solutions.txt') solution_line = linecache.getline(solution_file, num) try: answer = solution_line.split('. ')[1].strip() except IndexError: answer = None if answer: return answer else: msg = 'Answer for problem %i not found in solutions.txt.' % num click.secho(msg, fg='red') click.echo('If you have an answer, consider submitting a pull ' 'request to EulerPy on GitHub.') sys.exit(1) @property
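The parser in text above implies the layout of problems.txt: a 'Problem N' heading, a divider, a blank line, the body, and two consecutive blank lines as a terminator; text() then drops the first three yielded lines and the trailing blank. A hypothetical excerpt shaped to satisfy _problem_iter:

    sample = (
        "Problem 1\n"
        "=========\n"
        "\n"
        "If we list all the natural numbers below ten...\n"
        "\n"
        "\n"
    )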
iKevinY/EulerPy
EulerPy/euler.py
cheat
python
def cheat(num):
    # Define solution before echoing in case solution does not exist
    solution = click.style(Problem(num).solution, bold=True)
    click.confirm("View answer to problem %i?" % num, abort=True)
    click.echo("The answer to problem {} is {}.".format(num, solution))
View the answer to a problem.
train
https://github.com/iKevinY/EulerPy/blob/739c1c67fa7b32af9140ca51e4b4a07733e057a6/EulerPy/euler.py#L16-L21
null
# -*- coding: utf-8 -*- import os import sys import subprocess from collections import OrderedDict import click from EulerPy import __version__ from EulerPy.problem import Problem from EulerPy.utils import clock, format_time, problem_glob # --cheat / -c # --generate / -g def generate(num, prompt_default=True): """Generates Python file for a problem.""" p = Problem(num) problem_text = p.text msg = "Generate file for problem %i?" % num click.confirm(msg, default=prompt_default, abort=True) # Allow skipped problem files to be recreated if p.glob: filename = str(p.file) msg = '"{}" already exists. Overwrite?'.format(filename) click.confirm(click.style(msg, fg='red'), abort=True) else: # Try to keep prefix consistent with existing files previous_file = Problem(num - 1).file prefix = previous_file.prefix if previous_file else '' filename = p.filename(prefix=prefix) header = 'Project Euler Problem %i' % num divider = '=' * len(header) text = '\n'.join([header, divider, '', problem_text]) content = '\n'.join(['"""', text, '"""']) with open(filename, 'w') as f: f.write(content + '\n\n\n') click.secho('Successfully created "{}".'.format(filename), fg='green') # Copy over problem resources if required if p.resources: p.copy_resources() # --preview / -p def preview(num): """Prints the text of a problem.""" # Define problem_text before echoing in case problem does not exist problem_text = Problem(num).text click.secho("Project Euler Problem %i" % num, bold=True) click.echo(problem_text) # --skip / -s def skip(num): """Generates Python file for the next problem.""" click.echo("Current problem is problem %i." % num) generate(num + 1, prompt_default=False) Problem(num).file.change_suffix('-skipped') # --verify / -v def verify(num, filename=None, exit=True): """Verifies the solution to a problem.""" p = Problem(num) filename = filename or p.filename() if not os.path.isfile(filename): # Attempt to verify the first problem file matched by glob if p.glob: filename = str(p.file) else: click.secho('No file found for problem %i.' 
% p.num, fg='red') sys.exit(1) solution = p.solution click.echo('Checking "{}" against solution: '.format(filename), nl=False) cmd = (sys.executable or 'python', filename) start = clock() proc = subprocess.Popen(cmd, stdout=subprocess.PIPE) stdout = proc.communicate()[0] end = clock() time_info = format_time(start, end) # Return value of anything other than 0 indicates an error if proc.poll() != 0: click.secho('Error calling "{}".'.format(filename), fg='red') click.secho(time_info, fg='cyan') # Return None if option is not --verify-all, otherwise exit return sys.exit(1) if exit else None # Decode output if returned as bytes (Python 3) if isinstance(stdout, bytes): output = stdout.decode('ascii') # Split output lines into array; make empty output more readable output_lines = output.splitlines() if output else ['[no output]'] # If output is multi-lined, print the first line of the output on a # separate line from the "checking against solution" message, and # skip the solution check (multi-line solution won't be correct) if len(output_lines) > 1: is_correct = False click.echo() # force output to start on next line click.secho('\n'.join(output_lines), bold=True, fg='red') else: is_correct = output_lines[0] == solution fg_colour = 'green' if is_correct else 'red' click.secho(output_lines[0], bold=True, fg=fg_colour) click.secho(time_info, fg='cyan') # Remove any suffix from the filename if its solution is correct if is_correct: p.file.change_suffix('') # Exit here if answer was incorrect, otherwise return is_correct value return sys.exit(1) if exit and not is_correct else is_correct # --verify-all def verify_all(num): """ Verifies all problem files in the current directory and prints an overview of the status of each problem. """ # Define various problem statuses keys = ('correct', 'incorrect', 'error', 'skipped', 'missing') symbols = ('C', 'I', 'E', 'S', '.') colours = ('green', 'red', 'yellow', 'cyan', 'white') status = OrderedDict( (key, click.style(symbol, fg=colour, bold=True)) for key, symbol, colour in zip(keys, symbols, colours) ) overview = {} # Search through problem files using glob module files = problem_glob() # No Project Euler files in the current directory if not files: click.echo("No Project Euler files found in the current directory.") sys.exit(1) for file in files: # Catch KeyboardInterrupt during verification to allow the user to # skip the verification of a specific problem if it takes too long try: is_correct = verify(file.num, filename=str(file), exit=False) except KeyboardInterrupt: overview[file.num] = status['skipped'] else: if is_correct is None: # error was returned by problem file overview[file.num] = status['error'] elif is_correct: overview[file.num] = status['correct'] elif not is_correct: overview[file.num] = status['incorrect'] # Attempt to add "skipped" suffix to the filename if the # problem file is not the current problem. 
This is useful # when the --verify-all is used in a directory containing # files generated pre-v1.1 (before files with suffixes) if file.num != num: file.change_suffix('-skipped') # Separate each verification with a newline click.echo() # Print overview of the status of each problem legend = ', '.join('{} = {}'.format(v, k) for k, v in status.items()) click.echo('-' * 63) click.echo(legend + '\n') # Rows needed for overview is based on the current problem number num_of_rows = (num + 19) // 20 for row in range(1, num_of_rows + 1): low, high = (row * 20) - 19, (row * 20) click.echo("Problems {:03d}-{:03d}: ".format(low, high), nl=False) for problem in range(low, high + 1): # Add missing status to problems with no corresponding file status = overview[problem] if problem in overview else '.' # Separate problem indicators into groups of 5 spacer = ' ' if (problem % 5 == 0) else ' ' # Start a new line at the end of each row click.secho(status + spacer, nl=(problem % 20 == 0)) click.echo() def euler_options(fn): """Decorator to link CLI options with their appropriate functions""" euler_functions = cheat, generate, preview, skip, verify, verify_all # Reverse functions to print help page options in alphabetical order for option in reversed(euler_functions): name, docstring = option.__name__, option.__doc__ kwargs = {'flag_value': option, 'help': docstring} # Apply flag(s) depending on whether or not name is a single word flag = '--%s' % name.replace('_', '-') flags = [flag] if '_' in name else [flag, '-%s' % name[0]] fn = click.option('option', *flags, **kwargs)(fn) return fn @click.command(name='euler', options_metavar='[OPTION]') @click.argument('problem', default=0, type=click.IntRange(0, None)) @euler_options @click.version_option(version=__version__, message="EulerPy %(version)s") def main(option, problem): """Python-based Project Euler command line tool.""" # No problem given (or given option ignores the problem argument) if problem == 0 or option in {skip, verify_all}: # Determine the highest problem number in the current directory files = problem_glob() problem = max(file.num for file in files) if files else 0 # No Project Euler files in current directory (no glob results) if problem == 0: # Generate the first problem file if option is appropriate if option not in {cheat, preview, verify_all}: msg = "No Project Euler files found in the current directory." click.echo(msg) option = generate # Set problem number to 1 problem = 1 # --preview and no problem; preview the next problem elif option is preview: problem += 1 # No option and no problem; generate next file if answer is # correct (verify() will exit if the solution is incorrect) if option is None: verify(problem) problem += 1 option = generate # Problem given but no option; decide between generate and verify elif option is None: option = verify if Problem(problem).glob else generate # Execute function based on option option(problem) sys.exit(0)
iKevinY/EulerPy
EulerPy/euler.py
generate
python
def generate(num, prompt_default=True):
    p = Problem(num)

    problem_text = p.text

    msg = "Generate file for problem %i?" % num
    click.confirm(msg, default=prompt_default, abort=True)

    # Allow skipped problem files to be recreated
    if p.glob:
        filename = str(p.file)
        msg = '"{}" already exists. Overwrite?'.format(filename)
        click.confirm(click.style(msg, fg='red'), abort=True)
    else:
        # Try to keep prefix consistent with existing files
        previous_file = Problem(num - 1).file
        prefix = previous_file.prefix if previous_file else ''
        filename = p.filename(prefix=prefix)

    header = 'Project Euler Problem %i' % num
    divider = '=' * len(header)
    text = '\n'.join([header, divider, '', problem_text])
    content = '\n'.join(['"""', text, '"""'])

    with open(filename, 'w') as f:
        f.write(content + '\n\n\n')

    click.secho('Successfully created "{}".'.format(filename), fg='green')

    # Copy over problem resources if required
    if p.resources:
        p.copy_resources()
Generates Python file for a problem.
train
https://github.com/iKevinY/EulerPy/blob/739c1c67fa7b32af9140ca51e4b4a07733e057a6/EulerPy/euler.py#L25-L57
[ "def filename(self, prefix='', suffix='', extension='.py'):\n \"\"\"Returns filename padded with leading zeros\"\"\"\n return BASE_NAME.format(prefix, self.num, suffix, extension)\n", "def copy_resources(self):\n \"\"\"Copies the relevant resources to a resources subdirectory\"\"\"\n if not os.path.isdir('resources'):\n os.mkdir('resources')\n\n resource_dir = os.path.join(os.getcwd(), 'resources', '')\n copied_resources = []\n\n for resource in self.resources:\n src = os.path.join(EULER_DATA, 'resources', resource)\n if os.path.isfile(src):\n shutil.copy(src, resource_dir)\n copied_resources.append(resource)\n\n if copied_resources:\n copied = ', '.join(copied_resources)\n path = os.path.relpath(resource_dir, os.pardir)\n msg = \"Copied {} to {}.\".format(copied, path)\n\n click.secho(msg, fg='green')\n" ]
# -*- coding: utf-8 -*- import os import sys import subprocess from collections import OrderedDict import click from EulerPy import __version__ from EulerPy.problem import Problem from EulerPy.utils import clock, format_time, problem_glob # --cheat / -c def cheat(num): """View the answer to a problem.""" # Define solution before echoing in case solution does not exist solution = click.style(Problem(num).solution, bold=True) click.confirm("View answer to problem %i?" % num, abort=True) click.echo("The answer to problem {} is {}.".format(num, solution)) # --generate / -g # --preview / -p def preview(num): """Prints the text of a problem.""" # Define problem_text before echoing in case problem does not exist problem_text = Problem(num).text click.secho("Project Euler Problem %i" % num, bold=True) click.echo(problem_text) # --skip / -s def skip(num): """Generates Python file for the next problem.""" click.echo("Current problem is problem %i." % num) generate(num + 1, prompt_default=False) Problem(num).file.change_suffix('-skipped') # --verify / -v def verify(num, filename=None, exit=True): """Verifies the solution to a problem.""" p = Problem(num) filename = filename or p.filename() if not os.path.isfile(filename): # Attempt to verify the first problem file matched by glob if p.glob: filename = str(p.file) else: click.secho('No file found for problem %i.' % p.num, fg='red') sys.exit(1) solution = p.solution click.echo('Checking "{}" against solution: '.format(filename), nl=False) cmd = (sys.executable or 'python', filename) start = clock() proc = subprocess.Popen(cmd, stdout=subprocess.PIPE) stdout = proc.communicate()[0] end = clock() time_info = format_time(start, end) # Return value of anything other than 0 indicates an error if proc.poll() != 0: click.secho('Error calling "{}".'.format(filename), fg='red') click.secho(time_info, fg='cyan') # Return None if option is not --verify-all, otherwise exit return sys.exit(1) if exit else None # Decode output if returned as bytes (Python 3) if isinstance(stdout, bytes): output = stdout.decode('ascii') # Split output lines into array; make empty output more readable output_lines = output.splitlines() if output else ['[no output]'] # If output is multi-lined, print the first line of the output on a # separate line from the "checking against solution" message, and # skip the solution check (multi-line solution won't be correct) if len(output_lines) > 1: is_correct = False click.echo() # force output to start on next line click.secho('\n'.join(output_lines), bold=True, fg='red') else: is_correct = output_lines[0] == solution fg_colour = 'green' if is_correct else 'red' click.secho(output_lines[0], bold=True, fg=fg_colour) click.secho(time_info, fg='cyan') # Remove any suffix from the filename if its solution is correct if is_correct: p.file.change_suffix('') # Exit here if answer was incorrect, otherwise return is_correct value return sys.exit(1) if exit and not is_correct else is_correct # --verify-all def verify_all(num): """ Verifies all problem files in the current directory and prints an overview of the status of each problem. """ # Define various problem statuses keys = ('correct', 'incorrect', 'error', 'skipped', 'missing') symbols = ('C', 'I', 'E', 'S', '.') colours = ('green', 'red', 'yellow', 'cyan', 'white') status = OrderedDict( (key, click.style(symbol, fg=colour, bold=True)) for key, symbol, colour in zip(keys, symbols, colours) ) overview = {} # Search through problem files using glob module files = problem_glob() # No Project Euler files in the current directory if not files: click.echo("No Project Euler files found in the current directory.") sys.exit(1) for file in files: # Catch KeyboardInterrupt during verification to allow the user to # skip the verification of a specific problem if it takes too long try: is_correct = verify(file.num, filename=str(file), exit=False) except KeyboardInterrupt: overview[file.num] = status['skipped'] else: if is_correct is None: # error was returned by problem file overview[file.num] = status['error'] elif is_correct: overview[file.num] = status['correct'] elif not is_correct: overview[file.num] = status['incorrect'] # Attempt to add "skipped" suffix to the filename if the # problem file is not the current problem. This is useful # when the --verify-all is used in a directory containing # files generated pre-v1.1 (before files with suffixes) if file.num != num: file.change_suffix('-skipped') # Separate each verification with a newline click.echo() # Print overview of the status of each problem legend = ', '.join('{} = {}'.format(v, k) for k, v in status.items()) click.echo('-' * 63) click.echo(legend + '\n') # Rows needed for overview is based on the current problem number num_of_rows = (num + 19) // 20 for row in range(1, num_of_rows + 1): low, high = (row * 20) - 19, (row * 20) click.echo("Problems {:03d}-{:03d}: ".format(low, high), nl=False) for problem in range(low, high + 1): # Add missing status to problems with no corresponding file status = overview[problem] if problem in overview else '.' # Separate problem indicators into groups of 5 spacer = ' ' if (problem % 5 == 0) else ' ' # Start a new line at the end of each row click.secho(status + spacer, nl=(problem % 20 == 0)) click.echo() def euler_options(fn): """Decorator to link CLI options with their appropriate functions""" euler_functions = cheat, generate, preview, skip, verify, verify_all # Reverse functions to print help page options in alphabetical order for option in reversed(euler_functions): name, docstring = option.__name__, option.__doc__ kwargs = {'flag_value': option, 'help': docstring} # Apply flag(s) depending on whether or not name is a single word flag = '--%s' % name.replace('_', '-') flags = [flag] if '_' in name else [flag, '-%s' % name[0]] fn = click.option('option', *flags, **kwargs)(fn) return fn @click.command(name='euler', options_metavar='[OPTION]') @click.argument('problem', default=0, type=click.IntRange(0, None)) @euler_options @click.version_option(version=__version__, message="EulerPy %(version)s") def main(option, problem): """Python-based Project Euler command line tool.""" # No problem given (or given option ignores the problem argument) if problem == 0 or option in {skip, verify_all}: # Determine the highest problem number in the current directory files = problem_glob() problem = max(file.num for file in files) if files else 0 # No Project Euler files in current directory (no glob results) if problem == 0: # Generate the first problem file if option is appropriate if option not in {cheat, preview, verify_all}: msg = "No Project Euler files found in the current directory." click.echo(msg) option = generate # Set problem number to 1 problem = 1 # --preview and no problem; preview the next problem elif option is preview: problem += 1 # No option and no problem; generate next file if answer is # correct (verify() will exit if the solution is incorrect) if option is None: verify(problem) problem += 1 option = generate # Problem given but no option; decide between generate and verify elif option is None: option = verify if Problem(problem).glob else generate # Execute function based on option option(problem) sys.exit(0)
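A note on generate() above: the file it writes begins with a module docstring (header, an '=' divider matching the header length, a blank line, then the problem text) followed by two blank lines. For problem 1 the written content would look like this, with the problem text elided:

    """
    Project Euler Problem 1
    =======================
    ...
    """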
iKevinY/EulerPy
EulerPy/euler.py
preview
python
def preview(num):
    # Define problem_text before echoing in case problem does not exist
    problem_text = Problem(num).text
    click.secho("Project Euler Problem %i" % num, bold=True)
    click.echo(problem_text)
Prints the text of a problem.
train
https://github.com/iKevinY/EulerPy/blob/739c1c67fa7b32af9140ca51e4b4a07733e057a6/EulerPy/euler.py#L61-L66
null
# -*- coding: utf-8 -*- import os import sys import subprocess from collections import OrderedDict import click from EulerPy import __version__ from EulerPy.problem import Problem from EulerPy.utils import clock, format_time, problem_glob # --cheat / -c def cheat(num): """View the answer to a problem.""" # Define solution before echoing in case solution does not exist solution = click.style(Problem(num).solution, bold=True) click.confirm("View answer to problem %i?" % num, abort=True) click.echo("The answer to problem {} is {}.".format(num, solution)) # --generate / -g def generate(num, prompt_default=True): """Generates Python file for a problem.""" p = Problem(num) problem_text = p.text msg = "Generate file for problem %i?" % num click.confirm(msg, default=prompt_default, abort=True) # Allow skipped problem files to be recreated if p.glob: filename = str(p.file) msg = '"{}" already exists. Overwrite?'.format(filename) click.confirm(click.style(msg, fg='red'), abort=True) else: # Try to keep prefix consistent with existing files previous_file = Problem(num - 1).file prefix = previous_file.prefix if previous_file else '' filename = p.filename(prefix=prefix) header = 'Project Euler Problem %i' % num divider = '=' * len(header) text = '\n'.join([header, divider, '', problem_text]) content = '\n'.join(['"""', text, '"""']) with open(filename, 'w') as f: f.write(content + '\n\n\n') click.secho('Successfully created "{}".'.format(filename), fg='green') # Copy over problem resources if required if p.resources: p.copy_resources() # --preview / -p # --skip / -s def skip(num): """Generates Python file for the next problem.""" click.echo("Current problem is problem %i." % num) generate(num + 1, prompt_default=False) Problem(num).file.change_suffix('-skipped') # --verify / -v def verify(num, filename=None, exit=True): """Verifies the solution to a problem.""" p = Problem(num) filename = filename or p.filename() if not os.path.isfile(filename): # Attempt to verify the first problem file matched by glob if p.glob: filename = str(p.file) else: click.secho('No file found for problem %i.' % p.num, fg='red') sys.exit(1) solution = p.solution click.echo('Checking "{}" against solution: '.format(filename), nl=False) cmd = (sys.executable or 'python', filename) start = clock() proc = subprocess.Popen(cmd, stdout=subprocess.PIPE) stdout = proc.communicate()[0] end = clock() time_info = format_time(start, end) # Return value of anything other than 0 indicates an error if proc.poll() != 0: click.secho('Error calling "{}".'.format(filename), fg='red') click.secho(time_info, fg='cyan') # Return None if option is not --verify-all, otherwise exit return sys.exit(1) if exit else None # Decode output if returned as bytes (Python 3) if isinstance(stdout, bytes): output = stdout.decode('ascii') # Split output lines into array; make empty output more readable output_lines = output.splitlines() if output else ['[no output]'] # If output is multi-lined, print the first line of the output on a # separate line from the "checking against solution" message, and # skip the solution check (multi-line solution won't be correct) if len(output_lines) > 1: is_correct = False click.echo() # force output to start on next line click.secho('\n'.join(output_lines), bold=True, fg='red') else: is_correct = output_lines[0] == solution fg_colour = 'green' if is_correct else 'red' click.secho(output_lines[0], bold=True, fg=fg_colour) click.secho(time_info, fg='cyan') # Remove any suffix from the filename if its solution is correct if is_correct: p.file.change_suffix('') # Exit here if answer was incorrect, otherwise return is_correct value return sys.exit(1) if exit and not is_correct else is_correct # --verify-all def verify_all(num): """ Verifies all problem files in the current directory and prints an overview of the status of each problem. """ # Define various problem statuses keys = ('correct', 'incorrect', 'error', 'skipped', 'missing') symbols = ('C', 'I', 'E', 'S', '.') colours = ('green', 'red', 'yellow', 'cyan', 'white') status = OrderedDict( (key, click.style(symbol, fg=colour, bold=True)) for key, symbol, colour in zip(keys, symbols, colours) ) overview = {} # Search through problem files using glob module files = problem_glob() # No Project Euler files in the current directory if not files: click.echo("No Project Euler files found in the current directory.") sys.exit(1) for file in files: # Catch KeyboardInterrupt during verification to allow the user to # skip the verification of a specific problem if it takes too long try: is_correct = verify(file.num, filename=str(file), exit=False) except KeyboardInterrupt: overview[file.num] = status['skipped'] else: if is_correct is None: # error was returned by problem file overview[file.num] = status['error'] elif is_correct: overview[file.num] = status['correct'] elif not is_correct: overview[file.num] = status['incorrect'] # Attempt to add "skipped" suffix to the filename if the # problem file is not the current problem. This is useful # when the --verify-all is used in a directory containing # files generated pre-v1.1 (before files with suffixes) if file.num != num: file.change_suffix('-skipped') # Separate each verification with a newline click.echo() # Print overview of the status of each problem legend = ', '.join('{} = {}'.format(v, k) for k, v in status.items()) click.echo('-' * 63) click.echo(legend + '\n') # Rows needed for overview is based on the current problem number num_of_rows = (num + 19) // 20 for row in range(1, num_of_rows + 1): low, high = (row * 20) - 19, (row * 20) click.echo("Problems {:03d}-{:03d}: ".format(low, high), nl=False) for problem in range(low, high + 1): # Add missing status to problems with no corresponding file status = overview[problem] if problem in overview else '.' # Separate problem indicators into groups of 5 spacer = ' ' if (problem % 5 == 0) else ' ' # Start a new line at the end of each row click.secho(status + spacer, nl=(problem % 20 == 0)) click.echo() def euler_options(fn): """Decorator to link CLI options with their appropriate functions""" euler_functions = cheat, generate, preview, skip, verify, verify_all # Reverse functions to print help page options in alphabetical order for option in reversed(euler_functions): name, docstring = option.__name__, option.__doc__ kwargs = {'flag_value': option, 'help': docstring} # Apply flag(s) depending on whether or not name is a single word flag = '--%s' % name.replace('_', '-') flags = [flag] if '_' in name else [flag, '-%s' % name[0]] fn = click.option('option', *flags, **kwargs)(fn) return fn @click.command(name='euler', options_metavar='[OPTION]') @click.argument('problem', default=0, type=click.IntRange(0, None)) @euler_options @click.version_option(version=__version__, message="EulerPy %(version)s") def main(option, problem): """Python-based Project Euler command line tool.""" # No problem given (or given option ignores the problem argument) if problem == 0 or option in {skip, verify_all}: # Determine the highest problem number in the current directory files = problem_glob() problem = max(file.num for file in files) if files else 0 # No Project Euler files in current directory (no glob results) if problem == 0: # Generate the first problem file if option is appropriate if option not in {cheat, preview, verify_all}: msg = "No Project Euler files found in the current directory." click.echo(msg) option = generate # Set problem number to 1 problem = 1 # --preview and no problem; preview the next problem elif option is preview: problem += 1 # No option and no problem; generate next file if answer is # correct (verify() will exit if the solution is incorrect) if option is None: verify(problem) problem += 1 option = generate # Problem given but no option; decide between generate and verify elif option is None: option = verify if Problem(problem).glob else generate # Execute function based on option option(problem) sys.exit(0)
iKevinY/EulerPy
EulerPy/euler.py
skip
python
def skip(num):
    click.echo("Current problem is problem %i." % num)
    generate(num + 1, prompt_default=False)
    Problem(num).file.change_suffix('-skipped')
Generates Python file for the next problem.
train
https://github.com/iKevinY/EulerPy/blob/739c1c67fa7b32af9140ca51e4b4a07733e057a6/EulerPy/euler.py#L70-L74
[ "def generate(num, prompt_default=True):\n \"\"\"Generates Python file for a problem.\"\"\"\n p = Problem(num)\n\n problem_text = p.text\n\n msg = \"Generate file for problem %i?\" % num\n click.confirm(msg, default=prompt_default, abort=True)\n\n # Allow skipped problem files to be recreated\n if p.glob:\n filename = str(p.file)\n msg = '\"{}\" already exists. Overwrite?'.format(filename)\n click.confirm(click.style(msg, fg='red'), abort=True)\n else:\n # Try to keep prefix consistent with existing files\n previous_file = Problem(num - 1).file\n prefix = previous_file.prefix if previous_file else ''\n filename = p.filename(prefix=prefix)\n\n header = 'Project Euler Problem %i' % num\n divider = '=' * len(header)\n text = '\\n'.join([header, divider, '', problem_text])\n content = '\\n'.join(['\"\"\"', text, '\"\"\"'])\n\n with open(filename, 'w') as f:\n f.write(content + '\\n\\n\\n')\n\n click.secho('Successfully created \"{}\".'.format(filename), fg='green')\n\n # Copy over problem resources if required\n if p.resources:\n p.copy_resources()\n" ]
# -*- coding: utf-8 -*- import os import sys import subprocess from collections import OrderedDict import click from EulerPy import __version__ from EulerPy.problem import Problem from EulerPy.utils import clock, format_time, problem_glob # --cheat / -c def cheat(num): """View the answer to a problem.""" # Define solution before echoing in case solution does not exist solution = click.style(Problem(num).solution, bold=True) click.confirm("View answer to problem %i?" % num, abort=True) click.echo("The answer to problem {} is {}.".format(num, solution)) # --generate / -g def generate(num, prompt_default=True): """Generates Python file for a problem.""" p = Problem(num) problem_text = p.text msg = "Generate file for problem %i?" % num click.confirm(msg, default=prompt_default, abort=True) # Allow skipped problem files to be recreated if p.glob: filename = str(p.file) msg = '"{}" already exists. Overwrite?'.format(filename) click.confirm(click.style(msg, fg='red'), abort=True) else: # Try to keep prefix consistent with existing files previous_file = Problem(num - 1).file prefix = previous_file.prefix if previous_file else '' filename = p.filename(prefix=prefix) header = 'Project Euler Problem %i' % num divider = '=' * len(header) text = '\n'.join([header, divider, '', problem_text]) content = '\n'.join(['"""', text, '"""']) with open(filename, 'w') as f: f.write(content + '\n\n\n') click.secho('Successfully created "{}".'.format(filename), fg='green') # Copy over problem resources if required if p.resources: p.copy_resources() # --preview / -p def preview(num): """Prints the text of a problem.""" # Define problem_text before echoing in case problem does not exist problem_text = Problem(num).text click.secho("Project Euler Problem %i" % num, bold=True) click.echo(problem_text) # --skip / -s # --verify / -v def verify(num, filename=None, exit=True): """Verifies the solution to a problem.""" p = Problem(num) filename = filename or p.filename() if not os.path.isfile(filename): # Attempt to verify the first problem file matched by glob if p.glob: filename = str(p.file) else: click.secho('No file found for problem %i.' 
% p.num, fg='red') sys.exit(1) solution = p.solution click.echo('Checking "{}" against solution: '.format(filename), nl=False) cmd = (sys.executable or 'python', filename) start = clock() proc = subprocess.Popen(cmd, stdout=subprocess.PIPE) stdout = proc.communicate()[0] end = clock() time_info = format_time(start, end) # Return value of anything other than 0 indicates an error if proc.poll() != 0: click.secho('Error calling "{}".'.format(filename), fg='red') click.secho(time_info, fg='cyan') # Return None if option is not --verify-all, otherwise exit return sys.exit(1) if exit else None # Decode output if returned as bytes (Python 3) if isinstance(stdout, bytes): output = stdout.decode('ascii') # Split output lines into array; make empty output more readable output_lines = output.splitlines() if output else ['[no output]'] # If output is multi-lined, print the first line of the output on a # separate line from the "checking against solution" message, and # skip the solution check (multi-line solution won't be correct) if len(output_lines) > 1: is_correct = False click.echo() # force output to start on next line click.secho('\n'.join(output_lines), bold=True, fg='red') else: is_correct = output_lines[0] == solution fg_colour = 'green' if is_correct else 'red' click.secho(output_lines[0], bold=True, fg=fg_colour) click.secho(time_info, fg='cyan') # Remove any suffix from the filename if its solution is correct if is_correct: p.file.change_suffix('') # Exit here if answer was incorrect, otherwise return is_correct value return sys.exit(1) if exit and not is_correct else is_correct # --verify-all def verify_all(num): """ Verifies all problem files in the current directory and prints an overview of the status of each problem. """ # Define various problem statuses keys = ('correct', 'incorrect', 'error', 'skipped', 'missing') symbols = ('C', 'I', 'E', 'S', '.') colours = ('green', 'red', 'yellow', 'cyan', 'white') status = OrderedDict( (key, click.style(symbol, fg=colour, bold=True)) for key, symbol, colour in zip(keys, symbols, colours) ) overview = {} # Search through problem files using glob module files = problem_glob() # No Project Euler files in the current directory if not files: click.echo("No Project Euler files found in the current directory.") sys.exit(1) for file in files: # Catch KeyboardInterrupt during verification to allow the user to # skip the verification of a specific problem if it takes too long try: is_correct = verify(file.num, filename=str(file), exit=False) except KeyboardInterrupt: overview[file.num] = status['skipped'] else: if is_correct is None: # error was returned by problem file overview[file.num] = status['error'] elif is_correct: overview[file.num] = status['correct'] elif not is_correct: overview[file.num] = status['incorrect'] # Attempt to add "skipped" suffix to the filename if the # problem file is not the current problem. 
This is useful # when the --verify-all is used in a directory containing # files generated pre-v1.1 (before files with suffixes) if file.num != num: file.change_suffix('-skipped') # Separate each verification with a newline click.echo() # Print overview of the status of each problem legend = ', '.join('{} = {}'.format(v, k) for k, v in status.items()) click.echo('-' * 63) click.echo(legend + '\n') # Rows needed for overview is based on the current problem number num_of_rows = (num + 19) // 20 for row in range(1, num_of_rows + 1): low, high = (row * 20) - 19, (row * 20) click.echo("Problems {:03d}-{:03d}: ".format(low, high), nl=False) for problem in range(low, high + 1): # Add missing status to problems with no corresponding file status = overview[problem] if problem in overview else '.' # Separate problem indicators into groups of 5 spacer = ' ' if (problem % 5 == 0) else ' ' # Start a new line at the end of each row click.secho(status + spacer, nl=(problem % 20 == 0)) click.echo() def euler_options(fn): """Decorator to link CLI options with their appropriate functions""" euler_functions = cheat, generate, preview, skip, verify, verify_all # Reverse functions to print help page options in alphabetical order for option in reversed(euler_functions): name, docstring = option.__name__, option.__doc__ kwargs = {'flag_value': option, 'help': docstring} # Apply flag(s) depending on whether or not name is a single word flag = '--%s' % name.replace('_', '-') flags = [flag] if '_' in name else [flag, '-%s' % name[0]] fn = click.option('option', *flags, **kwargs)(fn) return fn @click.command(name='euler', options_metavar='[OPTION]') @click.argument('problem', default=0, type=click.IntRange(0, None)) @euler_options @click.version_option(version=__version__, message="EulerPy %(version)s") def main(option, problem): """Python-based Project Euler command line tool.""" # No problem given (or given option ignores the problem argument) if problem == 0 or option in {skip, verify_all}: # Determine the highest problem number in the current directory files = problem_glob() problem = max(file.num for file in files) if files else 0 # No Project Euler files in current directory (no glob results) if problem == 0: # Generate the first problem file if option is appropriate if option not in {cheat, preview, verify_all}: msg = "No Project Euler files found in the current directory." click.echo(msg) option = generate # Set problem number to 1 problem = 1 # --preview and no problem; preview the next problem elif option is preview: problem += 1 # No option and no problem; generate next file if answer is # correct (verify() will exit if the solution is incorrect) if option is None: verify(problem) problem += 1 option = generate # Problem given but no option; decide between generate and verify elif option is None: option = verify if Problem(problem).glob else generate # Execute function based on option option(problem) sys.exit(0)
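A minimal sketch of the suffix-renaming idea that skip() relies on. The real logic lives in EulerPy.problem's ProblemFile.change_suffix, whose implementation is not shown in this record, so the helper below is a hypothetical stand-in, not the library's method:

import os

def change_suffix(filename, suffix):
    # Hypothetical stand-in for ProblemFile.change_suffix: rename
    # e.g. "042.py" to "042-skipped.py", preserving the extension.
    base, ext = os.path.splitext(filename)
    os.rename(filename, base + suffix + ext)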
iKevinY/EulerPy
EulerPy/euler.py
verify
python
def verify(num, filename=None, exit=True):
    p = Problem(num)

    filename = filename or p.filename()

    if not os.path.isfile(filename):
        # Attempt to verify the first problem file matched by glob
        if p.glob:
            filename = str(p.file)
        else:
            click.secho('No file found for problem %i.' % p.num, fg='red')
            sys.exit(1)

    solution = p.solution
    click.echo('Checking "{}" against solution: '.format(filename), nl=False)

    cmd = (sys.executable or 'python', filename)
    start = clock()
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
    stdout = proc.communicate()[0]
    end = clock()
    time_info = format_time(start, end)

    # Return value of anything other than 0 indicates an error
    if proc.poll() != 0:
        click.secho('Error calling "{}".'.format(filename), fg='red')
        click.secho(time_info, fg='cyan')

        # Return None if option is not --verify-all, otherwise exit
        return sys.exit(1) if exit else None

    # Decode output if returned as bytes (Python 3)
    if isinstance(stdout, bytes):
        output = stdout.decode('ascii')

    # Split output lines into array; make empty output more readable
    output_lines = output.splitlines() if output else ['[no output]']

    # If output is multi-lined, print the first line of the output on a
    # separate line from the "checking against solution" message, and
    # skip the solution check (multi-line solution won't be correct)
    if len(output_lines) > 1:
        is_correct = False
        click.echo()  # force output to start on next line
        click.secho('\n'.join(output_lines), bold=True, fg='red')
    else:
        is_correct = output_lines[0] == solution
        fg_colour = 'green' if is_correct else 'red'
        click.secho(output_lines[0], bold=True, fg=fg_colour)

    click.secho(time_info, fg='cyan')

    # Remove any suffix from the filename if its solution is correct
    if is_correct:
        p.file.change_suffix('')

    # Exit here if answer was incorrect, otherwise return is_correct value
    return sys.exit(1) if exit and not is_correct else is_correct
Verifies the solution to a problem.
train
https://github.com/iKevinY/EulerPy/blob/739c1c67fa7b32af9140ca51e4b4a07733e057a6/EulerPy/euler.py#L78-L136
[ "def clock():\n \"\"\"\n Under Windows, system CPU time can't be measured. Return time.clock()\n as user time and None as system time.\n \"\"\"\n return time.clock(), None\n", "def clock():\n \"\"\"\n Returns a tuple (t_user, t_system) since the start of the process.\n This is done via a call to resource.getrusage, so it avoids the\n wraparound problems in time.clock().\n \"\"\"\n return resource.getrusage(resource.RUSAGE_CHILDREN)[:2]\n", "def format_time(start, end):\n \"\"\"Returns string with relevant time information formatted properly\"\"\"\n try:\n cpu_usr = end[0] - start[0]\n cpu_sys = end[1] - start[1]\n\n except TypeError:\n # `clock()[1] == None` so subtraction results in a TypeError\n return 'Time elapsed: {}'.format(human_time(cpu_usr))\n\n else:\n times = (human_time(x) for x in (cpu_usr, cpu_sys, cpu_usr + cpu_sys))\n return 'Time elapsed: user: {}, sys: {}, total: {}'.format(*times)\n", "def filename(self, prefix='', suffix='', extension='.py'):\n \"\"\"Returns filename padded with leading zeros\"\"\"\n return BASE_NAME.format(prefix, self.num, suffix, extension)\n" ]
# -*- coding: utf-8 -*- import os import sys import subprocess from collections import OrderedDict import click from EulerPy import __version__ from EulerPy.problem import Problem from EulerPy.utils import clock, format_time, problem_glob # --cheat / -c def cheat(num): """View the answer to a problem.""" # Define solution before echoing in case solution does not exist solution = click.style(Problem(num).solution, bold=True) click.confirm("View answer to problem %i?" % num, abort=True) click.echo("The answer to problem {} is {}.".format(num, solution)) # --generate / -g def generate(num, prompt_default=True): """Generates Python file for a problem.""" p = Problem(num) problem_text = p.text msg = "Generate file for problem %i?" % num click.confirm(msg, default=prompt_default, abort=True) # Allow skipped problem files to be recreated if p.glob: filename = str(p.file) msg = '"{}" already exists. Overwrite?'.format(filename) click.confirm(click.style(msg, fg='red'), abort=True) else: # Try to keep prefix consistent with existing files previous_file = Problem(num - 1).file prefix = previous_file.prefix if previous_file else '' filename = p.filename(prefix=prefix) header = 'Project Euler Problem %i' % num divider = '=' * len(header) text = '\n'.join([header, divider, '', problem_text]) content = '\n'.join(['"""', text, '"""']) with open(filename, 'w') as f: f.write(content + '\n\n\n') click.secho('Successfully created "{}".'.format(filename), fg='green') # Copy over problem resources if required if p.resources: p.copy_resources() # --preview / -p def preview(num): """Prints the text of a problem.""" # Define problem_text before echoing in case problem does not exist problem_text = Problem(num).text click.secho("Project Euler Problem %i" % num, bold=True) click.echo(problem_text) # --skip / -s def skip(num): """Generates Python file for the next problem.""" click.echo("Current problem is problem %i." % num) generate(num + 1, prompt_default=False) Problem(num).file.change_suffix('-skipped') # --verify / -v # --verify-all def verify_all(num): """ Verifies all problem files in the current directory and prints an overview of the status of each problem. """ # Define various problem statuses keys = ('correct', 'incorrect', 'error', 'skipped', 'missing') symbols = ('C', 'I', 'E', 'S', '.') colours = ('green', 'red', 'yellow', 'cyan', 'white') status = OrderedDict( (key, click.style(symbol, fg=colour, bold=True)) for key, symbol, colour in zip(keys, symbols, colours) ) overview = {} # Search through problem files using glob module files = problem_glob() # No Project Euler files in the current directory if not files: click.echo("No Project Euler files found in the current directory.") sys.exit(1) for file in files: # Catch KeyboardInterrupt during verification to allow the user to # skip the verification of a specific problem if it takes too long try: is_correct = verify(file.num, filename=str(file), exit=False) except KeyboardInterrupt: overview[file.num] = status['skipped'] else: if is_correct is None: # error was returned by problem file overview[file.num] = status['error'] elif is_correct: overview[file.num] = status['correct'] elif not is_correct: overview[file.num] = status['incorrect'] # Attempt to add "skipped" suffix to the filename if the # problem file is not the current problem. 
This is useful # when the --verify-all is used in a directory containing # files generated pre-v1.1 (before files with suffixes) if file.num != num: file.change_suffix('-skipped') # Separate each verification with a newline click.echo() # Print overview of the status of each problem legend = ', '.join('{} = {}'.format(v, k) for k, v in status.items()) click.echo('-' * 63) click.echo(legend + '\n') # Rows needed for overview is based on the current problem number num_of_rows = (num + 19) // 20 for row in range(1, num_of_rows + 1): low, high = (row * 20) - 19, (row * 20) click.echo("Problems {:03d}-{:03d}: ".format(low, high), nl=False) for problem in range(low, high + 1): # Add missing status to problems with no corresponding file status = overview[problem] if problem in overview else '.' # Separate problem indicators into groups of 5 spacer = ' ' if (problem % 5 == 0) else ' ' # Start a new line at the end of each row click.secho(status + spacer, nl=(problem % 20 == 0)) click.echo() def euler_options(fn): """Decorator to link CLI options with their appropriate functions""" euler_functions = cheat, generate, preview, skip, verify, verify_all # Reverse functions to print help page options in alphabetical order for option in reversed(euler_functions): name, docstring = option.__name__, option.__doc__ kwargs = {'flag_value': option, 'help': docstring} # Apply flag(s) depending on whether or not name is a single word flag = '--%s' % name.replace('_', '-') flags = [flag] if '_' in name else [flag, '-%s' % name[0]] fn = click.option('option', *flags, **kwargs)(fn) return fn @click.command(name='euler', options_metavar='[OPTION]') @click.argument('problem', default=0, type=click.IntRange(0, None)) @euler_options @click.version_option(version=__version__, message="EulerPy %(version)s") def main(option, problem): """Python-based Project Euler command line tool.""" # No problem given (or given option ignores the problem argument) if problem == 0 or option in {skip, verify_all}: # Determine the highest problem number in the current directory files = problem_glob() problem = max(file.num for file in files) if files else 0 # No Project Euler files in current directory (no glob results) if problem == 0: # Generate the first problem file if option is appropriate if option not in {cheat, preview, verify_all}: msg = "No Project Euler files found in the current directory." click.echo(msg) option = generate # Set problem number to 1 problem = 1 # --preview and no problem; preview the next problem elif option is preview: problem += 1 # No option and no problem; generate next file if answer is # correct (verify() will exit if the solution is incorrect) if option is None: verify(problem) problem += 1 option = generate # Problem given but no option; decide between generate and verify elif option is None: option = verify if Problem(problem).glob else generate # Execute function based on option option(problem) sys.exit(0)
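The core of verify() is a subprocess run whose stdout is compared against the stored solution. A minimal sketch of just that comparison, with the timing and click output stripped away (check_answer is a hypothetical helper, not part of EulerPy):

import subprocess
import sys

def check_answer(filename, expected):
    # Run the problem file with the current interpreter and capture stdout.
    proc = subprocess.Popen((sys.executable, filename),
                            stdout=subprocess.PIPE)
    stdout = proc.communicate()[0]
    if isinstance(stdout, bytes):
        stdout = stdout.decode('ascii')
    # A non-zero return code means the file errored; otherwise compare
    # the stripped output against the known answer.
    return proc.poll() == 0 and stdout.strip() == expected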
iKevinY/EulerPy
EulerPy/euler.py
verify_all
python
def verify_all(num):
    # Define various problem statuses
    keys = ('correct', 'incorrect', 'error', 'skipped', 'missing')
    symbols = ('C', 'I', 'E', 'S', '.')
    colours = ('green', 'red', 'yellow', 'cyan', 'white')

    status = OrderedDict(
        (key, click.style(symbol, fg=colour, bold=True))
        for key, symbol, colour in zip(keys, symbols, colours)
    )

    overview = {}

    # Search through problem files using glob module
    files = problem_glob()

    # No Project Euler files in the current directory
    if not files:
        click.echo("No Project Euler files found in the current directory.")
        sys.exit(1)

    for file in files:
        # Catch KeyboardInterrupt during verification to allow the user to
        # skip the verification of a specific problem if it takes too long
        try:
            is_correct = verify(file.num, filename=str(file), exit=False)
        except KeyboardInterrupt:
            overview[file.num] = status['skipped']
        else:
            if is_correct is None:  # error was returned by problem file
                overview[file.num] = status['error']
            elif is_correct:
                overview[file.num] = status['correct']
            elif not is_correct:
                overview[file.num] = status['incorrect']

                # Attempt to add "skipped" suffix to the filename if the
                # problem file is not the current problem. This is useful
                # when the --verify-all is used in a directory containing
                # files generated pre-v1.1 (before files with suffixes)
                if file.num != num:
                    file.change_suffix('-skipped')

        # Separate each verification with a newline
        click.echo()

    # Print overview of the status of each problem
    legend = ', '.join('{} = {}'.format(v, k) for k, v in status.items())
    click.echo('-' * 63)
    click.echo(legend + '\n')

    # Rows needed for overview is based on the current problem number
    num_of_rows = (num + 19) // 20

    for row in range(1, num_of_rows + 1):
        low, high = (row * 20) - 19, (row * 20)
        click.echo("Problems {:03d}-{:03d}: ".format(low, high), nl=False)

        for problem in range(low, high + 1):
            # Add missing status to problems with no corresponding file
            status = overview[problem] if problem in overview else '.'

            # Separate problem indicators into groups of 5
            spacer = '   ' if (problem % 5 == 0) else ' '

            # Start a new line at the end of each row
            click.secho(status + spacer, nl=(problem % 20 == 0))

    click.echo()
Verifies all problem files in the current directory and prints an overview of the status of each problem.
train
https://github.com/iKevinY/EulerPy/blob/739c1c67fa7b32af9140ca51e4b4a07733e057a6/EulerPy/euler.py#L140-L214
[ "def problem_glob(extension='.py'):\n \"\"\"Returns ProblemFile objects for all valid problem files\"\"\"\n filenames = glob.glob('*[0-9][0-9][0-9]*{}'.format(extension))\n return [ProblemFile(file) for file in filenames]\n", "def verify(num, filename=None, exit=True):\n \"\"\"Verifies the solution to a problem.\"\"\"\n p = Problem(num)\n\n filename = filename or p.filename()\n\n if not os.path.isfile(filename):\n # Attempt to verify the first problem file matched by glob\n if p.glob:\n filename = str(p.file)\n else:\n click.secho('No file found for problem %i.' % p.num, fg='red')\n sys.exit(1)\n\n solution = p.solution\n click.echo('Checking \"{}\" against solution: '.format(filename), nl=False)\n\n cmd = (sys.executable or 'python', filename)\n start = clock()\n proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)\n stdout = proc.communicate()[0]\n end = clock()\n time_info = format_time(start, end)\n\n # Return value of anything other than 0 indicates an error\n if proc.poll() != 0:\n click.secho('Error calling \"{}\".'.format(filename), fg='red')\n click.secho(time_info, fg='cyan')\n\n # Return None if option is not --verify-all, otherwise exit\n return sys.exit(1) if exit else None\n\n # Decode output if returned as bytes (Python 3)\n if isinstance(stdout, bytes):\n output = stdout.decode('ascii')\n\n # Split output lines into array; make empty output more readable\n output_lines = output.splitlines() if output else ['[no output]']\n\n # If output is multi-lined, print the first line of the output on a\n # separate line from the \"checking against solution\" message, and\n # skip the solution check (multi-line solution won't be correct)\n if len(output_lines) > 1:\n is_correct = False\n click.echo() # force output to start on next line\n click.secho('\\n'.join(output_lines), bold=True, fg='red')\n else:\n is_correct = output_lines[0] == solution\n fg_colour = 'green' if is_correct else 'red'\n click.secho(output_lines[0], bold=True, fg=fg_colour)\n\n click.secho(time_info, fg='cyan')\n\n # Remove any suffix from the filename if its solution is correct\n if is_correct:\n p.file.change_suffix('')\n\n # Exit here if answer was incorrect, otherwise return is_correct value\n return sys.exit(1) if exit and not is_correct else is_correct\n" ]
# -*- coding: utf-8 -*- import os import sys import subprocess from collections import OrderedDict import click from EulerPy import __version__ from EulerPy.problem import Problem from EulerPy.utils import clock, format_time, problem_glob # --cheat / -c def cheat(num): """View the answer to a problem.""" # Define solution before echoing in case solution does not exist solution = click.style(Problem(num).solution, bold=True) click.confirm("View answer to problem %i?" % num, abort=True) click.echo("The answer to problem {} is {}.".format(num, solution)) # --generate / -g def generate(num, prompt_default=True): """Generates Python file for a problem.""" p = Problem(num) problem_text = p.text msg = "Generate file for problem %i?" % num click.confirm(msg, default=prompt_default, abort=True) # Allow skipped problem files to be recreated if p.glob: filename = str(p.file) msg = '"{}" already exists. Overwrite?'.format(filename) click.confirm(click.style(msg, fg='red'), abort=True) else: # Try to keep prefix consistent with existing files previous_file = Problem(num - 1).file prefix = previous_file.prefix if previous_file else '' filename = p.filename(prefix=prefix) header = 'Project Euler Problem %i' % num divider = '=' * len(header) text = '\n'.join([header, divider, '', problem_text]) content = '\n'.join(['"""', text, '"""']) with open(filename, 'w') as f: f.write(content + '\n\n\n') click.secho('Successfully created "{}".'.format(filename), fg='green') # Copy over problem resources if required if p.resources: p.copy_resources() # --preview / -p def preview(num): """Prints the text of a problem.""" # Define problem_text before echoing in case problem does not exist problem_text = Problem(num).text click.secho("Project Euler Problem %i" % num, bold=True) click.echo(problem_text) # --skip / -s def skip(num): """Generates Python file for the next problem.""" click.echo("Current problem is problem %i." % num) generate(num + 1, prompt_default=False) Problem(num).file.change_suffix('-skipped') # --verify / -v def verify(num, filename=None, exit=True): """Verifies the solution to a problem.""" p = Problem(num) filename = filename or p.filename() if not os.path.isfile(filename): # Attempt to verify the first problem file matched by glob if p.glob: filename = str(p.file) else: click.secho('No file found for problem %i.' 
% p.num, fg='red') sys.exit(1) solution = p.solution click.echo('Checking "{}" against solution: '.format(filename), nl=False) cmd = (sys.executable or 'python', filename) start = clock() proc = subprocess.Popen(cmd, stdout=subprocess.PIPE) stdout = proc.communicate()[0] end = clock() time_info = format_time(start, end) # Return value of anything other than 0 indicates an error if proc.poll() != 0: click.secho('Error calling "{}".'.format(filename), fg='red') click.secho(time_info, fg='cyan') # Return None if option is not --verify-all, otherwise exit return sys.exit(1) if exit else None # Decode output if returned as bytes (Python 3) if isinstance(stdout, bytes): output = stdout.decode('ascii') # Split output lines into array; make empty output more readable output_lines = output.splitlines() if output else ['[no output]'] # If output is multi-lined, print the first line of the output on a # separate line from the "checking against solution" message, and # skip the solution check (multi-line solution won't be correct) if len(output_lines) > 1: is_correct = False click.echo() # force output to start on next line click.secho('\n'.join(output_lines), bold=True, fg='red') else: is_correct = output_lines[0] == solution fg_colour = 'green' if is_correct else 'red' click.secho(output_lines[0], bold=True, fg=fg_colour) click.secho(time_info, fg='cyan') # Remove any suffix from the filename if its solution is correct if is_correct: p.file.change_suffix('') # Exit here if answer was incorrect, otherwise return is_correct value return sys.exit(1) if exit and not is_correct else is_correct # --verify-all def euler_options(fn): """Decorator to link CLI options with their appropriate functions""" euler_functions = cheat, generate, preview, skip, verify, verify_all # Reverse functions to print help page options in alphabetical order for option in reversed(euler_functions): name, docstring = option.__name__, option.__doc__ kwargs = {'flag_value': option, 'help': docstring} # Apply flag(s) depending on whether or not name is a single word flag = '--%s' % name.replace('_', '-') flags = [flag] if '_' in name else [flag, '-%s' % name[0]] fn = click.option('option', *flags, **kwargs)(fn) return fn @click.command(name='euler', options_metavar='[OPTION]') @click.argument('problem', default=0, type=click.IntRange(0, None)) @euler_options @click.version_option(version=__version__, message="EulerPy %(version)s") def main(option, problem): """Python-based Project Euler command line tool.""" # No problem given (or given option ignores the problem argument) if problem == 0 or option in {skip, verify_all}: # Determine the highest problem number in the current directory files = problem_glob() problem = max(file.num for file in files) if files else 0 # No Project Euler files in current directory (no glob results) if problem == 0: # Generate the first problem file if option is appropriate if option not in {cheat, preview, verify_all}: msg = "No Project Euler files found in the current directory." 
click.echo(msg) option = generate # Set problem number to 1 problem = 1 # --preview and no problem; preview the next problem elif option is preview: problem += 1 # No option and no problem; generate next file if answer is # correct (verify() will exit if the solution is incorrect) if option is None: verify(problem) problem += 1 option = generate # Problem given but no option; decide between generate and verify elif option is None: option = verify if Problem(problem).glob else generate # Execute function based on option option(problem) sys.exit(0)
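The overview grid sizes itself with (num + 19) // 20, which is ceiling division by 20 so that problems 1..num always fit; a quick check of the arithmetic:

# Rows needed for problems 1..num, twenty per row (ceiling division).
for num in (1, 20, 21, 59, 200):
    print(num, '->', (num + 19) // 20)
# 1 -> 1, 20 -> 1, 21 -> 2, 59 -> 3, 200 -> 10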
iKevinY/EulerPy
EulerPy/euler.py
euler_options
python
def euler_options(fn):
    euler_functions = cheat, generate, preview, skip, verify, verify_all

    # Reverse functions to print help page options in alphabetical order
    for option in reversed(euler_functions):
        name, docstring = option.__name__, option.__doc__

        kwargs = {'flag_value': option, 'help': docstring}

        # Apply flag(s) depending on whether or not name is a single word
        flag = '--%s' % name.replace('_', '-')
        flags = [flag] if '_' in name else [flag, '-%s' % name[0]]

        fn = click.option('option', *flags, **kwargs)(fn)

    return fn
Decorator to link CLI options with their appropriate functions
train
https://github.com/iKevinY/EulerPy/blob/739c1c67fa7b32af9140ca51e4b4a07733e057a6/EulerPy/euler.py#L217-L232
null
# -*- coding: utf-8 -*- import os import sys import subprocess from collections import OrderedDict import click from EulerPy import __version__ from EulerPy.problem import Problem from EulerPy.utils import clock, format_time, problem_glob # --cheat / -c def cheat(num): """View the answer to a problem.""" # Define solution before echoing in case solution does not exist solution = click.style(Problem(num).solution, bold=True) click.confirm("View answer to problem %i?" % num, abort=True) click.echo("The answer to problem {} is {}.".format(num, solution)) # --generate / -g def generate(num, prompt_default=True): """Generates Python file for a problem.""" p = Problem(num) problem_text = p.text msg = "Generate file for problem %i?" % num click.confirm(msg, default=prompt_default, abort=True) # Allow skipped problem files to be recreated if p.glob: filename = str(p.file) msg = '"{}" already exists. Overwrite?'.format(filename) click.confirm(click.style(msg, fg='red'), abort=True) else: # Try to keep prefix consistent with existing files previous_file = Problem(num - 1).file prefix = previous_file.prefix if previous_file else '' filename = p.filename(prefix=prefix) header = 'Project Euler Problem %i' % num divider = '=' * len(header) text = '\n'.join([header, divider, '', problem_text]) content = '\n'.join(['"""', text, '"""']) with open(filename, 'w') as f: f.write(content + '\n\n\n') click.secho('Successfully created "{}".'.format(filename), fg='green') # Copy over problem resources if required if p.resources: p.copy_resources() # --preview / -p def preview(num): """Prints the text of a problem.""" # Define problem_text before echoing in case problem does not exist problem_text = Problem(num).text click.secho("Project Euler Problem %i" % num, bold=True) click.echo(problem_text) # --skip / -s def skip(num): """Generates Python file for the next problem.""" click.echo("Current problem is problem %i." % num) generate(num + 1, prompt_default=False) Problem(num).file.change_suffix('-skipped') # --verify / -v def verify(num, filename=None, exit=True): """Verifies the solution to a problem.""" p = Problem(num) filename = filename or p.filename() if not os.path.isfile(filename): # Attempt to verify the first problem file matched by glob if p.glob: filename = str(p.file) else: click.secho('No file found for problem %i.' 
% p.num, fg='red') sys.exit(1) solution = p.solution click.echo('Checking "{}" against solution: '.format(filename), nl=False) cmd = (sys.executable or 'python', filename) start = clock() proc = subprocess.Popen(cmd, stdout=subprocess.PIPE) stdout = proc.communicate()[0] end = clock() time_info = format_time(start, end) # Return value of anything other than 0 indicates an error if proc.poll() != 0: click.secho('Error calling "{}".'.format(filename), fg='red') click.secho(time_info, fg='cyan') # Return None if option is not --verify-all, otherwise exit return sys.exit(1) if exit else None # Decode output if returned as bytes (Python 3) if isinstance(stdout, bytes): output = stdout.decode('ascii') # Split output lines into array; make empty output more readable output_lines = output.splitlines() if output else ['[no output]'] # If output is multi-lined, print the first line of the output on a # separate line from the "checking against solution" message, and # skip the solution check (multi-line solution won't be correct) if len(output_lines) > 1: is_correct = False click.echo() # force output to start on next line click.secho('\n'.join(output_lines), bold=True, fg='red') else: is_correct = output_lines[0] == solution fg_colour = 'green' if is_correct else 'red' click.secho(output_lines[0], bold=True, fg=fg_colour) click.secho(time_info, fg='cyan') # Remove any suffix from the filename if its solution is correct if is_correct: p.file.change_suffix('') # Exit here if answer was incorrect, otherwise return is_correct value return sys.exit(1) if exit and not is_correct else is_correct # --verify-all def verify_all(num): """ Verifies all problem files in the current directory and prints an overview of the status of each problem. """ # Define various problem statuses keys = ('correct', 'incorrect', 'error', 'skipped', 'missing') symbols = ('C', 'I', 'E', 'S', '.') colours = ('green', 'red', 'yellow', 'cyan', 'white') status = OrderedDict( (key, click.style(symbol, fg=colour, bold=True)) for key, symbol, colour in zip(keys, symbols, colours) ) overview = {} # Search through problem files using glob module files = problem_glob() # No Project Euler files in the current directory if not files: click.echo("No Project Euler files found in the current directory.") sys.exit(1) for file in files: # Catch KeyboardInterrupt during verification to allow the user to # skip the verification of a specific problem if it takes too long try: is_correct = verify(file.num, filename=str(file), exit=False) except KeyboardInterrupt: overview[file.num] = status['skipped'] else: if is_correct is None: # error was returned by problem file overview[file.num] = status['error'] elif is_correct: overview[file.num] = status['correct'] elif not is_correct: overview[file.num] = status['incorrect'] # Attempt to add "skipped" suffix to the filename if the # problem file is not the current problem. 
This is useful # when the --verify-all is used in a directory containing # files generated pre-v1.1 (before files with suffixes) if file.num != num: file.change_suffix('-skipped') # Separate each verification with a newline click.echo() # Print overview of the status of each problem legend = ', '.join('{} = {}'.format(v, k) for k, v in status.items()) click.echo('-' * 63) click.echo(legend + '\n') # Rows needed for overview is based on the current problem number num_of_rows = (num + 19) // 20 for row in range(1, num_of_rows + 1): low, high = (row * 20) - 19, (row * 20) click.echo("Problems {:03d}-{:03d}: ".format(low, high), nl=False) for problem in range(low, high + 1): # Add missing status to problems with no corresponding file status = overview[problem] if problem in overview else '.' # Separate problem indicators into groups of 5 spacer = ' ' if (problem % 5 == 0) else ' ' # Start a new line at the end of each row click.secho(status + spacer, nl=(problem % 20 == 0)) click.echo() @click.command(name='euler', options_metavar='[OPTION]') @click.argument('problem', default=0, type=click.IntRange(0, None)) @euler_options @click.version_option(version=__version__, message="EulerPy %(version)s") def main(option, problem): """Python-based Project Euler command line tool.""" # No problem given (or given option ignores the problem argument) if problem == 0 or option in {skip, verify_all}: # Determine the highest problem number in the current directory files = problem_glob() problem = max(file.num for file in files) if files else 0 # No Project Euler files in current directory (no glob results) if problem == 0: # Generate the first problem file if option is appropriate if option not in {cheat, preview, verify_all}: msg = "No Project Euler files found in the current directory." click.echo(msg) option = generate # Set problem number to 1 problem = 1 # --preview and no problem; preview the next problem elif option is preview: problem += 1 # No option and no problem; generate next file if answer is # correct (verify() will exit if the solution is incorrect) if option is None: verify(problem) problem += 1 option = generate # Problem given but no option; decide between generate and verify elif option is None: option = verify if Problem(problem).glob else generate # Execute function based on option option(problem) sys.exit(0)
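euler_options wires every flag to the same click parameter name ('option'), so the flags behave as mutually exclusive feature switches and the parameter's value is the selected function object itself. A self-contained sketch of the same pattern with two toy commands (shout/whisper and cli are illustrative, not EulerPy functions):

import click

def shout(n):
    """Echo loudly."""
    click.echo("PROBLEM %i!" % n)

def whisper(n):
    """Echo quietly."""
    click.echo("problem %i" % n)

@click.command()
@click.option('action', '--shout', '-s', flag_value=shout, help=shout.__doc__)
@click.option('action', '--whisper', '-w', flag_value=whisper, help=whisper.__doc__)
@click.argument('num', default=1, type=int)
def cli(action, num):
    # Both flags write to 'action', so only one can win; its value is
    # the chosen function object, which is then called directly.
    if action:
        action(num)

if __name__ == '__main__':
    cli()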
iKevinY/EulerPy
EulerPy/euler.py
main
python
def main(option, problem):
    # No problem given (or given option ignores the problem argument)
    if problem == 0 or option in {skip, verify_all}:
        # Determine the highest problem number in the current directory
        files = problem_glob()
        problem = max(file.num for file in files) if files else 0

        # No Project Euler files in current directory (no glob results)
        if problem == 0:
            # Generate the first problem file if option is appropriate
            if option not in {cheat, preview, verify_all}:
                msg = "No Project Euler files found in the current directory."
                click.echo(msg)
                option = generate

            # Set problem number to 1
            problem = 1

        # --preview and no problem; preview the next problem
        elif option is preview:
            problem += 1

        # No option and no problem; generate next file if answer is
        # correct (verify() will exit if the solution is incorrect)
        if option is None:
            verify(problem)
            problem += 1
            option = generate

    # Problem given but no option; decide between generate and verify
    elif option is None:
        option = verify if Problem(problem).glob else generate

    # Execute function based on option
    option(problem)
    sys.exit(0)
Python-based Project Euler command line tool.
train
https://github.com/iKevinY/EulerPy/blob/739c1c67fa7b32af9140ca51e4b4a07733e057a6/EulerPy/euler.py#L239-L275
[ "def problem_glob(extension='.py'):\n \"\"\"Returns ProblemFile objects for all valid problem files\"\"\"\n filenames = glob.glob('*[0-9][0-9][0-9]*{}'.format(extension))\n return [ProblemFile(file) for file in filenames]\n", "def generate(num, prompt_default=True):\n \"\"\"Generates Python file for a problem.\"\"\"\n p = Problem(num)\n\n problem_text = p.text\n\n msg = \"Generate file for problem %i?\" % num\n click.confirm(msg, default=prompt_default, abort=True)\n\n # Allow skipped problem files to be recreated\n if p.glob:\n filename = str(p.file)\n msg = '\"{}\" already exists. Overwrite?'.format(filename)\n click.confirm(click.style(msg, fg='red'), abort=True)\n else:\n # Try to keep prefix consistent with existing files\n previous_file = Problem(num - 1).file\n prefix = previous_file.prefix if previous_file else ''\n filename = p.filename(prefix=prefix)\n\n header = 'Project Euler Problem %i' % num\n divider = '=' * len(header)\n text = '\\n'.join([header, divider, '', problem_text])\n content = '\\n'.join(['\"\"\"', text, '\"\"\"'])\n\n with open(filename, 'w') as f:\n f.write(content + '\\n\\n\\n')\n\n click.secho('Successfully created \"{}\".'.format(filename), fg='green')\n\n # Copy over problem resources if required\n if p.resources:\n p.copy_resources()\n", "def verify(num, filename=None, exit=True):\n \"\"\"Verifies the solution to a problem.\"\"\"\n p = Problem(num)\n\n filename = filename or p.filename()\n\n if not os.path.isfile(filename):\n # Attempt to verify the first problem file matched by glob\n if p.glob:\n filename = str(p.file)\n else:\n click.secho('No file found for problem %i.' % p.num, fg='red')\n sys.exit(1)\n\n solution = p.solution\n click.echo('Checking \"{}\" against solution: '.format(filename), nl=False)\n\n cmd = (sys.executable or 'python', filename)\n start = clock()\n proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)\n stdout = proc.communicate()[0]\n end = clock()\n time_info = format_time(start, end)\n\n # Return value of anything other than 0 indicates an error\n if proc.poll() != 0:\n click.secho('Error calling \"{}\".'.format(filename), fg='red')\n click.secho(time_info, fg='cyan')\n\n # Return None if option is not --verify-all, otherwise exit\n return sys.exit(1) if exit else None\n\n # Decode output if returned as bytes (Python 3)\n if isinstance(stdout, bytes):\n output = stdout.decode('ascii')\n\n # Split output lines into array; make empty output more readable\n output_lines = output.splitlines() if output else ['[no output]']\n\n # If output is multi-lined, print the first line of the output on a\n # separate line from the \"checking against solution\" message, and\n # skip the solution check (multi-line solution won't be correct)\n if len(output_lines) > 1:\n is_correct = False\n click.echo() # force output to start on next line\n click.secho('\\n'.join(output_lines), bold=True, fg='red')\n else:\n is_correct = output_lines[0] == solution\n fg_colour = 'green' if is_correct else 'red'\n click.secho(output_lines[0], bold=True, fg=fg_colour)\n\n click.secho(time_info, fg='cyan')\n\n # Remove any suffix from the filename if its solution is correct\n if is_correct:\n p.file.change_suffix('')\n\n # Exit here if answer was incorrect, otherwise return is_correct value\n return sys.exit(1) if exit and not is_correct else is_correct\n" ]
# -*- coding: utf-8 -*- import os import sys import subprocess from collections import OrderedDict import click from EulerPy import __version__ from EulerPy.problem import Problem from EulerPy.utils import clock, format_time, problem_glob # --cheat / -c def cheat(num): """View the answer to a problem.""" # Define solution before echoing in case solution does not exist solution = click.style(Problem(num).solution, bold=True) click.confirm("View answer to problem %i?" % num, abort=True) click.echo("The answer to problem {} is {}.".format(num, solution)) # --generate / -g def generate(num, prompt_default=True): """Generates Python file for a problem.""" p = Problem(num) problem_text = p.text msg = "Generate file for problem %i?" % num click.confirm(msg, default=prompt_default, abort=True) # Allow skipped problem files to be recreated if p.glob: filename = str(p.file) msg = '"{}" already exists. Overwrite?'.format(filename) click.confirm(click.style(msg, fg='red'), abort=True) else: # Try to keep prefix consistent with existing files previous_file = Problem(num - 1).file prefix = previous_file.prefix if previous_file else '' filename = p.filename(prefix=prefix) header = 'Project Euler Problem %i' % num divider = '=' * len(header) text = '\n'.join([header, divider, '', problem_text]) content = '\n'.join(['"""', text, '"""']) with open(filename, 'w') as f: f.write(content + '\n\n\n') click.secho('Successfully created "{}".'.format(filename), fg='green') # Copy over problem resources if required if p.resources: p.copy_resources() # --preview / -p def preview(num): """Prints the text of a problem.""" # Define problem_text before echoing in case problem does not exist problem_text = Problem(num).text click.secho("Project Euler Problem %i" % num, bold=True) click.echo(problem_text) # --skip / -s def skip(num): """Generates Python file for the next problem.""" click.echo("Current problem is problem %i." % num) generate(num + 1, prompt_default=False) Problem(num).file.change_suffix('-skipped') # --verify / -v def verify(num, filename=None, exit=True): """Verifies the solution to a problem.""" p = Problem(num) filename = filename or p.filename() if not os.path.isfile(filename): # Attempt to verify the first problem file matched by glob if p.glob: filename = str(p.file) else: click.secho('No file found for problem %i.' 
% p.num, fg='red') sys.exit(1) solution = p.solution click.echo('Checking "{}" against solution: '.format(filename), nl=False) cmd = (sys.executable or 'python', filename) start = clock() proc = subprocess.Popen(cmd, stdout=subprocess.PIPE) stdout = proc.communicate()[0] end = clock() time_info = format_time(start, end) # Return value of anything other than 0 indicates an error if proc.poll() != 0: click.secho('Error calling "{}".'.format(filename), fg='red') click.secho(time_info, fg='cyan') # Return None if option is not --verify-all, otherwise exit return sys.exit(1) if exit else None # Decode output if returned as bytes (Python 3) if isinstance(stdout, bytes): output = stdout.decode('ascii') # Split output lines into array; make empty output more readable output_lines = output.splitlines() if output else ['[no output]'] # If output is multi-lined, print the first line of the output on a # separate line from the "checking against solution" message, and # skip the solution check (multi-line solution won't be correct) if len(output_lines) > 1: is_correct = False click.echo() # force output to start on next line click.secho('\n'.join(output_lines), bold=True, fg='red') else: is_correct = output_lines[0] == solution fg_colour = 'green' if is_correct else 'red' click.secho(output_lines[0], bold=True, fg=fg_colour) click.secho(time_info, fg='cyan') # Remove any suffix from the filename if its solution is correct if is_correct: p.file.change_suffix('') # Exit here if answer was incorrect, otherwise return is_correct value return sys.exit(1) if exit and not is_correct else is_correct # --verify-all def verify_all(num): """ Verifies all problem files in the current directory and prints an overview of the status of each problem. """ # Define various problem statuses keys = ('correct', 'incorrect', 'error', 'skipped', 'missing') symbols = ('C', 'I', 'E', 'S', '.') colours = ('green', 'red', 'yellow', 'cyan', 'white') status = OrderedDict( (key, click.style(symbol, fg=colour, bold=True)) for key, symbol, colour in zip(keys, symbols, colours) ) overview = {} # Search through problem files using glob module files = problem_glob() # No Project Euler files in the current directory if not files: click.echo("No Project Euler files found in the current directory.") sys.exit(1) for file in files: # Catch KeyboardInterrupt during verification to allow the user to # skip the verification of a specific problem if it takes too long try: is_correct = verify(file.num, filename=str(file), exit=False) except KeyboardInterrupt: overview[file.num] = status['skipped'] else: if is_correct is None: # error was returned by problem file overview[file.num] = status['error'] elif is_correct: overview[file.num] = status['correct'] elif not is_correct: overview[file.num] = status['incorrect'] # Attempt to add "skipped" suffix to the filename if the # problem file is not the current problem. 
This is useful # when the --verify-all is used in a directory containing # files generated pre-v1.1 (before files with suffixes) if file.num != num: file.change_suffix('-skipped') # Separate each verification with a newline click.echo() # Print overview of the status of each problem legend = ', '.join('{} = {}'.format(v, k) for k, v in status.items()) click.echo('-' * 63) click.echo(legend + '\n') # Rows needed for overview is based on the current problem number num_of_rows = (num + 19) // 20 for row in range(1, num_of_rows + 1): low, high = (row * 20) - 19, (row * 20) click.echo("Problems {:03d}-{:03d}: ".format(low, high), nl=False) for problem in range(low, high + 1): # Add missing status to problems with no corresponding file status = overview[problem] if problem in overview else '.' # Separate problem indicators into groups of 5 spacer = ' ' if (problem % 5 == 0) else ' ' # Start a new line at the end of each row click.secho(status + spacer, nl=(problem % 20 == 0)) click.echo() def euler_options(fn): """Decorator to link CLI options with their appropriate functions""" euler_functions = cheat, generate, preview, skip, verify, verify_all # Reverse functions to print help page options in alphabetical order for option in reversed(euler_functions): name, docstring = option.__name__, option.__doc__ kwargs = {'flag_value': option, 'help': docstring} # Apply flag(s) depending on whether or not name is a single word flag = '--%s' % name.replace('_', '-') flags = [flag] if '_' in name else [flag, '-%s' % name[0]] fn = click.option('option', *flags, **kwargs)(fn) return fn @click.command(name='euler', options_metavar='[OPTION]') @click.argument('problem', default=0, type=click.IntRange(0, None)) @euler_options @click.version_option(version=__version__, message="EulerPy %(version)s")
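main() picks the working problem by taking the highest number among matching files. A hedged sketch of that detection using the same '*[0-9][0-9][0-9]*' glob pattern that problem_glob() is shown to use in the called_functions field (highest_problem is a hypothetical helper; EulerPy does this via problem_glob() and ProblemFile.num):

import glob
import re

def highest_problem(extension='.py'):
    # Extract the three-digit problem number from each matching
    # filename and take the maximum; 0 means no problem files exist.
    nums = [int(re.search(r'(\d{3})', name).group(1))
            for name in glob.glob('*[0-9][0-9][0-9]*' + extension)]
    return max(nums) if nums else 0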
shidenggui/easyquotation
easyquotation/helpers.py
update_stock_codes
python
def update_stock_codes():
    all_stock_codes_url = "http://www.shdjt.com/js/lib/astock.js"
    grep_stock_codes = re.compile(r"~(\d+)`")
    response = requests.get(all_stock_codes_url)
    all_stock_codes = grep_stock_codes.findall(response.text)
    with open(stock_code_path(), "w") as f:
        f.write(json.dumps(dict(stock=all_stock_codes)))
Fetch all stock IDs into the all_stock_code directory
train
https://github.com/shidenggui/easyquotation/blob/a75820db4f05f5386e1c1024d05b0bfc1de6cbda/easyquotation/helpers.py#L11-L18
[ "def stock_code_path():\n return os.path.join(os.path.dirname(__file__), STOCK_CODE_PATH)\n" ]
# coding:utf8
import json
import os
import re

import requests

STOCK_CODE_PATH = "stock_codes.conf"


def get_stock_codes(realtime=False):
    """Fetch all stock IDs into the all_stock_code directory"""
    if realtime:
        all_stock_codes_url = "http://www.shdjt.com/js/lib/astock.js"
        grep_stock_codes = re.compile(r"~(\d+)`")
        response = requests.get(all_stock_codes_url)
        stock_codes = grep_stock_codes.findall(response.text)
        with open(stock_code_path(), "w") as f:
            f.write(json.dumps(dict(stock=stock_codes)))
        return stock_codes

    with open(stock_code_path()) as f:
        return json.load(f)["stock"]


def stock_code_path():
    return os.path.join(os.path.dirname(__file__), STOCK_CODE_PATH)
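The scraper depends entirely on the ~123456` token shape inside astock.js; a quick check of the regex against a made-up sample string (real astock.js responses are much larger):

import re

grep_stock_codes = re.compile(r"~(\d+)`")
sample = "var astock='~600000`A~000001`B'"  # illustrative data only
print(grep_stock_codes.findall(sample))  # ['600000', '000001']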
shidenggui/easyquotation
easyquotation/helpers.py
get_stock_codes
python
def get_stock_codes(realtime=False):
    if realtime:
        all_stock_codes_url = "http://www.shdjt.com/js/lib/astock.js"
        grep_stock_codes = re.compile(r"~(\d+)`")
        response = requests.get(all_stock_codes_url)
        stock_codes = grep_stock_codes.findall(response.text)
        with open(stock_code_path(), "w") as f:
            f.write(json.dumps(dict(stock=stock_codes)))
        return stock_codes

    with open(stock_code_path()) as f:
        return json.load(f)["stock"]
Fetch all stock IDs into the all_stock_code directory
train
https://github.com/shidenggui/easyquotation/blob/a75820db4f05f5386e1c1024d05b0bfc1de6cbda/easyquotation/helpers.py#L21-L33
[ "def stock_code_path():\n return os.path.join(os.path.dirname(__file__), STOCK_CODE_PATH)\n" ]
# coding:utf8
import json
import os
import re

import requests

STOCK_CODE_PATH = "stock_codes.conf"


def update_stock_codes():
    """Fetch all stock IDs into the all_stock_code directory"""
    all_stock_codes_url = "http://www.shdjt.com/js/lib/astock.js"
    grep_stock_codes = re.compile(r"~(\d+)`")
    response = requests.get(all_stock_codes_url)
    all_stock_codes = grep_stock_codes.findall(response.text)
    with open(stock_code_path(), "w") as f:
        f.write(json.dumps(dict(stock=all_stock_codes)))


def stock_code_path():
    return os.path.join(os.path.dirname(__file__), STOCK_CODE_PATH)
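The cache written and read by get_stock_codes() is just a one-key JSON object. A roundtrip with dummy codes, written to a temporary file rather than the real stock_codes.conf (illustrative data only):

import json
import tempfile

with tempfile.NamedTemporaryFile('w', suffix='.conf', delete=False) as f:
    f.write(json.dumps(dict(stock=["600000", "000001"])))
    path = f.name

with open(path) as f:
    assert json.load(f)["stock"] == ["600000", "000001"]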
shidenggui/easyquotation
easyquotation/basequotation.py
BaseQuotation.real
python
def real(self, stock_codes, prefix=False):
    if not isinstance(stock_codes, list):
        stock_codes = [stock_codes]

    stock_list = self.gen_stock_list(stock_codes)
    return self.get_stock_data(stock_list, prefix=prefix)
return real-time quotations for specific stocks
:param stock_codes: a stock code or a list of stock codes; when prefix is True, each code must start with sh/sz
:param prefix: if prefix is True, stock_codes must contain the sh/sz market flag. If prefix is False, index quotations can't be returned
:return: a quotation dict whose keys are stock codes and whose values are real-time quotations. If prefix is True, keys start with the sh/sz market flag
train
https://github.com/shidenggui/easyquotation/blob/a75820db4f05f5386e1c1024d05b0bfc1de6cbda/easyquotation/basequotation.py#L73-L87
[ "def gen_stock_list(self, stock_codes):\n stock_with_exchange_list = self._gen_stock_prefix(stock_codes)\n\n if self.max_num > len(stock_with_exchange_list):\n request_list = \",\".join(stock_with_exchange_list)\n return [request_list]\n\n stock_list = []\n request_num = len(stock_codes) // (self.max_num + 1) + 1\n for range_start in range(request_num):\n num_start = self.max_num * range_start\n num_end = self.max_num * (range_start + 1)\n request_list = \",\".join(\n stock_with_exchange_list[num_start:num_end]\n )\n stock_list.append(request_list)\n return stock_list\n", "def get_stock_data(self, stock_list, **kwargs):\n \"\"\"获取并格式化股票信息\"\"\"\n res = self._fetch_stock_data(stock_list)\n return self.format_response_data(res, **kwargs)\n" ]
class BaseQuotation(metaclass=abc.ABCMeta): """行情获取基类""" max_num = 800 # 每次请求的最大股票数 @property @abc.abstractmethod def stock_api(self) -> str: """ 行情 api 地址 """ pass def __init__(self): self._session = requests.session() stock_codes = self.load_stock_codes() self.stock_list = self.gen_stock_list(stock_codes) def gen_stock_list(self, stock_codes): stock_with_exchange_list = self._gen_stock_prefix(stock_codes) if self.max_num > len(stock_with_exchange_list): request_list = ",".join(stock_with_exchange_list) return [request_list] stock_list = [] request_num = len(stock_codes) // (self.max_num + 1) + 1 for range_start in range(request_num): num_start = self.max_num * range_start num_end = self.max_num * (range_start + 1) request_list = ",".join( stock_with_exchange_list[num_start:num_end] ) stock_list.append(request_list) return stock_list def _gen_stock_prefix(self, stock_codes): return [ easyutils.stock.get_stock_type(code) + code[-6:] for code in stock_codes ] @staticmethod def load_stock_codes(): with open(helpers.stock_code_path()) as f: return json.load(f)["stock"] @property def all(self): warnings.warn("use market_snapshot instead", DeprecationWarning) return self.get_stock_data(self.stock_list) @property def all_market(self): """return quotation with stock_code prefix key""" return self.get_stock_data(self.stock_list, prefix=True) def stocks(self, stock_codes, prefix=False): return self.real(stock_codes, prefix) def market_snapshot(self, prefix=False): """return all market quotation snapshot :param prefix: if prefix is True, return quotation dict's stock_code key start with sh/sz market flag """ return self.get_stock_data(self.stock_list, prefix=prefix) def get_stocks_by_range(self, params): headers = { "Accept-Encoding": "gzip, deflate, sdch", "User-Agent": ( "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 " "(KHTML, like Gecko) Chrome/54.0.2840.100 " "Safari/537.36" ), } r = self._session.get(self.stock_api + params, headers=headers) return r.text def get_stock_data(self, stock_list, **kwargs): """获取并格式化股票信息""" res = self._fetch_stock_data(stock_list) return self.format_response_data(res, **kwargs) def _fetch_stock_data(self, stock_list): """获取股票信息""" pool = multiprocessing.pool.ThreadPool(len(stock_list)) try: res = pool.map(self.get_stocks_by_range, stock_list) finally: pool.close() return [d for d in res if d is not None] def format_response_data(self, rep_data, **kwargs): pass
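Under the hood real() leans on gen_stock_list(), which batches codes so no single request carries more than max_num (800) symbols. An equivalent compact form of that chunking (chunk is a hypothetical helper, not the library's method):

def chunk(codes, size=800):
    # One comma-joined request string per batch of at most `size` codes.
    return [",".join(codes[i:i + size]) for i in range(0, len(codes), size)]

# e.g. chunk(['sh600000'] * 1700) yields three request strings holding
# 800, 800 and 100 codes respectively.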
shidenggui/easyquotation
easyquotation/basequotation.py
BaseQuotation.market_snapshot
python
def market_snapshot(self, prefix=False):
    return self.get_stock_data(self.stock_list, prefix=prefix)
return a quotation snapshot of the whole market
:param prefix: if prefix is True, the returned quotation dict's stock_code keys start with the sh/sz market flag
train
https://github.com/shidenggui/easyquotation/blob/a75820db4f05f5386e1c1024d05b0bfc1de6cbda/easyquotation/basequotation.py#L89-L94
[ "def get_stock_data(self, stock_list, **kwargs):\n \"\"\"获取并格式化股票信息\"\"\"\n res = self._fetch_stock_data(stock_list)\n return self.format_response_data(res, **kwargs)\n" ]
class BaseQuotation(metaclass=abc.ABCMeta): """行情获取基类""" max_num = 800 # 每次请求的最大股票数 @property @abc.abstractmethod def stock_api(self) -> str: """ 行情 api 地址 """ pass def __init__(self): self._session = requests.session() stock_codes = self.load_stock_codes() self.stock_list = self.gen_stock_list(stock_codes) def gen_stock_list(self, stock_codes): stock_with_exchange_list = self._gen_stock_prefix(stock_codes) if self.max_num > len(stock_with_exchange_list): request_list = ",".join(stock_with_exchange_list) return [request_list] stock_list = [] request_num = len(stock_codes) // (self.max_num + 1) + 1 for range_start in range(request_num): num_start = self.max_num * range_start num_end = self.max_num * (range_start + 1) request_list = ",".join( stock_with_exchange_list[num_start:num_end] ) stock_list.append(request_list) return stock_list def _gen_stock_prefix(self, stock_codes): return [ easyutils.stock.get_stock_type(code) + code[-6:] for code in stock_codes ] @staticmethod def load_stock_codes(): with open(helpers.stock_code_path()) as f: return json.load(f)["stock"] @property def all(self): warnings.warn("use market_snapshot instead", DeprecationWarning) return self.get_stock_data(self.stock_list) @property def all_market(self): """return quotation with stock_code prefix key""" return self.get_stock_data(self.stock_list, prefix=True) def stocks(self, stock_codes, prefix=False): return self.real(stock_codes, prefix) def real(self, stock_codes, prefix=False): """return specific stocks real quotation :param stock_codes: stock code or list of stock code, when prefix is True, stock code must start with sh/sz :param prefix: if prefix i True, stock_codes must contain sh/sz market flag. If prefix is False, index quotation can't return :return quotation dict, key is stock_code, value is real quotation. If prefix with True, key start with sh/sz market flag """ if not isinstance(stock_codes, list): stock_codes = [stock_codes] stock_list = self.gen_stock_list(stock_codes) return self.get_stock_data(stock_list, prefix=prefix) def get_stocks_by_range(self, params): headers = { "Accept-Encoding": "gzip, deflate, sdch", "User-Agent": ( "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 " "(KHTML, like Gecko) Chrome/54.0.2840.100 " "Safari/537.36" ), } r = self._session.get(self.stock_api + params, headers=headers) return r.text def get_stock_data(self, stock_list, **kwargs): """获取并格式化股票信息""" res = self._fetch_stock_data(stock_list) return self.format_response_data(res, **kwargs) def _fetch_stock_data(self, stock_list): """获取股票信息""" pool = multiprocessing.pool.ThreadPool(len(stock_list)) try: res = pool.map(self.get_stocks_by_range, stock_list) finally: pool.close() return [d for d in res if d is not None] def format_response_data(self, rep_data, **kwargs): pass
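The prefixing behind the prefix flag mirrors _gen_stock_prefix(), which delegates market detection to easyutils. A sketch assuming easyutils is installed and that get_stock_type returns the market tag the keys are built from ("sh"/"sz" is inferred from usage, not confirmed by this record):

import easyutils

def with_market_prefix(codes):
    # easyutils maps a bare code to its market; the last six characters
    # of the input are the bare code itself.
    return [easyutils.stock.get_stock_type(code) + code[-6:]
            for code in codes]

# with_market_prefix(["600000", "000001"]) -> ["sh600000", "sz000001"]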
shidenggui/easyquotation
easyquotation/basequotation.py
BaseQuotation.get_stock_data
python
def get_stock_data(self, stock_list, **kwargs): res = self._fetch_stock_data(stock_list) return self.format_response_data(res, **kwargs)
Fetch and format stock quotation data
train
https://github.com/shidenggui/easyquotation/blob/a75820db4f05f5386e1c1024d05b0bfc1de6cbda/easyquotation/basequotation.py#L109-L112
[ "def _fetch_stock_data(self, stock_list):\n \"\"\"获取股票信息\"\"\"\n pool = multiprocessing.pool.ThreadPool(len(stock_list))\n try:\n res = pool.map(self.get_stocks_by_range, stock_list)\n finally:\n pool.close()\n return [d for d in res if d is not None]\n", "def format_response_data(self, rep_data, **kwargs):\n pass\n" ]
class BaseQuotation(metaclass=abc.ABCMeta): """行情获取基类""" max_num = 800 # 每次请求的最大股票数 @property @abc.abstractmethod def stock_api(self) -> str: """ 行情 api 地址 """ pass def __init__(self): self._session = requests.session() stock_codes = self.load_stock_codes() self.stock_list = self.gen_stock_list(stock_codes) def gen_stock_list(self, stock_codes): stock_with_exchange_list = self._gen_stock_prefix(stock_codes) if self.max_num > len(stock_with_exchange_list): request_list = ",".join(stock_with_exchange_list) return [request_list] stock_list = [] request_num = len(stock_codes) // (self.max_num + 1) + 1 for range_start in range(request_num): num_start = self.max_num * range_start num_end = self.max_num * (range_start + 1) request_list = ",".join( stock_with_exchange_list[num_start:num_end] ) stock_list.append(request_list) return stock_list def _gen_stock_prefix(self, stock_codes): return [ easyutils.stock.get_stock_type(code) + code[-6:] for code in stock_codes ] @staticmethod def load_stock_codes(): with open(helpers.stock_code_path()) as f: return json.load(f)["stock"] @property def all(self): warnings.warn("use market_snapshot instead", DeprecationWarning) return self.get_stock_data(self.stock_list) @property def all_market(self): """return quotation with stock_code prefix key""" return self.get_stock_data(self.stock_list, prefix=True) def stocks(self, stock_codes, prefix=False): return self.real(stock_codes, prefix) def real(self, stock_codes, prefix=False): """return specific stocks real quotation :param stock_codes: stock code or list of stock code, when prefix is True, stock code must start with sh/sz :param prefix: if prefix i True, stock_codes must contain sh/sz market flag. If prefix is False, index quotation can't return :return quotation dict, key is stock_code, value is real quotation. If prefix with True, key start with sh/sz market flag """ if not isinstance(stock_codes, list): stock_codes = [stock_codes] stock_list = self.gen_stock_list(stock_codes) return self.get_stock_data(stock_list, prefix=prefix) def market_snapshot(self, prefix=False): """return all market quotation snapshot :param prefix: if prefix is True, return quotation dict's stock_code key start with sh/sz market flag """ return self.get_stock_data(self.stock_list, prefix=prefix) def get_stocks_by_range(self, params): headers = { "Accept-Encoding": "gzip, deflate, sdch", "User-Agent": ( "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 " "(KHTML, like Gecko) Chrome/54.0.2840.100 " "Safari/537.36" ), } r = self._session.get(self.stock_api + params, headers=headers) return r.text def _fetch_stock_data(self, stock_list): """获取股票信息""" pool = multiprocessing.pool.ThreadPool(len(stock_list)) try: res = pool.map(self.get_stocks_by_range, stock_list) finally: pool.close() return [d for d in res if d is not None] def format_response_data(self, rep_data, **kwargs): pass
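get_stock_data is a small template method: fetch the raw batch payloads, then hand them to the format_response_data hook that each vendor-specific subclass overrides. A self-contained toy illustration of that shape (the class name and payload format are invented for the sketch):

# Toy illustration of the fetch-then-format pipeline; runnable offline.
class ToyQuotation:
    def _fetch_stock_data(self, stock_list):
        # stand-in for the threaded HTTP fetch; one raw string per batch URL
        return ['var hq_str_sh600000="浦发银行,10.0";' for _ in stock_list]

    def format_response_data(self, rep_data, **kwargs):
        # vendor-specific parsing hook: turn raw payloads into {code: fields}
        out = {}
        for chunk in rep_data:
            code = chunk.split("hq_str_")[1].split("=")[0]
            out[code] = chunk.split('"')[1].split(",")
        return out

    def get_stock_data(self, stock_list, **kwargs):
        # same shape as BaseQuotation.get_stock_data
        return self.format_response_data(self._fetch_stock_data(stock_list), **kwargs)

print(ToyQuotation().get_stock_data(["sh600000"]))  # {'sh600000': ['浦发银行', '10.0']}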
shidenggui/easyquotation
easyquotation/basequotation.py
BaseQuotation._fetch_stock_data
python
def _fetch_stock_data(self, stock_list): pool = multiprocessing.pool.ThreadPool(len(stock_list)) try: res = pool.map(self.get_stocks_by_range, stock_list) finally: pool.close() return [d for d in res if d is not None]
Fetch stock quotation data
train
https://github.com/shidenggui/easyquotation/blob/a75820db4f05f5386e1c1024d05b0bfc1de6cbda/easyquotation/basequotation.py#L114-L121
null
class BaseQuotation(metaclass=abc.ABCMeta): """行情获取基类""" max_num = 800 # 每次请求的最大股票数 @property @abc.abstractmethod def stock_api(self) -> str: """ 行情 api 地址 """ pass def __init__(self): self._session = requests.session() stock_codes = self.load_stock_codes() self.stock_list = self.gen_stock_list(stock_codes) def gen_stock_list(self, stock_codes): stock_with_exchange_list = self._gen_stock_prefix(stock_codes) if self.max_num > len(stock_with_exchange_list): request_list = ",".join(stock_with_exchange_list) return [request_list] stock_list = [] request_num = len(stock_codes) // (self.max_num + 1) + 1 for range_start in range(request_num): num_start = self.max_num * range_start num_end = self.max_num * (range_start + 1) request_list = ",".join( stock_with_exchange_list[num_start:num_end] ) stock_list.append(request_list) return stock_list def _gen_stock_prefix(self, stock_codes): return [ easyutils.stock.get_stock_type(code) + code[-6:] for code in stock_codes ] @staticmethod def load_stock_codes(): with open(helpers.stock_code_path()) as f: return json.load(f)["stock"] @property def all(self): warnings.warn("use market_snapshot instead", DeprecationWarning) return self.get_stock_data(self.stock_list) @property def all_market(self): """return quotation with stock_code prefix key""" return self.get_stock_data(self.stock_list, prefix=True) def stocks(self, stock_codes, prefix=False): return self.real(stock_codes, prefix) def real(self, stock_codes, prefix=False): """return specific stocks real quotation :param stock_codes: stock code or list of stock code, when prefix is True, stock code must start with sh/sz :param prefix: if prefix i True, stock_codes must contain sh/sz market flag. If prefix is False, index quotation can't return :return quotation dict, key is stock_code, value is real quotation. If prefix with True, key start with sh/sz market flag """ if not isinstance(stock_codes, list): stock_codes = [stock_codes] stock_list = self.gen_stock_list(stock_codes) return self.get_stock_data(stock_list, prefix=prefix) def market_snapshot(self, prefix=False): """return all market quotation snapshot :param prefix: if prefix is True, return quotation dict's stock_code key start with sh/sz market flag """ return self.get_stock_data(self.stock_list, prefix=prefix) def get_stocks_by_range(self, params): headers = { "Accept-Encoding": "gzip, deflate, sdch", "User-Agent": ( "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 " "(KHTML, like Gecko) Chrome/54.0.2840.100 " "Safari/537.36" ), } r = self._session.get(self.stock_api + params, headers=headers) return r.text def get_stock_data(self, stock_list, **kwargs): """获取并格式化股票信息""" res = self._fetch_stock_data(stock_list) return self.format_response_data(res, **kwargs) def format_response_data(self, rep_data, **kwargs): pass
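_fetch_stock_data starts one thread per batch URL via multiprocessing.pool.ThreadPool and drops failed (None) responses. The same pattern with a dummy fetcher, runnable offline:

import multiprocessing.pool

def fake_fetch(params):
    # stand-in for get_stocks_by_range; real fetches may yield None on failure
    return None if params == "bad" else "payload-for-" + params

batches = ["sh600000,sh600036", "bad", "sz000001"]
pool = multiprocessing.pool.ThreadPool(len(batches))
try:
    res = pool.map(fake_fetch, batches)  # one worker per batch, order preserved
finally:
    pool.close()
print([d for d in res if d is not None])  # failed batches are filtered out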
shidenggui/easyquotation
easyquotation/timekline.py
TimeKline._fetch_stock_data
python
def _fetch_stock_data(self, stock_list): res = super()._fetch_stock_data(stock_list) with_stock = [] for stock, resp in zip(stock_list, res): if resp is not None: with_stock.append((stock, resp)) return with_stock
Because the timekline response does not carry the corresponding stock codes, they have to be attached manually
train
https://github.com/shidenggui/easyquotation/blob/a75820db4f05f5386e1c1024d05b0bfc1de6cbda/easyquotation/timekline.py#L29-L37
null
class TimeKline(basequotation.BaseQuotation): """腾讯免费行情获取""" max_num = 1 @property def stock_api(self) -> str: return "http://data.gtimg.cn/flashdata/hushen/minute/" def _gen_stock_prefix(self, stock_codes): return [ easyutils.stock.get_stock_type(code) + code[-6:] + ".js" for code in stock_codes ] def format_response_data(self, rep_data, **kwargs): stock_dict = dict() for stock_code, stock_detail in rep_data: # pylint: disable=line-too-long # res like ['min_data="', 'date:180413', '0930 11.64 29727', '0931 11.65 52410'] res = re.split(r"\\n\\\n", stock_detail) date = "20{}".format(res[1][-6:]) time_data = list( d.split() for d in res[2:] if re.match(r"\d{4}", d) ) stock_dict[stock_code] = {"date": date, "time_data": time_data} return stock_dict
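Since pool.map preserves input order, zipping the request list against the responses is all the override above needs to re-attach each stock code. A tiny synthetic illustration:

# Synthetic illustration of TimeKline._fetch_stock_data's code/response pairing.
stock_list = ["sh600000.js", "sz000001.js", "sh600036.js"]
responses = ["minute-data-0", None, "minute-data-2"]  # one response per request

with_stock = [(code, resp) for code, resp in zip(stock_list, responses) if resp is not None]
print(with_stock)  # [('sh600000.js', 'minute-data-0'), ('sh600036.js', 'minute-data-2')]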
shidenggui/easyquotation
easyquotation/jsl.py
Jsl.formatfundajson
python
def formatfundajson(fundajson): result = {} for row in fundajson["rows"]: funda_id = row["id"] cell = row["cell"] result[funda_id] = cell return result
Format the JSON data returned by Jisilu and store it as a dict
train
https://github.com/shidenggui/easyquotation/blob/a75820db4f05f5386e1c1024d05b0bfc1de6cbda/easyquotation/jsl.py#L101-L108
null
class Jsl: """ 抓取集思路的分级A数据 """ # 分级A的接口 __funda_url = "http://www.jisilu.cn/data/sfnew/funda_list/?___t={ctime:d}" # 分级B的接口 __fundb_url = "http://www.jisilu.cn/data/sfnew/fundb_list/?___t={ctime:d}" # 母基接口 __fundm_url = "https://www.jisilu.cn/data/sfnew/fundm_list/?___t={ctime:d}" # 分级套利的接口 __fundarb_url = ( "http://www.jisilu.cn/data/sfnew/arbitrage_vip_list/?___t={ctime:d}" ) # 集思录登录接口 __jsl_login_url = "https://www.jisilu.cn/account/ajax/login_process/" # 集思录 ETF 接口 __etf_index_url = "https://www.jisilu.cn/jisiludata/etf.php?___t={ctime:d}" # 黄金 ETF , 货币 ETF 留坑,未完成 __etf_gold_url = ( "https://www.jisilu.cn/jisiludata/etf.php?qtype=pmetf&___t={ctime:d}" ) __etf_money_url = ( "https://www.jisilu.cn/data/money_fund/list/?___t={ctime:d}" ) # 集思录QDII接口 __qdii_url = "https://www.jisilu.cn/data/qdii/qdii_list/?___t={ctime:d}" # 可转债 __cb_url = "https://www.jisilu.cn/data/cbnew/cb_list/?___t={ctime:d}" # 分级A数据 # 返回的字典格式 # { 150022: # {'abrate': '5:5', # 'calc_info': None, # 'coupon_descr': '+3.0%', # 'coupon_descr_s': '+3.0%', # 'fund_descr': '每年第一个工作日定折,无下折,A不参与上折,净值<1元无定折', # 'funda_amount': 178823, # 'funda_amount_increase': '0', # 'funda_amount_increase_rt': '0.00%', # 'funda_base_est_dis_rt': '2.27%', # 'funda_base_est_dis_rt_t1': '2.27%', # 'funda_base_est_dis_rt_t2': '-0.34%', # 'funda_base_est_dis_rt_tip': '', # 'funda_base_fund_id': '163109', # 'funda_coupon': '5.75', # 'funda_coupon_next': '4.75', # 'funda_current_price': '0.783', # 'funda_discount_rt': '24.75%', # 'funda_id': '150022', # 'funda_increase_rt': '0.00%', # 'funda_index_id': '399001', # 'funda_index_increase_rt': '0.00%', # 'funda_index_name': '深证成指', # 'funda_left_year': '永续', # 'funda_lower_recalc_rt': '1.82%', # 'funda_name': '深成指A', # 'funda_nav_dt': '2015-09-14', # 'funda_profit_rt': '7.74%', # 'funda_profit_rt_next': '6.424%', # 'funda_value': '1.0405', # 'funda_volume': '0.00', # 'fundb_upper_recalc_rt': '244.35%', # 'fundb_upper_recalc_rt_info': '深成指A不参与上折', # 'last_time': '09:18:22', # 'left_recalc_year': '0.30411', # 'lower_recalc_profit_rt': '-', # 'next_recalc_dt': '<span style="font-style:italic">2016-01-04</span>', # 'owned': 0, # 'status_cd': 'N'} # } def __init__(self): self.__funda = None self.__fundm = None self.__fundb = None self.__fundarb = None self.__etfindex = None self.__qdii = None self.__cb = None @staticmethod @staticmethod def formatfundbjson(fundbjson): """格式化集思录返回的json数据,以字典形式保存""" result = {} for row in fundbjson["rows"]: cell = row["cell"] fundb_id = cell["fundb_id"] result[fundb_id] = cell return result @staticmethod def formatetfindexjson(fundbjson): """格式化集思录返回 指数ETF 的json数据,以字典形式保存""" result = {} for row in fundbjson["rows"]: cell = row["cell"] fundb_id = cell["fund_id"] result[fundb_id] = cell return result @staticmethod def formatjisilujson(data): result = {} for row in data["rows"]: cell = row["cell"] id_ = row["id"] result[id_] = cell return result @staticmethod def percentage2float(per): """ 将字符串的百分数转化为浮点数 :param per: :return: """ return float(per.strip("%")) / 100. 
def funda( self, fields=None, min_volume=0, min_discount=0, ignore_nodown=False, forever=False, ): """以字典形式返回分级A数据 :param fields:利率范围,形如['+3.0%', '6.0%'] :param min_volume:最小交易量,单位万元 :param min_discount:最小折价率, 单位% :param ignore_nodown:是否忽略无下折品种,默认 False :param forever: 是否选择永续品种,默认 False """ if fields is None: fields = [] # 添加当前的ctime self.__funda_url = self.__funda_url.format(ctime=int(time.time())) # 请求数据 rep = requests.get(self.__funda_url) # 获取返回的json字符串 fundajson = json.loads(rep.text) # 格式化返回的json字符串 data = self.formatfundajson(fundajson) # 过滤小于指定交易量的数据 if min_volume: data = { k: data[k] for k in data if float(data[k]["funda_volume"]) > min_volume } if len(fields): data = { k: data[k] for k in data if data[k]["coupon_descr_s"] in "".join(fields) } if ignore_nodown: data = { k: data[k] for k in data if data[k]["fund_descr"].find("无下折") == -1 } if forever: data = { k: data[k] for k in data if data[k]["funda_left_year"].find("永续") != -1 } if min_discount: data = { k: data[k] for k in data if float(data[k]["funda_discount_rt"][:-1]) > min_discount } self.__funda = data return self.__funda def fundm(self): """以字典形式返回分级母基数据 """ # 添加当前的ctime self.__fundm_url = self.__fundm_url.format(ctime=int(time.time())) # 请求数据 rep = requests.get(self.__fundm_url) # 获取返回的json字符串 fundmjson = json.loads(rep.text) # 格式化返回的json字符串 data = self.formatfundajson(fundmjson) self.__fundm = data return self.__fundm def fundb(self, fields=None, min_volume=0, min_discount=0, forever=False): """以字典形式返回分级B数据 :param fields:利率范围,形如['+3.0%', '6.0%'] :param min_volume:最小交易量,单位万元 :param min_discount:最小折价率, 单位% :param forever: 是否选择永续品种,默认 False """ if fields is None: fields = [] # 添加当前的ctime self.__fundb_url = self.__fundb_url.format(ctime=int(time.time())) # 请求数据 rep = requests.get(self.__fundb_url) # 获取返回的json字符串 fundbjson = json.loads(rep.text) # 格式化返回的json字符串 data = self.formatfundbjson(fundbjson) # 过滤小于指定交易量的数据 if min_volume: data = { k: data[k] for k in data if float(data[k]["fundb_volume"]) > min_volume } if len(fields): data = { k: data[k] for k in data if data[k]["coupon_descr_s"] in "".join(fields) } if forever: data = { k: data[k] for k in data if data[k]["fundb_left_year"].find("永续") != -1 } if min_discount: data = { k: data[k] for k in data if float(data[k]["fundb_discount_rt"][:-1]) > min_discount } self.__fundb = data return self.__fundb def fundarb( self, jsl_username, jsl_password, avolume=100, bvolume=100, ptype="price", ): """以字典形式返回分级A数据 :param jsl_username: 集思录用户名 :param jsl_password: 集思路登录密码 :param avolume: A成交额,单位百万 :param bvolume: B成交额,单位百万 :param ptype: 溢价计算方式,price=现价,buy=买一,sell=卖一 """ session = requests.session() headers = { # pylint: disable=line-too-long "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko" } session.headers.update(headers) logindata = dict( return_url="http://www.jisilu.cn/", user_name=jsl_username, password=jsl_password, net_auto_login="1", _post_type="ajax", ) rep = session.post(self.__jsl_login_url, data=logindata) if rep.json()["err"] is not None: return rep.json() # 添加当前的ctime fundarb_url = self.__fundarb_url.format(ctime=int(time.time())) pdata = dict( avolume=avolume, bvolume=bvolume, ptype=ptype, is_search="1", market=["sh", "sz"], rp="50", ) # 请求数据 rep = session.post(fundarb_url, data=pdata) # 获取返回的json字符串 fundajson = json.loads(rep.text) # 格式化返回的json字符串 data = self.formatfundajson(fundajson) self.__fundarb = data return self.__fundarb def etfindex( self, index_id="", min_volume=0, max_discount=None, min_discount=None ): """ 以字典形式返回 指数ETF 
数据 :param index_id: 获取指定的指数 :param min_volume: 最小成交量 :param min_discount: 最低溢价率, 适用于溢价套利, 格式 "-1.2%", "-1.2", -0.012 三种均可 :param max_discount: 最高溢价率, 适用于折价套利, 格式 "-1.2%", "-1.2", -0.012 三种均可 :return: {"fund_id":{}} """ # 添加当前的ctime self.__etf_index_url = self.__etf_index_url.format( ctime=int(time.time()) ) # 请求数据 rep = requests.get(self.__etf_index_url) # 获取返回的json字符串, 转化为字典 etf_json = rep.json() # 格式化返回的json字符串 data = self.formatetfindexjson(etf_json) # 过滤 if index_id: # 指定跟踪的指数代码 data = { fund_id: cell for fund_id, cell in data.items() if cell["index_id"] == index_id } if min_volume: # 过滤小于指定交易量的数据 data = { fund_id: cell for fund_id, cell in data.items() if float(cell["volume"]) >= min_volume } if min_discount is not None: # 指定最小溢价率 if isinstance(min_discount, str): if min_discount.endswith("%"): # 如果是字符串形式,先转为浮点形式 min_discount = self.percentage2float(min_discount) else: min_discount = float(min_discount) / 100. data = { fund_id: cell for fund_id, cell in data.items() if self.percentage2float(cell["discount_rt"]) >= min_discount } if max_discount is not None: # 指定最大溢价率 if isinstance(max_discount, str): if max_discount.endswith("%"): # 如果是字符串形式,先转为浮点形式 max_discount = self.percentage2float(max_discount) else: max_discount = float(max_discount) / 100. data = { fund_id: cell for fund_id, cell in data.items() if self.percentage2float(cell["discount_rt"]) <= max_discount } self.__etfindex = data return self.__etfindex def qdii(self, min_volume=0): """以字典形式返回QDII数据 :param min_volume:最小交易量,单位万元 """ # 添加当前的ctime self.__qdii_url = self.__qdii_url.format(ctime=int(time.time())) # 请求数据 rep = requests.get(self.__qdii_url) # 获取返回的json字符串 fundjson = json.loads(rep.text) # 格式化返回的json字符串 data = self.formatjisilujson(fundjson) data = {x: y for x, y in data.items() if y["notes"] != "估值有问题"} # 过滤小于指定交易量的数据 if min_volume: data = { k: data[k] for k in data if float(data[k]["volume"]) > min_volume } self.__qdii = data return self.__qdii # pylint: disable=invalid-name def cb(self, min_volume=0): """以字典形式返回QDII数据 :param min_volume:最小交易量,单位万元 """ # 添加当前的ctime self.__cb_url = self.__cb_url.format(ctime=int(time.time())) # 请求数据 rep = requests.get(self.__cb_url) # 获取返回的json字符串 fundjson = json.loads(rep.text) # 格式化返回的json字符串 data = self.formatjisilujson(fundjson) # 过滤小于指定交易量的数据 if min_volume: data = { k: data[k] for k in data if float(data[k]["volume"]) > min_volume } self.__cb = data return self.__cb
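formatfundajson simply re-keys Jisilu's grid rows by their row id. A runnable sketch on a synthetic payload (real responses carry many more cell fields):

# Synthetic Jisilu-style payload for illustration only.
fundajson = {
    "rows": [
        {"id": "150022", "cell": {"funda_name": "深成指A", "funda_volume": "0.00"}},
        {"id": "150051", "cell": {"funda_name": "申万收益", "funda_volume": "12.50"}},
    ]
}

result = {row["id"]: row["cell"] for row in fundajson["rows"]}
print(result["150022"]["funda_name"])  # 深成指A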
shidenggui/easyquotation
easyquotation/jsl.py
Jsl.formatfundbjson
python
def formatfundbjson(fundbjson): result = {} for row in fundbjson["rows"]: cell = row["cell"] fundb_id = cell["fundb_id"] result[fundb_id] = cell return result
Format the JSON data returned by Jisilu and store it as a dict
train
https://github.com/shidenggui/easyquotation/blob/a75820db4f05f5386e1c1024d05b0bfc1de6cbda/easyquotation/jsl.py#L111-L118
null
class Jsl: """ 抓取集思路的分级A数据 """ # 分级A的接口 __funda_url = "http://www.jisilu.cn/data/sfnew/funda_list/?___t={ctime:d}" # 分级B的接口 __fundb_url = "http://www.jisilu.cn/data/sfnew/fundb_list/?___t={ctime:d}" # 母基接口 __fundm_url = "https://www.jisilu.cn/data/sfnew/fundm_list/?___t={ctime:d}" # 分级套利的接口 __fundarb_url = ( "http://www.jisilu.cn/data/sfnew/arbitrage_vip_list/?___t={ctime:d}" ) # 集思录登录接口 __jsl_login_url = "https://www.jisilu.cn/account/ajax/login_process/" # 集思录 ETF 接口 __etf_index_url = "https://www.jisilu.cn/jisiludata/etf.php?___t={ctime:d}" # 黄金 ETF , 货币 ETF 留坑,未完成 __etf_gold_url = ( "https://www.jisilu.cn/jisiludata/etf.php?qtype=pmetf&___t={ctime:d}" ) __etf_money_url = ( "https://www.jisilu.cn/data/money_fund/list/?___t={ctime:d}" ) # 集思录QDII接口 __qdii_url = "https://www.jisilu.cn/data/qdii/qdii_list/?___t={ctime:d}" # 可转债 __cb_url = "https://www.jisilu.cn/data/cbnew/cb_list/?___t={ctime:d}" # 分级A数据 # 返回的字典格式 # { 150022: # {'abrate': '5:5', # 'calc_info': None, # 'coupon_descr': '+3.0%', # 'coupon_descr_s': '+3.0%', # 'fund_descr': '每年第一个工作日定折,无下折,A不参与上折,净值<1元无定折', # 'funda_amount': 178823, # 'funda_amount_increase': '0', # 'funda_amount_increase_rt': '0.00%', # 'funda_base_est_dis_rt': '2.27%', # 'funda_base_est_dis_rt_t1': '2.27%', # 'funda_base_est_dis_rt_t2': '-0.34%', # 'funda_base_est_dis_rt_tip': '', # 'funda_base_fund_id': '163109', # 'funda_coupon': '5.75', # 'funda_coupon_next': '4.75', # 'funda_current_price': '0.783', # 'funda_discount_rt': '24.75%', # 'funda_id': '150022', # 'funda_increase_rt': '0.00%', # 'funda_index_id': '399001', # 'funda_index_increase_rt': '0.00%', # 'funda_index_name': '深证成指', # 'funda_left_year': '永续', # 'funda_lower_recalc_rt': '1.82%', # 'funda_name': '深成指A', # 'funda_nav_dt': '2015-09-14', # 'funda_profit_rt': '7.74%', # 'funda_profit_rt_next': '6.424%', # 'funda_value': '1.0405', # 'funda_volume': '0.00', # 'fundb_upper_recalc_rt': '244.35%', # 'fundb_upper_recalc_rt_info': '深成指A不参与上折', # 'last_time': '09:18:22', # 'left_recalc_year': '0.30411', # 'lower_recalc_profit_rt': '-', # 'next_recalc_dt': '<span style="font-style:italic">2016-01-04</span>', # 'owned': 0, # 'status_cd': 'N'} # } def __init__(self): self.__funda = None self.__fundm = None self.__fundb = None self.__fundarb = None self.__etfindex = None self.__qdii = None self.__cb = None @staticmethod def formatfundajson(fundajson): """格式化集思录返回的json数据,以字典形式保存""" result = {} for row in fundajson["rows"]: funda_id = row["id"] cell = row["cell"] result[funda_id] = cell return result @staticmethod @staticmethod def formatetfindexjson(fundbjson): """格式化集思录返回 指数ETF 的json数据,以字典形式保存""" result = {} for row in fundbjson["rows"]: cell = row["cell"] fundb_id = cell["fund_id"] result[fundb_id] = cell return result @staticmethod def formatjisilujson(data): result = {} for row in data["rows"]: cell = row["cell"] id_ = row["id"] result[id_] = cell return result @staticmethod def percentage2float(per): """ 将字符串的百分数转化为浮点数 :param per: :return: """ return float(per.strip("%")) / 100. 
def funda( self, fields=None, min_volume=0, min_discount=0, ignore_nodown=False, forever=False, ): """以字典形式返回分级A数据 :param fields:利率范围,形如['+3.0%', '6.0%'] :param min_volume:最小交易量,单位万元 :param min_discount:最小折价率, 单位% :param ignore_nodown:是否忽略无下折品种,默认 False :param forever: 是否选择永续品种,默认 False """ if fields is None: fields = [] # 添加当前的ctime self.__funda_url = self.__funda_url.format(ctime=int(time.time())) # 请求数据 rep = requests.get(self.__funda_url) # 获取返回的json字符串 fundajson = json.loads(rep.text) # 格式化返回的json字符串 data = self.formatfundajson(fundajson) # 过滤小于指定交易量的数据 if min_volume: data = { k: data[k] for k in data if float(data[k]["funda_volume"]) > min_volume } if len(fields): data = { k: data[k] for k in data if data[k]["coupon_descr_s"] in "".join(fields) } if ignore_nodown: data = { k: data[k] for k in data if data[k]["fund_descr"].find("无下折") == -1 } if forever: data = { k: data[k] for k in data if data[k]["funda_left_year"].find("永续") != -1 } if min_discount: data = { k: data[k] for k in data if float(data[k]["funda_discount_rt"][:-1]) > min_discount } self.__funda = data return self.__funda def fundm(self): """以字典形式返回分级母基数据 """ # 添加当前的ctime self.__fundm_url = self.__fundm_url.format(ctime=int(time.time())) # 请求数据 rep = requests.get(self.__fundm_url) # 获取返回的json字符串 fundmjson = json.loads(rep.text) # 格式化返回的json字符串 data = self.formatfundajson(fundmjson) self.__fundm = data return self.__fundm def fundb(self, fields=None, min_volume=0, min_discount=0, forever=False): """以字典形式返回分级B数据 :param fields:利率范围,形如['+3.0%', '6.0%'] :param min_volume:最小交易量,单位万元 :param min_discount:最小折价率, 单位% :param forever: 是否选择永续品种,默认 False """ if fields is None: fields = [] # 添加当前的ctime self.__fundb_url = self.__fundb_url.format(ctime=int(time.time())) # 请求数据 rep = requests.get(self.__fundb_url) # 获取返回的json字符串 fundbjson = json.loads(rep.text) # 格式化返回的json字符串 data = self.formatfundbjson(fundbjson) # 过滤小于指定交易量的数据 if min_volume: data = { k: data[k] for k in data if float(data[k]["fundb_volume"]) > min_volume } if len(fields): data = { k: data[k] for k in data if data[k]["coupon_descr_s"] in "".join(fields) } if forever: data = { k: data[k] for k in data if data[k]["fundb_left_year"].find("永续") != -1 } if min_discount: data = { k: data[k] for k in data if float(data[k]["fundb_discount_rt"][:-1]) > min_discount } self.__fundb = data return self.__fundb def fundarb( self, jsl_username, jsl_password, avolume=100, bvolume=100, ptype="price", ): """以字典形式返回分级A数据 :param jsl_username: 集思录用户名 :param jsl_password: 集思路登录密码 :param avolume: A成交额,单位百万 :param bvolume: B成交额,单位百万 :param ptype: 溢价计算方式,price=现价,buy=买一,sell=卖一 """ session = requests.session() headers = { # pylint: disable=line-too-long "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko" } session.headers.update(headers) logindata = dict( return_url="http://www.jisilu.cn/", user_name=jsl_username, password=jsl_password, net_auto_login="1", _post_type="ajax", ) rep = session.post(self.__jsl_login_url, data=logindata) if rep.json()["err"] is not None: return rep.json() # 添加当前的ctime fundarb_url = self.__fundarb_url.format(ctime=int(time.time())) pdata = dict( avolume=avolume, bvolume=bvolume, ptype=ptype, is_search="1", market=["sh", "sz"], rp="50", ) # 请求数据 rep = session.post(fundarb_url, data=pdata) # 获取返回的json字符串 fundajson = json.loads(rep.text) # 格式化返回的json字符串 data = self.formatfundajson(fundajson) self.__fundarb = data return self.__fundarb def etfindex( self, index_id="", min_volume=0, max_discount=None, min_discount=None ): """ 以字典形式返回 指数ETF 
数据 :param index_id: 获取指定的指数 :param min_volume: 最小成交量 :param min_discount: 最低溢价率, 适用于溢价套利, 格式 "-1.2%", "-1.2", -0.012 三种均可 :param max_discount: 最高溢价率, 适用于折价套利, 格式 "-1.2%", "-1.2", -0.012 三种均可 :return: {"fund_id":{}} """ # 添加当前的ctime self.__etf_index_url = self.__etf_index_url.format( ctime=int(time.time()) ) # 请求数据 rep = requests.get(self.__etf_index_url) # 获取返回的json字符串, 转化为字典 etf_json = rep.json() # 格式化返回的json字符串 data = self.formatetfindexjson(etf_json) # 过滤 if index_id: # 指定跟踪的指数代码 data = { fund_id: cell for fund_id, cell in data.items() if cell["index_id"] == index_id } if min_volume: # 过滤小于指定交易量的数据 data = { fund_id: cell for fund_id, cell in data.items() if float(cell["volume"]) >= min_volume } if min_discount is not None: # 指定最小溢价率 if isinstance(min_discount, str): if min_discount.endswith("%"): # 如果是字符串形式,先转为浮点形式 min_discount = self.percentage2float(min_discount) else: min_discount = float(min_discount) / 100. data = { fund_id: cell for fund_id, cell in data.items() if self.percentage2float(cell["discount_rt"]) >= min_discount } if max_discount is not None: # 指定最大溢价率 if isinstance(max_discount, str): if max_discount.endswith("%"): # 如果是字符串形式,先转为浮点形式 max_discount = self.percentage2float(max_discount) else: max_discount = float(max_discount) / 100. data = { fund_id: cell for fund_id, cell in data.items() if self.percentage2float(cell["discount_rt"]) <= max_discount } self.__etfindex = data return self.__etfindex def qdii(self, min_volume=0): """以字典形式返回QDII数据 :param min_volume:最小交易量,单位万元 """ # 添加当前的ctime self.__qdii_url = self.__qdii_url.format(ctime=int(time.time())) # 请求数据 rep = requests.get(self.__qdii_url) # 获取返回的json字符串 fundjson = json.loads(rep.text) # 格式化返回的json字符串 data = self.formatjisilujson(fundjson) data = {x: y for x, y in data.items() if y["notes"] != "估值有问题"} # 过滤小于指定交易量的数据 if min_volume: data = { k: data[k] for k in data if float(data[k]["volume"]) > min_volume } self.__qdii = data return self.__qdii # pylint: disable=invalid-name def cb(self, min_volume=0): """以字典形式返回QDII数据 :param min_volume:最小交易量,单位万元 """ # 添加当前的ctime self.__cb_url = self.__cb_url.format(ctime=int(time.time())) # 请求数据 rep = requests.get(self.__cb_url) # 获取返回的json字符串 fundjson = json.loads(rep.text) # 格式化返回的json字符串 data = self.formatjisilujson(fundjson) # 过滤小于指定交易量的数据 if min_volume: data = { k: data[k] for k in data if float(data[k]["volume"]) > min_volume } self.__cb = data return self.__cb
shidenggui/easyquotation
easyquotation/jsl.py
Jsl.formatetfindexjson
python
def formatetfindexjson(fundbjson): result = {} for row in fundbjson["rows"]: cell = row["cell"] fundb_id = cell["fund_id"] result[fundb_id] = cell return result
Format the index-ETF JSON data returned by Jisilu and store it as a dict
train
https://github.com/shidenggui/easyquotation/blob/a75820db4f05f5386e1c1024d05b0bfc1de6cbda/easyquotation/jsl.py#L121-L128
null
class Jsl: """ 抓取集思路的分级A数据 """ # 分级A的接口 __funda_url = "http://www.jisilu.cn/data/sfnew/funda_list/?___t={ctime:d}" # 分级B的接口 __fundb_url = "http://www.jisilu.cn/data/sfnew/fundb_list/?___t={ctime:d}" # 母基接口 __fundm_url = "https://www.jisilu.cn/data/sfnew/fundm_list/?___t={ctime:d}" # 分级套利的接口 __fundarb_url = ( "http://www.jisilu.cn/data/sfnew/arbitrage_vip_list/?___t={ctime:d}" ) # 集思录登录接口 __jsl_login_url = "https://www.jisilu.cn/account/ajax/login_process/" # 集思录 ETF 接口 __etf_index_url = "https://www.jisilu.cn/jisiludata/etf.php?___t={ctime:d}" # 黄金 ETF , 货币 ETF 留坑,未完成 __etf_gold_url = ( "https://www.jisilu.cn/jisiludata/etf.php?qtype=pmetf&___t={ctime:d}" ) __etf_money_url = ( "https://www.jisilu.cn/data/money_fund/list/?___t={ctime:d}" ) # 集思录QDII接口 __qdii_url = "https://www.jisilu.cn/data/qdii/qdii_list/?___t={ctime:d}" # 可转债 __cb_url = "https://www.jisilu.cn/data/cbnew/cb_list/?___t={ctime:d}" # 分级A数据 # 返回的字典格式 # { 150022: # {'abrate': '5:5', # 'calc_info': None, # 'coupon_descr': '+3.0%', # 'coupon_descr_s': '+3.0%', # 'fund_descr': '每年第一个工作日定折,无下折,A不参与上折,净值<1元无定折', # 'funda_amount': 178823, # 'funda_amount_increase': '0', # 'funda_amount_increase_rt': '0.00%', # 'funda_base_est_dis_rt': '2.27%', # 'funda_base_est_dis_rt_t1': '2.27%', # 'funda_base_est_dis_rt_t2': '-0.34%', # 'funda_base_est_dis_rt_tip': '', # 'funda_base_fund_id': '163109', # 'funda_coupon': '5.75', # 'funda_coupon_next': '4.75', # 'funda_current_price': '0.783', # 'funda_discount_rt': '24.75%', # 'funda_id': '150022', # 'funda_increase_rt': '0.00%', # 'funda_index_id': '399001', # 'funda_index_increase_rt': '0.00%', # 'funda_index_name': '深证成指', # 'funda_left_year': '永续', # 'funda_lower_recalc_rt': '1.82%', # 'funda_name': '深成指A', # 'funda_nav_dt': '2015-09-14', # 'funda_profit_rt': '7.74%', # 'funda_profit_rt_next': '6.424%', # 'funda_value': '1.0405', # 'funda_volume': '0.00', # 'fundb_upper_recalc_rt': '244.35%', # 'fundb_upper_recalc_rt_info': '深成指A不参与上折', # 'last_time': '09:18:22', # 'left_recalc_year': '0.30411', # 'lower_recalc_profit_rt': '-', # 'next_recalc_dt': '<span style="font-style:italic">2016-01-04</span>', # 'owned': 0, # 'status_cd': 'N'} # } def __init__(self): self.__funda = None self.__fundm = None self.__fundb = None self.__fundarb = None self.__etfindex = None self.__qdii = None self.__cb = None @staticmethod def formatfundajson(fundajson): """格式化集思录返回的json数据,以字典形式保存""" result = {} for row in fundajson["rows"]: funda_id = row["id"] cell = row["cell"] result[funda_id] = cell return result @staticmethod def formatfundbjson(fundbjson): """格式化集思录返回的json数据,以字典形式保存""" result = {} for row in fundbjson["rows"]: cell = row["cell"] fundb_id = cell["fundb_id"] result[fundb_id] = cell return result @staticmethod @staticmethod def formatjisilujson(data): result = {} for row in data["rows"]: cell = row["cell"] id_ = row["id"] result[id_] = cell return result @staticmethod def percentage2float(per): """ 将字符串的百分数转化为浮点数 :param per: :return: """ return float(per.strip("%")) / 100. 
def funda( self, fields=None, min_volume=0, min_discount=0, ignore_nodown=False, forever=False, ): """以字典形式返回分级A数据 :param fields:利率范围,形如['+3.0%', '6.0%'] :param min_volume:最小交易量,单位万元 :param min_discount:最小折价率, 单位% :param ignore_nodown:是否忽略无下折品种,默认 False :param forever: 是否选择永续品种,默认 False """ if fields is None: fields = [] # 添加当前的ctime self.__funda_url = self.__funda_url.format(ctime=int(time.time())) # 请求数据 rep = requests.get(self.__funda_url) # 获取返回的json字符串 fundajson = json.loads(rep.text) # 格式化返回的json字符串 data = self.formatfundajson(fundajson) # 过滤小于指定交易量的数据 if min_volume: data = { k: data[k] for k in data if float(data[k]["funda_volume"]) > min_volume } if len(fields): data = { k: data[k] for k in data if data[k]["coupon_descr_s"] in "".join(fields) } if ignore_nodown: data = { k: data[k] for k in data if data[k]["fund_descr"].find("无下折") == -1 } if forever: data = { k: data[k] for k in data if data[k]["funda_left_year"].find("永续") != -1 } if min_discount: data = { k: data[k] for k in data if float(data[k]["funda_discount_rt"][:-1]) > min_discount } self.__funda = data return self.__funda def fundm(self): """以字典形式返回分级母基数据 """ # 添加当前的ctime self.__fundm_url = self.__fundm_url.format(ctime=int(time.time())) # 请求数据 rep = requests.get(self.__fundm_url) # 获取返回的json字符串 fundmjson = json.loads(rep.text) # 格式化返回的json字符串 data = self.formatfundajson(fundmjson) self.__fundm = data return self.__fundm def fundb(self, fields=None, min_volume=0, min_discount=0, forever=False): """以字典形式返回分级B数据 :param fields:利率范围,形如['+3.0%', '6.0%'] :param min_volume:最小交易量,单位万元 :param min_discount:最小折价率, 单位% :param forever: 是否选择永续品种,默认 False """ if fields is None: fields = [] # 添加当前的ctime self.__fundb_url = self.__fundb_url.format(ctime=int(time.time())) # 请求数据 rep = requests.get(self.__fundb_url) # 获取返回的json字符串 fundbjson = json.loads(rep.text) # 格式化返回的json字符串 data = self.formatfundbjson(fundbjson) # 过滤小于指定交易量的数据 if min_volume: data = { k: data[k] for k in data if float(data[k]["fundb_volume"]) > min_volume } if len(fields): data = { k: data[k] for k in data if data[k]["coupon_descr_s"] in "".join(fields) } if forever: data = { k: data[k] for k in data if data[k]["fundb_left_year"].find("永续") != -1 } if min_discount: data = { k: data[k] for k in data if float(data[k]["fundb_discount_rt"][:-1]) > min_discount } self.__fundb = data return self.__fundb def fundarb( self, jsl_username, jsl_password, avolume=100, bvolume=100, ptype="price", ): """以字典形式返回分级A数据 :param jsl_username: 集思录用户名 :param jsl_password: 集思路登录密码 :param avolume: A成交额,单位百万 :param bvolume: B成交额,单位百万 :param ptype: 溢价计算方式,price=现价,buy=买一,sell=卖一 """ session = requests.session() headers = { # pylint: disable=line-too-long "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko" } session.headers.update(headers) logindata = dict( return_url="http://www.jisilu.cn/", user_name=jsl_username, password=jsl_password, net_auto_login="1", _post_type="ajax", ) rep = session.post(self.__jsl_login_url, data=logindata) if rep.json()["err"] is not None: return rep.json() # 添加当前的ctime fundarb_url = self.__fundarb_url.format(ctime=int(time.time())) pdata = dict( avolume=avolume, bvolume=bvolume, ptype=ptype, is_search="1", market=["sh", "sz"], rp="50", ) # 请求数据 rep = session.post(fundarb_url, data=pdata) # 获取返回的json字符串 fundajson = json.loads(rep.text) # 格式化返回的json字符串 data = self.formatfundajson(fundajson) self.__fundarb = data return self.__fundarb def etfindex( self, index_id="", min_volume=0, max_discount=None, min_discount=None ): """ 以字典形式返回 指数ETF 
数据 :param index_id: 获取指定的指数 :param min_volume: 最小成交量 :param min_discount: 最低溢价率, 适用于溢价套利, 格式 "-1.2%", "-1.2", -0.012 三种均可 :param max_discount: 最高溢价率, 适用于折价套利, 格式 "-1.2%", "-1.2", -0.012 三种均可 :return: {"fund_id":{}} """ # 添加当前的ctime self.__etf_index_url = self.__etf_index_url.format( ctime=int(time.time()) ) # 请求数据 rep = requests.get(self.__etf_index_url) # 获取返回的json字符串, 转化为字典 etf_json = rep.json() # 格式化返回的json字符串 data = self.formatetfindexjson(etf_json) # 过滤 if index_id: # 指定跟踪的指数代码 data = { fund_id: cell for fund_id, cell in data.items() if cell["index_id"] == index_id } if min_volume: # 过滤小于指定交易量的数据 data = { fund_id: cell for fund_id, cell in data.items() if float(cell["volume"]) >= min_volume } if min_discount is not None: # 指定最小溢价率 if isinstance(min_discount, str): if min_discount.endswith("%"): # 如果是字符串形式,先转为浮点形式 min_discount = self.percentage2float(min_discount) else: min_discount = float(min_discount) / 100. data = { fund_id: cell for fund_id, cell in data.items() if self.percentage2float(cell["discount_rt"]) >= min_discount } if max_discount is not None: # 指定最大溢价率 if isinstance(max_discount, str): if max_discount.endswith("%"): # 如果是字符串形式,先转为浮点形式 max_discount = self.percentage2float(max_discount) else: max_discount = float(max_discount) / 100. data = { fund_id: cell for fund_id, cell in data.items() if self.percentage2float(cell["discount_rt"]) <= max_discount } self.__etfindex = data return self.__etfindex def qdii(self, min_volume=0): """以字典形式返回QDII数据 :param min_volume:最小交易量,单位万元 """ # 添加当前的ctime self.__qdii_url = self.__qdii_url.format(ctime=int(time.time())) # 请求数据 rep = requests.get(self.__qdii_url) # 获取返回的json字符串 fundjson = json.loads(rep.text) # 格式化返回的json字符串 data = self.formatjisilujson(fundjson) data = {x: y for x, y in data.items() if y["notes"] != "估值有问题"} # 过滤小于指定交易量的数据 if min_volume: data = { k: data[k] for k in data if float(data[k]["volume"]) > min_volume } self.__qdii = data return self.__qdii # pylint: disable=invalid-name def cb(self, min_volume=0): """以字典形式返回QDII数据 :param min_volume:最小交易量,单位万元 """ # 添加当前的ctime self.__cb_url = self.__cb_url.format(ctime=int(time.time())) # 请求数据 rep = requests.get(self.__cb_url) # 获取返回的json字符串 fundjson = json.loads(rep.text) # 格式化返回的json字符串 data = self.formatjisilujson(fundjson) # 过滤小于指定交易量的数据 if min_volume: data = { k: data[k] for k in data if float(data[k]["volume"]) > min_volume } self.__cb = data return self.__cb
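Note the asymmetry among the three formatters: formatfundajson keys on row["id"], while the B-share and index-ETF variants pull the key out of the cell itself (fundb_id / fund_id). A minimal sketch of the ETF-style re-keying on synthetic data:

# Synthetic rows; the outer row id is ignored by the ETF formatter.
rows = [{"id": "ignored", "cell": {"fund_id": "510300", "fund_nm": "300ETF"}}]
etf = {row["cell"]["fund_id"]: row["cell"] for row in rows}
print(etf)  # {'510300': {'fund_id': '510300', 'fund_nm': '300ETF'}}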
shidenggui/easyquotation
easyquotation/jsl.py
Jsl.funda
python
def funda(
    self,
    fields=None,
    min_volume=0,
    min_discount=0,
    ignore_nodown=False,
    forever=False,
):
    if fields is None:
        fields = []
    # append the current ctime (cache-buster timestamp)
    self.__funda_url = self.__funda_url.format(ctime=int(time.time()))
    # request the data
    rep = requests.get(self.__funda_url)
    # parse the returned JSON string
    fundajson = json.loads(rep.text)
    # format the returned JSON into a dict
    data = self.formatfundajson(fundajson)
    # drop entries whose turnover is below the threshold
    if min_volume:
        data = {
            k: data[k]
            for k in data
            if float(data[k]["funda_volume"]) > min_volume
        }
    if len(fields):
        data = {
            k: data[k]
            for k in data
            if data[k]["coupon_descr_s"] in "".join(fields)
        }
    if ignore_nodown:
        data = {
            k: data[k]
            for k in data
            if data[k]["fund_descr"].find("无下折") == -1
        }
    if forever:
        data = {
            k: data[k]
            for k in data
            if data[k]["funda_left_year"].find("永续") != -1
        }
    if min_discount:
        data = {
            k: data[k]
            for k in data
            if float(data[k]["funda_discount_rt"][:-1]) > min_discount
        }
    self.__funda = data
    return self.__funda
Return tier-A structured-fund data as a dict :param fields: coupon range, e.g. ['+3.0%', '6.0%'] :param min_volume: minimum turnover, in units of 10,000 CNY :param min_discount: minimum discount rate, in % :param ignore_nodown: whether to ignore funds without a downward-reset clause, default False :param forever: whether to select only perpetual funds, default False
train
https://github.com/shidenggui/easyquotation/blob/a75820db4f05f5386e1c1024d05b0bfc1de6cbda/easyquotation/jsl.py#L148-L207
[ "def formatfundajson(fundajson):\n \"\"\"格式化集思录返回的json数据,以字典形式保存\"\"\"\n result = {}\n for row in fundajson[\"rows\"]:\n funda_id = row[\"id\"]\n cell = row[\"cell\"]\n result[funda_id] = cell\n return result\n" ]
class Jsl: """ 抓取集思路的分级A数据 """ # 分级A的接口 __funda_url = "http://www.jisilu.cn/data/sfnew/funda_list/?___t={ctime:d}" # 分级B的接口 __fundb_url = "http://www.jisilu.cn/data/sfnew/fundb_list/?___t={ctime:d}" # 母基接口 __fundm_url = "https://www.jisilu.cn/data/sfnew/fundm_list/?___t={ctime:d}" # 分级套利的接口 __fundarb_url = ( "http://www.jisilu.cn/data/sfnew/arbitrage_vip_list/?___t={ctime:d}" ) # 集思录登录接口 __jsl_login_url = "https://www.jisilu.cn/account/ajax/login_process/" # 集思录 ETF 接口 __etf_index_url = "https://www.jisilu.cn/jisiludata/etf.php?___t={ctime:d}" # 黄金 ETF , 货币 ETF 留坑,未完成 __etf_gold_url = ( "https://www.jisilu.cn/jisiludata/etf.php?qtype=pmetf&___t={ctime:d}" ) __etf_money_url = ( "https://www.jisilu.cn/data/money_fund/list/?___t={ctime:d}" ) # 集思录QDII接口 __qdii_url = "https://www.jisilu.cn/data/qdii/qdii_list/?___t={ctime:d}" # 可转债 __cb_url = "https://www.jisilu.cn/data/cbnew/cb_list/?___t={ctime:d}" # 分级A数据 # 返回的字典格式 # { 150022: # {'abrate': '5:5', # 'calc_info': None, # 'coupon_descr': '+3.0%', # 'coupon_descr_s': '+3.0%', # 'fund_descr': '每年第一个工作日定折,无下折,A不参与上折,净值<1元无定折', # 'funda_amount': 178823, # 'funda_amount_increase': '0', # 'funda_amount_increase_rt': '0.00%', # 'funda_base_est_dis_rt': '2.27%', # 'funda_base_est_dis_rt_t1': '2.27%', # 'funda_base_est_dis_rt_t2': '-0.34%', # 'funda_base_est_dis_rt_tip': '', # 'funda_base_fund_id': '163109', # 'funda_coupon': '5.75', # 'funda_coupon_next': '4.75', # 'funda_current_price': '0.783', # 'funda_discount_rt': '24.75%', # 'funda_id': '150022', # 'funda_increase_rt': '0.00%', # 'funda_index_id': '399001', # 'funda_index_increase_rt': '0.00%', # 'funda_index_name': '深证成指', # 'funda_left_year': '永续', # 'funda_lower_recalc_rt': '1.82%', # 'funda_name': '深成指A', # 'funda_nav_dt': '2015-09-14', # 'funda_profit_rt': '7.74%', # 'funda_profit_rt_next': '6.424%', # 'funda_value': '1.0405', # 'funda_volume': '0.00', # 'fundb_upper_recalc_rt': '244.35%', # 'fundb_upper_recalc_rt_info': '深成指A不参与上折', # 'last_time': '09:18:22', # 'left_recalc_year': '0.30411', # 'lower_recalc_profit_rt': '-', # 'next_recalc_dt': '<span style="font-style:italic">2016-01-04</span>', # 'owned': 0, # 'status_cd': 'N'} # } def __init__(self): self.__funda = None self.__fundm = None self.__fundb = None self.__fundarb = None self.__etfindex = None self.__qdii = None self.__cb = None @staticmethod def formatfundajson(fundajson): """格式化集思录返回的json数据,以字典形式保存""" result = {} for row in fundajson["rows"]: funda_id = row["id"] cell = row["cell"] result[funda_id] = cell return result @staticmethod def formatfundbjson(fundbjson): """格式化集思录返回的json数据,以字典形式保存""" result = {} for row in fundbjson["rows"]: cell = row["cell"] fundb_id = cell["fundb_id"] result[fundb_id] = cell return result @staticmethod def formatetfindexjson(fundbjson): """格式化集思录返回 指数ETF 的json数据,以字典形式保存""" result = {} for row in fundbjson["rows"]: cell = row["cell"] fundb_id = cell["fund_id"] result[fundb_id] = cell return result @staticmethod def formatjisilujson(data): result = {} for row in data["rows"]: cell = row["cell"] id_ = row["id"] result[id_] = cell return result @staticmethod def percentage2float(per): """ 将字符串的百分数转化为浮点数 :param per: :return: """ return float(per.strip("%")) / 100. 
def fundm(self): """以字典形式返回分级母基数据 """ # 添加当前的ctime self.__fundm_url = self.__fundm_url.format(ctime=int(time.time())) # 请求数据 rep = requests.get(self.__fundm_url) # 获取返回的json字符串 fundmjson = json.loads(rep.text) # 格式化返回的json字符串 data = self.formatfundajson(fundmjson) self.__fundm = data return self.__fundm def fundb(self, fields=None, min_volume=0, min_discount=0, forever=False): """以字典形式返回分级B数据 :param fields:利率范围,形如['+3.0%', '6.0%'] :param min_volume:最小交易量,单位万元 :param min_discount:最小折价率, 单位% :param forever: 是否选择永续品种,默认 False """ if fields is None: fields = [] # 添加当前的ctime self.__fundb_url = self.__fundb_url.format(ctime=int(time.time())) # 请求数据 rep = requests.get(self.__fundb_url) # 获取返回的json字符串 fundbjson = json.loads(rep.text) # 格式化返回的json字符串 data = self.formatfundbjson(fundbjson) # 过滤小于指定交易量的数据 if min_volume: data = { k: data[k] for k in data if float(data[k]["fundb_volume"]) > min_volume } if len(fields): data = { k: data[k] for k in data if data[k]["coupon_descr_s"] in "".join(fields) } if forever: data = { k: data[k] for k in data if data[k]["fundb_left_year"].find("永续") != -1 } if min_discount: data = { k: data[k] for k in data if float(data[k]["fundb_discount_rt"][:-1]) > min_discount } self.__fundb = data return self.__fundb def fundarb( self, jsl_username, jsl_password, avolume=100, bvolume=100, ptype="price", ): """以字典形式返回分级A数据 :param jsl_username: 集思录用户名 :param jsl_password: 集思路登录密码 :param avolume: A成交额,单位百万 :param bvolume: B成交额,单位百万 :param ptype: 溢价计算方式,price=现价,buy=买一,sell=卖一 """ session = requests.session() headers = { # pylint: disable=line-too-long "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko" } session.headers.update(headers) logindata = dict( return_url="http://www.jisilu.cn/", user_name=jsl_username, password=jsl_password, net_auto_login="1", _post_type="ajax", ) rep = session.post(self.__jsl_login_url, data=logindata) if rep.json()["err"] is not None: return rep.json() # 添加当前的ctime fundarb_url = self.__fundarb_url.format(ctime=int(time.time())) pdata = dict( avolume=avolume, bvolume=bvolume, ptype=ptype, is_search="1", market=["sh", "sz"], rp="50", ) # 请求数据 rep = session.post(fundarb_url, data=pdata) # 获取返回的json字符串 fundajson = json.loads(rep.text) # 格式化返回的json字符串 data = self.formatfundajson(fundajson) self.__fundarb = data return self.__fundarb def etfindex( self, index_id="", min_volume=0, max_discount=None, min_discount=None ): """ 以字典形式返回 指数ETF 数据 :param index_id: 获取指定的指数 :param min_volume: 最小成交量 :param min_discount: 最低溢价率, 适用于溢价套利, 格式 "-1.2%", "-1.2", -0.012 三种均可 :param max_discount: 最高溢价率, 适用于折价套利, 格式 "-1.2%", "-1.2", -0.012 三种均可 :return: {"fund_id":{}} """ # 添加当前的ctime self.__etf_index_url = self.__etf_index_url.format( ctime=int(time.time()) ) # 请求数据 rep = requests.get(self.__etf_index_url) # 获取返回的json字符串, 转化为字典 etf_json = rep.json() # 格式化返回的json字符串 data = self.formatetfindexjson(etf_json) # 过滤 if index_id: # 指定跟踪的指数代码 data = { fund_id: cell for fund_id, cell in data.items() if cell["index_id"] == index_id } if min_volume: # 过滤小于指定交易量的数据 data = { fund_id: cell for fund_id, cell in data.items() if float(cell["volume"]) >= min_volume } if min_discount is not None: # 指定最小溢价率 if isinstance(min_discount, str): if min_discount.endswith("%"): # 如果是字符串形式,先转为浮点形式 min_discount = self.percentage2float(min_discount) else: min_discount = float(min_discount) / 100. 
data = { fund_id: cell for fund_id, cell in data.items() if self.percentage2float(cell["discount_rt"]) >= min_discount } if max_discount is not None: # 指定最大溢价率 if isinstance(max_discount, str): if max_discount.endswith("%"): # 如果是字符串形式,先转为浮点形式 max_discount = self.percentage2float(max_discount) else: max_discount = float(max_discount) / 100. data = { fund_id: cell for fund_id, cell in data.items() if self.percentage2float(cell["discount_rt"]) <= max_discount } self.__etfindex = data return self.__etfindex def qdii(self, min_volume=0): """以字典形式返回QDII数据 :param min_volume:最小交易量,单位万元 """ # 添加当前的ctime self.__qdii_url = self.__qdii_url.format(ctime=int(time.time())) # 请求数据 rep = requests.get(self.__qdii_url) # 获取返回的json字符串 fundjson = json.loads(rep.text) # 格式化返回的json字符串 data = self.formatjisilujson(fundjson) data = {x: y for x, y in data.items() if y["notes"] != "估值有问题"} # 过滤小于指定交易量的数据 if min_volume: data = { k: data[k] for k in data if float(data[k]["volume"]) > min_volume } self.__qdii = data return self.__qdii # pylint: disable=invalid-name def cb(self, min_volume=0): """以字典形式返回QDII数据 :param min_volume:最小交易量,单位万元 """ # 添加当前的ctime self.__cb_url = self.__cb_url.format(ctime=int(time.time())) # 请求数据 rep = requests.get(self.__cb_url) # 获取返回的json字符串 fundjson = json.loads(rep.text) # 格式化返回的json字符串 data = self.formatjisilujson(fundjson) # 过滤小于指定交易量的数据 if min_volume: data = { k: data[k] for k in data if float(data[k]["volume"]) > min_volume } self.__cb = data return self.__cb
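A hedged usage sketch of the funda filters (the import path is inferred from the file layout above, and the call hits jisilu.cn, so results depend on live market data):

from easyquotation.jsl import Jsl  # import path inferred from the file layout

jsl = Jsl()
# perpetual tier-A funds with turnover above 100 (万元) and discount above 5%
picks = jsl.funda(min_volume=100, min_discount=5, forever=True)
for funda_id, cell in picks.items():
    print(funda_id, cell["funda_name"], cell["funda_discount_rt"])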
shidenggui/easyquotation
easyquotation/jsl.py
Jsl.fundm
python
def fundm(self):
    # append the current ctime (cache-buster timestamp)
    self.__fundm_url = self.__fundm_url.format(ctime=int(time.time()))
    # request the data
    rep = requests.get(self.__fundm_url)
    # parse the returned JSON string
    fundmjson = json.loads(rep.text)
    # format the returned JSON into a dict
    data = self.formatfundajson(fundmjson)
    self.__fundm = data
    return self.__fundm
Return structured-fund parent (mother) fund data as a dict
train
https://github.com/shidenggui/easyquotation/blob/a75820db4f05f5386e1c1024d05b0bfc1de6cbda/easyquotation/jsl.py#L209-L221
[ "def formatfundajson(fundajson):\n \"\"\"格式化集思录返回的json数据,以字典形式保存\"\"\"\n result = {}\n for row in fundajson[\"rows\"]:\n funda_id = row[\"id\"]\n cell = row[\"cell\"]\n result[funda_id] = cell\n return result\n" ]
class Jsl: """ 抓取集思路的分级A数据 """ # 分级A的接口 __funda_url = "http://www.jisilu.cn/data/sfnew/funda_list/?___t={ctime:d}" # 分级B的接口 __fundb_url = "http://www.jisilu.cn/data/sfnew/fundb_list/?___t={ctime:d}" # 母基接口 __fundm_url = "https://www.jisilu.cn/data/sfnew/fundm_list/?___t={ctime:d}" # 分级套利的接口 __fundarb_url = ( "http://www.jisilu.cn/data/sfnew/arbitrage_vip_list/?___t={ctime:d}" ) # 集思录登录接口 __jsl_login_url = "https://www.jisilu.cn/account/ajax/login_process/" # 集思录 ETF 接口 __etf_index_url = "https://www.jisilu.cn/jisiludata/etf.php?___t={ctime:d}" # 黄金 ETF , 货币 ETF 留坑,未完成 __etf_gold_url = ( "https://www.jisilu.cn/jisiludata/etf.php?qtype=pmetf&___t={ctime:d}" ) __etf_money_url = ( "https://www.jisilu.cn/data/money_fund/list/?___t={ctime:d}" ) # 集思录QDII接口 __qdii_url = "https://www.jisilu.cn/data/qdii/qdii_list/?___t={ctime:d}" # 可转债 __cb_url = "https://www.jisilu.cn/data/cbnew/cb_list/?___t={ctime:d}" # 分级A数据 # 返回的字典格式 # { 150022: # {'abrate': '5:5', # 'calc_info': None, # 'coupon_descr': '+3.0%', # 'coupon_descr_s': '+3.0%', # 'fund_descr': '每年第一个工作日定折,无下折,A不参与上折,净值<1元无定折', # 'funda_amount': 178823, # 'funda_amount_increase': '0', # 'funda_amount_increase_rt': '0.00%', # 'funda_base_est_dis_rt': '2.27%', # 'funda_base_est_dis_rt_t1': '2.27%', # 'funda_base_est_dis_rt_t2': '-0.34%', # 'funda_base_est_dis_rt_tip': '', # 'funda_base_fund_id': '163109', # 'funda_coupon': '5.75', # 'funda_coupon_next': '4.75', # 'funda_current_price': '0.783', # 'funda_discount_rt': '24.75%', # 'funda_id': '150022', # 'funda_increase_rt': '0.00%', # 'funda_index_id': '399001', # 'funda_index_increase_rt': '0.00%', # 'funda_index_name': '深证成指', # 'funda_left_year': '永续', # 'funda_lower_recalc_rt': '1.82%', # 'funda_name': '深成指A', # 'funda_nav_dt': '2015-09-14', # 'funda_profit_rt': '7.74%', # 'funda_profit_rt_next': '6.424%', # 'funda_value': '1.0405', # 'funda_volume': '0.00', # 'fundb_upper_recalc_rt': '244.35%', # 'fundb_upper_recalc_rt_info': '深成指A不参与上折', # 'last_time': '09:18:22', # 'left_recalc_year': '0.30411', # 'lower_recalc_profit_rt': '-', # 'next_recalc_dt': '<span style="font-style:italic">2016-01-04</span>', # 'owned': 0, # 'status_cd': 'N'} # } def __init__(self): self.__funda = None self.__fundm = None self.__fundb = None self.__fundarb = None self.__etfindex = None self.__qdii = None self.__cb = None @staticmethod def formatfundajson(fundajson): """格式化集思录返回的json数据,以字典形式保存""" result = {} for row in fundajson["rows"]: funda_id = row["id"] cell = row["cell"] result[funda_id] = cell return result @staticmethod def formatfundbjson(fundbjson): """格式化集思录返回的json数据,以字典形式保存""" result = {} for row in fundbjson["rows"]: cell = row["cell"] fundb_id = cell["fundb_id"] result[fundb_id] = cell return result @staticmethod def formatetfindexjson(fundbjson): """格式化集思录返回 指数ETF 的json数据,以字典形式保存""" result = {} for row in fundbjson["rows"]: cell = row["cell"] fundb_id = cell["fund_id"] result[fundb_id] = cell return result @staticmethod def formatjisilujson(data): result = {} for row in data["rows"]: cell = row["cell"] id_ = row["id"] result[id_] = cell return result @staticmethod def percentage2float(per): """ 将字符串的百分数转化为浮点数 :param per: :return: """ return float(per.strip("%")) / 100. 
def funda( self, fields=None, min_volume=0, min_discount=0, ignore_nodown=False, forever=False, ): """以字典形式返回分级A数据 :param fields:利率范围,形如['+3.0%', '6.0%'] :param min_volume:最小交易量,单位万元 :param min_discount:最小折价率, 单位% :param ignore_nodown:是否忽略无下折品种,默认 False :param forever: 是否选择永续品种,默认 False """ if fields is None: fields = [] # 添加当前的ctime self.__funda_url = self.__funda_url.format(ctime=int(time.time())) # 请求数据 rep = requests.get(self.__funda_url) # 获取返回的json字符串 fundajson = json.loads(rep.text) # 格式化返回的json字符串 data = self.formatfundajson(fundajson) # 过滤小于指定交易量的数据 if min_volume: data = { k: data[k] for k in data if float(data[k]["funda_volume"]) > min_volume } if len(fields): data = { k: data[k] for k in data if data[k]["coupon_descr_s"] in "".join(fields) } if ignore_nodown: data = { k: data[k] for k in data if data[k]["fund_descr"].find("无下折") == -1 } if forever: data = { k: data[k] for k in data if data[k]["funda_left_year"].find("永续") != -1 } if min_discount: data = { k: data[k] for k in data if float(data[k]["funda_discount_rt"][:-1]) > min_discount } self.__funda = data return self.__funda def fundb(self, fields=None, min_volume=0, min_discount=0, forever=False): """以字典形式返回分级B数据 :param fields:利率范围,形如['+3.0%', '6.0%'] :param min_volume:最小交易量,单位万元 :param min_discount:最小折价率, 单位% :param forever: 是否选择永续品种,默认 False """ if fields is None: fields = [] # 添加当前的ctime self.__fundb_url = self.__fundb_url.format(ctime=int(time.time())) # 请求数据 rep = requests.get(self.__fundb_url) # 获取返回的json字符串 fundbjson = json.loads(rep.text) # 格式化返回的json字符串 data = self.formatfundbjson(fundbjson) # 过滤小于指定交易量的数据 if min_volume: data = { k: data[k] for k in data if float(data[k]["fundb_volume"]) > min_volume } if len(fields): data = { k: data[k] for k in data if data[k]["coupon_descr_s"] in "".join(fields) } if forever: data = { k: data[k] for k in data if data[k]["fundb_left_year"].find("永续") != -1 } if min_discount: data = { k: data[k] for k in data if float(data[k]["fundb_discount_rt"][:-1]) > min_discount } self.__fundb = data return self.__fundb def fundarb( self, jsl_username, jsl_password, avolume=100, bvolume=100, ptype="price", ): """以字典形式返回分级A数据 :param jsl_username: 集思录用户名 :param jsl_password: 集思路登录密码 :param avolume: A成交额,单位百万 :param bvolume: B成交额,单位百万 :param ptype: 溢价计算方式,price=现价,buy=买一,sell=卖一 """ session = requests.session() headers = { # pylint: disable=line-too-long "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko" } session.headers.update(headers) logindata = dict( return_url="http://www.jisilu.cn/", user_name=jsl_username, password=jsl_password, net_auto_login="1", _post_type="ajax", ) rep = session.post(self.__jsl_login_url, data=logindata) if rep.json()["err"] is not None: return rep.json() # 添加当前的ctime fundarb_url = self.__fundarb_url.format(ctime=int(time.time())) pdata = dict( avolume=avolume, bvolume=bvolume, ptype=ptype, is_search="1", market=["sh", "sz"], rp="50", ) # 请求数据 rep = session.post(fundarb_url, data=pdata) # 获取返回的json字符串 fundajson = json.loads(rep.text) # 格式化返回的json字符串 data = self.formatfundajson(fundajson) self.__fundarb = data return self.__fundarb def etfindex( self, index_id="", min_volume=0, max_discount=None, min_discount=None ): """ 以字典形式返回 指数ETF 数据 :param index_id: 获取指定的指数 :param min_volume: 最小成交量 :param min_discount: 最低溢价率, 适用于溢价套利, 格式 "-1.2%", "-1.2", -0.012 三种均可 :param max_discount: 最高溢价率, 适用于折价套利, 格式 "-1.2%", "-1.2", -0.012 三种均可 :return: {"fund_id":{}} """ # 添加当前的ctime self.__etf_index_url = self.__etf_index_url.format( 
ctime=int(time.time()) ) # 请求数据 rep = requests.get(self.__etf_index_url) # 获取返回的json字符串, 转化为字典 etf_json = rep.json() # 格式化返回的json字符串 data = self.formatetfindexjson(etf_json) # 过滤 if index_id: # 指定跟踪的指数代码 data = { fund_id: cell for fund_id, cell in data.items() if cell["index_id"] == index_id } if min_volume: # 过滤小于指定交易量的数据 data = { fund_id: cell for fund_id, cell in data.items() if float(cell["volume"]) >= min_volume } if min_discount is not None: # 指定最小溢价率 if isinstance(min_discount, str): if min_discount.endswith("%"): # 如果是字符串形式,先转为浮点形式 min_discount = self.percentage2float(min_discount) else: min_discount = float(min_discount) / 100. data = { fund_id: cell for fund_id, cell in data.items() if self.percentage2float(cell["discount_rt"]) >= min_discount } if max_discount is not None: # 指定最大溢价率 if isinstance(max_discount, str): if max_discount.endswith("%"): # 如果是字符串形式,先转为浮点形式 max_discount = self.percentage2float(max_discount) else: max_discount = float(max_discount) / 100. data = { fund_id: cell for fund_id, cell in data.items() if self.percentage2float(cell["discount_rt"]) <= max_discount } self.__etfindex = data return self.__etfindex def qdii(self, min_volume=0): """以字典形式返回QDII数据 :param min_volume:最小交易量,单位万元 """ # 添加当前的ctime self.__qdii_url = self.__qdii_url.format(ctime=int(time.time())) # 请求数据 rep = requests.get(self.__qdii_url) # 获取返回的json字符串 fundjson = json.loads(rep.text) # 格式化返回的json字符串 data = self.formatjisilujson(fundjson) data = {x: y for x, y in data.items() if y["notes"] != "估值有问题"} # 过滤小于指定交易量的数据 if min_volume: data = { k: data[k] for k in data if float(data[k]["volume"]) > min_volume } self.__qdii = data return self.__qdii # pylint: disable=invalid-name def cb(self, min_volume=0): """以字典形式返回QDII数据 :param min_volume:最小交易量,单位万元 """ # 添加当前的ctime self.__cb_url = self.__cb_url.format(ctime=int(time.time())) # 请求数据 rep = requests.get(self.__cb_url) # 获取返回的json字符串 fundjson = json.loads(rep.text) # 格式化返回的json字符串 data = self.formatjisilujson(fundjson) # 过滤小于指定交易量的数据 if min_volume: data = { k: data[k] for k in data if float(data[k]["volume"]) > min_volume } self.__cb = data return self.__cb
shidenggui/easyquotation
easyquotation/jsl.py
Jsl.fundb
python
def fundb(self, fields=None, min_volume=0, min_discount=0, forever=False):
    if fields is None:
        fields = []
    # append the current ctime (cache-buster timestamp)
    self.__fundb_url = self.__fundb_url.format(ctime=int(time.time()))
    # request the data
    rep = requests.get(self.__fundb_url)
    # parse the returned JSON string
    fundbjson = json.loads(rep.text)
    # format the returned JSON into a dict
    data = self.formatfundbjson(fundbjson)
    # drop entries whose turnover is below the threshold
    if min_volume:
        data = {
            k: data[k]
            for k in data
            if float(data[k]["fundb_volume"]) > min_volume
        }
    if len(fields):
        data = {
            k: data[k]
            for k in data
            if data[k]["coupon_descr_s"] in "".join(fields)
        }
    if forever:
        data = {
            k: data[k]
            for k in data
            if data[k]["fundb_left_year"].find("永续") != -1
        }
    if min_discount:
        data = {
            k: data[k]
            for k in data
            if float(data[k]["fundb_discount_rt"][:-1]) > min_discount
        }
    self.__fundb = data
    return self.__fundb
Return tier-B structured-fund data as a dict :param fields: coupon range, e.g. ['+3.0%', '6.0%'] :param min_volume: minimum turnover, in units of 10,000 CNY :param min_discount: minimum discount rate, in % :param forever: whether to select only perpetual funds, default False
train
https://github.com/shidenggui/easyquotation/blob/a75820db4f05f5386e1c1024d05b0bfc1de6cbda/easyquotation/jsl.py#L223-L266
[ "def formatfundbjson(fundbjson):\n \"\"\"格式化集思录返回的json数据,以字典形式保存\"\"\"\n result = {}\n for row in fundbjson[\"rows\"]:\n cell = row[\"cell\"]\n fundb_id = cell[\"fundb_id\"]\n result[fundb_id] = cell\n return result\n" ]
class Jsl: """ 抓取集思路的分级A数据 """ # 分级A的接口 __funda_url = "http://www.jisilu.cn/data/sfnew/funda_list/?___t={ctime:d}" # 分级B的接口 __fundb_url = "http://www.jisilu.cn/data/sfnew/fundb_list/?___t={ctime:d}" # 母基接口 __fundm_url = "https://www.jisilu.cn/data/sfnew/fundm_list/?___t={ctime:d}" # 分级套利的接口 __fundarb_url = ( "http://www.jisilu.cn/data/sfnew/arbitrage_vip_list/?___t={ctime:d}" ) # 集思录登录接口 __jsl_login_url = "https://www.jisilu.cn/account/ajax/login_process/" # 集思录 ETF 接口 __etf_index_url = "https://www.jisilu.cn/jisiludata/etf.php?___t={ctime:d}" # 黄金 ETF , 货币 ETF 留坑,未完成 __etf_gold_url = ( "https://www.jisilu.cn/jisiludata/etf.php?qtype=pmetf&___t={ctime:d}" ) __etf_money_url = ( "https://www.jisilu.cn/data/money_fund/list/?___t={ctime:d}" ) # 集思录QDII接口 __qdii_url = "https://www.jisilu.cn/data/qdii/qdii_list/?___t={ctime:d}" # 可转债 __cb_url = "https://www.jisilu.cn/data/cbnew/cb_list/?___t={ctime:d}" # 分级A数据 # 返回的字典格式 # { 150022: # {'abrate': '5:5', # 'calc_info': None, # 'coupon_descr': '+3.0%', # 'coupon_descr_s': '+3.0%', # 'fund_descr': '每年第一个工作日定折,无下折,A不参与上折,净值<1元无定折', # 'funda_amount': 178823, # 'funda_amount_increase': '0', # 'funda_amount_increase_rt': '0.00%', # 'funda_base_est_dis_rt': '2.27%', # 'funda_base_est_dis_rt_t1': '2.27%', # 'funda_base_est_dis_rt_t2': '-0.34%', # 'funda_base_est_dis_rt_tip': '', # 'funda_base_fund_id': '163109', # 'funda_coupon': '5.75', # 'funda_coupon_next': '4.75', # 'funda_current_price': '0.783', # 'funda_discount_rt': '24.75%', # 'funda_id': '150022', # 'funda_increase_rt': '0.00%', # 'funda_index_id': '399001', # 'funda_index_increase_rt': '0.00%', # 'funda_index_name': '深证成指', # 'funda_left_year': '永续', # 'funda_lower_recalc_rt': '1.82%', # 'funda_name': '深成指A', # 'funda_nav_dt': '2015-09-14', # 'funda_profit_rt': '7.74%', # 'funda_profit_rt_next': '6.424%', # 'funda_value': '1.0405', # 'funda_volume': '0.00', # 'fundb_upper_recalc_rt': '244.35%', # 'fundb_upper_recalc_rt_info': '深成指A不参与上折', # 'last_time': '09:18:22', # 'left_recalc_year': '0.30411', # 'lower_recalc_profit_rt': '-', # 'next_recalc_dt': '<span style="font-style:italic">2016-01-04</span>', # 'owned': 0, # 'status_cd': 'N'} # } def __init__(self): self.__funda = None self.__fundm = None self.__fundb = None self.__fundarb = None self.__etfindex = None self.__qdii = None self.__cb = None @staticmethod def formatfundajson(fundajson): """格式化集思录返回的json数据,以字典形式保存""" result = {} for row in fundajson["rows"]: funda_id = row["id"] cell = row["cell"] result[funda_id] = cell return result @staticmethod def formatfundbjson(fundbjson): """格式化集思录返回的json数据,以字典形式保存""" result = {} for row in fundbjson["rows"]: cell = row["cell"] fundb_id = cell["fundb_id"] result[fundb_id] = cell return result @staticmethod def formatetfindexjson(fundbjson): """格式化集思录返回 指数ETF 的json数据,以字典形式保存""" result = {} for row in fundbjson["rows"]: cell = row["cell"] fundb_id = cell["fund_id"] result[fundb_id] = cell return result @staticmethod def formatjisilujson(data): result = {} for row in data["rows"]: cell = row["cell"] id_ = row["id"] result[id_] = cell return result @staticmethod def percentage2float(per): """ 将字符串的百分数转化为浮点数 :param per: :return: """ return float(per.strip("%")) / 100. 
def funda( self, fields=None, min_volume=0, min_discount=0, ignore_nodown=False, forever=False, ): """以字典形式返回分级A数据 :param fields:利率范围,形如['+3.0%', '6.0%'] :param min_volume:最小交易量,单位万元 :param min_discount:最小折价率, 单位% :param ignore_nodown:是否忽略无下折品种,默认 False :param forever: 是否选择永续品种,默认 False """ if fields is None: fields = [] # 添加当前的ctime self.__funda_url = self.__funda_url.format(ctime=int(time.time())) # 请求数据 rep = requests.get(self.__funda_url) # 获取返回的json字符串 fundajson = json.loads(rep.text) # 格式化返回的json字符串 data = self.formatfundajson(fundajson) # 过滤小于指定交易量的数据 if min_volume: data = { k: data[k] for k in data if float(data[k]["funda_volume"]) > min_volume } if len(fields): data = { k: data[k] for k in data if data[k]["coupon_descr_s"] in "".join(fields) } if ignore_nodown: data = { k: data[k] for k in data if data[k]["fund_descr"].find("无下折") == -1 } if forever: data = { k: data[k] for k in data if data[k]["funda_left_year"].find("永续") != -1 } if min_discount: data = { k: data[k] for k in data if float(data[k]["funda_discount_rt"][:-1]) > min_discount } self.__funda = data return self.__funda def fundm(self): """以字典形式返回分级母基数据 """ # 添加当前的ctime self.__fundm_url = self.__fundm_url.format(ctime=int(time.time())) # 请求数据 rep = requests.get(self.__fundm_url) # 获取返回的json字符串 fundmjson = json.loads(rep.text) # 格式化返回的json字符串 data = self.formatfundajson(fundmjson) self.__fundm = data return self.__fundm def fundarb( self, jsl_username, jsl_password, avolume=100, bvolume=100, ptype="price", ): """以字典形式返回分级A数据 :param jsl_username: 集思录用户名 :param jsl_password: 集思路登录密码 :param avolume: A成交额,单位百万 :param bvolume: B成交额,单位百万 :param ptype: 溢价计算方式,price=现价,buy=买一,sell=卖一 """ session = requests.session() headers = { # pylint: disable=line-too-long "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko" } session.headers.update(headers) logindata = dict( return_url="http://www.jisilu.cn/", user_name=jsl_username, password=jsl_password, net_auto_login="1", _post_type="ajax", ) rep = session.post(self.__jsl_login_url, data=logindata) if rep.json()["err"] is not None: return rep.json() # 添加当前的ctime fundarb_url = self.__fundarb_url.format(ctime=int(time.time())) pdata = dict( avolume=avolume, bvolume=bvolume, ptype=ptype, is_search="1", market=["sh", "sz"], rp="50", ) # 请求数据 rep = session.post(fundarb_url, data=pdata) # 获取返回的json字符串 fundajson = json.loads(rep.text) # 格式化返回的json字符串 data = self.formatfundajson(fundajson) self.__fundarb = data return self.__fundarb def etfindex( self, index_id="", min_volume=0, max_discount=None, min_discount=None ): """ 以字典形式返回 指数ETF 数据 :param index_id: 获取指定的指数 :param min_volume: 最小成交量 :param min_discount: 最低溢价率, 适用于溢价套利, 格式 "-1.2%", "-1.2", -0.012 三种均可 :param max_discount: 最高溢价率, 适用于折价套利, 格式 "-1.2%", "-1.2", -0.012 三种均可 :return: {"fund_id":{}} """ # 添加当前的ctime self.__etf_index_url = self.__etf_index_url.format( ctime=int(time.time()) ) # 请求数据 rep = requests.get(self.__etf_index_url) # 获取返回的json字符串, 转化为字典 etf_json = rep.json() # 格式化返回的json字符串 data = self.formatetfindexjson(etf_json) # 过滤 if index_id: # 指定跟踪的指数代码 data = { fund_id: cell for fund_id, cell in data.items() if cell["index_id"] == index_id } if min_volume: # 过滤小于指定交易量的数据 data = { fund_id: cell for fund_id, cell in data.items() if float(cell["volume"]) >= min_volume } if min_discount is not None: # 指定最小溢价率 if isinstance(min_discount, str): if min_discount.endswith("%"): # 如果是字符串形式,先转为浮点形式 min_discount = self.percentage2float(min_discount) else: min_discount = float(min_discount) / 100. 
data = { fund_id: cell for fund_id, cell in data.items() if self.percentage2float(cell["discount_rt"]) >= min_discount } if max_discount is not None: # 指定最大溢价率 if isinstance(max_discount, str): if max_discount.endswith("%"): # 如果是字符串形式,先转为浮点形式 max_discount = self.percentage2float(max_discount) else: max_discount = float(max_discount) / 100. data = { fund_id: cell for fund_id, cell in data.items() if self.percentage2float(cell["discount_rt"]) <= max_discount } self.__etfindex = data return self.__etfindex def qdii(self, min_volume=0): """以字典形式返回QDII数据 :param min_volume:最小交易量,单位万元 """ # 添加当前的ctime self.__qdii_url = self.__qdii_url.format(ctime=int(time.time())) # 请求数据 rep = requests.get(self.__qdii_url) # 获取返回的json字符串 fundjson = json.loads(rep.text) # 格式化返回的json字符串 data = self.formatjisilujson(fundjson) data = {x: y for x, y in data.items() if y["notes"] != "估值有问题"} # 过滤小于指定交易量的数据 if min_volume: data = { k: data[k] for k in data if float(data[k]["volume"]) > min_volume } self.__qdii = data return self.__qdii # pylint: disable=invalid-name def cb(self, min_volume=0): """以字典形式返回QDII数据 :param min_volume:最小交易量,单位万元 """ # 添加当前的ctime self.__cb_url = self.__cb_url.format(ctime=int(time.time())) # 请求数据 rep = requests.get(self.__cb_url) # 获取返回的json字符串 fundjson = json.loads(rep.text) # 格式化返回的json字符串 data = self.formatjisilujson(fundjson) # 过滤小于指定交易量的数据 if min_volume: data = { k: data[k] for k in data if float(data[k]["volume"]) > min_volume } self.__cb = data return self.__cb
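A minimal usage sketch for the Jsl.fundb record above, illustrative rather than from the source: it assumes live network access to jisilu.cn, and the printed keys (fundb_volume, fundb_discount_rt) are the ones the method's own filters read.

from easyquotation.jsl import Jsl

jsl = Jsl()
# Perpetual graded-B shares with more than 1000 (万元) of daily turnover
bshares = jsl.fundb(min_volume=1000, forever=True)
for fund_id, cell in bshares.items():
    print(fund_id, cell["fundb_volume"], cell["fundb_discount_rt"])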
shidenggui/easyquotation
easyquotation/jsl.py
Jsl.fundarb
python
def fundarb(
    self,
    jsl_username,
    jsl_password,
    avolume=100,
    bvolume=100,
    ptype="price",
):
    session = requests.session()
    headers = {
        # pylint: disable=line-too-long
        "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko"
    }
    session.headers.update(headers)

    logindata = dict(
        return_url="http://www.jisilu.cn/",
        user_name=jsl_username,
        password=jsl_password,
        net_auto_login="1",
        _post_type="ajax",
    )

    rep = session.post(self.__jsl_login_url, data=logindata)

    if rep.json()["err"] is not None:
        return rep.json()

    # Append the current ctime (cache-busting timestamp)
    fundarb_url = self.__fundarb_url.format(ctime=int(time.time()))
    pdata = dict(
        avolume=avolume,
        bvolume=bvolume,
        ptype=ptype,
        is_search="1",
        market=["sh", "sz"],
        rp="50",
    )
    # Request the data
    rep = session.post(fundarb_url, data=pdata)

    # Parse the returned JSON string
    fundajson = json.loads(rep.text)
    # Reshape the parsed JSON into a dict keyed by fund id
    data = self.formatfundajson(fundajson)

    self.__fundarb = data
    return self.__fundarb
Return structured-fund arbitrage data as a dict.

:param jsl_username: jisilu.cn user name
:param jsl_password: jisilu.cn login password
:param avolume: fund-A turnover threshold, in millions of CNY
:param bvolume: fund-B turnover threshold, in millions of CNY
:param ptype: premium calculation basis; price = last price, buy = best bid, sell = best ask
train
https://github.com/shidenggui/easyquotation/blob/a75820db4f05f5386e1c1024d05b0bfc1de6cbda/easyquotation/jsl.py#L268-L323
[ "def formatfundajson(fundajson):\n \"\"\"格式化集思录返回的json数据,以字典形式保存\"\"\"\n result = {}\n for row in fundajson[\"rows\"]:\n funda_id = row[\"id\"]\n cell = row[\"cell\"]\n result[funda_id] = cell\n return result\n" ]
class Jsl: """ 抓取集思路的分级A数据 """ # 分级A的接口 __funda_url = "http://www.jisilu.cn/data/sfnew/funda_list/?___t={ctime:d}" # 分级B的接口 __fundb_url = "http://www.jisilu.cn/data/sfnew/fundb_list/?___t={ctime:d}" # 母基接口 __fundm_url = "https://www.jisilu.cn/data/sfnew/fundm_list/?___t={ctime:d}" # 分级套利的接口 __fundarb_url = ( "http://www.jisilu.cn/data/sfnew/arbitrage_vip_list/?___t={ctime:d}" ) # 集思录登录接口 __jsl_login_url = "https://www.jisilu.cn/account/ajax/login_process/" # 集思录 ETF 接口 __etf_index_url = "https://www.jisilu.cn/jisiludata/etf.php?___t={ctime:d}" # 黄金 ETF , 货币 ETF 留坑,未完成 __etf_gold_url = ( "https://www.jisilu.cn/jisiludata/etf.php?qtype=pmetf&___t={ctime:d}" ) __etf_money_url = ( "https://www.jisilu.cn/data/money_fund/list/?___t={ctime:d}" ) # 集思录QDII接口 __qdii_url = "https://www.jisilu.cn/data/qdii/qdii_list/?___t={ctime:d}" # 可转债 __cb_url = "https://www.jisilu.cn/data/cbnew/cb_list/?___t={ctime:d}" # 分级A数据 # 返回的字典格式 # { 150022: # {'abrate': '5:5', # 'calc_info': None, # 'coupon_descr': '+3.0%', # 'coupon_descr_s': '+3.0%', # 'fund_descr': '每年第一个工作日定折,无下折,A不参与上折,净值<1元无定折', # 'funda_amount': 178823, # 'funda_amount_increase': '0', # 'funda_amount_increase_rt': '0.00%', # 'funda_base_est_dis_rt': '2.27%', # 'funda_base_est_dis_rt_t1': '2.27%', # 'funda_base_est_dis_rt_t2': '-0.34%', # 'funda_base_est_dis_rt_tip': '', # 'funda_base_fund_id': '163109', # 'funda_coupon': '5.75', # 'funda_coupon_next': '4.75', # 'funda_current_price': '0.783', # 'funda_discount_rt': '24.75%', # 'funda_id': '150022', # 'funda_increase_rt': '0.00%', # 'funda_index_id': '399001', # 'funda_index_increase_rt': '0.00%', # 'funda_index_name': '深证成指', # 'funda_left_year': '永续', # 'funda_lower_recalc_rt': '1.82%', # 'funda_name': '深成指A', # 'funda_nav_dt': '2015-09-14', # 'funda_profit_rt': '7.74%', # 'funda_profit_rt_next': '6.424%', # 'funda_value': '1.0405', # 'funda_volume': '0.00', # 'fundb_upper_recalc_rt': '244.35%', # 'fundb_upper_recalc_rt_info': '深成指A不参与上折', # 'last_time': '09:18:22', # 'left_recalc_year': '0.30411', # 'lower_recalc_profit_rt': '-', # 'next_recalc_dt': '<span style="font-style:italic">2016-01-04</span>', # 'owned': 0, # 'status_cd': 'N'} # } def __init__(self): self.__funda = None self.__fundm = None self.__fundb = None self.__fundarb = None self.__etfindex = None self.__qdii = None self.__cb = None @staticmethod def formatfundajson(fundajson): """格式化集思录返回的json数据,以字典形式保存""" result = {} for row in fundajson["rows"]: funda_id = row["id"] cell = row["cell"] result[funda_id] = cell return result @staticmethod def formatfundbjson(fundbjson): """格式化集思录返回的json数据,以字典形式保存""" result = {} for row in fundbjson["rows"]: cell = row["cell"] fundb_id = cell["fundb_id"] result[fundb_id] = cell return result @staticmethod def formatetfindexjson(fundbjson): """格式化集思录返回 指数ETF 的json数据,以字典形式保存""" result = {} for row in fundbjson["rows"]: cell = row["cell"] fundb_id = cell["fund_id"] result[fundb_id] = cell return result @staticmethod def formatjisilujson(data): result = {} for row in data["rows"]: cell = row["cell"] id_ = row["id"] result[id_] = cell return result @staticmethod def percentage2float(per): """ 将字符串的百分数转化为浮点数 :param per: :return: """ return float(per.strip("%")) / 100. 
def funda( self, fields=None, min_volume=0, min_discount=0, ignore_nodown=False, forever=False, ): """以字典形式返回分级A数据 :param fields:利率范围,形如['+3.0%', '6.0%'] :param min_volume:最小交易量,单位万元 :param min_discount:最小折价率, 单位% :param ignore_nodown:是否忽略无下折品种,默认 False :param forever: 是否选择永续品种,默认 False """ if fields is None: fields = [] # 添加当前的ctime self.__funda_url = self.__funda_url.format(ctime=int(time.time())) # 请求数据 rep = requests.get(self.__funda_url) # 获取返回的json字符串 fundajson = json.loads(rep.text) # 格式化返回的json字符串 data = self.formatfundajson(fundajson) # 过滤小于指定交易量的数据 if min_volume: data = { k: data[k] for k in data if float(data[k]["funda_volume"]) > min_volume } if len(fields): data = { k: data[k] for k in data if data[k]["coupon_descr_s"] in "".join(fields) } if ignore_nodown: data = { k: data[k] for k in data if data[k]["fund_descr"].find("无下折") == -1 } if forever: data = { k: data[k] for k in data if data[k]["funda_left_year"].find("永续") != -1 } if min_discount: data = { k: data[k] for k in data if float(data[k]["funda_discount_rt"][:-1]) > min_discount } self.__funda = data return self.__funda def fundm(self): """以字典形式返回分级母基数据 """ # 添加当前的ctime self.__fundm_url = self.__fundm_url.format(ctime=int(time.time())) # 请求数据 rep = requests.get(self.__fundm_url) # 获取返回的json字符串 fundmjson = json.loads(rep.text) # 格式化返回的json字符串 data = self.formatfundajson(fundmjson) self.__fundm = data return self.__fundm def fundb(self, fields=None, min_volume=0, min_discount=0, forever=False): """以字典形式返回分级B数据 :param fields:利率范围,形如['+3.0%', '6.0%'] :param min_volume:最小交易量,单位万元 :param min_discount:最小折价率, 单位% :param forever: 是否选择永续品种,默认 False """ if fields is None: fields = [] # 添加当前的ctime self.__fundb_url = self.__fundb_url.format(ctime=int(time.time())) # 请求数据 rep = requests.get(self.__fundb_url) # 获取返回的json字符串 fundbjson = json.loads(rep.text) # 格式化返回的json字符串 data = self.formatfundbjson(fundbjson) # 过滤小于指定交易量的数据 if min_volume: data = { k: data[k] for k in data if float(data[k]["fundb_volume"]) > min_volume } if len(fields): data = { k: data[k] for k in data if data[k]["coupon_descr_s"] in "".join(fields) } if forever: data = { k: data[k] for k in data if data[k]["fundb_left_year"].find("永续") != -1 } if min_discount: data = { k: data[k] for k in data if float(data[k]["fundb_discount_rt"][:-1]) > min_discount } self.__fundb = data return self.__fundb def etfindex( self, index_id="", min_volume=0, max_discount=None, min_discount=None ): """ 以字典形式返回 指数ETF 数据 :param index_id: 获取指定的指数 :param min_volume: 最小成交量 :param min_discount: 最低溢价率, 适用于溢价套利, 格式 "-1.2%", "-1.2", -0.012 三种均可 :param max_discount: 最高溢价率, 适用于折价套利, 格式 "-1.2%", "-1.2", -0.012 三种均可 :return: {"fund_id":{}} """ # 添加当前的ctime self.__etf_index_url = self.__etf_index_url.format( ctime=int(time.time()) ) # 请求数据 rep = requests.get(self.__etf_index_url) # 获取返回的json字符串, 转化为字典 etf_json = rep.json() # 格式化返回的json字符串 data = self.formatetfindexjson(etf_json) # 过滤 if index_id: # 指定跟踪的指数代码 data = { fund_id: cell for fund_id, cell in data.items() if cell["index_id"] == index_id } if min_volume: # 过滤小于指定交易量的数据 data = { fund_id: cell for fund_id, cell in data.items() if float(cell["volume"]) >= min_volume } if min_discount is not None: # 指定最小溢价率 if isinstance(min_discount, str): if min_discount.endswith("%"): # 如果是字符串形式,先转为浮点形式 min_discount = self.percentage2float(min_discount) else: min_discount = float(min_discount) / 100. 
data = { fund_id: cell for fund_id, cell in data.items() if self.percentage2float(cell["discount_rt"]) >= min_discount } if max_discount is not None: # 指定最大溢价率 if isinstance(max_discount, str): if max_discount.endswith("%"): # 如果是字符串形式,先转为浮点形式 max_discount = self.percentage2float(max_discount) else: max_discount = float(max_discount) / 100. data = { fund_id: cell for fund_id, cell in data.items() if self.percentage2float(cell["discount_rt"]) <= max_discount } self.__etfindex = data return self.__etfindex def qdii(self, min_volume=0): """以字典形式返回QDII数据 :param min_volume:最小交易量,单位万元 """ # 添加当前的ctime self.__qdii_url = self.__qdii_url.format(ctime=int(time.time())) # 请求数据 rep = requests.get(self.__qdii_url) # 获取返回的json字符串 fundjson = json.loads(rep.text) # 格式化返回的json字符串 data = self.formatjisilujson(fundjson) data = {x: y for x, y in data.items() if y["notes"] != "估值有问题"} # 过滤小于指定交易量的数据 if min_volume: data = { k: data[k] for k in data if float(data[k]["volume"]) > min_volume } self.__qdii = data return self.__qdii # pylint: disable=invalid-name def cb(self, min_volume=0): """以字典形式返回QDII数据 :param min_volume:最小交易量,单位万元 """ # 添加当前的ctime self.__cb_url = self.__cb_url.format(ctime=int(time.time())) # 请求数据 rep = requests.get(self.__cb_url) # 获取返回的json字符串 fundjson = json.loads(rep.text) # 格式化返回的json字符串 data = self.formatjisilujson(fundjson) # 过滤小于指定交易量的数据 if min_volume: data = { k: data[k] for k in data if float(data[k]["volume"]) > min_volume } self.__cb = data return self.__cb
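A minimal sketch of calling fundarb, assuming valid jisilu.cn credentials; the ones below are placeholders. A failed login returns the raw login JSON, which the sketch checks for.

from easyquotation.jsl import Jsl

result = Jsl().fundarb(
    jsl_username="your-username",  # placeholder credentials
    jsl_password="your-password",
    avolume=100,    # fund-A turnover threshold, millions of CNY
    bvolume=100,    # fund-B turnover threshold, millions of CNY
    ptype="price",  # premium basis: "price", "buy", or "sell"
)
if result.get("err"):
    # Login failure: fundarb returns the login response unchanged
    print("login failed:", result["err"])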
shidenggui/easyquotation
easyquotation/jsl.py
Jsl.etfindex
python
def etfindex(
    self, index_id="", min_volume=0, max_discount=None, min_discount=None
):
    # Append the current ctime (cache-busting timestamp)
    self.__etf_index_url = self.__etf_index_url.format(
        ctime=int(time.time())
    )

    # Request the data
    rep = requests.get(self.__etf_index_url)

    # Parse the returned JSON string into a dict
    etf_json = rep.json()

    # Reshape the parsed JSON into a dict keyed by fund id
    data = self.formatetfindexjson(etf_json)

    # Filtering
    if index_id:
        # Keep only ETFs tracking the given index code
        data = {
            fund_id: cell
            for fund_id, cell in data.items()
            if cell["index_id"] == index_id
        }
    if min_volume:
        # Drop entries whose volume is below the given threshold
        data = {
            fund_id: cell
            for fund_id, cell in data.items()
            if float(cell["volume"]) >= min_volume
        }
    if min_discount is not None:
        # Apply the minimum premium rate
        if isinstance(min_discount, str):
            if min_discount.endswith("%"):
                # Convert the percentage string to a float first
                min_discount = self.percentage2float(min_discount)
            else:
                min_discount = float(min_discount) / 100.
        data = {
            fund_id: cell
            for fund_id, cell in data.items()
            if self.percentage2float(cell["discount_rt"]) >= min_discount
        }
    if max_discount is not None:
        # Apply the maximum premium rate
        if isinstance(max_discount, str):
            if max_discount.endswith("%"):
                # Convert the percentage string to a float first
                max_discount = self.percentage2float(max_discount)
            else:
                max_discount = float(max_discount) / 100.
        data = {
            fund_id: cell
            for fund_id, cell in data.items()
            if self.percentage2float(cell["discount_rt"]) <= max_discount
        }

    self.__etfindex = data
    return self.__etfindex
Return index-ETF data as a dict.

:param index_id: restrict results to ETFs tracking the given index
:param min_volume: minimum trading volume
:param min_discount: minimum premium rate, for premium arbitrage; accepts "-1.2%", "-1.2", or -0.012
:param max_discount: maximum premium rate, for discount arbitrage; accepts "-1.2%", "-1.2", or -0.012
:return: {"fund_id": {...}}
train
https://github.com/shidenggui/easyquotation/blob/a75820db4f05f5386e1c1024d05b0bfc1de6cbda/easyquotation/jsl.py#L325-L391
[ "def formatetfindexjson(fundbjson):\n \"\"\"格式化集思录返回 指数ETF 的json数据,以字典形式保存\"\"\"\n result = {}\n for row in fundbjson[\"rows\"]:\n cell = row[\"cell\"]\n fundb_id = cell[\"fund_id\"]\n result[fundb_id] = cell\n return result\n", "def percentage2float(per):\n \"\"\"\n 将字符串的百分数转化为浮点数\n :param per:\n :return:\n \"\"\"\n return float(per.strip(\"%\")) / 100.\n" ]
class Jsl: """ 抓取集思路的分级A数据 """ # 分级A的接口 __funda_url = "http://www.jisilu.cn/data/sfnew/funda_list/?___t={ctime:d}" # 分级B的接口 __fundb_url = "http://www.jisilu.cn/data/sfnew/fundb_list/?___t={ctime:d}" # 母基接口 __fundm_url = "https://www.jisilu.cn/data/sfnew/fundm_list/?___t={ctime:d}" # 分级套利的接口 __fundarb_url = ( "http://www.jisilu.cn/data/sfnew/arbitrage_vip_list/?___t={ctime:d}" ) # 集思录登录接口 __jsl_login_url = "https://www.jisilu.cn/account/ajax/login_process/" # 集思录 ETF 接口 __etf_index_url = "https://www.jisilu.cn/jisiludata/etf.php?___t={ctime:d}" # 黄金 ETF , 货币 ETF 留坑,未完成 __etf_gold_url = ( "https://www.jisilu.cn/jisiludata/etf.php?qtype=pmetf&___t={ctime:d}" ) __etf_money_url = ( "https://www.jisilu.cn/data/money_fund/list/?___t={ctime:d}" ) # 集思录QDII接口 __qdii_url = "https://www.jisilu.cn/data/qdii/qdii_list/?___t={ctime:d}" # 可转债 __cb_url = "https://www.jisilu.cn/data/cbnew/cb_list/?___t={ctime:d}" # 分级A数据 # 返回的字典格式 # { 150022: # {'abrate': '5:5', # 'calc_info': None, # 'coupon_descr': '+3.0%', # 'coupon_descr_s': '+3.0%', # 'fund_descr': '每年第一个工作日定折,无下折,A不参与上折,净值<1元无定折', # 'funda_amount': 178823, # 'funda_amount_increase': '0', # 'funda_amount_increase_rt': '0.00%', # 'funda_base_est_dis_rt': '2.27%', # 'funda_base_est_dis_rt_t1': '2.27%', # 'funda_base_est_dis_rt_t2': '-0.34%', # 'funda_base_est_dis_rt_tip': '', # 'funda_base_fund_id': '163109', # 'funda_coupon': '5.75', # 'funda_coupon_next': '4.75', # 'funda_current_price': '0.783', # 'funda_discount_rt': '24.75%', # 'funda_id': '150022', # 'funda_increase_rt': '0.00%', # 'funda_index_id': '399001', # 'funda_index_increase_rt': '0.00%', # 'funda_index_name': '深证成指', # 'funda_left_year': '永续', # 'funda_lower_recalc_rt': '1.82%', # 'funda_name': '深成指A', # 'funda_nav_dt': '2015-09-14', # 'funda_profit_rt': '7.74%', # 'funda_profit_rt_next': '6.424%', # 'funda_value': '1.0405', # 'funda_volume': '0.00', # 'fundb_upper_recalc_rt': '244.35%', # 'fundb_upper_recalc_rt_info': '深成指A不参与上折', # 'last_time': '09:18:22', # 'left_recalc_year': '0.30411', # 'lower_recalc_profit_rt': '-', # 'next_recalc_dt': '<span style="font-style:italic">2016-01-04</span>', # 'owned': 0, # 'status_cd': 'N'} # } def __init__(self): self.__funda = None self.__fundm = None self.__fundb = None self.__fundarb = None self.__etfindex = None self.__qdii = None self.__cb = None @staticmethod def formatfundajson(fundajson): """格式化集思录返回的json数据,以字典形式保存""" result = {} for row in fundajson["rows"]: funda_id = row["id"] cell = row["cell"] result[funda_id] = cell return result @staticmethod def formatfundbjson(fundbjson): """格式化集思录返回的json数据,以字典形式保存""" result = {} for row in fundbjson["rows"]: cell = row["cell"] fundb_id = cell["fundb_id"] result[fundb_id] = cell return result @staticmethod def formatetfindexjson(fundbjson): """格式化集思录返回 指数ETF 的json数据,以字典形式保存""" result = {} for row in fundbjson["rows"]: cell = row["cell"] fundb_id = cell["fund_id"] result[fundb_id] = cell return result @staticmethod def formatjisilujson(data): result = {} for row in data["rows"]: cell = row["cell"] id_ = row["id"] result[id_] = cell return result @staticmethod def percentage2float(per): """ 将字符串的百分数转化为浮点数 :param per: :return: """ return float(per.strip("%")) / 100. 
def funda( self, fields=None, min_volume=0, min_discount=0, ignore_nodown=False, forever=False, ): """以字典形式返回分级A数据 :param fields:利率范围,形如['+3.0%', '6.0%'] :param min_volume:最小交易量,单位万元 :param min_discount:最小折价率, 单位% :param ignore_nodown:是否忽略无下折品种,默认 False :param forever: 是否选择永续品种,默认 False """ if fields is None: fields = [] # 添加当前的ctime self.__funda_url = self.__funda_url.format(ctime=int(time.time())) # 请求数据 rep = requests.get(self.__funda_url) # 获取返回的json字符串 fundajson = json.loads(rep.text) # 格式化返回的json字符串 data = self.formatfundajson(fundajson) # 过滤小于指定交易量的数据 if min_volume: data = { k: data[k] for k in data if float(data[k]["funda_volume"]) > min_volume } if len(fields): data = { k: data[k] for k in data if data[k]["coupon_descr_s"] in "".join(fields) } if ignore_nodown: data = { k: data[k] for k in data if data[k]["fund_descr"].find("无下折") == -1 } if forever: data = { k: data[k] for k in data if data[k]["funda_left_year"].find("永续") != -1 } if min_discount: data = { k: data[k] for k in data if float(data[k]["funda_discount_rt"][:-1]) > min_discount } self.__funda = data return self.__funda def fundm(self): """以字典形式返回分级母基数据 """ # 添加当前的ctime self.__fundm_url = self.__fundm_url.format(ctime=int(time.time())) # 请求数据 rep = requests.get(self.__fundm_url) # 获取返回的json字符串 fundmjson = json.loads(rep.text) # 格式化返回的json字符串 data = self.formatfundajson(fundmjson) self.__fundm = data return self.__fundm def fundb(self, fields=None, min_volume=0, min_discount=0, forever=False): """以字典形式返回分级B数据 :param fields:利率范围,形如['+3.0%', '6.0%'] :param min_volume:最小交易量,单位万元 :param min_discount:最小折价率, 单位% :param forever: 是否选择永续品种,默认 False """ if fields is None: fields = [] # 添加当前的ctime self.__fundb_url = self.__fundb_url.format(ctime=int(time.time())) # 请求数据 rep = requests.get(self.__fundb_url) # 获取返回的json字符串 fundbjson = json.loads(rep.text) # 格式化返回的json字符串 data = self.formatfundbjson(fundbjson) # 过滤小于指定交易量的数据 if min_volume: data = { k: data[k] for k in data if float(data[k]["fundb_volume"]) > min_volume } if len(fields): data = { k: data[k] for k in data if data[k]["coupon_descr_s"] in "".join(fields) } if forever: data = { k: data[k] for k in data if data[k]["fundb_left_year"].find("永续") != -1 } if min_discount: data = { k: data[k] for k in data if float(data[k]["fundb_discount_rt"][:-1]) > min_discount } self.__fundb = data return self.__fundb def fundarb( self, jsl_username, jsl_password, avolume=100, bvolume=100, ptype="price", ): """以字典形式返回分级A数据 :param jsl_username: 集思录用户名 :param jsl_password: 集思路登录密码 :param avolume: A成交额,单位百万 :param bvolume: B成交额,单位百万 :param ptype: 溢价计算方式,price=现价,buy=买一,sell=卖一 """ session = requests.session() headers = { # pylint: disable=line-too-long "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko" } session.headers.update(headers) logindata = dict( return_url="http://www.jisilu.cn/", user_name=jsl_username, password=jsl_password, net_auto_login="1", _post_type="ajax", ) rep = session.post(self.__jsl_login_url, data=logindata) if rep.json()["err"] is not None: return rep.json() # 添加当前的ctime fundarb_url = self.__fundarb_url.format(ctime=int(time.time())) pdata = dict( avolume=avolume, bvolume=bvolume, ptype=ptype, is_search="1", market=["sh", "sz"], rp="50", ) # 请求数据 rep = session.post(fundarb_url, data=pdata) # 获取返回的json字符串 fundajson = json.loads(rep.text) # 格式化返回的json字符串 data = self.formatfundajson(fundajson) self.__fundarb = data return self.__fundarb def qdii(self, min_volume=0): """以字典形式返回QDII数据 :param min_volume:最小交易量,单位万元 """ # 添加当前的ctime 
self.__qdii_url = self.__qdii_url.format(ctime=int(time.time())) # 请求数据 rep = requests.get(self.__qdii_url) # 获取返回的json字符串 fundjson = json.loads(rep.text) # 格式化返回的json字符串 data = self.formatjisilujson(fundjson) data = {x: y for x, y in data.items() if y["notes"] != "估值有问题"} # 过滤小于指定交易量的数据 if min_volume: data = { k: data[k] for k in data if float(data[k]["volume"]) > min_volume } self.__qdii = data return self.__qdii # pylint: disable=invalid-name def cb(self, min_volume=0): """以字典形式返回QDII数据 :param min_volume:最小交易量,单位万元 """ # 添加当前的ctime self.__cb_url = self.__cb_url.format(ctime=int(time.time())) # 请求数据 rep = requests.get(self.__cb_url) # 获取返回的json字符串 fundjson = json.loads(rep.text) # 格式化返回的json字符串 data = self.formatjisilujson(fundjson) # 过滤小于指定交易量的数据 if min_volume: data = { k: data[k] for k in data if float(data[k]["volume"]) > min_volume } self.__cb = data return self.__cb
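A minimal sketch for etfindex, assuming live data; the index code 000300 (CSI 300) is only an example, and the three threshold spellings named in the docstring are interchangeable.

from easyquotation.jsl import Jsl

jsl = Jsl()
# ETFs tracking index 000300 whose premium is at most -1.2%
# ("-1.2%", "-1.2" and -0.012 all spell the same threshold)
discounted = jsl.etfindex(index_id="000300", max_discount="-1.2%")
for fund_id, cell in discounted.items():
    print(fund_id, cell["discount_rt"])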
shidenggui/easyquotation
easyquotation/jsl.py
Jsl.qdii
python
def qdii(self, min_volume=0):
    # Append the current ctime (cache-busting timestamp)
    self.__qdii_url = self.__qdii_url.format(ctime=int(time.time()))

    # Request the data
    rep = requests.get(self.__qdii_url)

    # Parse the returned JSON string
    fundjson = json.loads(rep.text)
    # Reshape the parsed JSON into a dict keyed by fund id
    data = self.formatjisilujson(fundjson)
    # Drop funds flagged by jisilu as having valuation problems ("估值有问题")
    data = {x: y for x, y in data.items() if y["notes"] != "估值有问题"}

    # Drop entries whose volume is below the given threshold
    if min_volume:
        data = {
            k: data[k]
            for k in data
            if float(data[k]["volume"]) > min_volume
        }
    self.__qdii = data
    return self.__qdii
Return QDII fund data as a dict.

:param min_volume: minimum trading volume, in units of 10,000 CNY
train
https://github.com/shidenggui/easyquotation/blob/a75820db4f05f5386e1c1024d05b0bfc1de6cbda/easyquotation/jsl.py#L393-L415
[ "def formatjisilujson(data):\n result = {}\n for row in data[\"rows\"]:\n cell = row[\"cell\"]\n id_ = row[\"id\"]\n result[id_] = cell\n return result\n" ]
class Jsl: """ 抓取集思路的分级A数据 """ # 分级A的接口 __funda_url = "http://www.jisilu.cn/data/sfnew/funda_list/?___t={ctime:d}" # 分级B的接口 __fundb_url = "http://www.jisilu.cn/data/sfnew/fundb_list/?___t={ctime:d}" # 母基接口 __fundm_url = "https://www.jisilu.cn/data/sfnew/fundm_list/?___t={ctime:d}" # 分级套利的接口 __fundarb_url = ( "http://www.jisilu.cn/data/sfnew/arbitrage_vip_list/?___t={ctime:d}" ) # 集思录登录接口 __jsl_login_url = "https://www.jisilu.cn/account/ajax/login_process/" # 集思录 ETF 接口 __etf_index_url = "https://www.jisilu.cn/jisiludata/etf.php?___t={ctime:d}" # 黄金 ETF , 货币 ETF 留坑,未完成 __etf_gold_url = ( "https://www.jisilu.cn/jisiludata/etf.php?qtype=pmetf&___t={ctime:d}" ) __etf_money_url = ( "https://www.jisilu.cn/data/money_fund/list/?___t={ctime:d}" ) # 集思录QDII接口 __qdii_url = "https://www.jisilu.cn/data/qdii/qdii_list/?___t={ctime:d}" # 可转债 __cb_url = "https://www.jisilu.cn/data/cbnew/cb_list/?___t={ctime:d}" # 分级A数据 # 返回的字典格式 # { 150022: # {'abrate': '5:5', # 'calc_info': None, # 'coupon_descr': '+3.0%', # 'coupon_descr_s': '+3.0%', # 'fund_descr': '每年第一个工作日定折,无下折,A不参与上折,净值<1元无定折', # 'funda_amount': 178823, # 'funda_amount_increase': '0', # 'funda_amount_increase_rt': '0.00%', # 'funda_base_est_dis_rt': '2.27%', # 'funda_base_est_dis_rt_t1': '2.27%', # 'funda_base_est_dis_rt_t2': '-0.34%', # 'funda_base_est_dis_rt_tip': '', # 'funda_base_fund_id': '163109', # 'funda_coupon': '5.75', # 'funda_coupon_next': '4.75', # 'funda_current_price': '0.783', # 'funda_discount_rt': '24.75%', # 'funda_id': '150022', # 'funda_increase_rt': '0.00%', # 'funda_index_id': '399001', # 'funda_index_increase_rt': '0.00%', # 'funda_index_name': '深证成指', # 'funda_left_year': '永续', # 'funda_lower_recalc_rt': '1.82%', # 'funda_name': '深成指A', # 'funda_nav_dt': '2015-09-14', # 'funda_profit_rt': '7.74%', # 'funda_profit_rt_next': '6.424%', # 'funda_value': '1.0405', # 'funda_volume': '0.00', # 'fundb_upper_recalc_rt': '244.35%', # 'fundb_upper_recalc_rt_info': '深成指A不参与上折', # 'last_time': '09:18:22', # 'left_recalc_year': '0.30411', # 'lower_recalc_profit_rt': '-', # 'next_recalc_dt': '<span style="font-style:italic">2016-01-04</span>', # 'owned': 0, # 'status_cd': 'N'} # } def __init__(self): self.__funda = None self.__fundm = None self.__fundb = None self.__fundarb = None self.__etfindex = None self.__qdii = None self.__cb = None @staticmethod def formatfundajson(fundajson): """格式化集思录返回的json数据,以字典形式保存""" result = {} for row in fundajson["rows"]: funda_id = row["id"] cell = row["cell"] result[funda_id] = cell return result @staticmethod def formatfundbjson(fundbjson): """格式化集思录返回的json数据,以字典形式保存""" result = {} for row in fundbjson["rows"]: cell = row["cell"] fundb_id = cell["fundb_id"] result[fundb_id] = cell return result @staticmethod def formatetfindexjson(fundbjson): """格式化集思录返回 指数ETF 的json数据,以字典形式保存""" result = {} for row in fundbjson["rows"]: cell = row["cell"] fundb_id = cell["fund_id"] result[fundb_id] = cell return result @staticmethod def formatjisilujson(data): result = {} for row in data["rows"]: cell = row["cell"] id_ = row["id"] result[id_] = cell return result @staticmethod def percentage2float(per): """ 将字符串的百分数转化为浮点数 :param per: :return: """ return float(per.strip("%")) / 100. 
def funda( self, fields=None, min_volume=0, min_discount=0, ignore_nodown=False, forever=False, ): """以字典形式返回分级A数据 :param fields:利率范围,形如['+3.0%', '6.0%'] :param min_volume:最小交易量,单位万元 :param min_discount:最小折价率, 单位% :param ignore_nodown:是否忽略无下折品种,默认 False :param forever: 是否选择永续品种,默认 False """ if fields is None: fields = [] # 添加当前的ctime self.__funda_url = self.__funda_url.format(ctime=int(time.time())) # 请求数据 rep = requests.get(self.__funda_url) # 获取返回的json字符串 fundajson = json.loads(rep.text) # 格式化返回的json字符串 data = self.formatfundajson(fundajson) # 过滤小于指定交易量的数据 if min_volume: data = { k: data[k] for k in data if float(data[k]["funda_volume"]) > min_volume } if len(fields): data = { k: data[k] for k in data if data[k]["coupon_descr_s"] in "".join(fields) } if ignore_nodown: data = { k: data[k] for k in data if data[k]["fund_descr"].find("无下折") == -1 } if forever: data = { k: data[k] for k in data if data[k]["funda_left_year"].find("永续") != -1 } if min_discount: data = { k: data[k] for k in data if float(data[k]["funda_discount_rt"][:-1]) > min_discount } self.__funda = data return self.__funda def fundm(self): """以字典形式返回分级母基数据 """ # 添加当前的ctime self.__fundm_url = self.__fundm_url.format(ctime=int(time.time())) # 请求数据 rep = requests.get(self.__fundm_url) # 获取返回的json字符串 fundmjson = json.loads(rep.text) # 格式化返回的json字符串 data = self.formatfundajson(fundmjson) self.__fundm = data return self.__fundm def fundb(self, fields=None, min_volume=0, min_discount=0, forever=False): """以字典形式返回分级B数据 :param fields:利率范围,形如['+3.0%', '6.0%'] :param min_volume:最小交易量,单位万元 :param min_discount:最小折价率, 单位% :param forever: 是否选择永续品种,默认 False """ if fields is None: fields = [] # 添加当前的ctime self.__fundb_url = self.__fundb_url.format(ctime=int(time.time())) # 请求数据 rep = requests.get(self.__fundb_url) # 获取返回的json字符串 fundbjson = json.loads(rep.text) # 格式化返回的json字符串 data = self.formatfundbjson(fundbjson) # 过滤小于指定交易量的数据 if min_volume: data = { k: data[k] for k in data if float(data[k]["fundb_volume"]) > min_volume } if len(fields): data = { k: data[k] for k in data if data[k]["coupon_descr_s"] in "".join(fields) } if forever: data = { k: data[k] for k in data if data[k]["fundb_left_year"].find("永续") != -1 } if min_discount: data = { k: data[k] for k in data if float(data[k]["fundb_discount_rt"][:-1]) > min_discount } self.__fundb = data return self.__fundb def fundarb( self, jsl_username, jsl_password, avolume=100, bvolume=100, ptype="price", ): """以字典形式返回分级A数据 :param jsl_username: 集思录用户名 :param jsl_password: 集思路登录密码 :param avolume: A成交额,单位百万 :param bvolume: B成交额,单位百万 :param ptype: 溢价计算方式,price=现价,buy=买一,sell=卖一 """ session = requests.session() headers = { # pylint: disable=line-too-long "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko" } session.headers.update(headers) logindata = dict( return_url="http://www.jisilu.cn/", user_name=jsl_username, password=jsl_password, net_auto_login="1", _post_type="ajax", ) rep = session.post(self.__jsl_login_url, data=logindata) if rep.json()["err"] is not None: return rep.json() # 添加当前的ctime fundarb_url = self.__fundarb_url.format(ctime=int(time.time())) pdata = dict( avolume=avolume, bvolume=bvolume, ptype=ptype, is_search="1", market=["sh", "sz"], rp="50", ) # 请求数据 rep = session.post(fundarb_url, data=pdata) # 获取返回的json字符串 fundajson = json.loads(rep.text) # 格式化返回的json字符串 data = self.formatfundajson(fundajson) self.__fundarb = data return self.__fundarb def etfindex( self, index_id="", min_volume=0, max_discount=None, min_discount=None ): """ 以字典形式返回 指数ETF 
数据 :param index_id: 获取指定的指数 :param min_volume: 最小成交量 :param min_discount: 最低溢价率, 适用于溢价套利, 格式 "-1.2%", "-1.2", -0.012 三种均可 :param max_discount: 最高溢价率, 适用于折价套利, 格式 "-1.2%", "-1.2", -0.012 三种均可 :return: {"fund_id":{}} """ # 添加当前的ctime self.__etf_index_url = self.__etf_index_url.format( ctime=int(time.time()) ) # 请求数据 rep = requests.get(self.__etf_index_url) # 获取返回的json字符串, 转化为字典 etf_json = rep.json() # 格式化返回的json字符串 data = self.formatetfindexjson(etf_json) # 过滤 if index_id: # 指定跟踪的指数代码 data = { fund_id: cell for fund_id, cell in data.items() if cell["index_id"] == index_id } if min_volume: # 过滤小于指定交易量的数据 data = { fund_id: cell for fund_id, cell in data.items() if float(cell["volume"]) >= min_volume } if min_discount is not None: # 指定最小溢价率 if isinstance(min_discount, str): if min_discount.endswith("%"): # 如果是字符串形式,先转为浮点形式 min_discount = self.percentage2float(min_discount) else: min_discount = float(min_discount) / 100. data = { fund_id: cell for fund_id, cell in data.items() if self.percentage2float(cell["discount_rt"]) >= min_discount } if max_discount is not None: # 指定最大溢价率 if isinstance(max_discount, str): if max_discount.endswith("%"): # 如果是字符串形式,先转为浮点形式 max_discount = self.percentage2float(max_discount) else: max_discount = float(max_discount) / 100. data = { fund_id: cell for fund_id, cell in data.items() if self.percentage2float(cell["discount_rt"]) <= max_discount } self.__etfindex = data return self.__etfindex # pylint: disable=invalid-name def cb(self, min_volume=0): """以字典形式返回QDII数据 :param min_volume:最小交易量,单位万元 """ # 添加当前的ctime self.__cb_url = self.__cb_url.format(ctime=int(time.time())) # 请求数据 rep = requests.get(self.__cb_url) # 获取返回的json字符串 fundjson = json.loads(rep.text) # 格式化返回的json字符串 data = self.formatjisilujson(fundjson) # 过滤小于指定交易量的数据 if min_volume: data = { k: data[k] for k in data if float(data[k]["volume"]) > min_volume } self.__cb = data return self.__cb
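A minimal sketch for qdii, assuming live data; note that the method already drops rows flagged by jisilu as having valuation problems before the volume filter runs.

from easyquotation.jsl import Jsl

qdii_funds = Jsl().qdii(min_volume=500)  # keep funds trading above 500 万元
for fund_id, cell in qdii_funds.items():
    print(fund_id, cell["volume"])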
shidenggui/easyquotation
easyquotation/jsl.py
Jsl.cb
python
def cb(self, min_volume=0):
    # Append the current ctime (cache-busting timestamp)
    self.__cb_url = self.__cb_url.format(ctime=int(time.time()))

    # Request the data
    rep = requests.get(self.__cb_url)

    # Parse the returned JSON string
    fundjson = json.loads(rep.text)
    # Reshape the parsed JSON into a dict keyed by bond id
    data = self.formatjisilujson(fundjson)

    # Drop entries whose volume is below the given threshold
    if min_volume:
        data = {
            k: data[k]
            for k in data
            if float(data[k]["volume"]) > min_volume
        }
    self.__cb = data
    return self.__cb
Return convertible-bond data as a dict.

:param min_volume: minimum trading volume, in units of 10,000 CNY
train
https://github.com/shidenggui/easyquotation/blob/a75820db4f05f5386e1c1024d05b0bfc1de6cbda/easyquotation/jsl.py#L418-L439
[ "def formatjisilujson(data):\n result = {}\n for row in data[\"rows\"]:\n cell = row[\"cell\"]\n id_ = row[\"id\"]\n result[id_] = cell\n return result\n" ]
class Jsl: """ 抓取集思路的分级A数据 """ # 分级A的接口 __funda_url = "http://www.jisilu.cn/data/sfnew/funda_list/?___t={ctime:d}" # 分级B的接口 __fundb_url = "http://www.jisilu.cn/data/sfnew/fundb_list/?___t={ctime:d}" # 母基接口 __fundm_url = "https://www.jisilu.cn/data/sfnew/fundm_list/?___t={ctime:d}" # 分级套利的接口 __fundarb_url = ( "http://www.jisilu.cn/data/sfnew/arbitrage_vip_list/?___t={ctime:d}" ) # 集思录登录接口 __jsl_login_url = "https://www.jisilu.cn/account/ajax/login_process/" # 集思录 ETF 接口 __etf_index_url = "https://www.jisilu.cn/jisiludata/etf.php?___t={ctime:d}" # 黄金 ETF , 货币 ETF 留坑,未完成 __etf_gold_url = ( "https://www.jisilu.cn/jisiludata/etf.php?qtype=pmetf&___t={ctime:d}" ) __etf_money_url = ( "https://www.jisilu.cn/data/money_fund/list/?___t={ctime:d}" ) # 集思录QDII接口 __qdii_url = "https://www.jisilu.cn/data/qdii/qdii_list/?___t={ctime:d}" # 可转债 __cb_url = "https://www.jisilu.cn/data/cbnew/cb_list/?___t={ctime:d}" # 分级A数据 # 返回的字典格式 # { 150022: # {'abrate': '5:5', # 'calc_info': None, # 'coupon_descr': '+3.0%', # 'coupon_descr_s': '+3.0%', # 'fund_descr': '每年第一个工作日定折,无下折,A不参与上折,净值<1元无定折', # 'funda_amount': 178823, # 'funda_amount_increase': '0', # 'funda_amount_increase_rt': '0.00%', # 'funda_base_est_dis_rt': '2.27%', # 'funda_base_est_dis_rt_t1': '2.27%', # 'funda_base_est_dis_rt_t2': '-0.34%', # 'funda_base_est_dis_rt_tip': '', # 'funda_base_fund_id': '163109', # 'funda_coupon': '5.75', # 'funda_coupon_next': '4.75', # 'funda_current_price': '0.783', # 'funda_discount_rt': '24.75%', # 'funda_id': '150022', # 'funda_increase_rt': '0.00%', # 'funda_index_id': '399001', # 'funda_index_increase_rt': '0.00%', # 'funda_index_name': '深证成指', # 'funda_left_year': '永续', # 'funda_lower_recalc_rt': '1.82%', # 'funda_name': '深成指A', # 'funda_nav_dt': '2015-09-14', # 'funda_profit_rt': '7.74%', # 'funda_profit_rt_next': '6.424%', # 'funda_value': '1.0405', # 'funda_volume': '0.00', # 'fundb_upper_recalc_rt': '244.35%', # 'fundb_upper_recalc_rt_info': '深成指A不参与上折', # 'last_time': '09:18:22', # 'left_recalc_year': '0.30411', # 'lower_recalc_profit_rt': '-', # 'next_recalc_dt': '<span style="font-style:italic">2016-01-04</span>', # 'owned': 0, # 'status_cd': 'N'} # } def __init__(self): self.__funda = None self.__fundm = None self.__fundb = None self.__fundarb = None self.__etfindex = None self.__qdii = None self.__cb = None @staticmethod def formatfundajson(fundajson): """格式化集思录返回的json数据,以字典形式保存""" result = {} for row in fundajson["rows"]: funda_id = row["id"] cell = row["cell"] result[funda_id] = cell return result @staticmethod def formatfundbjson(fundbjson): """格式化集思录返回的json数据,以字典形式保存""" result = {} for row in fundbjson["rows"]: cell = row["cell"] fundb_id = cell["fundb_id"] result[fundb_id] = cell return result @staticmethod def formatetfindexjson(fundbjson): """格式化集思录返回 指数ETF 的json数据,以字典形式保存""" result = {} for row in fundbjson["rows"]: cell = row["cell"] fundb_id = cell["fund_id"] result[fundb_id] = cell return result @staticmethod def formatjisilujson(data): result = {} for row in data["rows"]: cell = row["cell"] id_ = row["id"] result[id_] = cell return result @staticmethod def percentage2float(per): """ 将字符串的百分数转化为浮点数 :param per: :return: """ return float(per.strip("%")) / 100. 
def funda( self, fields=None, min_volume=0, min_discount=0, ignore_nodown=False, forever=False, ): """以字典形式返回分级A数据 :param fields:利率范围,形如['+3.0%', '6.0%'] :param min_volume:最小交易量,单位万元 :param min_discount:最小折价率, 单位% :param ignore_nodown:是否忽略无下折品种,默认 False :param forever: 是否选择永续品种,默认 False """ if fields is None: fields = [] # 添加当前的ctime self.__funda_url = self.__funda_url.format(ctime=int(time.time())) # 请求数据 rep = requests.get(self.__funda_url) # 获取返回的json字符串 fundajson = json.loads(rep.text) # 格式化返回的json字符串 data = self.formatfundajson(fundajson) # 过滤小于指定交易量的数据 if min_volume: data = { k: data[k] for k in data if float(data[k]["funda_volume"]) > min_volume } if len(fields): data = { k: data[k] for k in data if data[k]["coupon_descr_s"] in "".join(fields) } if ignore_nodown: data = { k: data[k] for k in data if data[k]["fund_descr"].find("无下折") == -1 } if forever: data = { k: data[k] for k in data if data[k]["funda_left_year"].find("永续") != -1 } if min_discount: data = { k: data[k] for k in data if float(data[k]["funda_discount_rt"][:-1]) > min_discount } self.__funda = data return self.__funda def fundm(self): """以字典形式返回分级母基数据 """ # 添加当前的ctime self.__fundm_url = self.__fundm_url.format(ctime=int(time.time())) # 请求数据 rep = requests.get(self.__fundm_url) # 获取返回的json字符串 fundmjson = json.loads(rep.text) # 格式化返回的json字符串 data = self.formatfundajson(fundmjson) self.__fundm = data return self.__fundm def fundb(self, fields=None, min_volume=0, min_discount=0, forever=False): """以字典形式返回分级B数据 :param fields:利率范围,形如['+3.0%', '6.0%'] :param min_volume:最小交易量,单位万元 :param min_discount:最小折价率, 单位% :param forever: 是否选择永续品种,默认 False """ if fields is None: fields = [] # 添加当前的ctime self.__fundb_url = self.__fundb_url.format(ctime=int(time.time())) # 请求数据 rep = requests.get(self.__fundb_url) # 获取返回的json字符串 fundbjson = json.loads(rep.text) # 格式化返回的json字符串 data = self.formatfundbjson(fundbjson) # 过滤小于指定交易量的数据 if min_volume: data = { k: data[k] for k in data if float(data[k]["fundb_volume"]) > min_volume } if len(fields): data = { k: data[k] for k in data if data[k]["coupon_descr_s"] in "".join(fields) } if forever: data = { k: data[k] for k in data if data[k]["fundb_left_year"].find("永续") != -1 } if min_discount: data = { k: data[k] for k in data if float(data[k]["fundb_discount_rt"][:-1]) > min_discount } self.__fundb = data return self.__fundb def fundarb( self, jsl_username, jsl_password, avolume=100, bvolume=100, ptype="price", ): """以字典形式返回分级A数据 :param jsl_username: 集思录用户名 :param jsl_password: 集思路登录密码 :param avolume: A成交额,单位百万 :param bvolume: B成交额,单位百万 :param ptype: 溢价计算方式,price=现价,buy=买一,sell=卖一 """ session = requests.session() headers = { # pylint: disable=line-too-long "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko" } session.headers.update(headers) logindata = dict( return_url="http://www.jisilu.cn/", user_name=jsl_username, password=jsl_password, net_auto_login="1", _post_type="ajax", ) rep = session.post(self.__jsl_login_url, data=logindata) if rep.json()["err"] is not None: return rep.json() # 添加当前的ctime fundarb_url = self.__fundarb_url.format(ctime=int(time.time())) pdata = dict( avolume=avolume, bvolume=bvolume, ptype=ptype, is_search="1", market=["sh", "sz"], rp="50", ) # 请求数据 rep = session.post(fundarb_url, data=pdata) # 获取返回的json字符串 fundajson = json.loads(rep.text) # 格式化返回的json字符串 data = self.formatfundajson(fundajson) self.__fundarb = data return self.__fundarb def etfindex( self, index_id="", min_volume=0, max_discount=None, min_discount=None ): """ 以字典形式返回 指数ETF 
数据 :param index_id: 获取指定的指数 :param min_volume: 最小成交量 :param min_discount: 最低溢价率, 适用于溢价套利, 格式 "-1.2%", "-1.2", -0.012 三种均可 :param max_discount: 最高溢价率, 适用于折价套利, 格式 "-1.2%", "-1.2", -0.012 三种均可 :return: {"fund_id":{}} """ # 添加当前的ctime self.__etf_index_url = self.__etf_index_url.format( ctime=int(time.time()) ) # 请求数据 rep = requests.get(self.__etf_index_url) # 获取返回的json字符串, 转化为字典 etf_json = rep.json() # 格式化返回的json字符串 data = self.formatetfindexjson(etf_json) # 过滤 if index_id: # 指定跟踪的指数代码 data = { fund_id: cell for fund_id, cell in data.items() if cell["index_id"] == index_id } if min_volume: # 过滤小于指定交易量的数据 data = { fund_id: cell for fund_id, cell in data.items() if float(cell["volume"]) >= min_volume } if min_discount is not None: # 指定最小溢价率 if isinstance(min_discount, str): if min_discount.endswith("%"): # 如果是字符串形式,先转为浮点形式 min_discount = self.percentage2float(min_discount) else: min_discount = float(min_discount) / 100. data = { fund_id: cell for fund_id, cell in data.items() if self.percentage2float(cell["discount_rt"]) >= min_discount } if max_discount is not None: # 指定最大溢价率 if isinstance(max_discount, str): if max_discount.endswith("%"): # 如果是字符串形式,先转为浮点形式 max_discount = self.percentage2float(max_discount) else: max_discount = float(max_discount) / 100. data = { fund_id: cell for fund_id, cell in data.items() if self.percentage2float(cell["discount_rt"]) <= max_discount } self.__etfindex = data return self.__etfindex def qdii(self, min_volume=0): """以字典形式返回QDII数据 :param min_volume:最小交易量,单位万元 """ # 添加当前的ctime self.__qdii_url = self.__qdii_url.format(ctime=int(time.time())) # 请求数据 rep = requests.get(self.__qdii_url) # 获取返回的json字符串 fundjson = json.loads(rep.text) # 格式化返回的json字符串 data = self.formatjisilujson(fundjson) data = {x: y for x, y in data.items() if y["notes"] != "估值有问题"} # 过滤小于指定交易量的数据 if min_volume: data = { k: data[k] for k in data if float(data[k]["volume"]) > min_volume } self.__qdii = data return self.__qdii # pylint: disable=invalid-name
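A minimal sketch for cb, assuming live data; apart from the endpoint it behaves like qdii.

from easyquotation.jsl import Jsl

bonds = Jsl().cb(min_volume=1000)  # convertible bonds trading above 1000 万元
print(len(bonds), "bonds kept")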
seatgeek/fuzzywuzzy
fuzzywuzzy/process.py
extractWithoutOrder
python
def extractWithoutOrder(query, choices, processor=default_processor, scorer=default_scorer, score_cutoff=0):
    # Catch generators without lengths
    def no_process(x):
        return x

    try:
        if choices is None or len(choices) == 0:
            # A bare `raise StopIteration` inside a generator becomes a
            # RuntimeError under PEP 479 (Python 3.7+); `return` ends the
            # generator cleanly instead.
            return
    except TypeError:
        pass

    # If the processor was removed by setting it to None
    # perform a noop as it still needs to be a function
    if processor is None:
        processor = no_process

    # Run the processor on the input query.
    processed_query = processor(query)

    if len(processed_query) == 0:
        logging.warning(u"Applied processor reduces input query to empty string, "
                        "all comparisons will have score 0. "
                        "[Query: '{0}']".format(query))

    # Don't run full_process twice
    if scorer in [fuzz.WRatio, fuzz.QRatio,
                  fuzz.token_set_ratio, fuzz.token_sort_ratio,
                  fuzz.partial_token_set_ratio, fuzz.partial_token_sort_ratio,
                  fuzz.UWRatio, fuzz.UQRatio] \
            and processor == utils.full_process:
        processor = no_process

    # Only process the query once instead of for every choice
    if scorer in [fuzz.UWRatio, fuzz.UQRatio]:
        pre_processor = partial(utils.full_process, force_ascii=False)
        scorer = partial(scorer, full_process=False)
    elif scorer in [fuzz.WRatio, fuzz.QRatio,
                    fuzz.token_set_ratio, fuzz.token_sort_ratio,
                    fuzz.partial_token_set_ratio, fuzz.partial_token_sort_ratio]:
        pre_processor = partial(utils.full_process, force_ascii=True)
        scorer = partial(scorer, full_process=False)
    else:
        pre_processor = no_process
    processed_query = pre_processor(processed_query)

    try:
        # See if choices is a dictionary-like object.
        for key, choice in choices.items():
            processed = pre_processor(processor(choice))
            score = scorer(processed_query, processed)
            if score >= score_cutoff:
                yield (choice, score, key)
    except AttributeError:
        # It's a list; just iterate over it.
        for choice in choices:
            processed = pre_processor(processor(choice))
            score = scorer(processed_query, processed)
            if score >= score_cutoff:
                yield (choice, score)
Select the best match in a list or dictionary of choices.

Find best matches in a list or dictionary of choices, return a
generator of tuples containing the match and its score. If a dictionary
is used, also returns the key for each match.

Arguments:
    query: An object representing the thing we want to find.
    choices: An iterable or dictionary-like object containing choices
        to be matched against the query. Dictionary arguments of
        {key: value} pairs will attempt to match the query against
        each value.
    processor: Optional function of the form f(a) -> b, where a is the query or
        individual choice and b is the choice to be used in matching.

        This can be used to match against, say, the first element of
        a list:

            lambda x: x[0]

        Defaults to fuzzywuzzy.utils.full_process().
    scorer: Optional function for scoring matches between the query and
        an individual processed choice. This should be a function
        of the form f(query, choice) -> int.

        By default, fuzz.WRatio() is used and expects both query and
        choice to be strings.
    score_cutoff: Optional argument for score threshold. No matches with
        a score less than this number will be returned. Defaults to 0.

Returns:
    Generator of tuples containing the match and its score.

    If a list is used for choices, then the result will be 2-tuples.
    If a dictionary is used, then the result will be 3-tuples containing
    the key for each match.

    For example, searching for 'bird' in the dictionary

        {'bard': 'train', 'dog': 'man'}

    may return

        ('train', 22, 'bard'), ('man', 0, 'dog')
train
https://github.com/seatgeek/fuzzywuzzy/blob/778162c5a73256745eb6ae22f925bc2dbcf7c894/fuzzywuzzy/process.py#L16-L119
[ "def full_process(s, force_ascii=False):\n \"\"\"Process string by\n -- removing all but letters and numbers\n -- trim whitespace\n -- force to lower case\n if force_ascii == True, force convert to ascii\"\"\"\n\n if force_ascii:\n s = asciidammit(s)\n # Keep only Letters and Numbers (see Unicode docs).\n string_out = StringProcessor.replace_non_letters_non_numbers_with_whitespace(s)\n # Force into lowercase.\n string_out = StringProcessor.to_lower_case(string_out)\n # Remove leading and trailing whitespaces.\n string_out = StringProcessor.strip(string_out)\n return string_out\n", "def WRatio(s1, s2, force_ascii=True, full_process=True):\n \"\"\"\n Return a measure of the sequences' similarity between 0 and 100, using different algorithms.\n\n **Steps in the order they occur**\n\n #. Run full_process from utils on both strings\n #. Short circuit if this makes either string empty\n #. Take the ratio of the two processed strings (fuzz.ratio)\n #. Run checks to compare the length of the strings\n * If one of the strings is more than 1.5 times as long as the other\n use partial_ratio comparisons - scale partial results by 0.9\n (this makes sure only full results can return 100)\n * If one of the strings is over 8 times as long as the other\n instead scale by 0.6\n\n #. Run the other ratio functions\n * if using partial ratio functions call partial_ratio,\n partial_token_sort_ratio and partial_token_set_ratio\n scale all of these by the ratio based on length\n * otherwise call token_sort_ratio and token_set_ratio\n * all token based comparisons are scaled by 0.95\n (on top of any partial scalars)\n\n #. Take the highest value from these results\n round it and return it as an integer.\n\n :param s1:\n :param s2:\n :param force_ascii: Allow only ascii characters\n :type force_ascii: bool\n :full_process: Process inputs, used here to avoid double processing in extract functions (Default: True)\n :return:\n \"\"\"\n\n if full_process:\n p1 = utils.full_process(s1, force_ascii=force_ascii)\n p2 = utils.full_process(s2, force_ascii=force_ascii)\n else:\n p1 = s1\n p2 = s2\n\n if not utils.validate_string(p1):\n return 0\n if not utils.validate_string(p2):\n return 0\n\n # should we look at partials?\n try_partial = True\n unbase_scale = .95\n partial_scale = .90\n\n base = ratio(p1, p2)\n len_ratio = float(max(len(p1), len(p2))) / min(len(p1), len(p2))\n\n # if strings are similar length, don't use partials\n if len_ratio < 1.5:\n try_partial = False\n\n # if one string is much much shorter than the other\n if len_ratio > 8:\n partial_scale = .6\n\n if try_partial:\n partial = partial_ratio(p1, p2) * partial_scale\n ptsor = partial_token_sort_ratio(p1, p2, full_process=False) \\\n * unbase_scale * partial_scale\n ptser = partial_token_set_ratio(p1, p2, full_process=False) \\\n * unbase_scale * partial_scale\n\n return utils.intr(max(base, partial, ptsor, ptser))\n else:\n tsor = token_sort_ratio(p1, p2, full_process=False) * unbase_scale\n tser = token_set_ratio(p1, p2, full_process=False) * unbase_scale\n\n return utils.intr(max(base, tsor, tser))\n", "def no_process(x):\n return x\n" ]
#!/usr/bin/env python # encoding: utf-8 from . import fuzz from . import utils import heapq import logging from functools import partial default_scorer = fuzz.WRatio default_processor = utils.full_process def extract(query, choices, processor=default_processor, scorer=default_scorer, limit=5): """Select the best match in a list or dictionary of choices. Find best matches in a list or dictionary of choices, return a list of tuples containing the match and its score. If a dictionary is used, also returns the key for each match. Arguments: query: An object representing the thing we want to find. choices: An iterable or dictionary-like object containing choices to be matched against the query. Dictionary arguments of {key: value} pairs will attempt to match the query against each value. processor: Optional function of the form f(a) -> b, where a is the query or individual choice and b is the choice to be used in matching. This can be used to match against, say, the first element of a list: lambda x: x[0] Defaults to fuzzywuzzy.utils.full_process(). scorer: Optional function for scoring matches between the query and an individual processed choice. This should be a function of the form f(query, choice) -> int. By default, fuzz.WRatio() is used and expects both query and choice to be strings. limit: Optional maximum for the number of elements returned. Defaults to 5. Returns: List of tuples containing the match and its score. If a list is used for choices, then the result will be 2-tuples. If a dictionary is used, then the result will be 3-tuples containing the key for each match. For example, searching for 'bird' in the dictionary {'bard': 'train', 'dog': 'man'} may return [('train', 22, 'bard'), ('man', 0, 'dog')] """ sl = extractWithoutOrder(query, choices, processor, scorer) return heapq.nlargest(limit, sl, key=lambda i: i[1]) if limit is not None else \ sorted(sl, key=lambda i: i[1], reverse=True) def extractBests(query, choices, processor=default_processor, scorer=default_scorer, score_cutoff=0, limit=5): """Get a list of the best matches to a collection of choices. Convenience function for getting the choices with best scores. Args: query: A string to match against choices: A list or dictionary of choices, suitable for use with extract(). processor: Optional function for transforming choices before matching. See extract(). scorer: Scoring function for extract(). score_cutoff: Optional argument for score threshold. No matches with a score less than this number will be returned. Defaults to 0. limit: Optional maximum for the number of elements returned. Defaults to 5. Returns: A a list of (match, score) tuples. """ best_list = extractWithoutOrder(query, choices, processor, scorer, score_cutoff) return heapq.nlargest(limit, best_list, key=lambda i: i[1]) if limit is not None else \ sorted(best_list, key=lambda i: i[1], reverse=True) def extractOne(query, choices, processor=default_processor, scorer=default_scorer, score_cutoff=0): """Find the single best match above a score in a list of choices. This is a convenience method which returns the single best choice. See extract() for the full arguments list. Args: query: A string to match against choices: A list or dictionary of choices, suitable for use with extract(). processor: Optional function for transforming choices before matching. See extract(). scorer: Scoring function for extract(). score_cutoff: Optional argument for score threshold. 
If the best match is found, but it is not greater than this number, then return None anyway ("not a good enough match"). Defaults to 0. Returns: A tuple containing a single match and its score, if a match was found that was above score_cutoff. Otherwise, returns None. """ best_list = extractWithoutOrder(query, choices, processor, scorer, score_cutoff) try: return max(best_list, key=lambda i: i[1]) except ValueError: return None def dedupe(contains_dupes, threshold=70, scorer=fuzz.token_set_ratio): """This convenience function takes a list of strings containing duplicates and uses fuzzy matching to identify and remove duplicates. Specifically, it uses the process.extract to identify duplicates that score greater than a user defined threshold. Then, it looks for the longest item in the duplicate list since we assume this item contains the most entity information and returns that. It breaks string length ties on an alphabetical sort. Note: as the threshold DECREASES the number of duplicates that are found INCREASES. This means that the returned deduplicated list will likely be shorter. Raise the threshold for fuzzy_dedupe to be less sensitive. Args: contains_dupes: A list of strings that we would like to dedupe. threshold: the numerical value (0,100) point at which we expect to find duplicates. Defaults to 70 out of 100 scorer: Optional function for scoring matches between the query and an individual processed choice. This should be a function of the form f(query, choice) -> int. By default, fuzz.token_set_ratio() is used and expects both query and choice to be strings. Returns: A deduplicated list. For example: In: contains_dupes = ['Frodo Baggin', 'Frodo Baggins', 'F. Baggins', 'Samwise G.', 'Gandalf', 'Bilbo Baggins'] In: fuzzy_dedupe(contains_dupes) Out: ['Frodo Baggins', 'Samwise G.', 'Bilbo Baggins', 'Gandalf'] """ extractor = [] # iterate over items in *contains_dupes* for item in contains_dupes: # return all duplicate matches found matches = extract(item, contains_dupes, limit=None, scorer=scorer) # filter matches based on the threshold filtered = [x for x in matches if x[1] > threshold] # if there is only 1 item in *filtered*, no duplicates were found so append to *extracted* if len(filtered) == 1: extractor.append(filtered[0][0]) else: # alpha sort filtered = sorted(filtered, key=lambda x: x[0]) # length sort filter_sort = sorted(filtered, key=lambda x: len(x[0]), reverse=True) # take first item as our 'canonical example' extractor.append(filter_sort[0][0]) # uniquify *extractor* list keys = {} for e in extractor: keys[e] = 1 extractor = keys.keys() # check that extractor differs from contain_dupes (e.g. duplicates were found) # if not, then return the original list if len(extractor) == len(contains_dupes): return contains_dupes else: return extractor
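Since extractWithoutOrder is a generator, matches stream out unsorted and unranked; a sketch of both documented input shapes:

from fuzzywuzzy import process

# Dict input yields 3-tuples (value, score, key), as in the docstring example
for value, score, key in process.extractWithoutOrder("bird", {"bard": "train", "dog": "man"}):
    print(value, score, key)

# List input yields 2-tuples (choice, score); score_cutoff drops weak matches early
hits = list(process.extractWithoutOrder("new york", ["New York", "Newark", "Boston"], score_cutoff=60))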
seatgeek/fuzzywuzzy
fuzzywuzzy/process.py
extract
python
def extract(query, choices, processor=default_processor, scorer=default_scorer, limit=5):
    sl = extractWithoutOrder(query, choices, processor, scorer)

    return heapq.nlargest(limit, sl, key=lambda i: i[1]) if limit is not None else \
        sorted(sl, key=lambda i: i[1], reverse=True)
Select the best match in a list or dictionary of choices.

Find best matches in a list or dictionary of choices, return a
list of tuples containing the match and its score. If a dictionary
is used, also returns the key for each match.

Arguments:
    query: An object representing the thing we want to find.
    choices: An iterable or dictionary-like object containing choices
        to be matched against the query. Dictionary arguments of
        {key: value} pairs will attempt to match the query against
        each value.
    processor: Optional function of the form f(a) -> b, where a is the query or
        individual choice and b is the choice to be used in matching.

        This can be used to match against, say, the first element of
        a list:

            lambda x: x[0]

        Defaults to fuzzywuzzy.utils.full_process().
    scorer: Optional function for scoring matches between the query and
        an individual processed choice. This should be a function
        of the form f(query, choice) -> int.

        By default, fuzz.WRatio() is used and expects both query and
        choice to be strings.
    limit: Optional maximum for the number of elements returned. Defaults
        to 5.

Returns:
    List of tuples containing the match and its score.

    If a list is used for choices, then the result will be 2-tuples.
    If a dictionary is used, then the result will be 3-tuples containing
    the key for each match.

    For example, searching for 'bird' in the dictionary

        {'bard': 'train', 'dog': 'man'}

    may return

        [('train', 22, 'bard'), ('man', 0, 'dog')]
train
https://github.com/seatgeek/fuzzywuzzy/blob/778162c5a73256745eb6ae22f925bc2dbcf7c894/fuzzywuzzy/process.py#L122-L169
[ "def extractWithoutOrder(query, choices, processor=default_processor, scorer=default_scorer, score_cutoff=0):\n \"\"\"Select the best match in a list or dictionary of choices.\n\n Find best matches in a list or dictionary of choices, return a\n generator of tuples containing the match and its score. If a dictionary\n is used, also returns the key for each match.\n\n Arguments:\n query: An object representing the thing we want to find.\n choices: An iterable or dictionary-like object containing choices\n to be matched against the query. Dictionary arguments of\n {key: value} pairs will attempt to match the query against\n each value.\n processor: Optional function of the form f(a) -> b, where a is the query or\n individual choice and b is the choice to be used in matching.\n\n This can be used to match against, say, the first element of\n a list:\n\n lambda x: x[0]\n\n Defaults to fuzzywuzzy.utils.full_process().\n scorer: Optional function for scoring matches between the query and\n an individual processed choice. This should be a function\n of the form f(query, choice) -> int.\n\n By default, fuzz.WRatio() is used and expects both query and\n choice to be strings.\n score_cutoff: Optional argument for score threshold. No matches with\n a score less than this number will be returned. Defaults to 0.\n\n Returns:\n Generator of tuples containing the match and its score.\n\n If a list is used for choices, then the result will be 2-tuples.\n If a dictionary is used, then the result will be 3-tuples containing\n the key for each match.\n\n For example, searching for 'bird' in the dictionary\n\n {'bard': 'train', 'dog': 'man'}\n\n may return\n\n ('train', 22, 'bard'), ('man', 0, 'dog')\n \"\"\"\n # Catch generators without lengths\n def no_process(x):\n return x\n\n try:\n if choices is None or len(choices) == 0:\n raise StopIteration\n except TypeError:\n pass\n\n # If the processor was removed by setting it to None\n # perfom a noop as it still needs to be a function\n if processor is None:\n processor = no_process\n\n # Run the processor on the input query.\n processed_query = processor(query)\n\n if len(processed_query) == 0:\n logging.warning(u\"Applied processor reduces input query to empty string, \"\n \"all comparisons will have score 0. 
\"\n \"[Query: \\'{0}\\']\".format(query))\n\n # Don't run full_process twice\n if scorer in [fuzz.WRatio, fuzz.QRatio,\n fuzz.token_set_ratio, fuzz.token_sort_ratio,\n fuzz.partial_token_set_ratio, fuzz.partial_token_sort_ratio,\n fuzz.UWRatio, fuzz.UQRatio] \\\n and processor == utils.full_process:\n processor = no_process\n\n # Only process the query once instead of for every choice\n if scorer in [fuzz.UWRatio, fuzz.UQRatio]:\n pre_processor = partial(utils.full_process, force_ascii=False)\n scorer = partial(scorer, full_process=False)\n elif scorer in [fuzz.WRatio, fuzz.QRatio,\n fuzz.token_set_ratio, fuzz.token_sort_ratio,\n fuzz.partial_token_set_ratio, fuzz.partial_token_sort_ratio]:\n pre_processor = partial(utils.full_process, force_ascii=True)\n scorer = partial(scorer, full_process=False)\n else:\n pre_processor = no_process\n processed_query = pre_processor(processed_query)\n\n try:\n # See if choices is a dictionary-like object.\n for key, choice in choices.items():\n processed = pre_processor(processor(choice))\n score = scorer(processed_query, processed)\n if score >= score_cutoff:\n yield (choice, score, key)\n except AttributeError:\n # It's a list; just iterate over it.\n for choice in choices:\n processed = pre_processor(processor(choice))\n score = scorer(processed_query, processed)\n if score >= score_cutoff:\n yield (choice, score)\n" ]
#!/usr/bin/env python
# encoding: utf-8
from . import fuzz
from . import utils
import heapq
import logging
from functools import partial


default_scorer = fuzz.WRatio

default_processor = utils.full_process


def extractWithoutOrder(query, choices, processor=default_processor, scorer=default_scorer, score_cutoff=0):
    """Select the best match in a list or dictionary of choices.

    Find best matches in a list or dictionary of choices, return a
    generator of tuples containing the match and its score. If a dictionary
    is used, also returns the key for each match.

    Arguments:
        query: An object representing the thing we want to find.
        choices: An iterable or dictionary-like object containing choices
            to be matched against the query. Dictionary arguments of
            {key: value} pairs will attempt to match the query against
            each value.
        processor: Optional function of the form f(a) -> b, where a is the query or
            individual choice and b is the choice to be used in matching.

            This can be used to match against, say, the first element of
            a list:

                lambda x: x[0]

            Defaults to fuzzywuzzy.utils.full_process().
        scorer: Optional function for scoring matches between the query and
            an individual processed choice. This should be a function
            of the form f(query, choice) -> int.

            By default, fuzz.WRatio() is used and expects both query and
            choice to be strings.
        score_cutoff: Optional argument for score threshold. No matches with
            a score less than this number will be returned. Defaults to 0.

    Returns:
        Generator of tuples containing the match and its score.

        If a list is used for choices, then the result will be 2-tuples.
        If a dictionary is used, then the result will be 3-tuples containing
        the key for each match.

        For example, searching for 'bird' in the dictionary

        {'bard': 'train', 'dog': 'man'}

        may return

        ('train', 22, 'bard'), ('man', 0, 'dog')
    """
    # Catch generators without lengths
    def no_process(x):
        return x

    try:
        if choices is None or len(choices) == 0:
            raise StopIteration
    except TypeError:
        pass

    # If the processor was removed by setting it to None
    # perfom a noop as it still needs to be a function
    if processor is None:
        processor = no_process

    # Run the processor on the input query.
    processed_query = processor(query)

    if len(processed_query) == 0:
        logging.warning(u"Applied processor reduces input query to empty string, "
                        "all comparisons will have score 0. "
                        "[Query: \'{0}\']".format(query))

    # Don't run full_process twice
    if scorer in [fuzz.WRatio, fuzz.QRatio,
                  fuzz.token_set_ratio, fuzz.token_sort_ratio,
                  fuzz.partial_token_set_ratio, fuzz.partial_token_sort_ratio,
                  fuzz.UWRatio, fuzz.UQRatio] \
            and processor == utils.full_process:
        processor = no_process

    # Only process the query once instead of for every choice
    if scorer in [fuzz.UWRatio, fuzz.UQRatio]:
        pre_processor = partial(utils.full_process, force_ascii=False)
        scorer = partial(scorer, full_process=False)
    elif scorer in [fuzz.WRatio, fuzz.QRatio,
                    fuzz.token_set_ratio, fuzz.token_sort_ratio,
                    fuzz.partial_token_set_ratio, fuzz.partial_token_sort_ratio]:
        pre_processor = partial(utils.full_process, force_ascii=True)
        scorer = partial(scorer, full_process=False)
    else:
        pre_processor = no_process
    processed_query = pre_processor(processed_query)

    try:
        # See if choices is a dictionary-like object.
        for key, choice in choices.items():
            processed = pre_processor(processor(choice))
            score = scorer(processed_query, processed)
            if score >= score_cutoff:
                yield (choice, score, key)
    except AttributeError:
        # It's a list; just iterate over it.
        for choice in choices:
            processed = pre_processor(processor(choice))
            score = scorer(processed_query, processed)
            if score >= score_cutoff:
                yield (choice, score)


def extractBests(query, choices, processor=default_processor, scorer=default_scorer, score_cutoff=0, limit=5):
    """Get a list of the best matches to a collection of choices.

    Convenience function for getting the choices with best scores.

    Args:
        query: A string to match against
        choices: A list or dictionary of choices, suitable for use with
            extract().
        processor: Optional function for transforming choices before matching.
            See extract().
        scorer: Scoring function for extract().
        score_cutoff: Optional argument for score threshold. No matches with
            a score less than this number will be returned. Defaults to 0.
        limit: Optional maximum for the number of elements returned. Defaults
            to 5.

    Returns: A a list of (match, score) tuples.
    """
    best_list = extractWithoutOrder(query, choices, processor, scorer, score_cutoff)
    return heapq.nlargest(limit, best_list, key=lambda i: i[1]) if limit is not None else \
        sorted(best_list, key=lambda i: i[1], reverse=True)


def extractOne(query, choices, processor=default_processor, scorer=default_scorer, score_cutoff=0):
    """Find the single best match above a score in a list of choices.

    This is a convenience method which returns the single best choice.
    See extract() for the full arguments list.

    Args:
        query: A string to match against
        choices: A list or dictionary of choices, suitable for use with
            extract().
        processor: Optional function for transforming choices before matching.
            See extract().
        scorer: Scoring function for extract().
        score_cutoff: Optional argument for score threshold. If the best
            match is found, but it is not greater than this number, then
            return None anyway ("not a good enough match"). Defaults to 0.

    Returns:
        A tuple containing a single match and its score, if a match
        was found that was above score_cutoff. Otherwise, returns None.
    """
    best_list = extractWithoutOrder(query, choices, processor, scorer, score_cutoff)
    try:
        return max(best_list, key=lambda i: i[1])
    except ValueError:
        return None


def dedupe(contains_dupes, threshold=70, scorer=fuzz.token_set_ratio):
    """This convenience function takes a list of strings containing duplicates and uses fuzzy matching to identify
    and remove duplicates. Specifically, it uses the process.extract to identify duplicates that
    score greater than a user defined threshold. Then, it looks for the longest item in the duplicate list
    since we assume this item contains the most entity information and returns that. It breaks string
    length ties on an alphabetical sort.

    Note: as the threshold DECREASES the number of duplicates that are found INCREASES. This means that the
        returned deduplicated list will likely be shorter. Raise the threshold for fuzzy_dedupe to be less
        sensitive.

    Args:
        contains_dupes: A list of strings that we would like to dedupe.
        threshold: the numerical value (0,100) point at which we expect to find duplicates.
            Defaults to 70 out of 100
        scorer: Optional function for scoring matches between the query and
            an individual processed choice. This should be a function
            of the form f(query, choice) -> int.
            By default, fuzz.token_set_ratio() is used and expects both query and
            choice to be strings.

    Returns:
        A deduplicated list. For example:

            In: contains_dupes = ['Frodo Baggin', 'Frodo Baggins', 'F. Baggins', 'Samwise G.', 'Gandalf', 'Bilbo Baggins']
            In: fuzzy_dedupe(contains_dupes)
            Out: ['Frodo Baggins', 'Samwise G.', 'Bilbo Baggins', 'Gandalf']
    """
    extractor = []

    # iterate over items in *contains_dupes*
    for item in contains_dupes:
        # return all duplicate matches found
        matches = extract(item, contains_dupes, limit=None, scorer=scorer)

        # filter matches based on the threshold
        filtered = [x for x in matches if x[1] > threshold]

        # if there is only 1 item in *filtered*, no duplicates were found so append to *extracted*
        if len(filtered) == 1:
            extractor.append(filtered[0][0])

        else:
            # alpha sort
            filtered = sorted(filtered, key=lambda x: x[0])
            # length sort
            filter_sort = sorted(filtered, key=lambda x: len(x[0]), reverse=True)
            # take first item as our 'canonical example'
            extractor.append(filter_sort[0][0])

    # uniquify *extractor* list
    keys = {}
    for e in extractor:
        keys[e] = 1
    extractor = keys.keys()

    # check that extractor differs from contain_dupes (e.g. duplicates were found)
    # if not, then return the original list
    if len(extractor) == len(contains_dupes):
        return contains_dupes

    else:
        return extractor
seatgeek/fuzzywuzzy
fuzzywuzzy/process.py
extractBests
python
def extractBests(query, choices, processor=default_processor, scorer=default_scorer, score_cutoff=0, limit=5):
    best_list = extractWithoutOrder(query, choices, processor, scorer, score_cutoff)
    return heapq.nlargest(limit, best_list, key=lambda i: i[1]) if limit is not None else \
        sorted(best_list, key=lambda i: i[1], reverse=True)
Get a list of the best matches to a collection of choices.

    Convenience function for getting the choices with best scores.

    Args:
        query: A string to match against
        choices: A list or dictionary of choices, suitable for use with
            extract().
        processor: Optional function for transforming choices before matching.
            See extract().
        scorer: Scoring function for extract().
        score_cutoff: Optional argument for score threshold. No matches with
            a score less than this number will be returned. Defaults to 0.
        limit: Optional maximum for the number of elements returned. Defaults
            to 5.

    Returns: A list of (match, score) tuples.
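A short sketch of extractBests() with a score cutoff; the sample data is an illustrative assumption:

from fuzzywuzzy import process

choices = ["new york mets", "new york yankees", "atlanta braves"]

# Keep at most two matches, and only those scoring above 60.
print(process.extractBests("new york", choices, score_cutoff=60, limit=2))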
train
https://github.com/seatgeek/fuzzywuzzy/blob/778162c5a73256745eb6ae22f925bc2dbcf7c894/fuzzywuzzy/process.py#L172-L194
[ "def extractWithoutOrder(query, choices, processor=default_processor, scorer=default_scorer, score_cutoff=0):\n \"\"\"Select the best match in a list or dictionary of choices.\n\n Find best matches in a list or dictionary of choices, return a\n generator of tuples containing the match and its score. If a dictionary\n is used, also returns the key for each match.\n\n Arguments:\n query: An object representing the thing we want to find.\n choices: An iterable or dictionary-like object containing choices\n to be matched against the query. Dictionary arguments of\n {key: value} pairs will attempt to match the query against\n each value.\n processor: Optional function of the form f(a) -> b, where a is the query or\n individual choice and b is the choice to be used in matching.\n\n This can be used to match against, say, the first element of\n a list:\n\n lambda x: x[0]\n\n Defaults to fuzzywuzzy.utils.full_process().\n scorer: Optional function for scoring matches between the query and\n an individual processed choice. This should be a function\n of the form f(query, choice) -> int.\n\n By default, fuzz.WRatio() is used and expects both query and\n choice to be strings.\n score_cutoff: Optional argument for score threshold. No matches with\n a score less than this number will be returned. Defaults to 0.\n\n Returns:\n Generator of tuples containing the match and its score.\n\n If a list is used for choices, then the result will be 2-tuples.\n If a dictionary is used, then the result will be 3-tuples containing\n the key for each match.\n\n For example, searching for 'bird' in the dictionary\n\n {'bard': 'train', 'dog': 'man'}\n\n may return\n\n ('train', 22, 'bard'), ('man', 0, 'dog')\n \"\"\"\n # Catch generators without lengths\n def no_process(x):\n return x\n\n try:\n if choices is None or len(choices) == 0:\n raise StopIteration\n except TypeError:\n pass\n\n # If the processor was removed by setting it to None\n # perfom a noop as it still needs to be a function\n if processor is None:\n processor = no_process\n\n # Run the processor on the input query.\n processed_query = processor(query)\n\n if len(processed_query) == 0:\n logging.warning(u\"Applied processor reduces input query to empty string, \"\n \"all comparisons will have score 0. 
\"\n \"[Query: \\'{0}\\']\".format(query))\n\n # Don't run full_process twice\n if scorer in [fuzz.WRatio, fuzz.QRatio,\n fuzz.token_set_ratio, fuzz.token_sort_ratio,\n fuzz.partial_token_set_ratio, fuzz.partial_token_sort_ratio,\n fuzz.UWRatio, fuzz.UQRatio] \\\n and processor == utils.full_process:\n processor = no_process\n\n # Only process the query once instead of for every choice\n if scorer in [fuzz.UWRatio, fuzz.UQRatio]:\n pre_processor = partial(utils.full_process, force_ascii=False)\n scorer = partial(scorer, full_process=False)\n elif scorer in [fuzz.WRatio, fuzz.QRatio,\n fuzz.token_set_ratio, fuzz.token_sort_ratio,\n fuzz.partial_token_set_ratio, fuzz.partial_token_sort_ratio]:\n pre_processor = partial(utils.full_process, force_ascii=True)\n scorer = partial(scorer, full_process=False)\n else:\n pre_processor = no_process\n processed_query = pre_processor(processed_query)\n\n try:\n # See if choices is a dictionary-like object.\n for key, choice in choices.items():\n processed = pre_processor(processor(choice))\n score = scorer(processed_query, processed)\n if score >= score_cutoff:\n yield (choice, score, key)\n except AttributeError:\n # It's a list; just iterate over it.\n for choice in choices:\n processed = pre_processor(processor(choice))\n score = scorer(processed_query, processed)\n if score >= score_cutoff:\n yield (choice, score)\n" ]
#!/usr/bin/env python
# encoding: utf-8
from . import fuzz
from . import utils
import heapq
import logging
from functools import partial


default_scorer = fuzz.WRatio

default_processor = utils.full_process


def extractWithoutOrder(query, choices, processor=default_processor, scorer=default_scorer, score_cutoff=0):
    """Select the best match in a list or dictionary of choices.

    Find best matches in a list or dictionary of choices, return a
    generator of tuples containing the match and its score. If a dictionary
    is used, also returns the key for each match.

    Arguments:
        query: An object representing the thing we want to find.
        choices: An iterable or dictionary-like object containing choices
            to be matched against the query. Dictionary arguments of
            {key: value} pairs will attempt to match the query against
            each value.
        processor: Optional function of the form f(a) -> b, where a is the query or
            individual choice and b is the choice to be used in matching.

            This can be used to match against, say, the first element of
            a list:

                lambda x: x[0]

            Defaults to fuzzywuzzy.utils.full_process().
        scorer: Optional function for scoring matches between the query and
            an individual processed choice. This should be a function
            of the form f(query, choice) -> int.

            By default, fuzz.WRatio() is used and expects both query and
            choice to be strings.
        score_cutoff: Optional argument for score threshold. No matches with
            a score less than this number will be returned. Defaults to 0.

    Returns:
        Generator of tuples containing the match and its score.

        If a list is used for choices, then the result will be 2-tuples.
        If a dictionary is used, then the result will be 3-tuples containing
        the key for each match.

        For example, searching for 'bird' in the dictionary

        {'bard': 'train', 'dog': 'man'}

        may return

        ('train', 22, 'bard'), ('man', 0, 'dog')
    """
    # Catch generators without lengths
    def no_process(x):
        return x

    try:
        if choices is None or len(choices) == 0:
            raise StopIteration
    except TypeError:
        pass

    # If the processor was removed by setting it to None
    # perfom a noop as it still needs to be a function
    if processor is None:
        processor = no_process

    # Run the processor on the input query.
    processed_query = processor(query)

    if len(processed_query) == 0:
        logging.warning(u"Applied processor reduces input query to empty string, "
                        "all comparisons will have score 0. "
                        "[Query: \'{0}\']".format(query))

    # Don't run full_process twice
    if scorer in [fuzz.WRatio, fuzz.QRatio,
                  fuzz.token_set_ratio, fuzz.token_sort_ratio,
                  fuzz.partial_token_set_ratio, fuzz.partial_token_sort_ratio,
                  fuzz.UWRatio, fuzz.UQRatio] \
            and processor == utils.full_process:
        processor = no_process

    # Only process the query once instead of for every choice
    if scorer in [fuzz.UWRatio, fuzz.UQRatio]:
        pre_processor = partial(utils.full_process, force_ascii=False)
        scorer = partial(scorer, full_process=False)
    elif scorer in [fuzz.WRatio, fuzz.QRatio,
                    fuzz.token_set_ratio, fuzz.token_sort_ratio,
                    fuzz.partial_token_set_ratio, fuzz.partial_token_sort_ratio]:
        pre_processor = partial(utils.full_process, force_ascii=True)
        scorer = partial(scorer, full_process=False)
    else:
        pre_processor = no_process
    processed_query = pre_processor(processed_query)

    try:
        # See if choices is a dictionary-like object.
        for key, choice in choices.items():
            processed = pre_processor(processor(choice))
            score = scorer(processed_query, processed)
            if score >= score_cutoff:
                yield (choice, score, key)
    except AttributeError:
        # It's a list; just iterate over it.
        for choice in choices:
            processed = pre_processor(processor(choice))
            score = scorer(processed_query, processed)
            if score >= score_cutoff:
                yield (choice, score)


def extract(query, choices, processor=default_processor, scorer=default_scorer, limit=5):
    """Select the best match in a list or dictionary of choices.

    Find best matches in a list or dictionary of choices, return a
    list of tuples containing the match and its score. If a dictionary
    is used, also returns the key for each match.

    Arguments:
        query: An object representing the thing we want to find.
        choices: An iterable or dictionary-like object containing choices
            to be matched against the query. Dictionary arguments of
            {key: value} pairs will attempt to match the query against
            each value.
        processor: Optional function of the form f(a) -> b, where a is the query or
            individual choice and b is the choice to be used in matching.

            This can be used to match against, say, the first element of
            a list:

                lambda x: x[0]

            Defaults to fuzzywuzzy.utils.full_process().
        scorer: Optional function for scoring matches between the query and
            an individual processed choice. This should be a function
            of the form f(query, choice) -> int.
            By default, fuzz.WRatio() is used and expects both query and
            choice to be strings.
        limit: Optional maximum for the number of elements returned. Defaults
            to 5.

    Returns:
        List of tuples containing the match and its score.

        If a list is used for choices, then the result will be 2-tuples.
        If a dictionary is used, then the result will be 3-tuples containing
        the key for each match.

        For example, searching for 'bird' in the dictionary

        {'bard': 'train', 'dog': 'man'}

        may return

        [('train', 22, 'bard'), ('man', 0, 'dog')]
    """
    sl = extractWithoutOrder(query, choices, processor, scorer)
    return heapq.nlargest(limit, sl, key=lambda i: i[1]) if limit is not None else \
        sorted(sl, key=lambda i: i[1], reverse=True)


def extractOne(query, choices, processor=default_processor, scorer=default_scorer, score_cutoff=0):
    """Find the single best match above a score in a list of choices.

    This is a convenience method which returns the single best choice.
    See extract() for the full arguments list.

    Args:
        query: A string to match against
        choices: A list or dictionary of choices, suitable for use with
            extract().
        processor: Optional function for transforming choices before matching.
            See extract().
        scorer: Scoring function for extract().
        score_cutoff: Optional argument for score threshold. If the best
            match is found, but it is not greater than this number, then
            return None anyway ("not a good enough match"). Defaults to 0.

    Returns:
        A tuple containing a single match and its score, if a match
        was found that was above score_cutoff. Otherwise, returns None.
    """
    best_list = extractWithoutOrder(query, choices, processor, scorer, score_cutoff)
    try:
        return max(best_list, key=lambda i: i[1])
    except ValueError:
        return None


def dedupe(contains_dupes, threshold=70, scorer=fuzz.token_set_ratio):
    """This convenience function takes a list of strings containing duplicates and uses fuzzy matching to identify
    and remove duplicates. Specifically, it uses the process.extract to identify duplicates that
    score greater than a user defined threshold. Then, it looks for the longest item in the duplicate list
    since we assume this item contains the most entity information and returns that. It breaks string
    length ties on an alphabetical sort.

    Note: as the threshold DECREASES the number of duplicates that are found INCREASES. This means that the
        returned deduplicated list will likely be shorter. Raise the threshold for fuzzy_dedupe to be less
        sensitive.

    Args:
        contains_dupes: A list of strings that we would like to dedupe.
        threshold: the numerical value (0,100) point at which we expect to find duplicates.
            Defaults to 70 out of 100
        scorer: Optional function for scoring matches between the query and
            an individual processed choice. This should be a function
            of the form f(query, choice) -> int.
            By default, fuzz.token_set_ratio() is used and expects both query and
            choice to be strings.

    Returns:
        A deduplicated list. For example:

            In: contains_dupes = ['Frodo Baggin', 'Frodo Baggins', 'F. Baggins', 'Samwise G.', 'Gandalf', 'Bilbo Baggins']
            In: fuzzy_dedupe(contains_dupes)
            Out: ['Frodo Baggins', 'Samwise G.', 'Bilbo Baggins', 'Gandalf']
    """
    extractor = []

    # iterate over items in *contains_dupes*
    for item in contains_dupes:
        # return all duplicate matches found
        matches = extract(item, contains_dupes, limit=None, scorer=scorer)

        # filter matches based on the threshold
        filtered = [x for x in matches if x[1] > threshold]

        # if there is only 1 item in *filtered*, no duplicates were found so append to *extracted*
        if len(filtered) == 1:
            extractor.append(filtered[0][0])

        else:
            # alpha sort
            filtered = sorted(filtered, key=lambda x: x[0])
            # length sort
            filter_sort = sorted(filtered, key=lambda x: len(x[0]), reverse=True)
            # take first item as our 'canonical example'
            extractor.append(filter_sort[0][0])

    # uniquify *extractor* list
    keys = {}
    for e in extractor:
        keys[e] = 1
    extractor = keys.keys()

    # check that extractor differs from contain_dupes (e.g. duplicates were found)
    # if not, then return the original list
    if len(extractor) == len(contains_dupes):
        return contains_dupes

    else:
        return extractor
seatgeek/fuzzywuzzy
fuzzywuzzy/process.py
extractOne
python
def extractOne(query, choices, processor=default_processor, scorer=default_scorer, score_cutoff=0):
    best_list = extractWithoutOrder(query, choices, processor, scorer, score_cutoff)
    try:
        return max(best_list, key=lambda i: i[1])
    except ValueError:
        return None
Find the single best match above a score in a list of choices.

    This is a convenience method which returns the single best choice.
    See extract() for the full arguments list.

    Args:
        query: A string to match against
        choices: A list or dictionary of choices, suitable for use with
            extract().
        processor: Optional function for transforming choices before matching.
            See extract().
        scorer: Scoring function for extract().
        score_cutoff: Optional argument for score threshold. If the best
            match is found, but it is not greater than this number, then
            return None anyway ("not a good enough match"). Defaults to 0.

    Returns:
        A tuple containing a single match and its score, if a match
        was found that was above score_cutoff. Otherwise, returns None.
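A short sketch of extractOne(), including the None case; the song strings are illustrative assumptions:

from fuzzywuzzy import process

songs = ["System of a Down - Chop Suey!", "They Might Be Giants - Istanbul"]

print(process.extractOne("chop suey", songs))              # best (match, score) tuple
print(process.extractOne("zzzz", songs, score_cutoff=99))  # None: nothing clears the cutoff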
train
https://github.com/seatgeek/fuzzywuzzy/blob/778162c5a73256745eb6ae22f925bc2dbcf7c894/fuzzywuzzy/process.py#L197-L222
[ "def extractWithoutOrder(query, choices, processor=default_processor, scorer=default_scorer, score_cutoff=0):\n \"\"\"Select the best match in a list or dictionary of choices.\n\n Find best matches in a list or dictionary of choices, return a\n generator of tuples containing the match and its score. If a dictionary\n is used, also returns the key for each match.\n\n Arguments:\n query: An object representing the thing we want to find.\n choices: An iterable or dictionary-like object containing choices\n to be matched against the query. Dictionary arguments of\n {key: value} pairs will attempt to match the query against\n each value.\n processor: Optional function of the form f(a) -> b, where a is the query or\n individual choice and b is the choice to be used in matching.\n\n This can be used to match against, say, the first element of\n a list:\n\n lambda x: x[0]\n\n Defaults to fuzzywuzzy.utils.full_process().\n scorer: Optional function for scoring matches between the query and\n an individual processed choice. This should be a function\n of the form f(query, choice) -> int.\n\n By default, fuzz.WRatio() is used and expects both query and\n choice to be strings.\n score_cutoff: Optional argument for score threshold. No matches with\n a score less than this number will be returned. Defaults to 0.\n\n Returns:\n Generator of tuples containing the match and its score.\n\n If a list is used for choices, then the result will be 2-tuples.\n If a dictionary is used, then the result will be 3-tuples containing\n the key for each match.\n\n For example, searching for 'bird' in the dictionary\n\n {'bard': 'train', 'dog': 'man'}\n\n may return\n\n ('train', 22, 'bard'), ('man', 0, 'dog')\n \"\"\"\n # Catch generators without lengths\n def no_process(x):\n return x\n\n try:\n if choices is None or len(choices) == 0:\n raise StopIteration\n except TypeError:\n pass\n\n # If the processor was removed by setting it to None\n # perfom a noop as it still needs to be a function\n if processor is None:\n processor = no_process\n\n # Run the processor on the input query.\n processed_query = processor(query)\n\n if len(processed_query) == 0:\n logging.warning(u\"Applied processor reduces input query to empty string, \"\n \"all comparisons will have score 0. 
\"\n \"[Query: \\'{0}\\']\".format(query))\n\n # Don't run full_process twice\n if scorer in [fuzz.WRatio, fuzz.QRatio,\n fuzz.token_set_ratio, fuzz.token_sort_ratio,\n fuzz.partial_token_set_ratio, fuzz.partial_token_sort_ratio,\n fuzz.UWRatio, fuzz.UQRatio] \\\n and processor == utils.full_process:\n processor = no_process\n\n # Only process the query once instead of for every choice\n if scorer in [fuzz.UWRatio, fuzz.UQRatio]:\n pre_processor = partial(utils.full_process, force_ascii=False)\n scorer = partial(scorer, full_process=False)\n elif scorer in [fuzz.WRatio, fuzz.QRatio,\n fuzz.token_set_ratio, fuzz.token_sort_ratio,\n fuzz.partial_token_set_ratio, fuzz.partial_token_sort_ratio]:\n pre_processor = partial(utils.full_process, force_ascii=True)\n scorer = partial(scorer, full_process=False)\n else:\n pre_processor = no_process\n processed_query = pre_processor(processed_query)\n\n try:\n # See if choices is a dictionary-like object.\n for key, choice in choices.items():\n processed = pre_processor(processor(choice))\n score = scorer(processed_query, processed)\n if score >= score_cutoff:\n yield (choice, score, key)\n except AttributeError:\n # It's a list; just iterate over it.\n for choice in choices:\n processed = pre_processor(processor(choice))\n score = scorer(processed_query, processed)\n if score >= score_cutoff:\n yield (choice, score)\n" ]
#!/usr/bin/env python
# encoding: utf-8
from . import fuzz
from . import utils
import heapq
import logging
from functools import partial


default_scorer = fuzz.WRatio

default_processor = utils.full_process


def extractWithoutOrder(query, choices, processor=default_processor, scorer=default_scorer, score_cutoff=0):
    """Select the best match in a list or dictionary of choices.

    Find best matches in a list or dictionary of choices, return a
    generator of tuples containing the match and its score. If a dictionary
    is used, also returns the key for each match.

    Arguments:
        query: An object representing the thing we want to find.
        choices: An iterable or dictionary-like object containing choices
            to be matched against the query. Dictionary arguments of
            {key: value} pairs will attempt to match the query against
            each value.
        processor: Optional function of the form f(a) -> b, where a is the query or
            individual choice and b is the choice to be used in matching.

            This can be used to match against, say, the first element of
            a list:

                lambda x: x[0]

            Defaults to fuzzywuzzy.utils.full_process().
        scorer: Optional function for scoring matches between the query and
            an individual processed choice. This should be a function
            of the form f(query, choice) -> int.

            By default, fuzz.WRatio() is used and expects both query and
            choice to be strings.
        score_cutoff: Optional argument for score threshold. No matches with
            a score less than this number will be returned. Defaults to 0.

    Returns:
        Generator of tuples containing the match and its score.

        If a list is used for choices, then the result will be 2-tuples.
        If a dictionary is used, then the result will be 3-tuples containing
        the key for each match.

        For example, searching for 'bird' in the dictionary

        {'bard': 'train', 'dog': 'man'}

        may return

        ('train', 22, 'bard'), ('man', 0, 'dog')
    """
    # Catch generators without lengths
    def no_process(x):
        return x

    try:
        if choices is None or len(choices) == 0:
            raise StopIteration
    except TypeError:
        pass

    # If the processor was removed by setting it to None
    # perfom a noop as it still needs to be a function
    if processor is None:
        processor = no_process

    # Run the processor on the input query.
    processed_query = processor(query)

    if len(processed_query) == 0:
        logging.warning(u"Applied processor reduces input query to empty string, "
                        "all comparisons will have score 0. "
                        "[Query: \'{0}\']".format(query))

    # Don't run full_process twice
    if scorer in [fuzz.WRatio, fuzz.QRatio,
                  fuzz.token_set_ratio, fuzz.token_sort_ratio,
                  fuzz.partial_token_set_ratio, fuzz.partial_token_sort_ratio,
                  fuzz.UWRatio, fuzz.UQRatio] \
            and processor == utils.full_process:
        processor = no_process

    # Only process the query once instead of for every choice
    if scorer in [fuzz.UWRatio, fuzz.UQRatio]:
        pre_processor = partial(utils.full_process, force_ascii=False)
        scorer = partial(scorer, full_process=False)
    elif scorer in [fuzz.WRatio, fuzz.QRatio,
                    fuzz.token_set_ratio, fuzz.token_sort_ratio,
                    fuzz.partial_token_set_ratio, fuzz.partial_token_sort_ratio]:
        pre_processor = partial(utils.full_process, force_ascii=True)
        scorer = partial(scorer, full_process=False)
    else:
        pre_processor = no_process
    processed_query = pre_processor(processed_query)

    try:
        # See if choices is a dictionary-like object.
        for key, choice in choices.items():
            processed = pre_processor(processor(choice))
            score = scorer(processed_query, processed)
            if score >= score_cutoff:
                yield (choice, score, key)
    except AttributeError:
        # It's a list; just iterate over it.
        for choice in choices:
            processed = pre_processor(processor(choice))
            score = scorer(processed_query, processed)
            if score >= score_cutoff:
                yield (choice, score)


def extract(query, choices, processor=default_processor, scorer=default_scorer, limit=5):
    """Select the best match in a list or dictionary of choices.

    Find best matches in a list or dictionary of choices, return a
    list of tuples containing the match and its score. If a dictionary
    is used, also returns the key for each match.

    Arguments:
        query: An object representing the thing we want to find.
        choices: An iterable or dictionary-like object containing choices
            to be matched against the query. Dictionary arguments of
            {key: value} pairs will attempt to match the query against
            each value.
        processor: Optional function of the form f(a) -> b, where a is the query or
            individual choice and b is the choice to be used in matching.

            This can be used to match against, say, the first element of
            a list:

                lambda x: x[0]

            Defaults to fuzzywuzzy.utils.full_process().
        scorer: Optional function for scoring matches between the query and
            an individual processed choice. This should be a function
            of the form f(query, choice) -> int.
            By default, fuzz.WRatio() is used and expects both query and
            choice to be strings.
        limit: Optional maximum for the number of elements returned. Defaults
            to 5.

    Returns:
        List of tuples containing the match and its score.

        If a list is used for choices, then the result will be 2-tuples.
        If a dictionary is used, then the result will be 3-tuples containing
        the key for each match.

        For example, searching for 'bird' in the dictionary

        {'bard': 'train', 'dog': 'man'}

        may return

        [('train', 22, 'bard'), ('man', 0, 'dog')]
    """
    sl = extractWithoutOrder(query, choices, processor, scorer)
    return heapq.nlargest(limit, sl, key=lambda i: i[1]) if limit is not None else \
        sorted(sl, key=lambda i: i[1], reverse=True)


def extractBests(query, choices, processor=default_processor, scorer=default_scorer, score_cutoff=0, limit=5):
    """Get a list of the best matches to a collection of choices.

    Convenience function for getting the choices with best scores.

    Args:
        query: A string to match against
        choices: A list or dictionary of choices, suitable for use with
            extract().
        processor: Optional function for transforming choices before matching.
            See extract().
        scorer: Scoring function for extract().
        score_cutoff: Optional argument for score threshold. No matches with
            a score less than this number will be returned. Defaults to 0.
        limit: Optional maximum for the number of elements returned. Defaults
            to 5.

    Returns: A a list of (match, score) tuples.
    """
    best_list = extractWithoutOrder(query, choices, processor, scorer, score_cutoff)
    return heapq.nlargest(limit, best_list, key=lambda i: i[1]) if limit is not None else \
        sorted(best_list, key=lambda i: i[1], reverse=True)


def dedupe(contains_dupes, threshold=70, scorer=fuzz.token_set_ratio):
    """This convenience function takes a list of strings containing duplicates and uses fuzzy matching to identify
    and remove duplicates. Specifically, it uses the process.extract to identify duplicates that
    score greater than a user defined threshold. Then, it looks for the longest item in the duplicate list
    since we assume this item contains the most entity information and returns that. It breaks string
    length ties on an alphabetical sort.

    Note: as the threshold DECREASES the number of duplicates that are found INCREASES. This means that the
        returned deduplicated list will likely be shorter. Raise the threshold for fuzzy_dedupe to be less
        sensitive.

    Args:
        contains_dupes: A list of strings that we would like to dedupe.
        threshold: the numerical value (0,100) point at which we expect to find duplicates.
            Defaults to 70 out of 100
        scorer: Optional function for scoring matches between the query and
            an individual processed choice. This should be a function
            of the form f(query, choice) -> int.
            By default, fuzz.token_set_ratio() is used and expects both query and
            choice to be strings.

    Returns:
        A deduplicated list. For example:

            In: contains_dupes = ['Frodo Baggin', 'Frodo Baggins', 'F. Baggins', 'Samwise G.', 'Gandalf', 'Bilbo Baggins']
            In: fuzzy_dedupe(contains_dupes)
            Out: ['Frodo Baggins', 'Samwise G.', 'Bilbo Baggins', 'Gandalf']
    """
    extractor = []

    # iterate over items in *contains_dupes*
    for item in contains_dupes:
        # return all duplicate matches found
        matches = extract(item, contains_dupes, limit=None, scorer=scorer)

        # filter matches based on the threshold
        filtered = [x for x in matches if x[1] > threshold]

        # if there is only 1 item in *filtered*, no duplicates were found so append to *extracted*
        if len(filtered) == 1:
            extractor.append(filtered[0][0])

        else:
            # alpha sort
            filtered = sorted(filtered, key=lambda x: x[0])
            # length sort
            filter_sort = sorted(filtered, key=lambda x: len(x[0]), reverse=True)
            # take first item as our 'canonical example'
            extractor.append(filter_sort[0][0])

    # uniquify *extractor* list
    keys = {}
    for e in extractor:
        keys[e] = 1
    extractor = keys.keys()

    # check that extractor differs from contain_dupes (e.g. duplicates were found)
    # if not, then return the original list
    if len(extractor) == len(contains_dupes):
        return contains_dupes

    else:
        return extractor
seatgeek/fuzzywuzzy
fuzzywuzzy/process.py
dedupe
python
def dedupe(contains_dupes, threshold=70, scorer=fuzz.token_set_ratio):
    extractor = []

    # iterate over items in *contains_dupes*
    for item in contains_dupes:
        # return all duplicate matches found
        matches = extract(item, contains_dupes, limit=None, scorer=scorer)

        # filter matches based on the threshold
        filtered = [x for x in matches if x[1] > threshold]

        # if there is only 1 item in *filtered*, no duplicates were found so append to *extractor*
        if len(filtered) == 1:
            extractor.append(filtered[0][0])

        else:
            # alpha sort
            filtered = sorted(filtered, key=lambda x: x[0])
            # length sort
            filter_sort = sorted(filtered, key=lambda x: len(x[0]), reverse=True)
            # take first item as our 'canonical example'
            extractor.append(filter_sort[0][0])

    # uniquify *extractor* list
    keys = {}
    for e in extractor:
        keys[e] = 1
    extractor = keys.keys()

    # check that extractor differs from contains_dupes (e.g. duplicates were found)
    # if not, then return the original list
    if len(extractor) == len(contains_dupes):
        return contains_dupes

    else:
        return extractor
This convenience function takes a list of strings containing duplicates and uses fuzzy matching to identify
    and remove duplicates. Specifically, it uses process.extract to identify duplicates that
    score greater than a user defined threshold. Then, it looks for the longest item in the duplicate list
    since we assume this item contains the most entity information and returns that. It breaks string
    length ties on an alphabetical sort.

    Note: as the threshold DECREASES the number of duplicates that are found INCREASES. This means that the
        returned deduplicated list will likely be shorter. Raise the threshold for dedupe to be less
        sensitive.

    Args:
        contains_dupes: A list of strings that we would like to dedupe.
        threshold: the numerical value (0, 100) point at which we expect to find duplicates.
            Defaults to 70 out of 100.
        scorer: Optional function for scoring matches between the query and
            an individual processed choice. This should be a function
            of the form f(query, choice) -> int.
            By default, fuzz.token_set_ratio() is used and expects both query and
            choice to be strings.

    Returns:
        A deduplicated list. For example:

            In: contains_dupes = ['Frodo Baggin', 'Frodo Baggins', 'F. Baggins', 'Samwise G.', 'Gandalf', 'Bilbo Baggins']
            In: dedupe(contains_dupes)
            Out: ['Frodo Baggins', 'Samwise G.', 'Bilbo Baggins', 'Gandalf']
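Reusing the docstring's data, a runnable sketch (on Python 3 the function returns a dict keys view, so it is wrapped in list() for printing; element order may vary):

from fuzzywuzzy import process

contains_dupes = ['Frodo Baggin', 'Frodo Baggins', 'F. Baggins',
                  'Samwise G.', 'Gandalf', 'Bilbo Baggins']

# Lower thresholds merge more aggressively; raise threshold to be conservative.
print(list(process.dedupe(contains_dupes, threshold=70)))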
train
https://github.com/seatgeek/fuzzywuzzy/blob/778162c5a73256745eb6ae22f925bc2dbcf7c894/fuzzywuzzy/process.py#L225-L285
[ "def extract(query, choices, processor=default_processor, scorer=default_scorer, limit=5):\n \"\"\"Select the best match in a list or dictionary of choices.\n\n Find best matches in a list or dictionary of choices, return a\n list of tuples containing the match and its score. If a dictionary\n is used, also returns the key for each match.\n\n Arguments:\n query: An object representing the thing we want to find.\n choices: An iterable or dictionary-like object containing choices\n to be matched against the query. Dictionary arguments of\n {key: value} pairs will attempt to match the query against\n each value.\n processor: Optional function of the form f(a) -> b, where a is the query or\n individual choice and b is the choice to be used in matching.\n\n This can be used to match against, say, the first element of\n a list:\n\n lambda x: x[0]\n\n Defaults to fuzzywuzzy.utils.full_process().\n scorer: Optional function for scoring matches between the query and\n an individual processed choice. This should be a function\n of the form f(query, choice) -> int.\n By default, fuzz.WRatio() is used and expects both query and\n choice to be strings.\n limit: Optional maximum for the number of elements returned. Defaults\n to 5.\n\n Returns:\n List of tuples containing the match and its score.\n\n If a list is used for choices, then the result will be 2-tuples.\n If a dictionary is used, then the result will be 3-tuples containing\n the key for each match.\n\n For example, searching for 'bird' in the dictionary\n\n {'bard': 'train', 'dog': 'man'}\n\n may return\n\n [('train', 22, 'bard'), ('man', 0, 'dog')]\n \"\"\"\n sl = extractWithoutOrder(query, choices, processor, scorer)\n return heapq.nlargest(limit, sl, key=lambda i: i[1]) if limit is not None else \\\n sorted(sl, key=lambda i: i[1], reverse=True)\n" ]
#!/usr/bin/env python
# encoding: utf-8
from . import fuzz
from . import utils
import heapq
import logging
from functools import partial


default_scorer = fuzz.WRatio

default_processor = utils.full_process


def extractWithoutOrder(query, choices, processor=default_processor, scorer=default_scorer, score_cutoff=0):
    """Select the best match in a list or dictionary of choices.

    Find best matches in a list or dictionary of choices, return a
    generator of tuples containing the match and its score. If a dictionary
    is used, also returns the key for each match.

    Arguments:
        query: An object representing the thing we want to find.
        choices: An iterable or dictionary-like object containing choices
            to be matched against the query. Dictionary arguments of
            {key: value} pairs will attempt to match the query against
            each value.
        processor: Optional function of the form f(a) -> b, where a is the query or
            individual choice and b is the choice to be used in matching.

            This can be used to match against, say, the first element of
            a list:

                lambda x: x[0]

            Defaults to fuzzywuzzy.utils.full_process().
        scorer: Optional function for scoring matches between the query and
            an individual processed choice. This should be a function
            of the form f(query, choice) -> int.

            By default, fuzz.WRatio() is used and expects both query and
            choice to be strings.
        score_cutoff: Optional argument for score threshold. No matches with
            a score less than this number will be returned. Defaults to 0.

    Returns:
        Generator of tuples containing the match and its score.

        If a list is used for choices, then the result will be 2-tuples.
        If a dictionary is used, then the result will be 3-tuples containing
        the key for each match.

        For example, searching for 'bird' in the dictionary

        {'bard': 'train', 'dog': 'man'}

        may return

        ('train', 22, 'bard'), ('man', 0, 'dog')
    """
    # Catch generators without lengths
    def no_process(x):
        return x

    try:
        if choices is None or len(choices) == 0:
            raise StopIteration
    except TypeError:
        pass

    # If the processor was removed by setting it to None
    # perfom a noop as it still needs to be a function
    if processor is None:
        processor = no_process

    # Run the processor on the input query.
    processed_query = processor(query)

    if len(processed_query) == 0:
        logging.warning(u"Applied processor reduces input query to empty string, "
                        "all comparisons will have score 0. "
                        "[Query: \'{0}\']".format(query))

    # Don't run full_process twice
    if scorer in [fuzz.WRatio, fuzz.QRatio,
                  fuzz.token_set_ratio, fuzz.token_sort_ratio,
                  fuzz.partial_token_set_ratio, fuzz.partial_token_sort_ratio,
                  fuzz.UWRatio, fuzz.UQRatio] \
            and processor == utils.full_process:
        processor = no_process

    # Only process the query once instead of for every choice
    if scorer in [fuzz.UWRatio, fuzz.UQRatio]:
        pre_processor = partial(utils.full_process, force_ascii=False)
        scorer = partial(scorer, full_process=False)
    elif scorer in [fuzz.WRatio, fuzz.QRatio,
                    fuzz.token_set_ratio, fuzz.token_sort_ratio,
                    fuzz.partial_token_set_ratio, fuzz.partial_token_sort_ratio]:
        pre_processor = partial(utils.full_process, force_ascii=True)
        scorer = partial(scorer, full_process=False)
    else:
        pre_processor = no_process
    processed_query = pre_processor(processed_query)

    try:
        # See if choices is a dictionary-like object.
        for key, choice in choices.items():
            processed = pre_processor(processor(choice))
            score = scorer(processed_query, processed)
            if score >= score_cutoff:
                yield (choice, score, key)
    except AttributeError:
        # It's a list; just iterate over it.
        for choice in choices:
            processed = pre_processor(processor(choice))
            score = scorer(processed_query, processed)
            if score >= score_cutoff:
                yield (choice, score)


def extract(query, choices, processor=default_processor, scorer=default_scorer, limit=5):
    """Select the best match in a list or dictionary of choices.

    Find best matches in a list or dictionary of choices, return a
    list of tuples containing the match and its score. If a dictionary
    is used, also returns the key for each match.

    Arguments:
        query: An object representing the thing we want to find.
        choices: An iterable or dictionary-like object containing choices
            to be matched against the query. Dictionary arguments of
            {key: value} pairs will attempt to match the query against
            each value.
        processor: Optional function of the form f(a) -> b, where a is the query or
            individual choice and b is the choice to be used in matching.

            This can be used to match against, say, the first element of
            a list:

                lambda x: x[0]

            Defaults to fuzzywuzzy.utils.full_process().
        scorer: Optional function for scoring matches between the query and
            an individual processed choice. This should be a function
            of the form f(query, choice) -> int.
            By default, fuzz.WRatio() is used and expects both query and
            choice to be strings.
        limit: Optional maximum for the number of elements returned. Defaults
            to 5.

    Returns:
        List of tuples containing the match and its score.

        If a list is used for choices, then the result will be 2-tuples.
        If a dictionary is used, then the result will be 3-tuples containing
        the key for each match.

        For example, searching for 'bird' in the dictionary

        {'bard': 'train', 'dog': 'man'}

        may return

        [('train', 22, 'bard'), ('man', 0, 'dog')]
    """
    sl = extractWithoutOrder(query, choices, processor, scorer)
    return heapq.nlargest(limit, sl, key=lambda i: i[1]) if limit is not None else \
        sorted(sl, key=lambda i: i[1], reverse=True)


def extractBests(query, choices, processor=default_processor, scorer=default_scorer, score_cutoff=0, limit=5):
    """Get a list of the best matches to a collection of choices.

    Convenience function for getting the choices with best scores.

    Args:
        query: A string to match against
        choices: A list or dictionary of choices, suitable for use with
            extract().
        processor: Optional function for transforming choices before matching.
            See extract().
        scorer: Scoring function for extract().
        score_cutoff: Optional argument for score threshold. No matches with
            a score less than this number will be returned. Defaults to 0.
        limit: Optional maximum for the number of elements returned. Defaults
            to 5.

    Returns: A a list of (match, score) tuples.
    """
    best_list = extractWithoutOrder(query, choices, processor, scorer, score_cutoff)
    return heapq.nlargest(limit, best_list, key=lambda i: i[1]) if limit is not None else \
        sorted(best_list, key=lambda i: i[1], reverse=True)


def extractOne(query, choices, processor=default_processor, scorer=default_scorer, score_cutoff=0):
    """Find the single best match above a score in a list of choices.

    This is a convenience method which returns the single best choice.
    See extract() for the full arguments list.

    Args:
        query: A string to match against
        choices: A list or dictionary of choices, suitable for use with
            extract().
        processor: Optional function for transforming choices before matching.
            See extract().
        scorer: Scoring function for extract().
        score_cutoff: Optional argument for score threshold. If the best
            match is found, but it is not greater than this number, then
            return None anyway ("not a good enough match"). Defaults to 0.

    Returns:
        A tuple containing a single match and its score, if a match
        was found that was above score_cutoff. Otherwise, returns None.
    """
    best_list = extractWithoutOrder(query, choices, processor, scorer, score_cutoff)
    try:
        return max(best_list, key=lambda i: i[1])
    except ValueError:
        return None
seatgeek/fuzzywuzzy
fuzzywuzzy/utils.py
make_type_consistent
python
def make_type_consistent(s1, s2):
    if isinstance(s1, str) and isinstance(s2, str):
        return s1, s2

    elif isinstance(s1, unicode) and isinstance(s2, unicode):
        return s1, s2

    else:
        return unicode(s1), unicode(s2)
If the two objects aren't both str instances or both unicode instances, force them both to unicode
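A minimal sketch of the coercion behavior; note that on Python 3 the module aliases unicode to str, so two str inputs pass through unchanged:

from fuzzywuzzy import utils

# Mixed types fall through to the else branch and are both coerced to text.
s1, s2 = utils.make_type_consistent("kitten", 99)
print(repr(s1), repr(s2))  # 'kitten' '99'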
train
https://github.com/seatgeek/fuzzywuzzy/blob/778162c5a73256745eb6ae22f925bc2dbcf7c894/fuzzywuzzy/utils.py#L73-L82
null
from __future__ import unicode_literals
import sys
import functools

from fuzzywuzzy.string_processing import StringProcessor


PY3 = sys.version_info[0] == 3


def validate_string(s):
    """
    Check input has length and that length > 0

    :param s:
    :return: True if len(s) > 0 else False
    """
    try:
        return len(s) > 0
    except TypeError:
        return False


def check_for_equivalence(func):
    @functools.wraps(func)
    def decorator(*args, **kwargs):
        if args[0] == args[1]:
            return 100
        return func(*args, **kwargs)
    return decorator


def check_for_none(func):
    @functools.wraps(func)
    def decorator(*args, **kwargs):
        if args[0] is None or args[1] is None:
            return 0
        return func(*args, **kwargs)
    return decorator


def check_empty_string(func):
    @functools.wraps(func)
    def decorator(*args, **kwargs):
        if len(args[0]) == 0 or len(args[1]) == 0:
            return 0
        return func(*args, **kwargs)
    return decorator


bad_chars = str("").join([chr(i) for i in range(128, 256)])  # ascii dammit!
if PY3:
    translation_table = dict((ord(c), None) for c in bad_chars)
    unicode = str


def asciionly(s):
    if PY3:
        return s.translate(translation_table)
    else:
        return s.translate(None, bad_chars)


def asciidammit(s):
    if type(s) is str:
        return asciionly(s)
    elif type(s) is unicode:
        return asciionly(s.encode('ascii', 'ignore'))
    else:
        return asciidammit(unicode(s))


def full_process(s, force_ascii=False):
    """Process string by
    -- removing all but letters and numbers
    -- trim whitespace
    -- force to lower case
    if force_ascii == True, force convert to ascii"""

    if force_ascii:
        s = asciidammit(s)
    # Keep only Letters and Numbers (see Unicode docs).
    string_out = StringProcessor.replace_non_letters_non_numbers_with_whitespace(s)
    # Force into lowercase.
    string_out = StringProcessor.to_lower_case(string_out)
    # Remove leading and trailing whitespaces.
    string_out = StringProcessor.strip(string_out)
    return string_out


def intr(n):
    '''Returns a correctly rounded integer'''
    return int(round(n))
seatgeek/fuzzywuzzy
fuzzywuzzy/utils.py
full_process
python
def full_process(s, force_ascii=False):
    if force_ascii:
        s = asciidammit(s)
    # Keep only Letters and Numbers (see Unicode docs).
    string_out = StringProcessor.replace_non_letters_non_numbers_with_whitespace(s)
    # Force into lowercase.
    string_out = StringProcessor.to_lower_case(string_out)
    # Remove leading and trailing whitespaces.
    string_out = StringProcessor.strip(string_out)
    return string_out
Process string by
    -- removing all but letters and numbers
    -- trimming whitespace
    -- forcing to lower case
    If force_ascii == True, force convert to ascii.
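A small sketch of full_process(); the first expected output follows directly from the steps above, and the second illustrates that punctuation becomes whitespace:

from fuzzywuzzy import utils

print(utils.full_process("New York Jets!!"))  # 'new york jets'
print(utils.full_process("Frodo's Ring"))     # "frodo s ring" - apostrophe replaced by a space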
train
https://github.com/seatgeek/fuzzywuzzy/blob/778162c5a73256745eb6ae22f925bc2dbcf7c894/fuzzywuzzy/utils.py#L85-L100
[ "def asciidammit(s):\n if type(s) is str:\n return asciionly(s)\n elif type(s) is unicode:\n return asciionly(s.encode('ascii', 'ignore'))\n else:\n return asciidammit(unicode(s))\n", "def replace_non_letters_non_numbers_with_whitespace(cls, a_string):\n \"\"\"\n This function replaces any sequence of non letters and non\n numbers with a single white space.\n \"\"\"\n return cls.regex.sub(\" \", a_string)\n" ]
from __future__ import unicode_literals
import sys
import functools

from fuzzywuzzy.string_processing import StringProcessor


PY3 = sys.version_info[0] == 3


def validate_string(s):
    """
    Check input has length and that length > 0

    :param s:
    :return: True if len(s) > 0 else False
    """
    try:
        return len(s) > 0
    except TypeError:
        return False


def check_for_equivalence(func):
    @functools.wraps(func)
    def decorator(*args, **kwargs):
        if args[0] == args[1]:
            return 100
        return func(*args, **kwargs)
    return decorator


def check_for_none(func):
    @functools.wraps(func)
    def decorator(*args, **kwargs):
        if args[0] is None or args[1] is None:
            return 0
        return func(*args, **kwargs)
    return decorator


def check_empty_string(func):
    @functools.wraps(func)
    def decorator(*args, **kwargs):
        if len(args[0]) == 0 or len(args[1]) == 0:
            return 0
        return func(*args, **kwargs)
    return decorator


bad_chars = str("").join([chr(i) for i in range(128, 256)])  # ascii dammit!
if PY3:
    translation_table = dict((ord(c), None) for c in bad_chars)
    unicode = str


def asciionly(s):
    if PY3:
        return s.translate(translation_table)
    else:
        return s.translate(None, bad_chars)


def asciidammit(s):
    if type(s) is str:
        return asciionly(s)
    elif type(s) is unicode:
        return asciionly(s.encode('ascii', 'ignore'))
    else:
        return asciidammit(unicode(s))


def make_type_consistent(s1, s2):
    """If both objects aren't either both string or unicode instances force them to unicode"""
    if isinstance(s1, str) and isinstance(s2, str):
        return s1, s2

    elif isinstance(s1, unicode) and isinstance(s2, unicode):
        return s1, s2

    else:
        return unicode(s1), unicode(s2)


def intr(n):
    '''Returns a correctly rounded integer'''
    return int(round(n))
seatgeek/fuzzywuzzy
fuzzywuzzy/fuzz.py
partial_ratio
python
def partial_ratio(s1, s2):
    s1, s2 = utils.make_type_consistent(s1, s2)

    if len(s1) <= len(s2):
        shorter = s1
        longer = s2
    else:
        shorter = s2
        longer = s1

    m = SequenceMatcher(None, shorter, longer)
    blocks = m.get_matching_blocks()

    # each block represents a sequence of matching characters in a string
    # of the form (idx_1, idx_2, len)
    # the best partial match will block align with at least one of those blocks
    #   e.g. shorter = "abcd", longer = XXXbcdeEEE
    #   block = (1,3,3)
    #   best score === ratio("abcd", "Xbcd")
    scores = []
    for block in blocks:
        long_start = block[1] - block[0] if (block[1] - block[0]) > 0 else 0
        long_end = long_start + len(shorter)
        long_substr = longer[long_start:long_end]

        m2 = SequenceMatcher(None, shorter, long_substr)
        r = m2.ratio()
        if r > .995:
            return 100
        else:
            scores.append(r)

    return utils.intr(100 * max(scores))
Return the ratio of the most similar substring as a number between 0 and 100.
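A sketch contrasting partial_ratio() with the plain ratio; the second score is certain because the query is an exact substring of the longer string, while the first is deliberately left unstated:

from fuzzywuzzy import fuzz

print(fuzz.ratio("Yankees", "New York Yankees"))          # well below 100
print(fuzz.partial_ratio("Yankees", "New York Yankees"))  # 100: exact substring match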
train
https://github.com/seatgeek/fuzzywuzzy/blob/778162c5a73256745eb6ae22f925bc2dbcf7c894/fuzzywuzzy/fuzz.py#L34-L68
[ "def make_type_consistent(s1, s2):\n \"\"\"If both objects aren't either both string or unicode instances force them to unicode\"\"\"\n if isinstance(s1, str) and isinstance(s2, str):\n return s1, s2\n\n elif isinstance(s1, unicode) and isinstance(s2, unicode):\n return s1, s2\n\n else:\n return unicode(s1), unicode(s2)\n", "def intr(n):\n '''Returns a correctly rounded integer'''\n return int(round(n))\n", "def get_matching_blocks(self):\n if not self._matching_blocks:\n self._matching_blocks = matching_blocks(self.get_opcodes(),\n self._str1, self._str2)\n return self._matching_blocks\n", "def ratio(self):\n if not self._ratio:\n self._ratio = ratio(self._str1, self._str2)\n return self._ratio\n" ]
#!/usr/bin/env python # encoding: utf-8 from __future__ import unicode_literals import platform import warnings try: from .StringMatcher import StringMatcher as SequenceMatcher except ImportError: if platform.python_implementation() != "PyPy": warnings.warn('Using slow pure-python SequenceMatcher. Install python-Levenshtein to remove this warning') from difflib import SequenceMatcher from . import utils ########################### # Basic Scoring Functions # ########################### @utils.check_for_none @utils.check_for_equivalence @utils.check_empty_string def ratio(s1, s2): s1, s2 = utils.make_type_consistent(s1, s2) m = SequenceMatcher(None, s1, s2) return utils.intr(100 * m.ratio()) @utils.check_for_none @utils.check_for_equivalence @utils.check_empty_string ############################## # Advanced Scoring Functions # ############################## def _process_and_sort(s, force_ascii, full_process=True): """Return a cleaned string with token sorted.""" # pull tokens ts = utils.full_process(s, force_ascii=force_ascii) if full_process else s tokens = ts.split() # sort tokens and join sorted_string = u" ".join(sorted(tokens)) return sorted_string.strip() # Sorted Token # find all alphanumeric tokens in the string # sort those tokens and take ratio of resulting joined strings # controls for unordered string elements @utils.check_for_none def _token_sort(s1, s2, partial=True, force_ascii=True, full_process=True): sorted1 = _process_and_sort(s1, force_ascii, full_process=full_process) sorted2 = _process_and_sort(s2, force_ascii, full_process=full_process) if partial: return partial_ratio(sorted1, sorted2) else: return ratio(sorted1, sorted2) def token_sort_ratio(s1, s2, force_ascii=True, full_process=True): """Return a measure of the sequences' similarity between 0 and 100 but sorting the token before comparing. """ return _token_sort(s1, s2, partial=False, force_ascii=force_ascii, full_process=full_process) def partial_token_sort_ratio(s1, s2, force_ascii=True, full_process=True): """Return the ratio of the most similar substring as a number between 0 and 100 but sorting the token before comparing. """ return _token_sort(s1, s2, partial=True, force_ascii=force_ascii, full_process=full_process) @utils.check_for_none def _token_set(s1, s2, partial=True, force_ascii=True, full_process=True): """Find all alphanumeric tokens in each string... 
- treat them as a set - construct two strings of the form: <sorted_intersection><sorted_remainder> - take ratios of those two strings - controls for unordered partial matches""" if not full_process and s1 == s2: return 100 p1 = utils.full_process(s1, force_ascii=force_ascii) if full_process else s1 p2 = utils.full_process(s2, force_ascii=force_ascii) if full_process else s2 if not utils.validate_string(p1): return 0 if not utils.validate_string(p2): return 0 # pull tokens tokens1 = set(p1.split()) tokens2 = set(p2.split()) intersection = tokens1.intersection(tokens2) diff1to2 = tokens1.difference(tokens2) diff2to1 = tokens2.difference(tokens1) sorted_sect = " ".join(sorted(intersection)) sorted_1to2 = " ".join(sorted(diff1to2)) sorted_2to1 = " ".join(sorted(diff2to1)) combined_1to2 = sorted_sect + " " + sorted_1to2 combined_2to1 = sorted_sect + " " + sorted_2to1 # strip sorted_sect = sorted_sect.strip() combined_1to2 = combined_1to2.strip() combined_2to1 = combined_2to1.strip() if partial: ratio_func = partial_ratio else: ratio_func = ratio pairwise = [ ratio_func(sorted_sect, combined_1to2), ratio_func(sorted_sect, combined_2to1), ratio_func(combined_1to2, combined_2to1) ] return max(pairwise) def token_set_ratio(s1, s2, force_ascii=True, full_process=True): return _token_set(s1, s2, partial=False, force_ascii=force_ascii, full_process=full_process) def partial_token_set_ratio(s1, s2, force_ascii=True, full_process=True): return _token_set(s1, s2, partial=True, force_ascii=force_ascii, full_process=full_process) ################### # Combination API # ################### # q is for quick def QRatio(s1, s2, force_ascii=True, full_process=True): """ Quick ratio comparison between two strings. Runs full_process from utils on both strings Short circuits if either of the strings is empty after processing. :param s1: :param s2: :param force_ascii: Allow only ASCII characters (Default: True) :full_process: Process inputs, used here to avoid double processing in extract functions (Default: True) :return: similarity ratio """ if full_process: p1 = utils.full_process(s1, force_ascii=force_ascii) p2 = utils.full_process(s2, force_ascii=force_ascii) else: p1 = s1 p2 = s2 if not utils.validate_string(p1): return 0 if not utils.validate_string(p2): return 0 return ratio(p1, p2) def UQRatio(s1, s2, full_process=True): """ Unicode quick ratio Calls QRatio with force_ascii set to False :param s1: :param s2: :return: similarity ratio """ return QRatio(s1, s2, force_ascii=False, full_process=full_process) # w is for weighted def WRatio(s1, s2, force_ascii=True, full_process=True): """ Return a measure of the sequences' similarity between 0 and 100, using different algorithms. **Steps in the order they occur** #. Run full_process from utils on both strings #. Short circuit if this makes either string empty #. Take the ratio of the two processed strings (fuzz.ratio) #. Run checks to compare the length of the strings * If one of the strings is more than 1.5 times as long as the other use partial_ratio comparisons - scale partial results by 0.9 (this makes sure only full results can return 100) * If one of the strings is over 8 times as long as the other instead scale by 0.6 #. Run the other ratio functions * if using partial ratio functions call partial_ratio, partial_token_sort_ratio and partial_token_set_ratio scale all of these by the ratio based on length * otherwise call token_sort_ratio and token_set_ratio * all token based comparisons are scaled by 0.95 (on top of any partial scalars) #. 
Take the highest value from these results round it and return it as an integer. :param s1: :param s2: :param force_ascii: Allow only ascii characters :type force_ascii: bool :full_process: Process inputs, used here to avoid double processing in extract functions (Default: True) :return: """ if full_process: p1 = utils.full_process(s1, force_ascii=force_ascii) p2 = utils.full_process(s2, force_ascii=force_ascii) else: p1 = s1 p2 = s2 if not utils.validate_string(p1): return 0 if not utils.validate_string(p2): return 0 # should we look at partials? try_partial = True unbase_scale = .95 partial_scale = .90 base = ratio(p1, p2) len_ratio = float(max(len(p1), len(p2))) / min(len(p1), len(p2)) # if strings are similar length, don't use partials if len_ratio < 1.5: try_partial = False # if one string is much much shorter than the other if len_ratio > 8: partial_scale = .6 if try_partial: partial = partial_ratio(p1, p2) * partial_scale ptsor = partial_token_sort_ratio(p1, p2, full_process=False) \ * unbase_scale * partial_scale ptser = partial_token_set_ratio(p1, p2, full_process=False) \ * unbase_scale * partial_scale return utils.intr(max(base, partial, ptsor, ptser)) else: tsor = token_sort_ratio(p1, p2, full_process=False) * unbase_scale tser = token_set_ratio(p1, p2, full_process=False) * unbase_scale return utils.intr(max(base, tsor, tser)) def UWRatio(s1, s2, full_process=True): """Return a measure of the sequences' similarity between 0 and 100, using different algorithms. Same as WRatio but preserving unicode. """ return WRatio(s1, s2, force_ascii=False, full_process=full_process)
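A minimal usage sketch (not part of the record above; it assumes the fuzzywuzzy package from the func_code_url is installed and importable). It shows why partial_ratio exists: an exact substring hits the r > .995 early return and scores 100, while the plain full-string ratio is dragged down by the extra prefix.

from fuzzywuzzy import fuzz

# full-string comparison: the "new york " prefix counts against the score
print(fuzz.ratio("yankees", "new york yankees"))          # well below 100

# best-substring comparison: "yankees" aligns exactly with a window of the
# longer string, so the early return fires and the score is 100
print(fuzz.partial_ratio("yankees", "new york yankees"))  # 100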
seatgeek/fuzzywuzzy
fuzzywuzzy/fuzz.py
_process_and_sort
python
def _process_and_sort(s, force_ascii, full_process=True):
    # pull tokens
    ts = utils.full_process(s, force_ascii=force_ascii) if full_process else s
    tokens = ts.split()

    # sort tokens and join
    sorted_string = u" ".join(sorted(tokens))
    return sorted_string.strip()
Return a cleaned string with tokens sorted.
train
https://github.com/seatgeek/fuzzywuzzy/blob/778162c5a73256745eb6ae22f925bc2dbcf7c894/fuzzywuzzy/fuzz.py#L75-L83
null
#!/usr/bin/env python # encoding: utf-8 from __future__ import unicode_literals import platform import warnings try: from .StringMatcher import StringMatcher as SequenceMatcher except ImportError: if platform.python_implementation() != "PyPy": warnings.warn('Using slow pure-python SequenceMatcher. Install python-Levenshtein to remove this warning') from difflib import SequenceMatcher from . import utils ########################### # Basic Scoring Functions # ########################### @utils.check_for_none @utils.check_for_equivalence @utils.check_empty_string def ratio(s1, s2): s1, s2 = utils.make_type_consistent(s1, s2) m = SequenceMatcher(None, s1, s2) return utils.intr(100 * m.ratio()) @utils.check_for_none @utils.check_for_equivalence @utils.check_empty_string def partial_ratio(s1, s2): """"Return the ratio of the most similar substring as a number between 0 and 100.""" s1, s2 = utils.make_type_consistent(s1, s2) if len(s1) <= len(s2): shorter = s1 longer = s2 else: shorter = s2 longer = s1 m = SequenceMatcher(None, shorter, longer) blocks = m.get_matching_blocks() # each block represents a sequence of matching characters in a string # of the form (idx_1, idx_2, len) # the best partial match will block align with at least one of those blocks # e.g. shorter = "abcd", longer = XXXbcdeEEE # block = (1,3,3) # best score === ratio("abcd", "Xbcd") scores = [] for block in blocks: long_start = block[1] - block[0] if (block[1] - block[0]) > 0 else 0 long_end = long_start + len(shorter) long_substr = longer[long_start:long_end] m2 = SequenceMatcher(None, shorter, long_substr) r = m2.ratio() if r > .995: return 100 else: scores.append(r) return utils.intr(100 * max(scores)) ############################## # Advanced Scoring Functions # ############################## # Sorted Token # find all alphanumeric tokens in the string # sort those tokens and take ratio of resulting joined strings # controls for unordered string elements @utils.check_for_none def _token_sort(s1, s2, partial=True, force_ascii=True, full_process=True): sorted1 = _process_and_sort(s1, force_ascii, full_process=full_process) sorted2 = _process_and_sort(s2, force_ascii, full_process=full_process) if partial: return partial_ratio(sorted1, sorted2) else: return ratio(sorted1, sorted2) def token_sort_ratio(s1, s2, force_ascii=True, full_process=True): """Return a measure of the sequences' similarity between 0 and 100 but sorting the token before comparing. """ return _token_sort(s1, s2, partial=False, force_ascii=force_ascii, full_process=full_process) def partial_token_sort_ratio(s1, s2, force_ascii=True, full_process=True): """Return the ratio of the most similar substring as a number between 0 and 100 but sorting the token before comparing. """ return _token_sort(s1, s2, partial=True, force_ascii=force_ascii, full_process=full_process) @utils.check_for_none def _token_set(s1, s2, partial=True, force_ascii=True, full_process=True): """Find all alphanumeric tokens in each string... 
- treat them as a set - construct two strings of the form: <sorted_intersection><sorted_remainder> - take ratios of those two strings - controls for unordered partial matches""" if not full_process and s1 == s2: return 100 p1 = utils.full_process(s1, force_ascii=force_ascii) if full_process else s1 p2 = utils.full_process(s2, force_ascii=force_ascii) if full_process else s2 if not utils.validate_string(p1): return 0 if not utils.validate_string(p2): return 0 # pull tokens tokens1 = set(p1.split()) tokens2 = set(p2.split()) intersection = tokens1.intersection(tokens2) diff1to2 = tokens1.difference(tokens2) diff2to1 = tokens2.difference(tokens1) sorted_sect = " ".join(sorted(intersection)) sorted_1to2 = " ".join(sorted(diff1to2)) sorted_2to1 = " ".join(sorted(diff2to1)) combined_1to2 = sorted_sect + " " + sorted_1to2 combined_2to1 = sorted_sect + " " + sorted_2to1 # strip sorted_sect = sorted_sect.strip() combined_1to2 = combined_1to2.strip() combined_2to1 = combined_2to1.strip() if partial: ratio_func = partial_ratio else: ratio_func = ratio pairwise = [ ratio_func(sorted_sect, combined_1to2), ratio_func(sorted_sect, combined_2to1), ratio_func(combined_1to2, combined_2to1) ] return max(pairwise) def token_set_ratio(s1, s2, force_ascii=True, full_process=True): return _token_set(s1, s2, partial=False, force_ascii=force_ascii, full_process=full_process) def partial_token_set_ratio(s1, s2, force_ascii=True, full_process=True): return _token_set(s1, s2, partial=True, force_ascii=force_ascii, full_process=full_process) ################### # Combination API # ################### # q is for quick def QRatio(s1, s2, force_ascii=True, full_process=True): """ Quick ratio comparison between two strings. Runs full_process from utils on both strings Short circuits if either of the strings is empty after processing. :param s1: :param s2: :param force_ascii: Allow only ASCII characters (Default: True) :full_process: Process inputs, used here to avoid double processing in extract functions (Default: True) :return: similarity ratio """ if full_process: p1 = utils.full_process(s1, force_ascii=force_ascii) p2 = utils.full_process(s2, force_ascii=force_ascii) else: p1 = s1 p2 = s2 if not utils.validate_string(p1): return 0 if not utils.validate_string(p2): return 0 return ratio(p1, p2) def UQRatio(s1, s2, full_process=True): """ Unicode quick ratio Calls QRatio with force_ascii set to False :param s1: :param s2: :return: similarity ratio """ return QRatio(s1, s2, force_ascii=False, full_process=full_process) # w is for weighted def WRatio(s1, s2, force_ascii=True, full_process=True): """ Return a measure of the sequences' similarity between 0 and 100, using different algorithms. **Steps in the order they occur** #. Run full_process from utils on both strings #. Short circuit if this makes either string empty #. Take the ratio of the two processed strings (fuzz.ratio) #. Run checks to compare the length of the strings * If one of the strings is more than 1.5 times as long as the other use partial_ratio comparisons - scale partial results by 0.9 (this makes sure only full results can return 100) * If one of the strings is over 8 times as long as the other instead scale by 0.6 #. Run the other ratio functions * if using partial ratio functions call partial_ratio, partial_token_sort_ratio and partial_token_set_ratio scale all of these by the ratio based on length * otherwise call token_sort_ratio and token_set_ratio * all token based comparisons are scaled by 0.95 (on top of any partial scalars) #. 
Take the highest value from these results round it and return it as an integer. :param s1: :param s2: :param force_ascii: Allow only ascii characters :type force_ascii: bool :full_process: Process inputs, used here to avoid double processing in extract functions (Default: True) :return: """ if full_process: p1 = utils.full_process(s1, force_ascii=force_ascii) p2 = utils.full_process(s2, force_ascii=force_ascii) else: p1 = s1 p2 = s2 if not utils.validate_string(p1): return 0 if not utils.validate_string(p2): return 0 # should we look at partials? try_partial = True unbase_scale = .95 partial_scale = .90 base = ratio(p1, p2) len_ratio = float(max(len(p1), len(p2))) / min(len(p1), len(p2)) # if strings are similar length, don't use partials if len_ratio < 1.5: try_partial = False # if one string is much much shorter than the other if len_ratio > 8: partial_scale = .6 if try_partial: partial = partial_ratio(p1, p2) * partial_scale ptsor = partial_token_sort_ratio(p1, p2, full_process=False) \ * unbase_scale * partial_scale ptser = partial_token_set_ratio(p1, p2, full_process=False) \ * unbase_scale * partial_scale return utils.intr(max(base, partial, ptsor, ptser)) else: tsor = token_sort_ratio(p1, p2, full_process=False) * unbase_scale tser = token_set_ratio(p1, p2, full_process=False) * unbase_scale return utils.intr(max(base, tsor, tser)) def UWRatio(s1, s2, full_process=True): """Return a measure of the sequences' similarity between 0 and 100, using different algorithms. Same as WRatio but preserving unicode. """ return WRatio(s1, s2, force_ascii=False, full_process=full_process)
seatgeek/fuzzywuzzy
fuzzywuzzy/fuzz.py
token_sort_ratio
python
def token_sort_ratio(s1, s2, force_ascii=True, full_process=True):
    return _token_sort(s1, s2, partial=False, force_ascii=force_ascii, full_process=full_process)
Return a measure of the sequences' similarity between 0 and 100, but sorting the tokens before comparing.
train
https://github.com/seatgeek/fuzzywuzzy/blob/778162c5a73256745eb6ae22f925bc2dbcf7c894/fuzzywuzzy/fuzz.py#L101-L105
null
#!/usr/bin/env python # encoding: utf-8 from __future__ import unicode_literals import platform import warnings try: from .StringMatcher import StringMatcher as SequenceMatcher except ImportError: if platform.python_implementation() != "PyPy": warnings.warn('Using slow pure-python SequenceMatcher. Install python-Levenshtein to remove this warning') from difflib import SequenceMatcher from . import utils ########################### # Basic Scoring Functions # ########################### @utils.check_for_none @utils.check_for_equivalence @utils.check_empty_string def ratio(s1, s2): s1, s2 = utils.make_type_consistent(s1, s2) m = SequenceMatcher(None, s1, s2) return utils.intr(100 * m.ratio()) @utils.check_for_none @utils.check_for_equivalence @utils.check_empty_string def partial_ratio(s1, s2): """"Return the ratio of the most similar substring as a number between 0 and 100.""" s1, s2 = utils.make_type_consistent(s1, s2) if len(s1) <= len(s2): shorter = s1 longer = s2 else: shorter = s2 longer = s1 m = SequenceMatcher(None, shorter, longer) blocks = m.get_matching_blocks() # each block represents a sequence of matching characters in a string # of the form (idx_1, idx_2, len) # the best partial match will block align with at least one of those blocks # e.g. shorter = "abcd", longer = XXXbcdeEEE # block = (1,3,3) # best score === ratio("abcd", "Xbcd") scores = [] for block in blocks: long_start = block[1] - block[0] if (block[1] - block[0]) > 0 else 0 long_end = long_start + len(shorter) long_substr = longer[long_start:long_end] m2 = SequenceMatcher(None, shorter, long_substr) r = m2.ratio() if r > .995: return 100 else: scores.append(r) return utils.intr(100 * max(scores)) ############################## # Advanced Scoring Functions # ############################## def _process_and_sort(s, force_ascii, full_process=True): """Return a cleaned string with token sorted.""" # pull tokens ts = utils.full_process(s, force_ascii=force_ascii) if full_process else s tokens = ts.split() # sort tokens and join sorted_string = u" ".join(sorted(tokens)) return sorted_string.strip() # Sorted Token # find all alphanumeric tokens in the string # sort those tokens and take ratio of resulting joined strings # controls for unordered string elements @utils.check_for_none def _token_sort(s1, s2, partial=True, force_ascii=True, full_process=True): sorted1 = _process_and_sort(s1, force_ascii, full_process=full_process) sorted2 = _process_and_sort(s2, force_ascii, full_process=full_process) if partial: return partial_ratio(sorted1, sorted2) else: return ratio(sorted1, sorted2) def partial_token_sort_ratio(s1, s2, force_ascii=True, full_process=True): """Return the ratio of the most similar substring as a number between 0 and 100 but sorting the token before comparing. """ return _token_sort(s1, s2, partial=True, force_ascii=force_ascii, full_process=full_process) @utils.check_for_none def _token_set(s1, s2, partial=True, force_ascii=True, full_process=True): """Find all alphanumeric tokens in each string... 
- treat them as a set - construct two strings of the form: <sorted_intersection><sorted_remainder> - take ratios of those two strings - controls for unordered partial matches""" if not full_process and s1 == s2: return 100 p1 = utils.full_process(s1, force_ascii=force_ascii) if full_process else s1 p2 = utils.full_process(s2, force_ascii=force_ascii) if full_process else s2 if not utils.validate_string(p1): return 0 if not utils.validate_string(p2): return 0 # pull tokens tokens1 = set(p1.split()) tokens2 = set(p2.split()) intersection = tokens1.intersection(tokens2) diff1to2 = tokens1.difference(tokens2) diff2to1 = tokens2.difference(tokens1) sorted_sect = " ".join(sorted(intersection)) sorted_1to2 = " ".join(sorted(diff1to2)) sorted_2to1 = " ".join(sorted(diff2to1)) combined_1to2 = sorted_sect + " " + sorted_1to2 combined_2to1 = sorted_sect + " " + sorted_2to1 # strip sorted_sect = sorted_sect.strip() combined_1to2 = combined_1to2.strip() combined_2to1 = combined_2to1.strip() if partial: ratio_func = partial_ratio else: ratio_func = ratio pairwise = [ ratio_func(sorted_sect, combined_1to2), ratio_func(sorted_sect, combined_2to1), ratio_func(combined_1to2, combined_2to1) ] return max(pairwise) def token_set_ratio(s1, s2, force_ascii=True, full_process=True): return _token_set(s1, s2, partial=False, force_ascii=force_ascii, full_process=full_process) def partial_token_set_ratio(s1, s2, force_ascii=True, full_process=True): return _token_set(s1, s2, partial=True, force_ascii=force_ascii, full_process=full_process) ################### # Combination API # ################### # q is for quick def QRatio(s1, s2, force_ascii=True, full_process=True): """ Quick ratio comparison between two strings. Runs full_process from utils on both strings Short circuits if either of the strings is empty after processing. :param s1: :param s2: :param force_ascii: Allow only ASCII characters (Default: True) :full_process: Process inputs, used here to avoid double processing in extract functions (Default: True) :return: similarity ratio """ if full_process: p1 = utils.full_process(s1, force_ascii=force_ascii) p2 = utils.full_process(s2, force_ascii=force_ascii) else: p1 = s1 p2 = s2 if not utils.validate_string(p1): return 0 if not utils.validate_string(p2): return 0 return ratio(p1, p2) def UQRatio(s1, s2, full_process=True): """ Unicode quick ratio Calls QRatio with force_ascii set to False :param s1: :param s2: :return: similarity ratio """ return QRatio(s1, s2, force_ascii=False, full_process=full_process) # w is for weighted def WRatio(s1, s2, force_ascii=True, full_process=True): """ Return a measure of the sequences' similarity between 0 and 100, using different algorithms. **Steps in the order they occur** #. Run full_process from utils on both strings #. Short circuit if this makes either string empty #. Take the ratio of the two processed strings (fuzz.ratio) #. Run checks to compare the length of the strings * If one of the strings is more than 1.5 times as long as the other use partial_ratio comparisons - scale partial results by 0.9 (this makes sure only full results can return 100) * If one of the strings is over 8 times as long as the other instead scale by 0.6 #. Run the other ratio functions * if using partial ratio functions call partial_ratio, partial_token_sort_ratio and partial_token_set_ratio scale all of these by the ratio based on length * otherwise call token_sort_ratio and token_set_ratio * all token based comparisons are scaled by 0.95 (on top of any partial scalars) #. 
Take the highest value from these results round it and return it as an integer. :param s1: :param s2: :param force_ascii: Allow only ascii characters :type force_ascii: bool :full_process: Process inputs, used here to avoid double processing in extract functions (Default: True) :return: """ if full_process: p1 = utils.full_process(s1, force_ascii=force_ascii) p2 = utils.full_process(s2, force_ascii=force_ascii) else: p1 = s1 p2 = s2 if not utils.validate_string(p1): return 0 if not utils.validate_string(p2): return 0 # should we look at partials? try_partial = True unbase_scale = .95 partial_scale = .90 base = ratio(p1, p2) len_ratio = float(max(len(p1), len(p2))) / min(len(p1), len(p2)) # if strings are similar length, don't use partials if len_ratio < 1.5: try_partial = False # if one string is much much shorter than the other if len_ratio > 8: partial_scale = .6 if try_partial: partial = partial_ratio(p1, p2) * partial_scale ptsor = partial_token_sort_ratio(p1, p2, full_process=False) \ * unbase_scale * partial_scale ptser = partial_token_set_ratio(p1, p2, full_process=False) \ * unbase_scale * partial_scale return utils.intr(max(base, partial, ptsor, ptser)) else: tsor = token_sort_ratio(p1, p2, full_process=False) * unbase_scale tser = token_set_ratio(p1, p2, full_process=False) * unbase_scale return utils.intr(max(base, tsor, tser)) def UWRatio(s1, s2, full_process=True): """Return a measure of the sequences' similarity between 0 and 100, using different algorithms. Same as WRatio but preserving unicode. """ return WRatio(s1, s2, force_ascii=False, full_process=full_process)
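A usage sketch (assuming fuzzywuzzy is installed): once both inputs are token-sorted, word order stops mattering, so two permutations of the same tokens compare as identical strings.

from fuzzywuzzy import fuzz

a = "new york mets vs atlanta braves"
b = "atlanta braves vs new york mets"

print(fuzz.ratio(a, b))             # well below 100: order differs
print(fuzz.token_sort_ratio(a, b))  # 100: identical after token sorting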
seatgeek/fuzzywuzzy
fuzzywuzzy/fuzz.py
partial_token_sort_ratio
python
def partial_token_sort_ratio(s1, s2, force_ascii=True, full_process=True):
    return _token_sort(s1, s2, partial=True, force_ascii=force_ascii, full_process=full_process)
Return the ratio of the most similar substring as a number between 0 and 100, but sorting the tokens before comparing.
train
https://github.com/seatgeek/fuzzywuzzy/blob/778162c5a73256745eb6ae22f925bc2dbcf7c894/fuzzywuzzy/fuzz.py#L108-L112
null
#!/usr/bin/env python # encoding: utf-8 from __future__ import unicode_literals import platform import warnings try: from .StringMatcher import StringMatcher as SequenceMatcher except ImportError: if platform.python_implementation() != "PyPy": warnings.warn('Using slow pure-python SequenceMatcher. Install python-Levenshtein to remove this warning') from difflib import SequenceMatcher from . import utils ########################### # Basic Scoring Functions # ########################### @utils.check_for_none @utils.check_for_equivalence @utils.check_empty_string def ratio(s1, s2): s1, s2 = utils.make_type_consistent(s1, s2) m = SequenceMatcher(None, s1, s2) return utils.intr(100 * m.ratio()) @utils.check_for_none @utils.check_for_equivalence @utils.check_empty_string def partial_ratio(s1, s2): """"Return the ratio of the most similar substring as a number between 0 and 100.""" s1, s2 = utils.make_type_consistent(s1, s2) if len(s1) <= len(s2): shorter = s1 longer = s2 else: shorter = s2 longer = s1 m = SequenceMatcher(None, shorter, longer) blocks = m.get_matching_blocks() # each block represents a sequence of matching characters in a string # of the form (idx_1, idx_2, len) # the best partial match will block align with at least one of those blocks # e.g. shorter = "abcd", longer = XXXbcdeEEE # block = (1,3,3) # best score === ratio("abcd", "Xbcd") scores = [] for block in blocks: long_start = block[1] - block[0] if (block[1] - block[0]) > 0 else 0 long_end = long_start + len(shorter) long_substr = longer[long_start:long_end] m2 = SequenceMatcher(None, shorter, long_substr) r = m2.ratio() if r > .995: return 100 else: scores.append(r) return utils.intr(100 * max(scores)) ############################## # Advanced Scoring Functions # ############################## def _process_and_sort(s, force_ascii, full_process=True): """Return a cleaned string with token sorted.""" # pull tokens ts = utils.full_process(s, force_ascii=force_ascii) if full_process else s tokens = ts.split() # sort tokens and join sorted_string = u" ".join(sorted(tokens)) return sorted_string.strip() # Sorted Token # find all alphanumeric tokens in the string # sort those tokens and take ratio of resulting joined strings # controls for unordered string elements @utils.check_for_none def _token_sort(s1, s2, partial=True, force_ascii=True, full_process=True): sorted1 = _process_and_sort(s1, force_ascii, full_process=full_process) sorted2 = _process_and_sort(s2, force_ascii, full_process=full_process) if partial: return partial_ratio(sorted1, sorted2) else: return ratio(sorted1, sorted2) def token_sort_ratio(s1, s2, force_ascii=True, full_process=True): """Return a measure of the sequences' similarity between 0 and 100 but sorting the token before comparing. """ return _token_sort(s1, s2, partial=False, force_ascii=force_ascii, full_process=full_process) @utils.check_for_none def _token_set(s1, s2, partial=True, force_ascii=True, full_process=True): """Find all alphanumeric tokens in each string... 
- treat them as a set - construct two strings of the form: <sorted_intersection><sorted_remainder> - take ratios of those two strings - controls for unordered partial matches""" if not full_process and s1 == s2: return 100 p1 = utils.full_process(s1, force_ascii=force_ascii) if full_process else s1 p2 = utils.full_process(s2, force_ascii=force_ascii) if full_process else s2 if not utils.validate_string(p1): return 0 if not utils.validate_string(p2): return 0 # pull tokens tokens1 = set(p1.split()) tokens2 = set(p2.split()) intersection = tokens1.intersection(tokens2) diff1to2 = tokens1.difference(tokens2) diff2to1 = tokens2.difference(tokens1) sorted_sect = " ".join(sorted(intersection)) sorted_1to2 = " ".join(sorted(diff1to2)) sorted_2to1 = " ".join(sorted(diff2to1)) combined_1to2 = sorted_sect + " " + sorted_1to2 combined_2to1 = sorted_sect + " " + sorted_2to1 # strip sorted_sect = sorted_sect.strip() combined_1to2 = combined_1to2.strip() combined_2to1 = combined_2to1.strip() if partial: ratio_func = partial_ratio else: ratio_func = ratio pairwise = [ ratio_func(sorted_sect, combined_1to2), ratio_func(sorted_sect, combined_2to1), ratio_func(combined_1to2, combined_2to1) ] return max(pairwise) def token_set_ratio(s1, s2, force_ascii=True, full_process=True): return _token_set(s1, s2, partial=False, force_ascii=force_ascii, full_process=full_process) def partial_token_set_ratio(s1, s2, force_ascii=True, full_process=True): return _token_set(s1, s2, partial=True, force_ascii=force_ascii, full_process=full_process) ################### # Combination API # ################### # q is for quick def QRatio(s1, s2, force_ascii=True, full_process=True): """ Quick ratio comparison between two strings. Runs full_process from utils on both strings Short circuits if either of the strings is empty after processing. :param s1: :param s2: :param force_ascii: Allow only ASCII characters (Default: True) :full_process: Process inputs, used here to avoid double processing in extract functions (Default: True) :return: similarity ratio """ if full_process: p1 = utils.full_process(s1, force_ascii=force_ascii) p2 = utils.full_process(s2, force_ascii=force_ascii) else: p1 = s1 p2 = s2 if not utils.validate_string(p1): return 0 if not utils.validate_string(p2): return 0 return ratio(p1, p2) def UQRatio(s1, s2, full_process=True): """ Unicode quick ratio Calls QRatio with force_ascii set to False :param s1: :param s2: :return: similarity ratio """ return QRatio(s1, s2, force_ascii=False, full_process=full_process) # w is for weighted def WRatio(s1, s2, force_ascii=True, full_process=True): """ Return a measure of the sequences' similarity between 0 and 100, using different algorithms. **Steps in the order they occur** #. Run full_process from utils on both strings #. Short circuit if this makes either string empty #. Take the ratio of the two processed strings (fuzz.ratio) #. Run checks to compare the length of the strings * If one of the strings is more than 1.5 times as long as the other use partial_ratio comparisons - scale partial results by 0.9 (this makes sure only full results can return 100) * If one of the strings is over 8 times as long as the other instead scale by 0.6 #. Run the other ratio functions * if using partial ratio functions call partial_ratio, partial_token_sort_ratio and partial_token_set_ratio scale all of these by the ratio based on length * otherwise call token_sort_ratio and token_set_ratio * all token based comparisons are scaled by 0.95 (on top of any partial scalars) #. 
Take the highest value from these results round it and return it as an integer. :param s1: :param s2: :param force_ascii: Allow only ascii characters :type force_ascii: bool :full_process: Process inputs, used here to avoid double processing in extract functions (Default: True) :return: """ if full_process: p1 = utils.full_process(s1, force_ascii=force_ascii) p2 = utils.full_process(s2, force_ascii=force_ascii) else: p1 = s1 p2 = s2 if not utils.validate_string(p1): return 0 if not utils.validate_string(p2): return 0 # should we look at partials? try_partial = True unbase_scale = .95 partial_scale = .90 base = ratio(p1, p2) len_ratio = float(max(len(p1), len(p2))) / min(len(p1), len(p2)) # if strings are similar length, don't use partials if len_ratio < 1.5: try_partial = False # if one string is much much shorter than the other if len_ratio > 8: partial_scale = .6 if try_partial: partial = partial_ratio(p1, p2) * partial_scale ptsor = partial_token_sort_ratio(p1, p2, full_process=False) \ * unbase_scale * partial_scale ptser = partial_token_set_ratio(p1, p2, full_process=False) \ * unbase_scale * partial_scale return utils.intr(max(base, partial, ptsor, ptser)) else: tsor = token_sort_ratio(p1, p2, full_process=False) * unbase_scale tser = token_set_ratio(p1, p2, full_process=False) * unbase_scale return utils.intr(max(base, tsor, tser)) def UWRatio(s1, s2, full_process=True): """Return a measure of the sequences' similarity between 0 and 100, using different algorithms. Same as WRatio but preserving unicode. """ return WRatio(s1, s2, force_ascii=False, full_process=full_process)
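A usage sketch combining the two ideas (assuming fuzzywuzzy is installed): tokens are sorted first, then the best-substring ratio is taken, so a short query embedded in a longer, reordered string typically scores higher under the partial variant.

from fuzzywuzzy import fuzz

short = "mets vs braves"
longer = "the new york mets vs the atlanta braves game"

# the non-partial variant is penalised by every extra token in `longer`
print(fuzz.token_sort_ratio(short, longer))
# the partial variant only needs the sorted short string to align with a
# window of the sorted long string, so it is typically higher here
print(fuzz.partial_token_sort_ratio(short, longer))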
seatgeek/fuzzywuzzy
fuzzywuzzy/fuzz.py
_token_set
python
def _token_set(s1, s2, partial=True, force_ascii=True, full_process=True):
    if not full_process and s1 == s2:
        return 100

    p1 = utils.full_process(s1, force_ascii=force_ascii) if full_process else s1
    p2 = utils.full_process(s2, force_ascii=force_ascii) if full_process else s2

    if not utils.validate_string(p1):
        return 0
    if not utils.validate_string(p2):
        return 0

    # pull tokens
    tokens1 = set(p1.split())
    tokens2 = set(p2.split())

    intersection = tokens1.intersection(tokens2)
    diff1to2 = tokens1.difference(tokens2)
    diff2to1 = tokens2.difference(tokens1)

    sorted_sect = " ".join(sorted(intersection))
    sorted_1to2 = " ".join(sorted(diff1to2))
    sorted_2to1 = " ".join(sorted(diff2to1))

    combined_1to2 = sorted_sect + " " + sorted_1to2
    combined_2to1 = sorted_sect + " " + sorted_2to1

    # strip
    sorted_sect = sorted_sect.strip()
    combined_1to2 = combined_1to2.strip()
    combined_2to1 = combined_2to1.strip()

    if partial:
        ratio_func = partial_ratio
    else:
        ratio_func = ratio

    pairwise = [
        ratio_func(sorted_sect, combined_1to2),
        ratio_func(sorted_sect, combined_2to1),
        ratio_func(combined_1to2, combined_2to1)
    ]
    return max(pairwise)
Find all alphanumeric tokens in each string...
- treat them as a set
- construct two strings of the form: <sorted_intersection><sorted_remainder>
- take ratios of those two strings
- controls for unordered partial matches
train
https://github.com/seatgeek/fuzzywuzzy/blob/778162c5a73256745eb6ae22f925bc2dbcf7c894/fuzzywuzzy/fuzz.py#L116-L165
null
#!/usr/bin/env python # encoding: utf-8 from __future__ import unicode_literals import platform import warnings try: from .StringMatcher import StringMatcher as SequenceMatcher except ImportError: if platform.python_implementation() != "PyPy": warnings.warn('Using slow pure-python SequenceMatcher. Install python-Levenshtein to remove this warning') from difflib import SequenceMatcher from . import utils ########################### # Basic Scoring Functions # ########################### @utils.check_for_none @utils.check_for_equivalence @utils.check_empty_string def ratio(s1, s2): s1, s2 = utils.make_type_consistent(s1, s2) m = SequenceMatcher(None, s1, s2) return utils.intr(100 * m.ratio()) @utils.check_for_none @utils.check_for_equivalence @utils.check_empty_string def partial_ratio(s1, s2): """"Return the ratio of the most similar substring as a number between 0 and 100.""" s1, s2 = utils.make_type_consistent(s1, s2) if len(s1) <= len(s2): shorter = s1 longer = s2 else: shorter = s2 longer = s1 m = SequenceMatcher(None, shorter, longer) blocks = m.get_matching_blocks() # each block represents a sequence of matching characters in a string # of the form (idx_1, idx_2, len) # the best partial match will block align with at least one of those blocks # e.g. shorter = "abcd", longer = XXXbcdeEEE # block = (1,3,3) # best score === ratio("abcd", "Xbcd") scores = [] for block in blocks: long_start = block[1] - block[0] if (block[1] - block[0]) > 0 else 0 long_end = long_start + len(shorter) long_substr = longer[long_start:long_end] m2 = SequenceMatcher(None, shorter, long_substr) r = m2.ratio() if r > .995: return 100 else: scores.append(r) return utils.intr(100 * max(scores)) ############################## # Advanced Scoring Functions # ############################## def _process_and_sort(s, force_ascii, full_process=True): """Return a cleaned string with token sorted.""" # pull tokens ts = utils.full_process(s, force_ascii=force_ascii) if full_process else s tokens = ts.split() # sort tokens and join sorted_string = u" ".join(sorted(tokens)) return sorted_string.strip() # Sorted Token # find all alphanumeric tokens in the string # sort those tokens and take ratio of resulting joined strings # controls for unordered string elements @utils.check_for_none def _token_sort(s1, s2, partial=True, force_ascii=True, full_process=True): sorted1 = _process_and_sort(s1, force_ascii, full_process=full_process) sorted2 = _process_and_sort(s2, force_ascii, full_process=full_process) if partial: return partial_ratio(sorted1, sorted2) else: return ratio(sorted1, sorted2) def token_sort_ratio(s1, s2, force_ascii=True, full_process=True): """Return a measure of the sequences' similarity between 0 and 100 but sorting the token before comparing. """ return _token_sort(s1, s2, partial=False, force_ascii=force_ascii, full_process=full_process) def partial_token_sort_ratio(s1, s2, force_ascii=True, full_process=True): """Return the ratio of the most similar substring as a number between 0 and 100 but sorting the token before comparing. 
""" return _token_sort(s1, s2, partial=True, force_ascii=force_ascii, full_process=full_process) @utils.check_for_none def token_set_ratio(s1, s2, force_ascii=True, full_process=True): return _token_set(s1, s2, partial=False, force_ascii=force_ascii, full_process=full_process) def partial_token_set_ratio(s1, s2, force_ascii=True, full_process=True): return _token_set(s1, s2, partial=True, force_ascii=force_ascii, full_process=full_process) ################### # Combination API # ################### # q is for quick def QRatio(s1, s2, force_ascii=True, full_process=True): """ Quick ratio comparison between two strings. Runs full_process from utils on both strings Short circuits if either of the strings is empty after processing. :param s1: :param s2: :param force_ascii: Allow only ASCII characters (Default: True) :full_process: Process inputs, used here to avoid double processing in extract functions (Default: True) :return: similarity ratio """ if full_process: p1 = utils.full_process(s1, force_ascii=force_ascii) p2 = utils.full_process(s2, force_ascii=force_ascii) else: p1 = s1 p2 = s2 if not utils.validate_string(p1): return 0 if not utils.validate_string(p2): return 0 return ratio(p1, p2) def UQRatio(s1, s2, full_process=True): """ Unicode quick ratio Calls QRatio with force_ascii set to False :param s1: :param s2: :return: similarity ratio """ return QRatio(s1, s2, force_ascii=False, full_process=full_process) # w is for weighted def WRatio(s1, s2, force_ascii=True, full_process=True): """ Return a measure of the sequences' similarity between 0 and 100, using different algorithms. **Steps in the order they occur** #. Run full_process from utils on both strings #. Short circuit if this makes either string empty #. Take the ratio of the two processed strings (fuzz.ratio) #. Run checks to compare the length of the strings * If one of the strings is more than 1.5 times as long as the other use partial_ratio comparisons - scale partial results by 0.9 (this makes sure only full results can return 100) * If one of the strings is over 8 times as long as the other instead scale by 0.6 #. Run the other ratio functions * if using partial ratio functions call partial_ratio, partial_token_sort_ratio and partial_token_set_ratio scale all of these by the ratio based on length * otherwise call token_sort_ratio and token_set_ratio * all token based comparisons are scaled by 0.95 (on top of any partial scalars) #. Take the highest value from these results round it and return it as an integer. :param s1: :param s2: :param force_ascii: Allow only ascii characters :type force_ascii: bool :full_process: Process inputs, used here to avoid double processing in extract functions (Default: True) :return: """ if full_process: p1 = utils.full_process(s1, force_ascii=force_ascii) p2 = utils.full_process(s2, force_ascii=force_ascii) else: p1 = s1 p2 = s2 if not utils.validate_string(p1): return 0 if not utils.validate_string(p2): return 0 # should we look at partials? 
try_partial = True unbase_scale = .95 partial_scale = .90 base = ratio(p1, p2) len_ratio = float(max(len(p1), len(p2))) / min(len(p1), len(p2)) # if strings are similar length, don't use partials if len_ratio < 1.5: try_partial = False # if one string is much much shorter than the other if len_ratio > 8: partial_scale = .6 if try_partial: partial = partial_ratio(p1, p2) * partial_scale ptsor = partial_token_sort_ratio(p1, p2, full_process=False) \ * unbase_scale * partial_scale ptser = partial_token_set_ratio(p1, p2, full_process=False) \ * unbase_scale * partial_scale return utils.intr(max(base, partial, ptsor, ptser)) else: tsor = token_sort_ratio(p1, p2, full_process=False) * unbase_scale tser = token_set_ratio(p1, p2, full_process=False) * unbase_scale return utils.intr(max(base, tsor, tser)) def UWRatio(s1, s2, full_process=True): """Return a measure of the sequences' similarity between 0 and 100, using different algorithms. Same as WRatio but preserving unicode. """ return WRatio(s1, s2, force_ascii=False, full_process=full_process)
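A usage sketch of the set construction described in the documentation string above (assuming fuzzywuzzy is installed): tokens are deduplicated into sets, and the sorted intersection is compared against intersection-plus-remainder, so repeated words cost nothing.

from fuzzywuzzy import fuzz

a = "fuzzy was a bear"
b = "fuzzy fuzzy was a bear"

print(fuzz.token_sort_ratio(a, b))  # below 100: the duplicate token hurts
print(fuzz.token_set_ratio(a, b))   # 100: the set construction ignores it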
seatgeek/fuzzywuzzy
fuzzywuzzy/fuzz.py
QRatio
python
def QRatio(s1, s2, force_ascii=True, full_process=True):
    if full_process:
        p1 = utils.full_process(s1, force_ascii=force_ascii)
        p2 = utils.full_process(s2, force_ascii=force_ascii)
    else:
        p1 = s1
        p2 = s2

    if not utils.validate_string(p1):
        return 0
    if not utils.validate_string(p2):
        return 0

    return ratio(p1, p2)
Quick ratio comparison between two strings.

Runs full_process from utils on both strings.
Short circuits if either of the strings is empty after processing.

:param s1:
:param s2:
:param force_ascii: Allow only ASCII characters (Default: True)
:param full_process: Process inputs, used here to avoid double processing in extract functions (Default: True)
:return: similarity ratio
train
https://github.com/seatgeek/fuzzywuzzy/blob/778162c5a73256745eb6ae22f925bc2dbcf7c894/fuzzywuzzy/fuzz.py#L181-L207
[ "def full_process(s, force_ascii=False):\n \"\"\"Process string by\n -- removing all but letters and numbers\n -- trim whitespace\n -- force to lower case\n if force_ascii == True, force convert to ascii\"\"\"\n\n if force_ascii:\n s = asciidammit(s)\n # Keep only Letters and Numbers (see Unicode docs).\n string_out = StringProcessor.replace_non_letters_non_numbers_with_whitespace(s)\n # Force into lowercase.\n string_out = StringProcessor.to_lower_case(string_out)\n # Remove leading and trailing whitespaces.\n string_out = StringProcessor.strip(string_out)\n return string_out\n", "def validate_string(s):\n \"\"\"\n Check input has length and that length > 0\n\n :param s:\n :return: True if len(s) > 0 else False\n \"\"\"\n try:\n return len(s) > 0\n except TypeError:\n return False\n" ]
#!/usr/bin/env python # encoding: utf-8 from __future__ import unicode_literals import platform import warnings try: from .StringMatcher import StringMatcher as SequenceMatcher except ImportError: if platform.python_implementation() != "PyPy": warnings.warn('Using slow pure-python SequenceMatcher. Install python-Levenshtein to remove this warning') from difflib import SequenceMatcher from . import utils ########################### # Basic Scoring Functions # ########################### @utils.check_for_none @utils.check_for_equivalence @utils.check_empty_string def ratio(s1, s2): s1, s2 = utils.make_type_consistent(s1, s2) m = SequenceMatcher(None, s1, s2) return utils.intr(100 * m.ratio()) @utils.check_for_none @utils.check_for_equivalence @utils.check_empty_string def partial_ratio(s1, s2): """"Return the ratio of the most similar substring as a number between 0 and 100.""" s1, s2 = utils.make_type_consistent(s1, s2) if len(s1) <= len(s2): shorter = s1 longer = s2 else: shorter = s2 longer = s1 m = SequenceMatcher(None, shorter, longer) blocks = m.get_matching_blocks() # each block represents a sequence of matching characters in a string # of the form (idx_1, idx_2, len) # the best partial match will block align with at least one of those blocks # e.g. shorter = "abcd", longer = XXXbcdeEEE # block = (1,3,3) # best score === ratio("abcd", "Xbcd") scores = [] for block in blocks: long_start = block[1] - block[0] if (block[1] - block[0]) > 0 else 0 long_end = long_start + len(shorter) long_substr = longer[long_start:long_end] m2 = SequenceMatcher(None, shorter, long_substr) r = m2.ratio() if r > .995: return 100 else: scores.append(r) return utils.intr(100 * max(scores)) ############################## # Advanced Scoring Functions # ############################## def _process_and_sort(s, force_ascii, full_process=True): """Return a cleaned string with token sorted.""" # pull tokens ts = utils.full_process(s, force_ascii=force_ascii) if full_process else s tokens = ts.split() # sort tokens and join sorted_string = u" ".join(sorted(tokens)) return sorted_string.strip() # Sorted Token # find all alphanumeric tokens in the string # sort those tokens and take ratio of resulting joined strings # controls for unordered string elements @utils.check_for_none def _token_sort(s1, s2, partial=True, force_ascii=True, full_process=True): sorted1 = _process_and_sort(s1, force_ascii, full_process=full_process) sorted2 = _process_and_sort(s2, force_ascii, full_process=full_process) if partial: return partial_ratio(sorted1, sorted2) else: return ratio(sorted1, sorted2) def token_sort_ratio(s1, s2, force_ascii=True, full_process=True): """Return a measure of the sequences' similarity between 0 and 100 but sorting the token before comparing. """ return _token_sort(s1, s2, partial=False, force_ascii=force_ascii, full_process=full_process) def partial_token_sort_ratio(s1, s2, force_ascii=True, full_process=True): """Return the ratio of the most similar substring as a number between 0 and 100 but sorting the token before comparing. """ return _token_sort(s1, s2, partial=True, force_ascii=force_ascii, full_process=full_process) @utils.check_for_none def _token_set(s1, s2, partial=True, force_ascii=True, full_process=True): """Find all alphanumeric tokens in each string... 
- treat them as a set - construct two strings of the form: <sorted_intersection><sorted_remainder> - take ratios of those two strings - controls for unordered partial matches""" if not full_process and s1 == s2: return 100 p1 = utils.full_process(s1, force_ascii=force_ascii) if full_process else s1 p2 = utils.full_process(s2, force_ascii=force_ascii) if full_process else s2 if not utils.validate_string(p1): return 0 if not utils.validate_string(p2): return 0 # pull tokens tokens1 = set(p1.split()) tokens2 = set(p2.split()) intersection = tokens1.intersection(tokens2) diff1to2 = tokens1.difference(tokens2) diff2to1 = tokens2.difference(tokens1) sorted_sect = " ".join(sorted(intersection)) sorted_1to2 = " ".join(sorted(diff1to2)) sorted_2to1 = " ".join(sorted(diff2to1)) combined_1to2 = sorted_sect + " " + sorted_1to2 combined_2to1 = sorted_sect + " " + sorted_2to1 # strip sorted_sect = sorted_sect.strip() combined_1to2 = combined_1to2.strip() combined_2to1 = combined_2to1.strip() if partial: ratio_func = partial_ratio else: ratio_func = ratio pairwise = [ ratio_func(sorted_sect, combined_1to2), ratio_func(sorted_sect, combined_2to1), ratio_func(combined_1to2, combined_2to1) ] return max(pairwise) def token_set_ratio(s1, s2, force_ascii=True, full_process=True): return _token_set(s1, s2, partial=False, force_ascii=force_ascii, full_process=full_process) def partial_token_set_ratio(s1, s2, force_ascii=True, full_process=True): return _token_set(s1, s2, partial=True, force_ascii=force_ascii, full_process=full_process) ################### # Combination API # ################### # q is for quick def UQRatio(s1, s2, full_process=True): """ Unicode quick ratio Calls QRatio with force_ascii set to False :param s1: :param s2: :return: similarity ratio """ return QRatio(s1, s2, force_ascii=False, full_process=full_process) # w is for weighted def WRatio(s1, s2, force_ascii=True, full_process=True): """ Return a measure of the sequences' similarity between 0 and 100, using different algorithms. **Steps in the order they occur** #. Run full_process from utils on both strings #. Short circuit if this makes either string empty #. Take the ratio of the two processed strings (fuzz.ratio) #. Run checks to compare the length of the strings * If one of the strings is more than 1.5 times as long as the other use partial_ratio comparisons - scale partial results by 0.9 (this makes sure only full results can return 100) * If one of the strings is over 8 times as long as the other instead scale by 0.6 #. Run the other ratio functions * if using partial ratio functions call partial_ratio, partial_token_sort_ratio and partial_token_set_ratio scale all of these by the ratio based on length * otherwise call token_sort_ratio and token_set_ratio * all token based comparisons are scaled by 0.95 (on top of any partial scalars) #. Take the highest value from these results round it and return it as an integer. :param s1: :param s2: :param force_ascii: Allow only ascii characters :type force_ascii: bool :full_process: Process inputs, used here to avoid double processing in extract functions (Default: True) :return: """ if full_process: p1 = utils.full_process(s1, force_ascii=force_ascii) p2 = utils.full_process(s2, force_ascii=force_ascii) else: p1 = s1 p2 = s2 if not utils.validate_string(p1): return 0 if not utils.validate_string(p2): return 0 # should we look at partials? 
try_partial = True unbase_scale = .95 partial_scale = .90 base = ratio(p1, p2) len_ratio = float(max(len(p1), len(p2))) / min(len(p1), len(p2)) # if strings are similar length, don't use partials if len_ratio < 1.5: try_partial = False # if one string is much much shorter than the other if len_ratio > 8: partial_scale = .6 if try_partial: partial = partial_ratio(p1, p2) * partial_scale ptsor = partial_token_sort_ratio(p1, p2, full_process=False) \ * unbase_scale * partial_scale ptser = partial_token_set_ratio(p1, p2, full_process=False) \ * unbase_scale * partial_scale return utils.intr(max(base, partial, ptsor, ptser)) else: tsor = token_sort_ratio(p1, p2, full_process=False) * unbase_scale tser = token_set_ratio(p1, p2, full_process=False) * unbase_scale return utils.intr(max(base, tsor, tser)) def UWRatio(s1, s2, full_process=True): """Return a measure of the sequences' similarity between 0 and 100, using different algorithms. Same as WRatio but preserving unicode. """ return WRatio(s1, s2, force_ascii=False, full_process=full_process)
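A usage sketch (assuming fuzzywuzzy is installed): QRatio is fuzz.ratio plus utils.full_process, so case, punctuation and stray whitespace are normalised away before comparing.

from fuzzywuzzy import fuzz

# raw ratio is hurt by the case and punctuation differences
print(fuzz.ratio("this is a test", "THIS is a TEST!"))
# QRatio runs full_process first, reducing both sides to "this is a test"
print(fuzz.QRatio("this is a test", "THIS is a TEST!"))  # 100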
seatgeek/fuzzywuzzy
fuzzywuzzy/fuzz.py
UQRatio
python
def UQRatio(s1, s2, full_process=True):
    return QRatio(s1, s2, force_ascii=False, full_process=full_process)
Unicode quick ratio.

Calls QRatio with force_ascii set to False.

:param s1:
:param s2:
:return: similarity ratio
train
https://github.com/seatgeek/fuzzywuzzy/blob/778162c5a73256745eb6ae22f925bc2dbcf7c894/fuzzywuzzy/fuzz.py#L210-L220
[ "def QRatio(s1, s2, force_ascii=True, full_process=True):\n \"\"\"\n Quick ratio comparison between two strings.\n\n Runs full_process from utils on both strings\n Short circuits if either of the strings is empty after processing.\n\n :param s1:\n :param s2:\n :param force_ascii: Allow only ASCII characters (Default: True)\n :full_process: Process inputs, used here to avoid double processing in extract functions (Default: True)\n :return: similarity ratio\n \"\"\"\n\n if full_process:\n p1 = utils.full_process(s1, force_ascii=force_ascii)\n p2 = utils.full_process(s2, force_ascii=force_ascii)\n else:\n p1 = s1\n p2 = s2\n\n if not utils.validate_string(p1):\n return 0\n if not utils.validate_string(p2):\n return 0\n\n return ratio(p1, p2)\n" ]
#!/usr/bin/env python # encoding: utf-8 from __future__ import unicode_literals import platform import warnings try: from .StringMatcher import StringMatcher as SequenceMatcher except ImportError: if platform.python_implementation() != "PyPy": warnings.warn('Using slow pure-python SequenceMatcher. Install python-Levenshtein to remove this warning') from difflib import SequenceMatcher from . import utils ########################### # Basic Scoring Functions # ########################### @utils.check_for_none @utils.check_for_equivalence @utils.check_empty_string def ratio(s1, s2): s1, s2 = utils.make_type_consistent(s1, s2) m = SequenceMatcher(None, s1, s2) return utils.intr(100 * m.ratio()) @utils.check_for_none @utils.check_for_equivalence @utils.check_empty_string def partial_ratio(s1, s2): """"Return the ratio of the most similar substring as a number between 0 and 100.""" s1, s2 = utils.make_type_consistent(s1, s2) if len(s1) <= len(s2): shorter = s1 longer = s2 else: shorter = s2 longer = s1 m = SequenceMatcher(None, shorter, longer) blocks = m.get_matching_blocks() # each block represents a sequence of matching characters in a string # of the form (idx_1, idx_2, len) # the best partial match will block align with at least one of those blocks # e.g. shorter = "abcd", longer = XXXbcdeEEE # block = (1,3,3) # best score === ratio("abcd", "Xbcd") scores = [] for block in blocks: long_start = block[1] - block[0] if (block[1] - block[0]) > 0 else 0 long_end = long_start + len(shorter) long_substr = longer[long_start:long_end] m2 = SequenceMatcher(None, shorter, long_substr) r = m2.ratio() if r > .995: return 100 else: scores.append(r) return utils.intr(100 * max(scores)) ############################## # Advanced Scoring Functions # ############################## def _process_and_sort(s, force_ascii, full_process=True): """Return a cleaned string with token sorted.""" # pull tokens ts = utils.full_process(s, force_ascii=force_ascii) if full_process else s tokens = ts.split() # sort tokens and join sorted_string = u" ".join(sorted(tokens)) return sorted_string.strip() # Sorted Token # find all alphanumeric tokens in the string # sort those tokens and take ratio of resulting joined strings # controls for unordered string elements @utils.check_for_none def _token_sort(s1, s2, partial=True, force_ascii=True, full_process=True): sorted1 = _process_and_sort(s1, force_ascii, full_process=full_process) sorted2 = _process_and_sort(s2, force_ascii, full_process=full_process) if partial: return partial_ratio(sorted1, sorted2) else: return ratio(sorted1, sorted2) def token_sort_ratio(s1, s2, force_ascii=True, full_process=True): """Return a measure of the sequences' similarity between 0 and 100 but sorting the token before comparing. """ return _token_sort(s1, s2, partial=False, force_ascii=force_ascii, full_process=full_process) def partial_token_sort_ratio(s1, s2, force_ascii=True, full_process=True): """Return the ratio of the most similar substring as a number between 0 and 100 but sorting the token before comparing. """ return _token_sort(s1, s2, partial=True, force_ascii=force_ascii, full_process=full_process) @utils.check_for_none def _token_set(s1, s2, partial=True, force_ascii=True, full_process=True): """Find all alphanumeric tokens in each string... 
- treat them as a set - construct two strings of the form: <sorted_intersection><sorted_remainder> - take ratios of those two strings - controls for unordered partial matches""" if not full_process and s1 == s2: return 100 p1 = utils.full_process(s1, force_ascii=force_ascii) if full_process else s1 p2 = utils.full_process(s2, force_ascii=force_ascii) if full_process else s2 if not utils.validate_string(p1): return 0 if not utils.validate_string(p2): return 0 # pull tokens tokens1 = set(p1.split()) tokens2 = set(p2.split()) intersection = tokens1.intersection(tokens2) diff1to2 = tokens1.difference(tokens2) diff2to1 = tokens2.difference(tokens1) sorted_sect = " ".join(sorted(intersection)) sorted_1to2 = " ".join(sorted(diff1to2)) sorted_2to1 = " ".join(sorted(diff2to1)) combined_1to2 = sorted_sect + " " + sorted_1to2 combined_2to1 = sorted_sect + " " + sorted_2to1 # strip sorted_sect = sorted_sect.strip() combined_1to2 = combined_1to2.strip() combined_2to1 = combined_2to1.strip() if partial: ratio_func = partial_ratio else: ratio_func = ratio pairwise = [ ratio_func(sorted_sect, combined_1to2), ratio_func(sorted_sect, combined_2to1), ratio_func(combined_1to2, combined_2to1) ] return max(pairwise) def token_set_ratio(s1, s2, force_ascii=True, full_process=True): return _token_set(s1, s2, partial=False, force_ascii=force_ascii, full_process=full_process) def partial_token_set_ratio(s1, s2, force_ascii=True, full_process=True): return _token_set(s1, s2, partial=True, force_ascii=force_ascii, full_process=full_process) ################### # Combination API # ################### # q is for quick def QRatio(s1, s2, force_ascii=True, full_process=True): """ Quick ratio comparison between two strings. Runs full_process from utils on both strings Short circuits if either of the strings is empty after processing. :param s1: :param s2: :param force_ascii: Allow only ASCII characters (Default: True) :full_process: Process inputs, used here to avoid double processing in extract functions (Default: True) :return: similarity ratio """ if full_process: p1 = utils.full_process(s1, force_ascii=force_ascii) p2 = utils.full_process(s2, force_ascii=force_ascii) else: p1 = s1 p2 = s2 if not utils.validate_string(p1): return 0 if not utils.validate_string(p2): return 0 return ratio(p1, p2) # w is for weighted def WRatio(s1, s2, force_ascii=True, full_process=True): """ Return a measure of the sequences' similarity between 0 and 100, using different algorithms. **Steps in the order they occur** #. Run full_process from utils on both strings #. Short circuit if this makes either string empty #. Take the ratio of the two processed strings (fuzz.ratio) #. Run checks to compare the length of the strings * If one of the strings is more than 1.5 times as long as the other use partial_ratio comparisons - scale partial results by 0.9 (this makes sure only full results can return 100) * If one of the strings is over 8 times as long as the other instead scale by 0.6 #. Run the other ratio functions * if using partial ratio functions call partial_ratio, partial_token_sort_ratio and partial_token_set_ratio scale all of these by the ratio based on length * otherwise call token_sort_ratio and token_set_ratio * all token based comparisons are scaled by 0.95 (on top of any partial scalars) #. Take the highest value from these results round it and return it as an integer. 
:param s1: :param s2: :param force_ascii: Allow only ascii characters :type force_ascii: bool :full_process: Process inputs, used here to avoid double processing in extract functions (Default: True) :return: """ if full_process: p1 = utils.full_process(s1, force_ascii=force_ascii) p2 = utils.full_process(s2, force_ascii=force_ascii) else: p1 = s1 p2 = s2 if not utils.validate_string(p1): return 0 if not utils.validate_string(p2): return 0 # should we look at partials? try_partial = True unbase_scale = .95 partial_scale = .90 base = ratio(p1, p2) len_ratio = float(max(len(p1), len(p2))) / min(len(p1), len(p2)) # if strings are similar length, don't use partials if len_ratio < 1.5: try_partial = False # if one string is much much shorter than the other if len_ratio > 8: partial_scale = .6 if try_partial: partial = partial_ratio(p1, p2) * partial_scale ptsor = partial_token_sort_ratio(p1, p2, full_process=False) \ * unbase_scale * partial_scale ptser = partial_token_set_ratio(p1, p2, full_process=False) \ * unbase_scale * partial_scale return utils.intr(max(base, partial, ptsor, ptser)) else: tsor = token_sort_ratio(p1, p2, full_process=False) * unbase_scale tser = token_set_ratio(p1, p2, full_process=False) * unbase_scale return utils.intr(max(base, tsor, tser)) def UWRatio(s1, s2, full_process=True): """Return a measure of the sequences' similarity between 0 and 100, using different algorithms. Same as WRatio but preserving unicode. """ return WRatio(s1, s2, force_ascii=False, full_process=full_process)
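A brief illustration of the partial_ratio behavior documented in the scope above (its own comment uses the 'abcd' vs 'XXXbcdeEEE' example): the shorter string is scored against same-length windows of the longer one, so a good substring match scores high even when the whole strings differ. A usage sketch, assuming fuzzywuzzy is installed; exact scores vary by version:

from fuzzywuzzy import fuzz

# ratio compares the whole strings; partial_ratio scores the best
# len("abcd")-sized window of the longer string against "abcd".
print(fuzz.ratio("abcd", "XXXbcdeEEE"))          # low: whole-string comparison
print(fuzz.partial_ratio("abcd", "XXXbcdeEEE"))  # higher: "bcd" aligns inside a 4-char window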
seatgeek/fuzzywuzzy
fuzzywuzzy/fuzz.py
WRatio
python
def WRatio(s1, s2, force_ascii=True, full_process=True):
    if full_process:
        p1 = utils.full_process(s1, force_ascii=force_ascii)
        p2 = utils.full_process(s2, force_ascii=force_ascii)
    else:
        p1 = s1
        p2 = s2

    if not utils.validate_string(p1):
        return 0
    if not utils.validate_string(p2):
        return 0

    # should we look at partials?
    try_partial = True
    unbase_scale = .95
    partial_scale = .90

    base = ratio(p1, p2)
    len_ratio = float(max(len(p1), len(p2))) / min(len(p1), len(p2))

    # if strings are similar length, don't use partials
    if len_ratio < 1.5:
        try_partial = False

    # if one string is much much shorter than the other
    if len_ratio > 8:
        partial_scale = .6

    if try_partial:
        partial = partial_ratio(p1, p2) * partial_scale
        ptsor = partial_token_sort_ratio(p1, p2, full_process=False) \
            * unbase_scale * partial_scale
        ptser = partial_token_set_ratio(p1, p2, full_process=False) \
            * unbase_scale * partial_scale

        return utils.intr(max(base, partial, ptsor, ptser))
    else:
        tsor = token_sort_ratio(p1, p2, full_process=False) * unbase_scale
        tser = token_set_ratio(p1, p2, full_process=False) * unbase_scale

        return utils.intr(max(base, tsor, tser))
Return a measure of the sequences' similarity between 0 and 100, using different algorithms.

**Steps in the order they occur**

#. Run full_process from utils on both strings
#. Short circuit if this makes either string empty
#. Take the ratio of the two processed strings (fuzz.ratio)
#. Run checks to compare the length of the strings
    * If one of the strings is more than 1.5 times as long as the other
      use partial_ratio comparisons - scale partial results by 0.9
      (this makes sure only full results can return 100)
    * If one of the strings is over 8 times as long as the other
      instead scale by 0.6
#. Run the other ratio functions
    * if using partial ratio functions call partial_ratio,
      partial_token_sort_ratio and partial_token_set_ratio
      scale all of these by the ratio based on length
    * otherwise call token_sort_ratio and token_set_ratio
    * all token based comparisons are scaled by 0.95
      (on top of any partial scalars)
#. Take the highest value from these results, round it and return it as an integer.

:param s1:
:param s2:
:param force_ascii: Allow only ASCII characters
:type force_ascii: bool
:param full_process: Process inputs, used here to avoid double processing in extract functions (Default: True)
:return: similarity ratio
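As a usage sketch of the branching described above (assuming fuzzywuzzy is importable; the printed scores depend on the installed version and matcher backend):

from fuzzywuzzy import fuzz

# Similar-length inputs: len_ratio < 1.5, so only the plain ratio and the
# token-based ratios (scaled by 0.95) compete for the maximum.
print(fuzz.WRatio("new york mets", "new york meats"))

# One input much longer than the other: len_ratio >= 1.5 switches WRatio to
# the partial_* family, scaled by 0.9 so only a full match can reach 100.
print(fuzz.WRatio("mets", "the new york mets baseball organization"))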
train
https://github.com/seatgeek/fuzzywuzzy/blob/778162c5a73256745eb6ae22f925bc2dbcf7c894/fuzzywuzzy/fuzz.py#L224-L299
[ "def full_process(s, force_ascii=False):\n \"\"\"Process string by\n -- removing all but letters and numbers\n -- trim whitespace\n -- force to lower case\n if force_ascii == True, force convert to ascii\"\"\"\n\n if force_ascii:\n s = asciidammit(s)\n # Keep only Letters and Numbers (see Unicode docs).\n string_out = StringProcessor.replace_non_letters_non_numbers_with_whitespace(s)\n # Force into lowercase.\n string_out = StringProcessor.to_lower_case(string_out)\n # Remove leading and trailing whitespaces.\n string_out = StringProcessor.strip(string_out)\n return string_out\n", "def validate_string(s):\n \"\"\"\n Check input has length and that length > 0\n\n :param s:\n :return: True if len(s) > 0 else False\n \"\"\"\n try:\n return len(s) > 0\n except TypeError:\n return False\n" ]
#!/usr/bin/env python # encoding: utf-8 from __future__ import unicode_literals import platform import warnings try: from .StringMatcher import StringMatcher as SequenceMatcher except ImportError: if platform.python_implementation() != "PyPy": warnings.warn('Using slow pure-python SequenceMatcher. Install python-Levenshtein to remove this warning') from difflib import SequenceMatcher from . import utils ########################### # Basic Scoring Functions # ########################### @utils.check_for_none @utils.check_for_equivalence @utils.check_empty_string def ratio(s1, s2): s1, s2 = utils.make_type_consistent(s1, s2) m = SequenceMatcher(None, s1, s2) return utils.intr(100 * m.ratio()) @utils.check_for_none @utils.check_for_equivalence @utils.check_empty_string def partial_ratio(s1, s2): """"Return the ratio of the most similar substring as a number between 0 and 100.""" s1, s2 = utils.make_type_consistent(s1, s2) if len(s1) <= len(s2): shorter = s1 longer = s2 else: shorter = s2 longer = s1 m = SequenceMatcher(None, shorter, longer) blocks = m.get_matching_blocks() # each block represents a sequence of matching characters in a string # of the form (idx_1, idx_2, len) # the best partial match will block align with at least one of those blocks # e.g. shorter = "abcd", longer = XXXbcdeEEE # block = (1,3,3) # best score === ratio("abcd", "Xbcd") scores = [] for block in blocks: long_start = block[1] - block[0] if (block[1] - block[0]) > 0 else 0 long_end = long_start + len(shorter) long_substr = longer[long_start:long_end] m2 = SequenceMatcher(None, shorter, long_substr) r = m2.ratio() if r > .995: return 100 else: scores.append(r) return utils.intr(100 * max(scores)) ############################## # Advanced Scoring Functions # ############################## def _process_and_sort(s, force_ascii, full_process=True): """Return a cleaned string with token sorted.""" # pull tokens ts = utils.full_process(s, force_ascii=force_ascii) if full_process else s tokens = ts.split() # sort tokens and join sorted_string = u" ".join(sorted(tokens)) return sorted_string.strip() # Sorted Token # find all alphanumeric tokens in the string # sort those tokens and take ratio of resulting joined strings # controls for unordered string elements @utils.check_for_none def _token_sort(s1, s2, partial=True, force_ascii=True, full_process=True): sorted1 = _process_and_sort(s1, force_ascii, full_process=full_process) sorted2 = _process_and_sort(s2, force_ascii, full_process=full_process) if partial: return partial_ratio(sorted1, sorted2) else: return ratio(sorted1, sorted2) def token_sort_ratio(s1, s2, force_ascii=True, full_process=True): """Return a measure of the sequences' similarity between 0 and 100 but sorting the token before comparing. """ return _token_sort(s1, s2, partial=False, force_ascii=force_ascii, full_process=full_process) def partial_token_sort_ratio(s1, s2, force_ascii=True, full_process=True): """Return the ratio of the most similar substring as a number between 0 and 100 but sorting the token before comparing. """ return _token_sort(s1, s2, partial=True, force_ascii=force_ascii, full_process=full_process) @utils.check_for_none def _token_set(s1, s2, partial=True, force_ascii=True, full_process=True): """Find all alphanumeric tokens in each string... 
- treat them as a set - construct two strings of the form: <sorted_intersection><sorted_remainder> - take ratios of those two strings - controls for unordered partial matches""" if not full_process and s1 == s2: return 100 p1 = utils.full_process(s1, force_ascii=force_ascii) if full_process else s1 p2 = utils.full_process(s2, force_ascii=force_ascii) if full_process else s2 if not utils.validate_string(p1): return 0 if not utils.validate_string(p2): return 0 # pull tokens tokens1 = set(p1.split()) tokens2 = set(p2.split()) intersection = tokens1.intersection(tokens2) diff1to2 = tokens1.difference(tokens2) diff2to1 = tokens2.difference(tokens1) sorted_sect = " ".join(sorted(intersection)) sorted_1to2 = " ".join(sorted(diff1to2)) sorted_2to1 = " ".join(sorted(diff2to1)) combined_1to2 = sorted_sect + " " + sorted_1to2 combined_2to1 = sorted_sect + " " + sorted_2to1 # strip sorted_sect = sorted_sect.strip() combined_1to2 = combined_1to2.strip() combined_2to1 = combined_2to1.strip() if partial: ratio_func = partial_ratio else: ratio_func = ratio pairwise = [ ratio_func(sorted_sect, combined_1to2), ratio_func(sorted_sect, combined_2to1), ratio_func(combined_1to2, combined_2to1) ] return max(pairwise) def token_set_ratio(s1, s2, force_ascii=True, full_process=True): return _token_set(s1, s2, partial=False, force_ascii=force_ascii, full_process=full_process) def partial_token_set_ratio(s1, s2, force_ascii=True, full_process=True): return _token_set(s1, s2, partial=True, force_ascii=force_ascii, full_process=full_process) ################### # Combination API # ################### # q is for quick def QRatio(s1, s2, force_ascii=True, full_process=True): """ Quick ratio comparison between two strings. Runs full_process from utils on both strings Short circuits if either of the strings is empty after processing. :param s1: :param s2: :param force_ascii: Allow only ASCII characters (Default: True) :full_process: Process inputs, used here to avoid double processing in extract functions (Default: True) :return: similarity ratio """ if full_process: p1 = utils.full_process(s1, force_ascii=force_ascii) p2 = utils.full_process(s2, force_ascii=force_ascii) else: p1 = s1 p2 = s2 if not utils.validate_string(p1): return 0 if not utils.validate_string(p2): return 0 return ratio(p1, p2) def UQRatio(s1, s2, full_process=True): """ Unicode quick ratio Calls QRatio with force_ascii set to False :param s1: :param s2: :return: similarity ratio """ return QRatio(s1, s2, force_ascii=False, full_process=full_process) # w is for weighted def UWRatio(s1, s2, full_process=True): """Return a measure of the sequences' similarity between 0 and 100, using different algorithms. Same as WRatio but preserving unicode. """ return WRatio(s1, s2, force_ascii=False, full_process=full_process)
seatgeek/fuzzywuzzy
fuzzywuzzy/fuzz.py
UWRatio
python
def UWRatio(s1, s2, full_process=True):
    return WRatio(s1, s2, force_ascii=False, full_process=full_process)
Return a measure of the sequences' similarity between 0 and 100, using different algorithms. Same as WRatio but preserving unicode.
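Why the force_ascii=False default matters here, as a sketch on a non-ASCII input (hypothetical strings; scores are version-dependent):

from fuzzywuzzy import fuzz

s1, s2 = u"psy\u0107", u"psyc"  # u"psyć" vs u"psyc"

# WRatio folds inputs to ASCII by default, so the accented character is lost
# before scoring; UWRatio forwards force_ascii=False and keeps it.
print(fuzz.WRatio(s1, s2))
print(fuzz.UWRatio(s1, s2))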
train
https://github.com/seatgeek/fuzzywuzzy/blob/778162c5a73256745eb6ae22f925bc2dbcf7c894/fuzzywuzzy/fuzz.py#L302-L306
[ "def WRatio(s1, s2, force_ascii=True, full_process=True):\n \"\"\"\n Return a measure of the sequences' similarity between 0 and 100, using different algorithms.\n\n **Steps in the order they occur**\n\n #. Run full_process from utils on both strings\n #. Short circuit if this makes either string empty\n #. Take the ratio of the two processed strings (fuzz.ratio)\n #. Run checks to compare the length of the strings\n * If one of the strings is more than 1.5 times as long as the other\n use partial_ratio comparisons - scale partial results by 0.9\n (this makes sure only full results can return 100)\n * If one of the strings is over 8 times as long as the other\n instead scale by 0.6\n\n #. Run the other ratio functions\n * if using partial ratio functions call partial_ratio,\n partial_token_sort_ratio and partial_token_set_ratio\n scale all of these by the ratio based on length\n * otherwise call token_sort_ratio and token_set_ratio\n * all token based comparisons are scaled by 0.95\n (on top of any partial scalars)\n\n #. Take the highest value from these results\n round it and return it as an integer.\n\n :param s1:\n :param s2:\n :param force_ascii: Allow only ascii characters\n :type force_ascii: bool\n :full_process: Process inputs, used here to avoid double processing in extract functions (Default: True)\n :return:\n \"\"\"\n\n if full_process:\n p1 = utils.full_process(s1, force_ascii=force_ascii)\n p2 = utils.full_process(s2, force_ascii=force_ascii)\n else:\n p1 = s1\n p2 = s2\n\n if not utils.validate_string(p1):\n return 0\n if not utils.validate_string(p2):\n return 0\n\n # should we look at partials?\n try_partial = True\n unbase_scale = .95\n partial_scale = .90\n\n base = ratio(p1, p2)\n len_ratio = float(max(len(p1), len(p2))) / min(len(p1), len(p2))\n\n # if strings are similar length, don't use partials\n if len_ratio < 1.5:\n try_partial = False\n\n # if one string is much much shorter than the other\n if len_ratio > 8:\n partial_scale = .6\n\n if try_partial:\n partial = partial_ratio(p1, p2) * partial_scale\n ptsor = partial_token_sort_ratio(p1, p2, full_process=False) \\\n * unbase_scale * partial_scale\n ptser = partial_token_set_ratio(p1, p2, full_process=False) \\\n * unbase_scale * partial_scale\n\n return utils.intr(max(base, partial, ptsor, ptser))\n else:\n tsor = token_sort_ratio(p1, p2, full_process=False) * unbase_scale\n tser = token_set_ratio(p1, p2, full_process=False) * unbase_scale\n\n return utils.intr(max(base, tsor, tser))\n" ]
#!/usr/bin/env python # encoding: utf-8 from __future__ import unicode_literals import platform import warnings try: from .StringMatcher import StringMatcher as SequenceMatcher except ImportError: if platform.python_implementation() != "PyPy": warnings.warn('Using slow pure-python SequenceMatcher. Install python-Levenshtein to remove this warning') from difflib import SequenceMatcher from . import utils ########################### # Basic Scoring Functions # ########################### @utils.check_for_none @utils.check_for_equivalence @utils.check_empty_string def ratio(s1, s2): s1, s2 = utils.make_type_consistent(s1, s2) m = SequenceMatcher(None, s1, s2) return utils.intr(100 * m.ratio()) @utils.check_for_none @utils.check_for_equivalence @utils.check_empty_string def partial_ratio(s1, s2): """"Return the ratio of the most similar substring as a number between 0 and 100.""" s1, s2 = utils.make_type_consistent(s1, s2) if len(s1) <= len(s2): shorter = s1 longer = s2 else: shorter = s2 longer = s1 m = SequenceMatcher(None, shorter, longer) blocks = m.get_matching_blocks() # each block represents a sequence of matching characters in a string # of the form (idx_1, idx_2, len) # the best partial match will block align with at least one of those blocks # e.g. shorter = "abcd", longer = XXXbcdeEEE # block = (1,3,3) # best score === ratio("abcd", "Xbcd") scores = [] for block in blocks: long_start = block[1] - block[0] if (block[1] - block[0]) > 0 else 0 long_end = long_start + len(shorter) long_substr = longer[long_start:long_end] m2 = SequenceMatcher(None, shorter, long_substr) r = m2.ratio() if r > .995: return 100 else: scores.append(r) return utils.intr(100 * max(scores)) ############################## # Advanced Scoring Functions # ############################## def _process_and_sort(s, force_ascii, full_process=True): """Return a cleaned string with token sorted.""" # pull tokens ts = utils.full_process(s, force_ascii=force_ascii) if full_process else s tokens = ts.split() # sort tokens and join sorted_string = u" ".join(sorted(tokens)) return sorted_string.strip() # Sorted Token # find all alphanumeric tokens in the string # sort those tokens and take ratio of resulting joined strings # controls for unordered string elements @utils.check_for_none def _token_sort(s1, s2, partial=True, force_ascii=True, full_process=True): sorted1 = _process_and_sort(s1, force_ascii, full_process=full_process) sorted2 = _process_and_sort(s2, force_ascii, full_process=full_process) if partial: return partial_ratio(sorted1, sorted2) else: return ratio(sorted1, sorted2) def token_sort_ratio(s1, s2, force_ascii=True, full_process=True): """Return a measure of the sequences' similarity between 0 and 100 but sorting the token before comparing. """ return _token_sort(s1, s2, partial=False, force_ascii=force_ascii, full_process=full_process) def partial_token_sort_ratio(s1, s2, force_ascii=True, full_process=True): """Return the ratio of the most similar substring as a number between 0 and 100 but sorting the token before comparing. """ return _token_sort(s1, s2, partial=True, force_ascii=force_ascii, full_process=full_process) @utils.check_for_none def _token_set(s1, s2, partial=True, force_ascii=True, full_process=True): """Find all alphanumeric tokens in each string... 
- treat them as a set - construct two strings of the form: <sorted_intersection><sorted_remainder> - take ratios of those two strings - controls for unordered partial matches""" if not full_process and s1 == s2: return 100 p1 = utils.full_process(s1, force_ascii=force_ascii) if full_process else s1 p2 = utils.full_process(s2, force_ascii=force_ascii) if full_process else s2 if not utils.validate_string(p1): return 0 if not utils.validate_string(p2): return 0 # pull tokens tokens1 = set(p1.split()) tokens2 = set(p2.split()) intersection = tokens1.intersection(tokens2) diff1to2 = tokens1.difference(tokens2) diff2to1 = tokens2.difference(tokens1) sorted_sect = " ".join(sorted(intersection)) sorted_1to2 = " ".join(sorted(diff1to2)) sorted_2to1 = " ".join(sorted(diff2to1)) combined_1to2 = sorted_sect + " " + sorted_1to2 combined_2to1 = sorted_sect + " " + sorted_2to1 # strip sorted_sect = sorted_sect.strip() combined_1to2 = combined_1to2.strip() combined_2to1 = combined_2to1.strip() if partial: ratio_func = partial_ratio else: ratio_func = ratio pairwise = [ ratio_func(sorted_sect, combined_1to2), ratio_func(sorted_sect, combined_2to1), ratio_func(combined_1to2, combined_2to1) ] return max(pairwise) def token_set_ratio(s1, s2, force_ascii=True, full_process=True): return _token_set(s1, s2, partial=False, force_ascii=force_ascii, full_process=full_process) def partial_token_set_ratio(s1, s2, force_ascii=True, full_process=True): return _token_set(s1, s2, partial=True, force_ascii=force_ascii, full_process=full_process) ################### # Combination API # ################### # q is for quick def QRatio(s1, s2, force_ascii=True, full_process=True): """ Quick ratio comparison between two strings. Runs full_process from utils on both strings Short circuits if either of the strings is empty after processing. :param s1: :param s2: :param force_ascii: Allow only ASCII characters (Default: True) :full_process: Process inputs, used here to avoid double processing in extract functions (Default: True) :return: similarity ratio """ if full_process: p1 = utils.full_process(s1, force_ascii=force_ascii) p2 = utils.full_process(s2, force_ascii=force_ascii) else: p1 = s1 p2 = s2 if not utils.validate_string(p1): return 0 if not utils.validate_string(p2): return 0 return ratio(p1, p2) def UQRatio(s1, s2, full_process=True): """ Unicode quick ratio Calls QRatio with force_ascii set to False :param s1: :param s2: :return: similarity ratio """ return QRatio(s1, s2, force_ascii=False, full_process=full_process) # w is for weighted def WRatio(s1, s2, force_ascii=True, full_process=True): """ Return a measure of the sequences' similarity between 0 and 100, using different algorithms. **Steps in the order they occur** #. Run full_process from utils on both strings #. Short circuit if this makes either string empty #. Take the ratio of the two processed strings (fuzz.ratio) #. Run checks to compare the length of the strings * If one of the strings is more than 1.5 times as long as the other use partial_ratio comparisons - scale partial results by 0.9 (this makes sure only full results can return 100) * If one of the strings is over 8 times as long as the other instead scale by 0.6 #. Run the other ratio functions * if using partial ratio functions call partial_ratio, partial_token_sort_ratio and partial_token_set_ratio scale all of these by the ratio based on length * otherwise call token_sort_ratio and token_set_ratio * all token based comparisons are scaled by 0.95 (on top of any partial scalars) #. 
Take the highest value from these results round it and return it as an integer. :param s1: :param s2: :param force_ascii: Allow only ascii characters :type force_ascii: bool :full_process: Process inputs, used here to avoid double processing in extract functions (Default: True) :return: """ if full_process: p1 = utils.full_process(s1, force_ascii=force_ascii) p2 = utils.full_process(s2, force_ascii=force_ascii) else: p1 = s1 p2 = s2 if not utils.validate_string(p1): return 0 if not utils.validate_string(p2): return 0 # should we look at partials? try_partial = True unbase_scale = .95 partial_scale = .90 base = ratio(p1, p2) len_ratio = float(max(len(p1), len(p2))) / min(len(p1), len(p2)) # if strings are similar length, don't use partials if len_ratio < 1.5: try_partial = False # if one string is much much shorter than the other if len_ratio > 8: partial_scale = .6 if try_partial: partial = partial_ratio(p1, p2) * partial_scale ptsor = partial_token_sort_ratio(p1, p2, full_process=False) \ * unbase_scale * partial_scale ptser = partial_token_set_ratio(p1, p2, full_process=False) \ * unbase_scale * partial_scale return utils.intr(max(base, partial, ptsor, ptser)) else: tsor = token_sort_ratio(p1, p2, full_process=False) * unbase_scale tser = token_set_ratio(p1, p2, full_process=False) * unbase_scale return utils.intr(max(base, tsor, tser))
seatgeek/fuzzywuzzy
benchmarks.py
print_result_from_timeit
python
def print_result_from_timeit(stmt='pass', setup='pass', number=1000000):
    units = ["s", "ms", "us", "ns"]
    duration = timeit(stmt, setup, number=int(number))
    avg_duration = duration / float(number)
    thousands = int(math.floor(math.log(avg_duration, 1000)))

    print("Total time: %fs. Average run: %.3f%s." % (duration, avg_duration * (1000 ** -thousands), units[-thousands]))
Convenience function that reports how long the execution of one statement took.
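To make the unit selection concrete: thousands = floor(log1000(avg_duration)) picks the SI band and units[-thousands] names it. A standalone check with illustrative durations:

import math

for avg in (2.0, 0.004, 3e-6, 7e-9):
    thousands = int(math.floor(math.log(avg, 1000)))
    unit = ["s", "ms", "us", "ns"][-thousands]
    print("%.3f%s" % (avg * (1000 ** -thousands), unit))
# expected bands: 2.000s, 4.000ms, 3.000us, 7.000ns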
train
https://github.com/seatgeek/fuzzywuzzy/blob/778162c5a73256745eb6ae22f925bc2dbcf7c894/benchmarks.py#L47-L57
null
# -*- coding: utf8 -*-

from timeit import timeit
import math
import csv

iterations = 100000

reader = csv.DictReader(open('data/titledata.csv'), delimiter='|')
titles = [i['custom_title'] for i in reader]
title_blob = '\n'.join(titles)

cirque_strings = [
    "cirque du soleil - zarkana - las vegas",
    "cirque du soleil ",
    "cirque du soleil las vegas",
    "zarkana las vegas",
    "las vegas cirque du soleil at the bellagio",
    "zarakana - cirque du soleil - bellagio"
]

choices = [
    "",
    "new york yankees vs boston red sox",
    "",
    "zarakana - cirque du soleil - bellagio",
    None,
    "cirque du soleil las vegas",
    None
]

mixed_strings = [
    "Lorem Ipsum is simply dummy text of the printing and typesetting industry.",
    "C\\'est la vie",
    u"Ça va?",
    u"Cães danados",
    u"\xacCamarões assados",
    u"a\xac\u1234\u20ac\U00008000"
]

common_setup = "from fuzzywuzzy import fuzz, utils; "
basic_setup = "from fuzzywuzzy.string_processing import StringProcessor;"

for s in choices:
    print('Test validate_string for: "%s"' % s)
    print_result_from_timeit('utils.validate_string(\'%s\')' % s,
                             common_setup, number=iterations)

print('')

for s in mixed_strings + cirque_strings + choices:
    print('Test full_process for: "%s"' % s)
    print_result_from_timeit('utils.full_process(u\'%s\')' % s,
                             common_setup + basic_setup, number=iterations)

# benchmarking the core matching methods...

for s in cirque_strings:
    print('Test fuzz.ratio for string: "%s"' % s)
    print('-------------------------------')
    print_result_from_timeit('fuzz.ratio(u\'cirque du soleil\', u\'%s\')' % s,
                             common_setup + basic_setup, number=iterations / 100)

for s in cirque_strings:
    print('Test fuzz.partial_ratio for string: "%s"' % s)
    print('-------------------------------')
    print_result_from_timeit('fuzz.partial_ratio(u\'cirque du soleil\', u\'%s\')' % s,
                             common_setup + basic_setup, number=iterations / 100)

for s in cirque_strings:
    print('Test fuzz.WRatio for string: "%s"' % s)
    print('-------------------------------')
    print_result_from_timeit('fuzz.WRatio(u\'cirque du soleil\', u\'%s\')' % s,
                             common_setup + basic_setup, number=iterations / 100)

print('Test process.extract(scorer = fuzz.QRatio) for string: "%s"' % s)
print('-------------------------------')
print_result_from_timeit('process.extract(u\'cirque du soleil\', choices, scorer = fuzz.QRatio)',
                         common_setup + basic_setup + " from fuzzywuzzy import process; import string,random; random.seed(18);"
                         " choices = [\'\'.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(30)) for s in range(5000)]",
                         number=10)

print('Test process.extract(scorer = fuzz.WRatio) for string: "%s"' % s)
print('-------------------------------')
print_result_from_timeit('process.extract(u\'cirque du soleil\', choices, scorer = fuzz.WRatio)',
                         common_setup + basic_setup + " from fuzzywuzzy import process; import string,random; random.seed(18);"
                         " choices = [\'\'.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(30)) for s in range(5000)]",
                         number=10)

# let me show you something

s = 'New York Yankees'
test = 'import functools\n'
test += 'title_blob = """%s"""\n' % title_blob
test += 'title_blob = title_blob.strip()\n'
test += 'titles = title_blob.split("\\n")\n'

print('Real world ratio(): "%s"' % s)
print('-------------------------------')
test += 'prepared_ratio = functools.partial(fuzz.ratio, "%s")\n' % s
test += 'titles.sort(key=prepared_ratio)\n'
print_result_from_timeit(test, common_setup + basic_setup, number=100)
ipazc/mtcnn
mtcnn/mtcnn.py
MTCNN.__scale_image
python
def __scale_image(image, scale: float):
    height, width, _ = image.shape

    width_scaled = int(np.ceil(width * scale))
    height_scaled = int(np.ceil(height * scale))

    im_data = cv2.resize(image, (width_scaled, height_scaled), interpolation=cv2.INTER_AREA)

    # Normalize the image's pixels
    im_data_normalized = (im_data - 127.5) * 0.0078125

    return im_data_normalized
Scales the image to a given scale.

:param image: image to scale, as a (height, width, channels) numpy array
:param scale: scale factor applied to both dimensions
:return: the resized image, with pixel values normalized to roughly [-1, 1]
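The normalization constant above is worth a note: 0.0078125 is exactly 1/128, so (x - 127.5) * 0.0078125 maps the 8-bit range [0, 255] onto roughly [-1, 1] with zero mean. A quick standalone sanity check (numpy only):

import numpy as np

pixels = np.array([0, 127.5, 255], dtype=np.float64)
print((pixels - 127.5) * 0.0078125)
# -> [-0.99609375  0.          0.99609375]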
train
https://github.com/ipazc/mtcnn/blob/17029fe453a435f50c472ae2fd1c493341b5ede3/mtcnn/mtcnn.py#L230-L247
null
class MTCNN(object): """ Allows to perform MTCNN Detection -> a) Detection of faces (with the confidence probability) b) Detection of keypoints (left eye, right eye, nose, mouth_left, mouth_right) """ def __init__(self, weights_file: str=None, min_face_size: int=20, steps_threshold: list=None, scale_factor: float=0.709): """ Initializes the MTCNN. :param weights_file: file uri with the weights of the P, R and O networks from MTCNN. By default it will load the ones bundled with the package. :param min_face_size: minimum size of the face to detect :param steps_threshold: step's thresholds values :param scale_factor: scale factor """ if steps_threshold is None: steps_threshold = [0.6, 0.7, 0.7] if weights_file is None: weights_file = pkg_resources.resource_stream('mtcnn', 'data/mtcnn_weights.npy') self.__min_face_size = min_face_size self.__steps_threshold = steps_threshold self.__scale_factor = scale_factor config = tf.ConfigProto(log_device_placement=False) config.gpu_options.allow_growth = True self.__graph = tf.Graph() with self.__graph.as_default(): self.__session = tf.Session(config=config, graph=self.__graph) weights = np.load(weights_file).item() self.__pnet = PNet(self.__session, False) self.__pnet.set_weights(weights['PNet']) self.__rnet = RNet(self.__session, False) self.__rnet.set_weights(weights['RNet']) self.__onet = ONet(self.__session, False) self.__onet.set_weights(weights['ONet']) weights_file.close() @property def min_face_size(self): return self.__min_face_size @min_face_size.setter def min_face_size(self, mfc=20): try: self.__min_face_size = int(mfc) except ValueError: self.__min_face_size = 20 def __compute_scale_pyramid(self, m, min_layer): scales = [] factor_count = 0 while min_layer >= 12: scales += [m * np.power(self.__scale_factor, factor_count)] min_layer = min_layer * self.__scale_factor factor_count += 1 return scales @staticmethod @staticmethod def __generate_bounding_box(imap, reg, scale, t): # use heatmap to generate bounding boxes stride = 2 cellsize = 12 imap = np.transpose(imap) dx1 = np.transpose(reg[:, :, 0]) dy1 = np.transpose(reg[:, :, 1]) dx2 = np.transpose(reg[:, :, 2]) dy2 = np.transpose(reg[:, :, 3]) y, x = np.where(imap >= t) if y.shape[0] == 1: dx1 = np.flipud(dx1) dy1 = np.flipud(dy1) dx2 = np.flipud(dx2) dy2 = np.flipud(dy2) score = imap[(y, x)] reg = np.transpose(np.vstack([dx1[(y, x)], dy1[(y, x)], dx2[(y, x)], dy2[(y, x)]])) if reg.size == 0: reg = np.empty(shape=(0, 3)) bb = np.transpose(np.vstack([y, x])) q1 = np.fix((stride * bb + 1)/scale) q2 = np.fix((stride * bb + cellsize)/scale) boundingbox = np.hstack([q1, q2, np.expand_dims(score, 1), reg]) return boundingbox, reg @staticmethod def __nms(boxes, threshold, method): """ Non Maximum Suppression. :param boxes: np array with bounding boxes. :param threshold: :param method: NMS method to apply. 
Available values ('Min', 'Union') :return: """ if boxes.size == 0: return np.empty((0, 3)) x1 = boxes[:, 0] y1 = boxes[:, 1] x2 = boxes[:, 2] y2 = boxes[:, 3] s = boxes[:, 4] area = (x2 - x1 + 1) * (y2 - y1 + 1) sorted_s = np.argsort(s) pick = np.zeros_like(s, dtype=np.int16) counter = 0 while sorted_s.size > 0: i = sorted_s[-1] pick[counter] = i counter += 1 idx = sorted_s[0:-1] xx1 = np.maximum(x1[i], x1[idx]) yy1 = np.maximum(y1[i], y1[idx]) xx2 = np.minimum(x2[i], x2[idx]) yy2 = np.minimum(y2[i], y2[idx]) w = np.maximum(0.0, xx2 - xx1 + 1) h = np.maximum(0.0, yy2 - yy1 + 1) inter = w * h if method is 'Min': o = inter / np.minimum(area[i], area[idx]) else: o = inter / (area[i] + area[idx] - inter) sorted_s = sorted_s[np.where(o <= threshold)] pick = pick[0:counter] return pick @staticmethod def __pad(total_boxes, w, h): # compute the padding coordinates (pad the bounding boxes to square) tmpw = (total_boxes[:, 2] - total_boxes[:, 0] + 1).astype(np.int32) tmph = (total_boxes[:, 3] - total_boxes[:, 1] + 1).astype(np.int32) numbox = total_boxes.shape[0] dx = np.ones(numbox, dtype=np.int32) dy = np.ones(numbox, dtype=np.int32) edx = tmpw.copy().astype(np.int32) edy = tmph.copy().astype(np.int32) x = total_boxes[:, 0].copy().astype(np.int32) y = total_boxes[:, 1].copy().astype(np.int32) ex = total_boxes[:, 2].copy().astype(np.int32) ey = total_boxes[:, 3].copy().astype(np.int32) tmp = np.where(ex > w) edx.flat[tmp] = np.expand_dims(-ex[tmp] + w + tmpw[tmp], 1) ex[tmp] = w tmp = np.where(ey > h) edy.flat[tmp] = np.expand_dims(-ey[tmp] + h + tmph[tmp], 1) ey[tmp] = h tmp = np.where(x < 1) dx.flat[tmp] = np.expand_dims(2 - x[tmp], 1) x[tmp] = 1 tmp = np.where(y < 1) dy.flat[tmp] = np.expand_dims(2 - y[tmp], 1) y[tmp] = 1 return dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph @staticmethod def __rerec(bbox): # convert bbox to square h = bbox[:, 3] - bbox[:, 1] w = bbox[:, 2] - bbox[:, 0] l = np.maximum(w, h) bbox[:, 0] = bbox[:, 0] + w * 0.5 - l * 0.5 bbox[:, 1] = bbox[:, 1] + h * 0.5 - l * 0.5 bbox[:, 2:4] = bbox[:, 0:2] + np.transpose(np.tile(l, (2, 1))) return bbox @staticmethod def __bbreg(boundingbox, reg): # calibrate bounding boxes if reg.shape[1] == 1: reg = np.reshape(reg, (reg.shape[2], reg.shape[3])) w = boundingbox[:, 2] - boundingbox[:, 0] + 1 h = boundingbox[:, 3] - boundingbox[:, 1] + 1 b1 = boundingbox[:, 0] + reg[:, 0] * w b2 = boundingbox[:, 1] + reg[:, 1] * h b3 = boundingbox[:, 2] + reg[:, 2] * w b4 = boundingbox[:, 3] + reg[:, 3] * h boundingbox[:, 0:4] = np.transpose(np.vstack([b1, b2, b3, b4])) return boundingbox def detect_faces(self, img) -> list: """ Detects bounding boxes from the specified image. :param img: image to process :return: list containing all the bounding boxes detected with their keypoints. 
""" if img is None or not hasattr(img, "shape"): raise InvalidImage("Image not valid.") height, width, _ = img.shape stage_status = StageStatus(width=width, height=height) m = 12 / self.__min_face_size min_layer = np.amin([height, width]) * m scales = self.__compute_scale_pyramid(m, min_layer) stages = [self.__stage1, self.__stage2, self.__stage3] result = [scales, stage_status] # We pipe here each of the stages for stage in stages: result = stage(img, result[0], result[1]) [total_boxes, points] = result bounding_boxes = [] for bounding_box, keypoints in zip(total_boxes, points.T): bounding_boxes.append({ 'box': [int(bounding_box[0]), int(bounding_box[1]), int(bounding_box[2]-bounding_box[0]), int(bounding_box[3]-bounding_box[1])], 'confidence': bounding_box[-1], 'keypoints': { 'left_eye': (int(keypoints[0]), int(keypoints[5])), 'right_eye': (int(keypoints[1]), int(keypoints[6])), 'nose': (int(keypoints[2]), int(keypoints[7])), 'mouth_left': (int(keypoints[3]), int(keypoints[8])), 'mouth_right': (int(keypoints[4]), int(keypoints[9])), } } ) return bounding_boxes def __stage1(self, image, scales: list, stage_status: StageStatus): """ First stage of the MTCNN. :param image: :param scales: :param stage_status: :return: """ total_boxes = np.empty((0, 9)) status = stage_status for scale in scales: scaled_image = self.__scale_image(image, scale) img_x = np.expand_dims(scaled_image, 0) img_y = np.transpose(img_x, (0, 2, 1, 3)) out = self.__pnet.feed(img_y) out0 = np.transpose(out[0], (0, 2, 1, 3)) out1 = np.transpose(out[1], (0, 2, 1, 3)) boxes, _ = self.__generate_bounding_box(out1[0, :, :, 1].copy(), out0[0, :, :, :].copy(), scale, self.__steps_threshold[0]) # inter-scale nms pick = self.__nms(boxes.copy(), 0.5, 'Union') if boxes.size > 0 and pick.size > 0: boxes = boxes[pick, :] total_boxes = np.append(total_boxes, boxes, axis=0) numboxes = total_boxes.shape[0] if numboxes > 0: pick = self.__nms(total_boxes.copy(), 0.7, 'Union') total_boxes = total_boxes[pick, :] regw = total_boxes[:, 2] - total_boxes[:, 0] regh = total_boxes[:, 3] - total_boxes[:, 1] qq1 = total_boxes[:, 0] + total_boxes[:, 5] * regw qq2 = total_boxes[:, 1] + total_boxes[:, 6] * regh qq3 = total_boxes[:, 2] + total_boxes[:, 7] * regw qq4 = total_boxes[:, 3] + total_boxes[:, 8] * regh total_boxes = np.transpose(np.vstack([qq1, qq2, qq3, qq4, total_boxes[:, 4]])) total_boxes = self.__rerec(total_boxes.copy()) total_boxes[:, 0:4] = np.fix(total_boxes[:, 0:4]).astype(np.int32) status = StageStatus(self.__pad(total_boxes.copy(), stage_status.width, stage_status.height), width=stage_status.width, height=stage_status.height) return total_boxes, status def __stage2(self, img, total_boxes, stage_status:StageStatus): """ Second stage of the MTCNN. 
:param img: :param total_boxes: :param stage_status: :return: """ num_boxes = total_boxes.shape[0] if num_boxes == 0: return total_boxes, stage_status # second stage tempimg = np.zeros(shape=(24, 24, 3, num_boxes)) for k in range(0, num_boxes): tmp = np.zeros((int(stage_status.tmph[k]), int(stage_status.tmpw[k]), 3)) tmp[stage_status.dy[k] - 1:stage_status.edy[k], stage_status.dx[k] - 1:stage_status.edx[k], :] = \ img[stage_status.y[k] - 1:stage_status.ey[k], stage_status.x[k] - 1:stage_status.ex[k], :] if tmp.shape[0] > 0 and tmp.shape[1] > 0 or tmp.shape[0] == 0 and tmp.shape[1] == 0: tempimg[:, :, :, k] = cv2.resize(tmp, (24, 24), interpolation=cv2.INTER_AREA) else: return np.empty(shape=(0,)), stage_status tempimg = (tempimg - 127.5) * 0.0078125 tempimg1 = np.transpose(tempimg, (3, 1, 0, 2)) out = self.__rnet.feed(tempimg1) out0 = np.transpose(out[0]) out1 = np.transpose(out[1]) score = out1[1, :] ipass = np.where(score > self.__steps_threshold[1]) total_boxes = np.hstack([total_boxes[ipass[0], 0:4].copy(), np.expand_dims(score[ipass].copy(), 1)]) mv = out0[:, ipass[0]] if total_boxes.shape[0] > 0: pick = self.__nms(total_boxes, 0.7, 'Union') total_boxes = total_boxes[pick, :] total_boxes = self.__bbreg(total_boxes.copy(), np.transpose(mv[:, pick])) total_boxes = self.__rerec(total_boxes.copy()) return total_boxes, stage_status def __stage3(self, img, total_boxes, stage_status: StageStatus): """ Third stage of the MTCNN. :param img: :param total_boxes: :param stage_status: :return: """ num_boxes = total_boxes.shape[0] if num_boxes == 0: return total_boxes, np.empty(shape=(0,)) total_boxes = np.fix(total_boxes).astype(np.int32) status = StageStatus(self.__pad(total_boxes.copy(), stage_status.width, stage_status.height), width=stage_status.width, height=stage_status.height) tempimg = np.zeros((48, 48, 3, num_boxes)) for k in range(0, num_boxes): tmp = np.zeros((int(status.tmph[k]), int(status.tmpw[k]), 3)) tmp[status.dy[k] - 1:status.edy[k], status.dx[k] - 1:status.edx[k], :] = \ img[status.y[k] - 1:status.ey[k], status.x[k] - 1:status.ex[k], :] if tmp.shape[0] > 0 and tmp.shape[1] > 0 or tmp.shape[0] == 0 and tmp.shape[1] == 0: tempimg[:, :, :, k] = cv2.resize(tmp, (48, 48), interpolation=cv2.INTER_AREA) else: return np.empty(shape=(0,)), np.empty(shape=(0,)) tempimg = (tempimg - 127.5) * 0.0078125 tempimg1 = np.transpose(tempimg, (3, 1, 0, 2)) out = self.__onet.feed(tempimg1) out0 = np.transpose(out[0]) out1 = np.transpose(out[1]) out2 = np.transpose(out[2]) score = out2[1, :] points = out1 ipass = np.where(score > self.__steps_threshold[2]) points = points[:, ipass[0]] total_boxes = np.hstack([total_boxes[ipass[0], 0:4].copy(), np.expand_dims(score[ipass].copy(), 1)]) mv = out0[:, ipass[0]] w = total_boxes[:, 2] - total_boxes[:, 0] + 1 h = total_boxes[:, 3] - total_boxes[:, 1] + 1 points[0:5, :] = np.tile(w, (5, 1)) * points[0:5, :] + np.tile(total_boxes[:, 0], (5, 1)) - 1 points[5:10, :] = np.tile(h, (5, 1)) * points[5:10, :] + np.tile(total_boxes[:, 1], (5, 1)) - 1 if total_boxes.shape[0] > 0: total_boxes = self.__bbreg(total_boxes.copy(), np.transpose(mv)) pick = self.__nms(total_boxes.copy(), 0.7, 'Min') total_boxes = total_boxes[pick, :] points = points[:, pick] return total_boxes, points def __del__(self): self.__session.close()
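One detail from the class above worth a worked check: __rerec squares each box around its center by growing the shorter side to l = max(w, h). A standalone rework of its arithmetic on a hypothetical wide box:

import numpy as np

bbox = np.array([[10.0, 20.0, 50.0, 40.0, 0.9]])  # x1, y1, x2, y2, score
w = bbox[:, 2] - bbox[:, 0]   # 40
h = bbox[:, 3] - bbox[:, 1]   # 20
l = np.maximum(w, h)          # 40

bbox[:, 0] = bbox[:, 0] + w * 0.5 - l * 0.5   # x1 stays 10 (w == l)
bbox[:, 1] = bbox[:, 1] + h * 0.5 - l * 0.5   # y1 -> 20 + 10 - 20 = 10
bbox[:, 2:4] = bbox[:, 0:2] + np.transpose(np.tile(l, (2, 1)))
print(bbox)  # [[10. 10. 50. 50.  0.9]] -- a 40x40 square with the same center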
ipazc/mtcnn
mtcnn/mtcnn.py
MTCNN.__nms
python
def __nms(boxes, threshold, method):
    if boxes.size == 0:
        return np.empty((0, 3))

    x1 = boxes[:, 0]
    y1 = boxes[:, 1]
    x2 = boxes[:, 2]
    y2 = boxes[:, 3]
    s = boxes[:, 4]

    area = (x2 - x1 + 1) * (y2 - y1 + 1)
    sorted_s = np.argsort(s)

    pick = np.zeros_like(s, dtype=np.int16)
    counter = 0
    while sorted_s.size > 0:
        i = sorted_s[-1]
        pick[counter] = i
        counter += 1
        idx = sorted_s[0:-1]

        # Intersection of the highest-scoring box with the remaining boxes
        xx1 = np.maximum(x1[i], x1[idx])
        yy1 = np.maximum(y1[i], y1[idx])
        xx2 = np.minimum(x2[i], x2[idx])
        yy2 = np.minimum(y2[i], y2[idx])

        w = np.maximum(0.0, xx2 - xx1 + 1)
        h = np.maximum(0.0, yy2 - yy1 + 1)

        inter = w * h

        # Compare strings with ==, not the identity operator "is"
        if method == 'Min':
            o = inter / np.minimum(area[i], area[idx])
        else:
            o = inter / (area[i] + area[idx] - inter)

        sorted_s = sorted_s[np.where(o <= threshold)]

    pick = pick[0:counter]

    return pick
Non Maximum Suppression.

:param boxes: np array with bounding boxes, one row per box as (x1, y1, x2, y2, score)
:param threshold: overlap threshold above which a lower-scoring box is suppressed
:param method: NMS method to apply. Available values ('Min', 'Union')
:return: indices of the boxes to keep
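A minimal standalone sketch of the two overlap measures on a pair of overlapping boxes, to show how the threshold acts (hypothetical coordinates; this reimplements the overlap formulas above rather than calling the private method):

import numpy as np

# Two boxes as (x1, y1, x2, y2); the second heavily overlaps the first.
a = np.array([0, 0, 9, 9], dtype=float)    # area 100 under the inclusive (+1) convention
b = np.array([2, 2, 11, 11], dtype=float)  # area 100

xx1, yy1 = np.maximum(a[0], b[0]), np.maximum(a[1], b[1])
xx2, yy2 = np.minimum(a[2], b[2]), np.minimum(a[3], b[3])
inter = max(0.0, xx2 - xx1 + 1) * max(0.0, yy2 - yy1 + 1)  # 8 * 8 = 64

print(inter / (100 + 100 - inter))  # 'Union' (IoU): 64 / 136 ~= 0.47
print(inter / min(100.0, 100.0))    # 'Min':         64 / 100  = 0.64
# At threshold 0.5, the 'Min' branch would suppress box b (0.64 > 0.5)
# while the 'Union' branch would keep it (0.47 <= 0.5).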
train
https://github.com/ipazc/mtcnn/blob/17029fe453a435f50c472ae2fd1c493341b5ede3/mtcnn/mtcnn.py#L285-L333
null
class MTCNN(object): """ Allows to perform MTCNN Detection -> a) Detection of faces (with the confidence probability) b) Detection of keypoints (left eye, right eye, nose, mouth_left, mouth_right) """ def __init__(self, weights_file: str=None, min_face_size: int=20, steps_threshold: list=None, scale_factor: float=0.709): """ Initializes the MTCNN. :param weights_file: file uri with the weights of the P, R and O networks from MTCNN. By default it will load the ones bundled with the package. :param min_face_size: minimum size of the face to detect :param steps_threshold: step's thresholds values :param scale_factor: scale factor """ if steps_threshold is None: steps_threshold = [0.6, 0.7, 0.7] if weights_file is None: weights_file = pkg_resources.resource_stream('mtcnn', 'data/mtcnn_weights.npy') self.__min_face_size = min_face_size self.__steps_threshold = steps_threshold self.__scale_factor = scale_factor config = tf.ConfigProto(log_device_placement=False) config.gpu_options.allow_growth = True self.__graph = tf.Graph() with self.__graph.as_default(): self.__session = tf.Session(config=config, graph=self.__graph) weights = np.load(weights_file).item() self.__pnet = PNet(self.__session, False) self.__pnet.set_weights(weights['PNet']) self.__rnet = RNet(self.__session, False) self.__rnet.set_weights(weights['RNet']) self.__onet = ONet(self.__session, False) self.__onet.set_weights(weights['ONet']) weights_file.close() @property def min_face_size(self): return self.__min_face_size @min_face_size.setter def min_face_size(self, mfc=20): try: self.__min_face_size = int(mfc) except ValueError: self.__min_face_size = 20 def __compute_scale_pyramid(self, m, min_layer): scales = [] factor_count = 0 while min_layer >= 12: scales += [m * np.power(self.__scale_factor, factor_count)] min_layer = min_layer * self.__scale_factor factor_count += 1 return scales @staticmethod def __scale_image(image, scale: float): """ Scales the image to a given scale. 
:param image: :param scale: :return: """ height, width, _ = image.shape width_scaled = int(np.ceil(width * scale)) height_scaled = int(np.ceil(height * scale)) im_data = cv2.resize(image, (width_scaled, height_scaled), interpolation=cv2.INTER_AREA) # Normalize the image's pixels im_data_normalized = (im_data - 127.5) * 0.0078125 return im_data_normalized @staticmethod def __generate_bounding_box(imap, reg, scale, t): # use heatmap to generate bounding boxes stride = 2 cellsize = 12 imap = np.transpose(imap) dx1 = np.transpose(reg[:, :, 0]) dy1 = np.transpose(reg[:, :, 1]) dx2 = np.transpose(reg[:, :, 2]) dy2 = np.transpose(reg[:, :, 3]) y, x = np.where(imap >= t) if y.shape[0] == 1: dx1 = np.flipud(dx1) dy1 = np.flipud(dy1) dx2 = np.flipud(dx2) dy2 = np.flipud(dy2) score = imap[(y, x)] reg = np.transpose(np.vstack([dx1[(y, x)], dy1[(y, x)], dx2[(y, x)], dy2[(y, x)]])) if reg.size == 0: reg = np.empty(shape=(0, 3)) bb = np.transpose(np.vstack([y, x])) q1 = np.fix((stride * bb + 1)/scale) q2 = np.fix((stride * bb + cellsize)/scale) boundingbox = np.hstack([q1, q2, np.expand_dims(score, 1), reg]) return boundingbox, reg @staticmethod @staticmethod def __pad(total_boxes, w, h): # compute the padding coordinates (pad the bounding boxes to square) tmpw = (total_boxes[:, 2] - total_boxes[:, 0] + 1).astype(np.int32) tmph = (total_boxes[:, 3] - total_boxes[:, 1] + 1).astype(np.int32) numbox = total_boxes.shape[0] dx = np.ones(numbox, dtype=np.int32) dy = np.ones(numbox, dtype=np.int32) edx = tmpw.copy().astype(np.int32) edy = tmph.copy().astype(np.int32) x = total_boxes[:, 0].copy().astype(np.int32) y = total_boxes[:, 1].copy().astype(np.int32) ex = total_boxes[:, 2].copy().astype(np.int32) ey = total_boxes[:, 3].copy().astype(np.int32) tmp = np.where(ex > w) edx.flat[tmp] = np.expand_dims(-ex[tmp] + w + tmpw[tmp], 1) ex[tmp] = w tmp = np.where(ey > h) edy.flat[tmp] = np.expand_dims(-ey[tmp] + h + tmph[tmp], 1) ey[tmp] = h tmp = np.where(x < 1) dx.flat[tmp] = np.expand_dims(2 - x[tmp], 1) x[tmp] = 1 tmp = np.where(y < 1) dy.flat[tmp] = np.expand_dims(2 - y[tmp], 1) y[tmp] = 1 return dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph @staticmethod def __rerec(bbox): # convert bbox to square h = bbox[:, 3] - bbox[:, 1] w = bbox[:, 2] - bbox[:, 0] l = np.maximum(w, h) bbox[:, 0] = bbox[:, 0] + w * 0.5 - l * 0.5 bbox[:, 1] = bbox[:, 1] + h * 0.5 - l * 0.5 bbox[:, 2:4] = bbox[:, 0:2] + np.transpose(np.tile(l, (2, 1))) return bbox @staticmethod def __bbreg(boundingbox, reg): # calibrate bounding boxes if reg.shape[1] == 1: reg = np.reshape(reg, (reg.shape[2], reg.shape[3])) w = boundingbox[:, 2] - boundingbox[:, 0] + 1 h = boundingbox[:, 3] - boundingbox[:, 1] + 1 b1 = boundingbox[:, 0] + reg[:, 0] * w b2 = boundingbox[:, 1] + reg[:, 1] * h b3 = boundingbox[:, 2] + reg[:, 2] * w b4 = boundingbox[:, 3] + reg[:, 3] * h boundingbox[:, 0:4] = np.transpose(np.vstack([b1, b2, b3, b4])) return boundingbox def detect_faces(self, img) -> list: """ Detects bounding boxes from the specified image. :param img: image to process :return: list containing all the bounding boxes detected with their keypoints. 
""" if img is None or not hasattr(img, "shape"): raise InvalidImage("Image not valid.") height, width, _ = img.shape stage_status = StageStatus(width=width, height=height) m = 12 / self.__min_face_size min_layer = np.amin([height, width]) * m scales = self.__compute_scale_pyramid(m, min_layer) stages = [self.__stage1, self.__stage2, self.__stage3] result = [scales, stage_status] # We pipe here each of the stages for stage in stages: result = stage(img, result[0], result[1]) [total_boxes, points] = result bounding_boxes = [] for bounding_box, keypoints in zip(total_boxes, points.T): bounding_boxes.append({ 'box': [int(bounding_box[0]), int(bounding_box[1]), int(bounding_box[2]-bounding_box[0]), int(bounding_box[3]-bounding_box[1])], 'confidence': bounding_box[-1], 'keypoints': { 'left_eye': (int(keypoints[0]), int(keypoints[5])), 'right_eye': (int(keypoints[1]), int(keypoints[6])), 'nose': (int(keypoints[2]), int(keypoints[7])), 'mouth_left': (int(keypoints[3]), int(keypoints[8])), 'mouth_right': (int(keypoints[4]), int(keypoints[9])), } } ) return bounding_boxes def __stage1(self, image, scales: list, stage_status: StageStatus): """ First stage of the MTCNN. :param image: :param scales: :param stage_status: :return: """ total_boxes = np.empty((0, 9)) status = stage_status for scale in scales: scaled_image = self.__scale_image(image, scale) img_x = np.expand_dims(scaled_image, 0) img_y = np.transpose(img_x, (0, 2, 1, 3)) out = self.__pnet.feed(img_y) out0 = np.transpose(out[0], (0, 2, 1, 3)) out1 = np.transpose(out[1], (0, 2, 1, 3)) boxes, _ = self.__generate_bounding_box(out1[0, :, :, 1].copy(), out0[0, :, :, :].copy(), scale, self.__steps_threshold[0]) # inter-scale nms pick = self.__nms(boxes.copy(), 0.5, 'Union') if boxes.size > 0 and pick.size > 0: boxes = boxes[pick, :] total_boxes = np.append(total_boxes, boxes, axis=0) numboxes = total_boxes.shape[0] if numboxes > 0: pick = self.__nms(total_boxes.copy(), 0.7, 'Union') total_boxes = total_boxes[pick, :] regw = total_boxes[:, 2] - total_boxes[:, 0] regh = total_boxes[:, 3] - total_boxes[:, 1] qq1 = total_boxes[:, 0] + total_boxes[:, 5] * regw qq2 = total_boxes[:, 1] + total_boxes[:, 6] * regh qq3 = total_boxes[:, 2] + total_boxes[:, 7] * regw qq4 = total_boxes[:, 3] + total_boxes[:, 8] * regh total_boxes = np.transpose(np.vstack([qq1, qq2, qq3, qq4, total_boxes[:, 4]])) total_boxes = self.__rerec(total_boxes.copy()) total_boxes[:, 0:4] = np.fix(total_boxes[:, 0:4]).astype(np.int32) status = StageStatus(self.__pad(total_boxes.copy(), stage_status.width, stage_status.height), width=stage_status.width, height=stage_status.height) return total_boxes, status def __stage2(self, img, total_boxes, stage_status:StageStatus): """ Second stage of the MTCNN. 
:param img: :param total_boxes: :param stage_status: :return: """ num_boxes = total_boxes.shape[0] if num_boxes == 0: return total_boxes, stage_status # second stage tempimg = np.zeros(shape=(24, 24, 3, num_boxes)) for k in range(0, num_boxes): tmp = np.zeros((int(stage_status.tmph[k]), int(stage_status.tmpw[k]), 3)) tmp[stage_status.dy[k] - 1:stage_status.edy[k], stage_status.dx[k] - 1:stage_status.edx[k], :] = \ img[stage_status.y[k] - 1:stage_status.ey[k], stage_status.x[k] - 1:stage_status.ex[k], :] if tmp.shape[0] > 0 and tmp.shape[1] > 0 or tmp.shape[0] == 0 and tmp.shape[1] == 0: tempimg[:, :, :, k] = cv2.resize(tmp, (24, 24), interpolation=cv2.INTER_AREA) else: return np.empty(shape=(0,)), stage_status tempimg = (tempimg - 127.5) * 0.0078125 tempimg1 = np.transpose(tempimg, (3, 1, 0, 2)) out = self.__rnet.feed(tempimg1) out0 = np.transpose(out[0]) out1 = np.transpose(out[1]) score = out1[1, :] ipass = np.where(score > self.__steps_threshold[1]) total_boxes = np.hstack([total_boxes[ipass[0], 0:4].copy(), np.expand_dims(score[ipass].copy(), 1)]) mv = out0[:, ipass[0]] if total_boxes.shape[0] > 0: pick = self.__nms(total_boxes, 0.7, 'Union') total_boxes = total_boxes[pick, :] total_boxes = self.__bbreg(total_boxes.copy(), np.transpose(mv[:, pick])) total_boxes = self.__rerec(total_boxes.copy()) return total_boxes, stage_status def __stage3(self, img, total_boxes, stage_status: StageStatus): """ Third stage of the MTCNN. :param img: :param total_boxes: :param stage_status: :return: """ num_boxes = total_boxes.shape[0] if num_boxes == 0: return total_boxes, np.empty(shape=(0,)) total_boxes = np.fix(total_boxes).astype(np.int32) status = StageStatus(self.__pad(total_boxes.copy(), stage_status.width, stage_status.height), width=stage_status.width, height=stage_status.height) tempimg = np.zeros((48, 48, 3, num_boxes)) for k in range(0, num_boxes): tmp = np.zeros((int(status.tmph[k]), int(status.tmpw[k]), 3)) tmp[status.dy[k] - 1:status.edy[k], status.dx[k] - 1:status.edx[k], :] = \ img[status.y[k] - 1:status.ey[k], status.x[k] - 1:status.ex[k], :] if tmp.shape[0] > 0 and tmp.shape[1] > 0 or tmp.shape[0] == 0 and tmp.shape[1] == 0: tempimg[:, :, :, k] = cv2.resize(tmp, (48, 48), interpolation=cv2.INTER_AREA) else: return np.empty(shape=(0,)), np.empty(shape=(0,)) tempimg = (tempimg - 127.5) * 0.0078125 tempimg1 = np.transpose(tempimg, (3, 1, 0, 2)) out = self.__onet.feed(tempimg1) out0 = np.transpose(out[0]) out1 = np.transpose(out[1]) out2 = np.transpose(out[2]) score = out2[1, :] points = out1 ipass = np.where(score > self.__steps_threshold[2]) points = points[:, ipass[0]] total_boxes = np.hstack([total_boxes[ipass[0], 0:4].copy(), np.expand_dims(score[ipass].copy(), 1)]) mv = out0[:, ipass[0]] w = total_boxes[:, 2] - total_boxes[:, 0] + 1 h = total_boxes[:, 3] - total_boxes[:, 1] + 1 points[0:5, :] = np.tile(w, (5, 1)) * points[0:5, :] + np.tile(total_boxes[:, 0], (5, 1)) - 1 points[5:10, :] = np.tile(h, (5, 1)) * points[5:10, :] + np.tile(total_boxes[:, 1], (5, 1)) - 1 if total_boxes.shape[0] > 0: total_boxes = self.__bbreg(total_boxes.copy(), np.transpose(mv)) pick = self.__nms(total_boxes.copy(), 0.7, 'Min') total_boxes = total_boxes[pick, :] points = points[:, pick] return total_boxes, points def __del__(self): self.__session.close()
ipazc/mtcnn
mtcnn/mtcnn.py
MTCNN.detect_faces
python
def detect_faces(self, img) -> list:
    if img is None or not hasattr(img, "shape"):
        raise InvalidImage("Image not valid.")

    height, width, _ = img.shape
    stage_status = StageStatus(width=width, height=height)

    m = 12 / self.__min_face_size
    min_layer = np.amin([height, width]) * m

    scales = self.__compute_scale_pyramid(m, min_layer)

    stages = [self.__stage1, self.__stage2, self.__stage3]
    result = [scales, stage_status]

    # We pipe here each of the stages
    for stage in stages:
        result = stage(img, result[0], result[1])

    [total_boxes, points] = result

    bounding_boxes = []

    for bounding_box, keypoints in zip(total_boxes, points.T):
        bounding_boxes.append({
            'box': [int(bounding_box[0]), int(bounding_box[1]),
                    int(bounding_box[2] - bounding_box[0]), int(bounding_box[3] - bounding_box[1])],
            'confidence': bounding_box[-1],
            'keypoints': {
                'left_eye': (int(keypoints[0]), int(keypoints[5])),
                'right_eye': (int(keypoints[1]), int(keypoints[6])),
                'nose': (int(keypoints[2]), int(keypoints[7])),
                'mouth_left': (int(keypoints[3]), int(keypoints[8])),
                'mouth_right': (int(keypoints[4]), int(keypoints[9])),
            }
        })

    return bounding_boxes
Detects bounding boxes from the specified image.

:param img: image to process
:return: list containing all the bounding boxes detected with their keypoints.
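A minimal usage sketch (the file name is hypothetical; any (height, width, 3) numpy image array satisfies the shape check above):

import cv2
from mtcnn.mtcnn import MTCNN

detector = MTCNN()
image = cv2.imread("ivan.jpg")  # hypothetical input file; cv2 loads a BGR array

for face in detector.detect_faces(image):
    x, y, w, h = face['box']  # box is [x, y, width, height]
    print(face['confidence'], (x, y, w, h), face['keypoints']['left_eye'])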
train
https://github.com/ipazc/mtcnn/blob/17029fe453a435f50c472ae2fd1c493341b5ede3/mtcnn/mtcnn.py#L396-L440
[ "def __compute_scale_pyramid(self, m, min_layer):\n scales = []\n factor_count = 0\n\n while min_layer >= 12:\n scales += [m * np.power(self.__scale_factor, factor_count)]\n min_layer = min_layer * self.__scale_factor\n factor_count += 1\n\n return scales\n" ]
class MTCNN(object): """ Allows to perform MTCNN Detection -> a) Detection of faces (with the confidence probability) b) Detection of keypoints (left eye, right eye, nose, mouth_left, mouth_right) """ def __init__(self, weights_file: str=None, min_face_size: int=20, steps_threshold: list=None, scale_factor: float=0.709): """ Initializes the MTCNN. :param weights_file: file uri with the weights of the P, R and O networks from MTCNN. By default it will load the ones bundled with the package. :param min_face_size: minimum size of the face to detect :param steps_threshold: step's thresholds values :param scale_factor: scale factor """ if steps_threshold is None: steps_threshold = [0.6, 0.7, 0.7] if weights_file is None: weights_file = pkg_resources.resource_stream('mtcnn', 'data/mtcnn_weights.npy') self.__min_face_size = min_face_size self.__steps_threshold = steps_threshold self.__scale_factor = scale_factor config = tf.ConfigProto(log_device_placement=False) config.gpu_options.allow_growth = True self.__graph = tf.Graph() with self.__graph.as_default(): self.__session = tf.Session(config=config, graph=self.__graph) weights = np.load(weights_file).item() self.__pnet = PNet(self.__session, False) self.__pnet.set_weights(weights['PNet']) self.__rnet = RNet(self.__session, False) self.__rnet.set_weights(weights['RNet']) self.__onet = ONet(self.__session, False) self.__onet.set_weights(weights['ONet']) weights_file.close() @property def min_face_size(self): return self.__min_face_size @min_face_size.setter def min_face_size(self, mfc=20): try: self.__min_face_size = int(mfc) except ValueError: self.__min_face_size = 20 def __compute_scale_pyramid(self, m, min_layer): scales = [] factor_count = 0 while min_layer >= 12: scales += [m * np.power(self.__scale_factor, factor_count)] min_layer = min_layer * self.__scale_factor factor_count += 1 return scales @staticmethod def __scale_image(image, scale: float): """ Scales the image to a given scale. :param image: :param scale: :return: """ height, width, _ = image.shape width_scaled = int(np.ceil(width * scale)) height_scaled = int(np.ceil(height * scale)) im_data = cv2.resize(image, (width_scaled, height_scaled), interpolation=cv2.INTER_AREA) # Normalize the image's pixels im_data_normalized = (im_data - 127.5) * 0.0078125 return im_data_normalized @staticmethod def __generate_bounding_box(imap, reg, scale, t): # use heatmap to generate bounding boxes stride = 2 cellsize = 12 imap = np.transpose(imap) dx1 = np.transpose(reg[:, :, 0]) dy1 = np.transpose(reg[:, :, 1]) dx2 = np.transpose(reg[:, :, 2]) dy2 = np.transpose(reg[:, :, 3]) y, x = np.where(imap >= t) if y.shape[0] == 1: dx1 = np.flipud(dx1) dy1 = np.flipud(dy1) dx2 = np.flipud(dx2) dy2 = np.flipud(dy2) score = imap[(y, x)] reg = np.transpose(np.vstack([dx1[(y, x)], dy1[(y, x)], dx2[(y, x)], dy2[(y, x)]])) if reg.size == 0: reg = np.empty(shape=(0, 3)) bb = np.transpose(np.vstack([y, x])) q1 = np.fix((stride * bb + 1)/scale) q2 = np.fix((stride * bb + cellsize)/scale) boundingbox = np.hstack([q1, q2, np.expand_dims(score, 1), reg]) return boundingbox, reg @staticmethod def __nms(boxes, threshold, method): """ Non Maximum Suppression. :param boxes: np array with bounding boxes. :param threshold: :param method: NMS method to apply. 
Available values ('Min', 'Union') :return: """ if boxes.size == 0: return np.empty((0, 3)) x1 = boxes[:, 0] y1 = boxes[:, 1] x2 = boxes[:, 2] y2 = boxes[:, 3] s = boxes[:, 4] area = (x2 - x1 + 1) * (y2 - y1 + 1) sorted_s = np.argsort(s) pick = np.zeros_like(s, dtype=np.int16) counter = 0 while sorted_s.size > 0: i = sorted_s[-1] pick[counter] = i counter += 1 idx = sorted_s[0:-1] xx1 = np.maximum(x1[i], x1[idx]) yy1 = np.maximum(y1[i], y1[idx]) xx2 = np.minimum(x2[i], x2[idx]) yy2 = np.minimum(y2[i], y2[idx]) w = np.maximum(0.0, xx2 - xx1 + 1) h = np.maximum(0.0, yy2 - yy1 + 1) inter = w * h if method is 'Min': o = inter / np.minimum(area[i], area[idx]) else: o = inter / (area[i] + area[idx] - inter) sorted_s = sorted_s[np.where(o <= threshold)] pick = pick[0:counter] return pick @staticmethod def __pad(total_boxes, w, h): # compute the padding coordinates (pad the bounding boxes to square) tmpw = (total_boxes[:, 2] - total_boxes[:, 0] + 1).astype(np.int32) tmph = (total_boxes[:, 3] - total_boxes[:, 1] + 1).astype(np.int32) numbox = total_boxes.shape[0] dx = np.ones(numbox, dtype=np.int32) dy = np.ones(numbox, dtype=np.int32) edx = tmpw.copy().astype(np.int32) edy = tmph.copy().astype(np.int32) x = total_boxes[:, 0].copy().astype(np.int32) y = total_boxes[:, 1].copy().astype(np.int32) ex = total_boxes[:, 2].copy().astype(np.int32) ey = total_boxes[:, 3].copy().astype(np.int32) tmp = np.where(ex > w) edx.flat[tmp] = np.expand_dims(-ex[tmp] + w + tmpw[tmp], 1) ex[tmp] = w tmp = np.where(ey > h) edy.flat[tmp] = np.expand_dims(-ey[tmp] + h + tmph[tmp], 1) ey[tmp] = h tmp = np.where(x < 1) dx.flat[tmp] = np.expand_dims(2 - x[tmp], 1) x[tmp] = 1 tmp = np.where(y < 1) dy.flat[tmp] = np.expand_dims(2 - y[tmp], 1) y[tmp] = 1 return dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph @staticmethod def __rerec(bbox): # convert bbox to square h = bbox[:, 3] - bbox[:, 1] w = bbox[:, 2] - bbox[:, 0] l = np.maximum(w, h) bbox[:, 0] = bbox[:, 0] + w * 0.5 - l * 0.5 bbox[:, 1] = bbox[:, 1] + h * 0.5 - l * 0.5 bbox[:, 2:4] = bbox[:, 0:2] + np.transpose(np.tile(l, (2, 1))) return bbox @staticmethod def __bbreg(boundingbox, reg): # calibrate bounding boxes if reg.shape[1] == 1: reg = np.reshape(reg, (reg.shape[2], reg.shape[3])) w = boundingbox[:, 2] - boundingbox[:, 0] + 1 h = boundingbox[:, 3] - boundingbox[:, 1] + 1 b1 = boundingbox[:, 0] + reg[:, 0] * w b2 = boundingbox[:, 1] + reg[:, 1] * h b3 = boundingbox[:, 2] + reg[:, 2] * w b4 = boundingbox[:, 3] + reg[:, 3] * h boundingbox[:, 0:4] = np.transpose(np.vstack([b1, b2, b3, b4])) return boundingbox def __stage1(self, image, scales: list, stage_status: StageStatus): """ First stage of the MTCNN. 
:param image: :param scales: :param stage_status: :return: """ total_boxes = np.empty((0, 9)) status = stage_status for scale in scales: scaled_image = self.__scale_image(image, scale) img_x = np.expand_dims(scaled_image, 0) img_y = np.transpose(img_x, (0, 2, 1, 3)) out = self.__pnet.feed(img_y) out0 = np.transpose(out[0], (0, 2, 1, 3)) out1 = np.transpose(out[1], (0, 2, 1, 3)) boxes, _ = self.__generate_bounding_box(out1[0, :, :, 1].copy(), out0[0, :, :, :].copy(), scale, self.__steps_threshold[0]) # inter-scale nms pick = self.__nms(boxes.copy(), 0.5, 'Union') if boxes.size > 0 and pick.size > 0: boxes = boxes[pick, :] total_boxes = np.append(total_boxes, boxes, axis=0) numboxes = total_boxes.shape[0] if numboxes > 0: pick = self.__nms(total_boxes.copy(), 0.7, 'Union') total_boxes = total_boxes[pick, :] regw = total_boxes[:, 2] - total_boxes[:, 0] regh = total_boxes[:, 3] - total_boxes[:, 1] qq1 = total_boxes[:, 0] + total_boxes[:, 5] * regw qq2 = total_boxes[:, 1] + total_boxes[:, 6] * regh qq3 = total_boxes[:, 2] + total_boxes[:, 7] * regw qq4 = total_boxes[:, 3] + total_boxes[:, 8] * regh total_boxes = np.transpose(np.vstack([qq1, qq2, qq3, qq4, total_boxes[:, 4]])) total_boxes = self.__rerec(total_boxes.copy()) total_boxes[:, 0:4] = np.fix(total_boxes[:, 0:4]).astype(np.int32) status = StageStatus(self.__pad(total_boxes.copy(), stage_status.width, stage_status.height), width=stage_status.width, height=stage_status.height) return total_boxes, status def __stage2(self, img, total_boxes, stage_status:StageStatus): """ Second stage of the MTCNN. :param img: :param total_boxes: :param stage_status: :return: """ num_boxes = total_boxes.shape[0] if num_boxes == 0: return total_boxes, stage_status # second stage tempimg = np.zeros(shape=(24, 24, 3, num_boxes)) for k in range(0, num_boxes): tmp = np.zeros((int(stage_status.tmph[k]), int(stage_status.tmpw[k]), 3)) tmp[stage_status.dy[k] - 1:stage_status.edy[k], stage_status.dx[k] - 1:stage_status.edx[k], :] = \ img[stage_status.y[k] - 1:stage_status.ey[k], stage_status.x[k] - 1:stage_status.ex[k], :] if tmp.shape[0] > 0 and tmp.shape[1] > 0 or tmp.shape[0] == 0 and tmp.shape[1] == 0: tempimg[:, :, :, k] = cv2.resize(tmp, (24, 24), interpolation=cv2.INTER_AREA) else: return np.empty(shape=(0,)), stage_status tempimg = (tempimg - 127.5) * 0.0078125 tempimg1 = np.transpose(tempimg, (3, 1, 0, 2)) out = self.__rnet.feed(tempimg1) out0 = np.transpose(out[0]) out1 = np.transpose(out[1]) score = out1[1, :] ipass = np.where(score > self.__steps_threshold[1]) total_boxes = np.hstack([total_boxes[ipass[0], 0:4].copy(), np.expand_dims(score[ipass].copy(), 1)]) mv = out0[:, ipass[0]] if total_boxes.shape[0] > 0: pick = self.__nms(total_boxes, 0.7, 'Union') total_boxes = total_boxes[pick, :] total_boxes = self.__bbreg(total_boxes.copy(), np.transpose(mv[:, pick])) total_boxes = self.__rerec(total_boxes.copy()) return total_boxes, stage_status def __stage3(self, img, total_boxes, stage_status: StageStatus): """ Third stage of the MTCNN. 
:param img: :param total_boxes: :param stage_status: :return: """ num_boxes = total_boxes.shape[0] if num_boxes == 0: return total_boxes, np.empty(shape=(0,)) total_boxes = np.fix(total_boxes).astype(np.int32) status = StageStatus(self.__pad(total_boxes.copy(), stage_status.width, stage_status.height), width=stage_status.width, height=stage_status.height) tempimg = np.zeros((48, 48, 3, num_boxes)) for k in range(0, num_boxes): tmp = np.zeros((int(status.tmph[k]), int(status.tmpw[k]), 3)) tmp[status.dy[k] - 1:status.edy[k], status.dx[k] - 1:status.edx[k], :] = \ img[status.y[k] - 1:status.ey[k], status.x[k] - 1:status.ex[k], :] if tmp.shape[0] > 0 and tmp.shape[1] > 0 or tmp.shape[0] == 0 and tmp.shape[1] == 0: tempimg[:, :, :, k] = cv2.resize(tmp, (48, 48), interpolation=cv2.INTER_AREA) else: return np.empty(shape=(0,)), np.empty(shape=(0,)) tempimg = (tempimg - 127.5) * 0.0078125 tempimg1 = np.transpose(tempimg, (3, 1, 0, 2)) out = self.__onet.feed(tempimg1) out0 = np.transpose(out[0]) out1 = np.transpose(out[1]) out2 = np.transpose(out[2]) score = out2[1, :] points = out1 ipass = np.where(score > self.__steps_threshold[2]) points = points[:, ipass[0]] total_boxes = np.hstack([total_boxes[ipass[0], 0:4].copy(), np.expand_dims(score[ipass].copy(), 1)]) mv = out0[:, ipass[0]] w = total_boxes[:, 2] - total_boxes[:, 0] + 1 h = total_boxes[:, 3] - total_boxes[:, 1] + 1 points[0:5, :] = np.tile(w, (5, 1)) * points[0:5, :] + np.tile(total_boxes[:, 0], (5, 1)) - 1 points[5:10, :] = np.tile(h, (5, 1)) * points[5:10, :] + np.tile(total_boxes[:, 1], (5, 1)) - 1 if total_boxes.shape[0] > 0: total_boxes = self.__bbreg(total_boxes.copy(), np.transpose(mv)) pick = self.__nms(total_boxes.copy(), 0.7, 'Min') total_boxes = total_boxes[pick, :] points = points[:, pick] return total_boxes, points def __del__(self): self.__session.close()
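For context, a minimal usage sketch of the detect_faces entry point shown in the scope above, following the package README; the image path is a placeholder, and cv2.imread returns BGR, so a channel-order conversion may be wanted depending on the use case:

import cv2
from mtcnn.mtcnn import MTCNN

detector = MTCNN()              # loads the bundled weights
img = cv2.imread("ivan.jpg")    # placeholder path
for face in detector.detect_faces(img):
    x, y, w, h = face["box"]
    print(face["confidence"], face["keypoints"]["nose"])
    cv2.rectangle(img, (x, y), (x + w, y + h), (0, 155, 255), 2)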
ipazc/mtcnn
mtcnn/mtcnn.py
MTCNN.__stage1
python
def __stage1(self, image, scales: list, stage_status: StageStatus): total_boxes = np.empty((0, 9)) status = stage_status for scale in scales: scaled_image = self.__scale_image(image, scale) img_x = np.expand_dims(scaled_image, 0) img_y = np.transpose(img_x, (0, 2, 1, 3)) out = self.__pnet.feed(img_y) out0 = np.transpose(out[0], (0, 2, 1, 3)) out1 = np.transpose(out[1], (0, 2, 1, 3)) boxes, _ = self.__generate_bounding_box(out1[0, :, :, 1].copy(), out0[0, :, :, :].copy(), scale, self.__steps_threshold[0]) # inter-scale nms pick = self.__nms(boxes.copy(), 0.5, 'Union') if boxes.size > 0 and pick.size > 0: boxes = boxes[pick, :] total_boxes = np.append(total_boxes, boxes, axis=0) numboxes = total_boxes.shape[0] if numboxes > 0: pick = self.__nms(total_boxes.copy(), 0.7, 'Union') total_boxes = total_boxes[pick, :] regw = total_boxes[:, 2] - total_boxes[:, 0] regh = total_boxes[:, 3] - total_boxes[:, 1] qq1 = total_boxes[:, 0] + total_boxes[:, 5] * regw qq2 = total_boxes[:, 1] + total_boxes[:, 6] * regh qq3 = total_boxes[:, 2] + total_boxes[:, 7] * regw qq4 = total_boxes[:, 3] + total_boxes[:, 8] * regh total_boxes = np.transpose(np.vstack([qq1, qq2, qq3, qq4, total_boxes[:, 4]])) total_boxes = self.__rerec(total_boxes.copy()) total_boxes[:, 0:4] = np.fix(total_boxes[:, 0:4]).astype(np.int32) status = StageStatus(self.__pad(total_boxes.copy(), stage_status.width, stage_status.height), width=stage_status.width, height=stage_status.height) return total_boxes, status
First stage of the MTCNN: runs the proposal network (PNet) over every scale of the image pyramid, collects the candidate bounding boxes and merges them with non-maximum suppression.
:param image: RGB image to process.
:param scales: scale pyramid computed from the minimum face size.
:param stage_status: StageStatus carrying the image dimensions.
:return: tuple of the candidate boxes and the updated StageStatus.
train
https://github.com/ipazc/mtcnn/blob/17029fe453a435f50c472ae2fd1c493341b5ede3/mtcnn/mtcnn.py#L442-L494
null
class MTCNN(object): """ Allows to perform MTCNN Detection -> a) Detection of faces (with the confidence probability) b) Detection of keypoints (left eye, right eye, nose, mouth_left, mouth_right) """ def __init__(self, weights_file: str=None, min_face_size: int=20, steps_threshold: list=None, scale_factor: float=0.709): """ Initializes the MTCNN. :param weights_file: file uri with the weights of the P, R and O networks from MTCNN. By default it will load the ones bundled with the package. :param min_face_size: minimum size of the face to detect :param steps_threshold: step's thresholds values :param scale_factor: scale factor """ if steps_threshold is None: steps_threshold = [0.6, 0.7, 0.7] if weights_file is None: weights_file = pkg_resources.resource_stream('mtcnn', 'data/mtcnn_weights.npy') self.__min_face_size = min_face_size self.__steps_threshold = steps_threshold self.__scale_factor = scale_factor config = tf.ConfigProto(log_device_placement=False) config.gpu_options.allow_growth = True self.__graph = tf.Graph() with self.__graph.as_default(): self.__session = tf.Session(config=config, graph=self.__graph) weights = np.load(weights_file).item() self.__pnet = PNet(self.__session, False) self.__pnet.set_weights(weights['PNet']) self.__rnet = RNet(self.__session, False) self.__rnet.set_weights(weights['RNet']) self.__onet = ONet(self.__session, False) self.__onet.set_weights(weights['ONet']) weights_file.close() @property def min_face_size(self): return self.__min_face_size @min_face_size.setter def min_face_size(self, mfc=20): try: self.__min_face_size = int(mfc) except ValueError: self.__min_face_size = 20 def __compute_scale_pyramid(self, m, min_layer): scales = [] factor_count = 0 while min_layer >= 12: scales += [m * np.power(self.__scale_factor, factor_count)] min_layer = min_layer * self.__scale_factor factor_count += 1 return scales @staticmethod def __scale_image(image, scale: float): """ Scales the image to a given scale. :param image: :param scale: :return: """ height, width, _ = image.shape width_scaled = int(np.ceil(width * scale)) height_scaled = int(np.ceil(height * scale)) im_data = cv2.resize(image, (width_scaled, height_scaled), interpolation=cv2.INTER_AREA) # Normalize the image's pixels im_data_normalized = (im_data - 127.5) * 0.0078125 return im_data_normalized @staticmethod def __generate_bounding_box(imap, reg, scale, t): # use heatmap to generate bounding boxes stride = 2 cellsize = 12 imap = np.transpose(imap) dx1 = np.transpose(reg[:, :, 0]) dy1 = np.transpose(reg[:, :, 1]) dx2 = np.transpose(reg[:, :, 2]) dy2 = np.transpose(reg[:, :, 3]) y, x = np.where(imap >= t) if y.shape[0] == 1: dx1 = np.flipud(dx1) dy1 = np.flipud(dy1) dx2 = np.flipud(dx2) dy2 = np.flipud(dy2) score = imap[(y, x)] reg = np.transpose(np.vstack([dx1[(y, x)], dy1[(y, x)], dx2[(y, x)], dy2[(y, x)]])) if reg.size == 0: reg = np.empty(shape=(0, 3)) bb = np.transpose(np.vstack([y, x])) q1 = np.fix((stride * bb + 1)/scale) q2 = np.fix((stride * bb + cellsize)/scale) boundingbox = np.hstack([q1, q2, np.expand_dims(score, 1), reg]) return boundingbox, reg @staticmethod def __nms(boxes, threshold, method): """ Non Maximum Suppression. :param boxes: np array with bounding boxes. :param threshold: :param method: NMS method to apply. 
Available values ('Min', 'Union') :return: """ if boxes.size == 0: return np.empty((0, 3)) x1 = boxes[:, 0] y1 = boxes[:, 1] x2 = boxes[:, 2] y2 = boxes[:, 3] s = boxes[:, 4] area = (x2 - x1 + 1) * (y2 - y1 + 1) sorted_s = np.argsort(s) pick = np.zeros_like(s, dtype=np.int16) counter = 0 while sorted_s.size > 0: i = sorted_s[-1] pick[counter] = i counter += 1 idx = sorted_s[0:-1] xx1 = np.maximum(x1[i], x1[idx]) yy1 = np.maximum(y1[i], y1[idx]) xx2 = np.minimum(x2[i], x2[idx]) yy2 = np.minimum(y2[i], y2[idx]) w = np.maximum(0.0, xx2 - xx1 + 1) h = np.maximum(0.0, yy2 - yy1 + 1) inter = w * h if method is 'Min': o = inter / np.minimum(area[i], area[idx]) else: o = inter / (area[i] + area[idx] - inter) sorted_s = sorted_s[np.where(o <= threshold)] pick = pick[0:counter] return pick @staticmethod def __pad(total_boxes, w, h): # compute the padding coordinates (pad the bounding boxes to square) tmpw = (total_boxes[:, 2] - total_boxes[:, 0] + 1).astype(np.int32) tmph = (total_boxes[:, 3] - total_boxes[:, 1] + 1).astype(np.int32) numbox = total_boxes.shape[0] dx = np.ones(numbox, dtype=np.int32) dy = np.ones(numbox, dtype=np.int32) edx = tmpw.copy().astype(np.int32) edy = tmph.copy().astype(np.int32) x = total_boxes[:, 0].copy().astype(np.int32) y = total_boxes[:, 1].copy().astype(np.int32) ex = total_boxes[:, 2].copy().astype(np.int32) ey = total_boxes[:, 3].copy().astype(np.int32) tmp = np.where(ex > w) edx.flat[tmp] = np.expand_dims(-ex[tmp] + w + tmpw[tmp], 1) ex[tmp] = w tmp = np.where(ey > h) edy.flat[tmp] = np.expand_dims(-ey[tmp] + h + tmph[tmp], 1) ey[tmp] = h tmp = np.where(x < 1) dx.flat[tmp] = np.expand_dims(2 - x[tmp], 1) x[tmp] = 1 tmp = np.where(y < 1) dy.flat[tmp] = np.expand_dims(2 - y[tmp], 1) y[tmp] = 1 return dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph @staticmethod def __rerec(bbox): # convert bbox to square h = bbox[:, 3] - bbox[:, 1] w = bbox[:, 2] - bbox[:, 0] l = np.maximum(w, h) bbox[:, 0] = bbox[:, 0] + w * 0.5 - l * 0.5 bbox[:, 1] = bbox[:, 1] + h * 0.5 - l * 0.5 bbox[:, 2:4] = bbox[:, 0:2] + np.transpose(np.tile(l, (2, 1))) return bbox @staticmethod def __bbreg(boundingbox, reg): # calibrate bounding boxes if reg.shape[1] == 1: reg = np.reshape(reg, (reg.shape[2], reg.shape[3])) w = boundingbox[:, 2] - boundingbox[:, 0] + 1 h = boundingbox[:, 3] - boundingbox[:, 1] + 1 b1 = boundingbox[:, 0] + reg[:, 0] * w b2 = boundingbox[:, 1] + reg[:, 1] * h b3 = boundingbox[:, 2] + reg[:, 2] * w b4 = boundingbox[:, 3] + reg[:, 3] * h boundingbox[:, 0:4] = np.transpose(np.vstack([b1, b2, b3, b4])) return boundingbox def detect_faces(self, img) -> list: """ Detects bounding boxes from the specified image. :param img: image to process :return: list containing all the bounding boxes detected with their keypoints. 
""" if img is None or not hasattr(img, "shape"): raise InvalidImage("Image not valid.") height, width, _ = img.shape stage_status = StageStatus(width=width, height=height) m = 12 / self.__min_face_size min_layer = np.amin([height, width]) * m scales = self.__compute_scale_pyramid(m, min_layer) stages = [self.__stage1, self.__stage2, self.__stage3] result = [scales, stage_status] # We pipe here each of the stages for stage in stages: result = stage(img, result[0], result[1]) [total_boxes, points] = result bounding_boxes = [] for bounding_box, keypoints in zip(total_boxes, points.T): bounding_boxes.append({ 'box': [int(bounding_box[0]), int(bounding_box[1]), int(bounding_box[2]-bounding_box[0]), int(bounding_box[3]-bounding_box[1])], 'confidence': bounding_box[-1], 'keypoints': { 'left_eye': (int(keypoints[0]), int(keypoints[5])), 'right_eye': (int(keypoints[1]), int(keypoints[6])), 'nose': (int(keypoints[2]), int(keypoints[7])), 'mouth_left': (int(keypoints[3]), int(keypoints[8])), 'mouth_right': (int(keypoints[4]), int(keypoints[9])), } } ) return bounding_boxes def __stage2(self, img, total_boxes, stage_status:StageStatus): """ Second stage of the MTCNN. :param img: :param total_boxes: :param stage_status: :return: """ num_boxes = total_boxes.shape[0] if num_boxes == 0: return total_boxes, stage_status # second stage tempimg = np.zeros(shape=(24, 24, 3, num_boxes)) for k in range(0, num_boxes): tmp = np.zeros((int(stage_status.tmph[k]), int(stage_status.tmpw[k]), 3)) tmp[stage_status.dy[k] - 1:stage_status.edy[k], stage_status.dx[k] - 1:stage_status.edx[k], :] = \ img[stage_status.y[k] - 1:stage_status.ey[k], stage_status.x[k] - 1:stage_status.ex[k], :] if tmp.shape[0] > 0 and tmp.shape[1] > 0 or tmp.shape[0] == 0 and tmp.shape[1] == 0: tempimg[:, :, :, k] = cv2.resize(tmp, (24, 24), interpolation=cv2.INTER_AREA) else: return np.empty(shape=(0,)), stage_status tempimg = (tempimg - 127.5) * 0.0078125 tempimg1 = np.transpose(tempimg, (3, 1, 0, 2)) out = self.__rnet.feed(tempimg1) out0 = np.transpose(out[0]) out1 = np.transpose(out[1]) score = out1[1, :] ipass = np.where(score > self.__steps_threshold[1]) total_boxes = np.hstack([total_boxes[ipass[0], 0:4].copy(), np.expand_dims(score[ipass].copy(), 1)]) mv = out0[:, ipass[0]] if total_boxes.shape[0] > 0: pick = self.__nms(total_boxes, 0.7, 'Union') total_boxes = total_boxes[pick, :] total_boxes = self.__bbreg(total_boxes.copy(), np.transpose(mv[:, pick])) total_boxes = self.__rerec(total_boxes.copy()) return total_boxes, stage_status def __stage3(self, img, total_boxes, stage_status: StageStatus): """ Third stage of the MTCNN. 
:param img: :param total_boxes: :param stage_status: :return: """ num_boxes = total_boxes.shape[0] if num_boxes == 0: return total_boxes, np.empty(shape=(0,)) total_boxes = np.fix(total_boxes).astype(np.int32) status = StageStatus(self.__pad(total_boxes.copy(), stage_status.width, stage_status.height), width=stage_status.width, height=stage_status.height) tempimg = np.zeros((48, 48, 3, num_boxes)) for k in range(0, num_boxes): tmp = np.zeros((int(status.tmph[k]), int(status.tmpw[k]), 3)) tmp[status.dy[k] - 1:status.edy[k], status.dx[k] - 1:status.edx[k], :] = \ img[status.y[k] - 1:status.ey[k], status.x[k] - 1:status.ex[k], :] if tmp.shape[0] > 0 and tmp.shape[1] > 0 or tmp.shape[0] == 0 and tmp.shape[1] == 0: tempimg[:, :, :, k] = cv2.resize(tmp, (48, 48), interpolation=cv2.INTER_AREA) else: return np.empty(shape=(0,)), np.empty(shape=(0,)) tempimg = (tempimg - 127.5) * 0.0078125 tempimg1 = np.transpose(tempimg, (3, 1, 0, 2)) out = self.__onet.feed(tempimg1) out0 = np.transpose(out[0]) out1 = np.transpose(out[1]) out2 = np.transpose(out[2]) score = out2[1, :] points = out1 ipass = np.where(score > self.__steps_threshold[2]) points = points[:, ipass[0]] total_boxes = np.hstack([total_boxes[ipass[0], 0:4].copy(), np.expand_dims(score[ipass].copy(), 1)]) mv = out0[:, ipass[0]] w = total_boxes[:, 2] - total_boxes[:, 0] + 1 h = total_boxes[:, 3] - total_boxes[:, 1] + 1 points[0:5, :] = np.tile(w, (5, 1)) * points[0:5, :] + np.tile(total_boxes[:, 0], (5, 1)) - 1 points[5:10, :] = np.tile(h, (5, 1)) * points[5:10, :] + np.tile(total_boxes[:, 1], (5, 1)) - 1 if total_boxes.shape[0] > 0: total_boxes = self.__bbreg(total_boxes.copy(), np.transpose(mv)) pick = self.__nms(total_boxes.copy(), 0.7, 'Min') total_boxes = total_boxes[pick, :] points = points[:, pick] return total_boxes, points def __del__(self): self.__session.close()
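A side note on where stage 1's scales come from: they are produced by __compute_scale_pyramid in the class above. A standalone re-derivation (the function name and the 640x480 example values are illustrative, not part of the package):

def scale_pyramid(min_face_size=20, height=480, width=640, factor=0.709):
    # Rescale so the smallest detectable face maps onto PNet's 12x12
    # receptive field, then shrink by `factor` until the shorter side
    # of the pyramid would drop below 12 pixels.
    m = 12 / min_face_size
    min_layer = min(height, width) * m
    scales, k = [], 0
    while min_layer >= 12:
        scales.append(m * factor ** k)
        min_layer *= factor
        k += 1
    return scales

print(scale_pyramid()[:3])  # [0.6, 0.4254, 0.30161...]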
ipazc/mtcnn
mtcnn/mtcnn.py
MTCNN.__stage2
python
def __stage2(self, img, total_boxes, stage_status: StageStatus):
    num_boxes = total_boxes.shape[0]
    if num_boxes == 0:
        return total_boxes, stage_status

    # second stage: crop every candidate, pad it to a square and resize it to 24x24
    tempimg = np.zeros(shape=(24, 24, 3, num_boxes))

    for k in range(0, num_boxes):
        tmp = np.zeros((int(stage_status.tmph[k]), int(stage_status.tmpw[k]), 3))

        tmp[stage_status.dy[k] - 1:stage_status.edy[k], stage_status.dx[k] - 1:stage_status.edx[k], :] = \
            img[stage_status.y[k] - 1:stage_status.ey[k], stage_status.x[k] - 1:stage_status.ex[k], :]

        # accept a valid crop (both dimensions positive) or a degenerate 0x0 crop
        if (tmp.shape[0] > 0 and tmp.shape[1] > 0) or (tmp.shape[0] == 0 and tmp.shape[1] == 0):
            tempimg[:, :, :, k] = cv2.resize(tmp, (24, 24), interpolation=cv2.INTER_AREA)
        else:
            return np.empty(shape=(0,)), stage_status

    # same normalization as the scale pyramid: zero-center and scale to ~[-1, 1]
    tempimg = (tempimg - 127.5) * 0.0078125
    tempimg1 = np.transpose(tempimg, (3, 1, 0, 2))

    out = self.__rnet.feed(tempimg1)

    out0 = np.transpose(out[0])
    out1 = np.transpose(out[1])

    score = out1[1, :]

    # keep only the boxes RNet scores above the stage-two threshold
    ipass = np.where(score > self.__steps_threshold[1])

    total_boxes = np.hstack([total_boxes[ipass[0], 0:4].copy(), np.expand_dims(score[ipass].copy(), 1)])

    mv = out0[:, ipass[0]]

    if total_boxes.shape[0] > 0:
        pick = self.__nms(total_boxes, 0.7, 'Union')
        total_boxes = total_boxes[pick, :]
        total_boxes = self.__bbreg(total_boxes.copy(), np.transpose(mv[:, pick]))
        total_boxes = self.__rerec(total_boxes.copy())

    return total_boxes, stage_status
Second stage of the MTCNN: crops each stage-one candidate, resizes it to 24x24 and lets the refinement network (RNet) reject false positives and recalibrate the surviving boxes.
:param img: RGB image to process.
:param total_boxes: candidate bounding boxes produced by the first stage.
:param stage_status: StageStatus with the padded crop coordinates.
:return: tuple of the refined boxes and the StageStatus.
train
https://github.com/ipazc/mtcnn/blob/17029fe453a435f50c472ae2fd1c493341b5ede3/mtcnn/mtcnn.py#L496-L547
null
class MTCNN(object): """ Allows to perform MTCNN Detection -> a) Detection of faces (with the confidence probability) b) Detection of keypoints (left eye, right eye, nose, mouth_left, mouth_right) """ def __init__(self, weights_file: str=None, min_face_size: int=20, steps_threshold: list=None, scale_factor: float=0.709): """ Initializes the MTCNN. :param weights_file: file uri with the weights of the P, R and O networks from MTCNN. By default it will load the ones bundled with the package. :param min_face_size: minimum size of the face to detect :param steps_threshold: step's thresholds values :param scale_factor: scale factor """ if steps_threshold is None: steps_threshold = [0.6, 0.7, 0.7] if weights_file is None: weights_file = pkg_resources.resource_stream('mtcnn', 'data/mtcnn_weights.npy') self.__min_face_size = min_face_size self.__steps_threshold = steps_threshold self.__scale_factor = scale_factor config = tf.ConfigProto(log_device_placement=False) config.gpu_options.allow_growth = True self.__graph = tf.Graph() with self.__graph.as_default(): self.__session = tf.Session(config=config, graph=self.__graph) weights = np.load(weights_file).item() self.__pnet = PNet(self.__session, False) self.__pnet.set_weights(weights['PNet']) self.__rnet = RNet(self.__session, False) self.__rnet.set_weights(weights['RNet']) self.__onet = ONet(self.__session, False) self.__onet.set_weights(weights['ONet']) weights_file.close() @property def min_face_size(self): return self.__min_face_size @min_face_size.setter def min_face_size(self, mfc=20): try: self.__min_face_size = int(mfc) except ValueError: self.__min_face_size = 20 def __compute_scale_pyramid(self, m, min_layer): scales = [] factor_count = 0 while min_layer >= 12: scales += [m * np.power(self.__scale_factor, factor_count)] min_layer = min_layer * self.__scale_factor factor_count += 1 return scales @staticmethod def __scale_image(image, scale: float): """ Scales the image to a given scale. :param image: :param scale: :return: """ height, width, _ = image.shape width_scaled = int(np.ceil(width * scale)) height_scaled = int(np.ceil(height * scale)) im_data = cv2.resize(image, (width_scaled, height_scaled), interpolation=cv2.INTER_AREA) # Normalize the image's pixels im_data_normalized = (im_data - 127.5) * 0.0078125 return im_data_normalized @staticmethod def __generate_bounding_box(imap, reg, scale, t): # use heatmap to generate bounding boxes stride = 2 cellsize = 12 imap = np.transpose(imap) dx1 = np.transpose(reg[:, :, 0]) dy1 = np.transpose(reg[:, :, 1]) dx2 = np.transpose(reg[:, :, 2]) dy2 = np.transpose(reg[:, :, 3]) y, x = np.where(imap >= t) if y.shape[0] == 1: dx1 = np.flipud(dx1) dy1 = np.flipud(dy1) dx2 = np.flipud(dx2) dy2 = np.flipud(dy2) score = imap[(y, x)] reg = np.transpose(np.vstack([dx1[(y, x)], dy1[(y, x)], dx2[(y, x)], dy2[(y, x)]])) if reg.size == 0: reg = np.empty(shape=(0, 3)) bb = np.transpose(np.vstack([y, x])) q1 = np.fix((stride * bb + 1)/scale) q2 = np.fix((stride * bb + cellsize)/scale) boundingbox = np.hstack([q1, q2, np.expand_dims(score, 1), reg]) return boundingbox, reg @staticmethod def __nms(boxes, threshold, method): """ Non Maximum Suppression. :param boxes: np array with bounding boxes. :param threshold: :param method: NMS method to apply. 
Available values ('Min', 'Union') :return: """ if boxes.size == 0: return np.empty((0, 3)) x1 = boxes[:, 0] y1 = boxes[:, 1] x2 = boxes[:, 2] y2 = boxes[:, 3] s = boxes[:, 4] area = (x2 - x1 + 1) * (y2 - y1 + 1) sorted_s = np.argsort(s) pick = np.zeros_like(s, dtype=np.int16) counter = 0 while sorted_s.size > 0: i = sorted_s[-1] pick[counter] = i counter += 1 idx = sorted_s[0:-1] xx1 = np.maximum(x1[i], x1[idx]) yy1 = np.maximum(y1[i], y1[idx]) xx2 = np.minimum(x2[i], x2[idx]) yy2 = np.minimum(y2[i], y2[idx]) w = np.maximum(0.0, xx2 - xx1 + 1) h = np.maximum(0.0, yy2 - yy1 + 1) inter = w * h if method is 'Min': o = inter / np.minimum(area[i], area[idx]) else: o = inter / (area[i] + area[idx] - inter) sorted_s = sorted_s[np.where(o <= threshold)] pick = pick[0:counter] return pick @staticmethod def __pad(total_boxes, w, h): # compute the padding coordinates (pad the bounding boxes to square) tmpw = (total_boxes[:, 2] - total_boxes[:, 0] + 1).astype(np.int32) tmph = (total_boxes[:, 3] - total_boxes[:, 1] + 1).astype(np.int32) numbox = total_boxes.shape[0] dx = np.ones(numbox, dtype=np.int32) dy = np.ones(numbox, dtype=np.int32) edx = tmpw.copy().astype(np.int32) edy = tmph.copy().astype(np.int32) x = total_boxes[:, 0].copy().astype(np.int32) y = total_boxes[:, 1].copy().astype(np.int32) ex = total_boxes[:, 2].copy().astype(np.int32) ey = total_boxes[:, 3].copy().astype(np.int32) tmp = np.where(ex > w) edx.flat[tmp] = np.expand_dims(-ex[tmp] + w + tmpw[tmp], 1) ex[tmp] = w tmp = np.where(ey > h) edy.flat[tmp] = np.expand_dims(-ey[tmp] + h + tmph[tmp], 1) ey[tmp] = h tmp = np.where(x < 1) dx.flat[tmp] = np.expand_dims(2 - x[tmp], 1) x[tmp] = 1 tmp = np.where(y < 1) dy.flat[tmp] = np.expand_dims(2 - y[tmp], 1) y[tmp] = 1 return dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph @staticmethod def __rerec(bbox): # convert bbox to square h = bbox[:, 3] - bbox[:, 1] w = bbox[:, 2] - bbox[:, 0] l = np.maximum(w, h) bbox[:, 0] = bbox[:, 0] + w * 0.5 - l * 0.5 bbox[:, 1] = bbox[:, 1] + h * 0.5 - l * 0.5 bbox[:, 2:4] = bbox[:, 0:2] + np.transpose(np.tile(l, (2, 1))) return bbox @staticmethod def __bbreg(boundingbox, reg): # calibrate bounding boxes if reg.shape[1] == 1: reg = np.reshape(reg, (reg.shape[2], reg.shape[3])) w = boundingbox[:, 2] - boundingbox[:, 0] + 1 h = boundingbox[:, 3] - boundingbox[:, 1] + 1 b1 = boundingbox[:, 0] + reg[:, 0] * w b2 = boundingbox[:, 1] + reg[:, 1] * h b3 = boundingbox[:, 2] + reg[:, 2] * w b4 = boundingbox[:, 3] + reg[:, 3] * h boundingbox[:, 0:4] = np.transpose(np.vstack([b1, b2, b3, b4])) return boundingbox def detect_faces(self, img) -> list: """ Detects bounding boxes from the specified image. :param img: image to process :return: list containing all the bounding boxes detected with their keypoints. 
""" if img is None or not hasattr(img, "shape"): raise InvalidImage("Image not valid.") height, width, _ = img.shape stage_status = StageStatus(width=width, height=height) m = 12 / self.__min_face_size min_layer = np.amin([height, width]) * m scales = self.__compute_scale_pyramid(m, min_layer) stages = [self.__stage1, self.__stage2, self.__stage3] result = [scales, stage_status] # We pipe here each of the stages for stage in stages: result = stage(img, result[0], result[1]) [total_boxes, points] = result bounding_boxes = [] for bounding_box, keypoints in zip(total_boxes, points.T): bounding_boxes.append({ 'box': [int(bounding_box[0]), int(bounding_box[1]), int(bounding_box[2]-bounding_box[0]), int(bounding_box[3]-bounding_box[1])], 'confidence': bounding_box[-1], 'keypoints': { 'left_eye': (int(keypoints[0]), int(keypoints[5])), 'right_eye': (int(keypoints[1]), int(keypoints[6])), 'nose': (int(keypoints[2]), int(keypoints[7])), 'mouth_left': (int(keypoints[3]), int(keypoints[8])), 'mouth_right': (int(keypoints[4]), int(keypoints[9])), } } ) return bounding_boxes def __stage1(self, image, scales: list, stage_status: StageStatus): """ First stage of the MTCNN. :param image: :param scales: :param stage_status: :return: """ total_boxes = np.empty((0, 9)) status = stage_status for scale in scales: scaled_image = self.__scale_image(image, scale) img_x = np.expand_dims(scaled_image, 0) img_y = np.transpose(img_x, (0, 2, 1, 3)) out = self.__pnet.feed(img_y) out0 = np.transpose(out[0], (0, 2, 1, 3)) out1 = np.transpose(out[1], (0, 2, 1, 3)) boxes, _ = self.__generate_bounding_box(out1[0, :, :, 1].copy(), out0[0, :, :, :].copy(), scale, self.__steps_threshold[0]) # inter-scale nms pick = self.__nms(boxes.copy(), 0.5, 'Union') if boxes.size > 0 and pick.size > 0: boxes = boxes[pick, :] total_boxes = np.append(total_boxes, boxes, axis=0) numboxes = total_boxes.shape[0] if numboxes > 0: pick = self.__nms(total_boxes.copy(), 0.7, 'Union') total_boxes = total_boxes[pick, :] regw = total_boxes[:, 2] - total_boxes[:, 0] regh = total_boxes[:, 3] - total_boxes[:, 1] qq1 = total_boxes[:, 0] + total_boxes[:, 5] * regw qq2 = total_boxes[:, 1] + total_boxes[:, 6] * regh qq3 = total_boxes[:, 2] + total_boxes[:, 7] * regw qq4 = total_boxes[:, 3] + total_boxes[:, 8] * regh total_boxes = np.transpose(np.vstack([qq1, qq2, qq3, qq4, total_boxes[:, 4]])) total_boxes = self.__rerec(total_boxes.copy()) total_boxes[:, 0:4] = np.fix(total_boxes[:, 0:4]).astype(np.int32) status = StageStatus(self.__pad(total_boxes.copy(), stage_status.width, stage_status.height), width=stage_status.width, height=stage_status.height) return total_boxes, status def __stage3(self, img, total_boxes, stage_status: StageStatus): """ Third stage of the MTCNN. 
:param img: :param total_boxes: :param stage_status: :return: """ num_boxes = total_boxes.shape[0] if num_boxes == 0: return total_boxes, np.empty(shape=(0,)) total_boxes = np.fix(total_boxes).astype(np.int32) status = StageStatus(self.__pad(total_boxes.copy(), stage_status.width, stage_status.height), width=stage_status.width, height=stage_status.height) tempimg = np.zeros((48, 48, 3, num_boxes)) for k in range(0, num_boxes): tmp = np.zeros((int(status.tmph[k]), int(status.tmpw[k]), 3)) tmp[status.dy[k] - 1:status.edy[k], status.dx[k] - 1:status.edx[k], :] = \ img[status.y[k] - 1:status.ey[k], status.x[k] - 1:status.ex[k], :] if tmp.shape[0] > 0 and tmp.shape[1] > 0 or tmp.shape[0] == 0 and tmp.shape[1] == 0: tempimg[:, :, :, k] = cv2.resize(tmp, (48, 48), interpolation=cv2.INTER_AREA) else: return np.empty(shape=(0,)), np.empty(shape=(0,)) tempimg = (tempimg - 127.5) * 0.0078125 tempimg1 = np.transpose(tempimg, (3, 1, 0, 2)) out = self.__onet.feed(tempimg1) out0 = np.transpose(out[0]) out1 = np.transpose(out[1]) out2 = np.transpose(out[2]) score = out2[1, :] points = out1 ipass = np.where(score > self.__steps_threshold[2]) points = points[:, ipass[0]] total_boxes = np.hstack([total_boxes[ipass[0], 0:4].copy(), np.expand_dims(score[ipass].copy(), 1)]) mv = out0[:, ipass[0]] w = total_boxes[:, 2] - total_boxes[:, 0] + 1 h = total_boxes[:, 3] - total_boxes[:, 1] + 1 points[0:5, :] = np.tile(w, (5, 1)) * points[0:5, :] + np.tile(total_boxes[:, 0], (5, 1)) - 1 points[5:10, :] = np.tile(h, (5, 1)) * points[5:10, :] + np.tile(total_boxes[:, 1], (5, 1)) - 1 if total_boxes.shape[0] > 0: total_boxes = self.__bbreg(total_boxes.copy(), np.transpose(mv)) pick = self.__nms(total_boxes.copy(), 0.7, 'Min') total_boxes = total_boxes[pick, :] points = points[:, pick] return total_boxes, points def __del__(self): self.__session.close()
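All three stages prune boxes through __nms, whose 'Union' and 'Min' modes differ only in the denominator of the overlap ratio. A standalone illustration of that ratio (this iou helper is illustrative, not part of the package):

def iou(box_a, box_b, method="Union"):
    # Boxes are (x1, y1, x2, y2); the +1 terms match the inclusive
    # pixel arithmetic used by __nms above.
    xx1, yy1 = max(box_a[0], box_b[0]), max(box_a[1], box_b[1])
    xx2, yy2 = min(box_a[2], box_b[2]), min(box_a[3], box_b[3])
    inter = max(0.0, xx2 - xx1 + 1) * max(0.0, yy2 - yy1 + 1)
    area = lambda b: (b[2] - b[0] + 1) * (b[3] - b[1] + 1)
    if method == "Min":  # note: ==, not the `is` identity test
        return inter / min(area(box_a), area(box_b))
    return inter / (area(box_a) + area(box_b) - inter)

print(iou((0, 0, 9, 9), (5, 5, 14, 14)))         # ~0.143 ('Union')
print(iou((0, 0, 9, 9), (5, 5, 14, 14), "Min"))  # 0.25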
ipazc/mtcnn
mtcnn/mtcnn.py
MTCNN.__stage3
python
def __stage3(self, img, total_boxes, stage_status: StageStatus):
    num_boxes = total_boxes.shape[0]
    if num_boxes == 0:
        return total_boxes, np.empty(shape=(0,))

    total_boxes = np.fix(total_boxes).astype(np.int32)

    status = StageStatus(self.__pad(total_boxes.copy(), stage_status.width, stage_status.height),
                         width=stage_status.width, height=stage_status.height)

    # crop every candidate, pad it to a square and resize it to 48x48 for ONet
    tempimg = np.zeros((48, 48, 3, num_boxes))

    for k in range(0, num_boxes):
        tmp = np.zeros((int(status.tmph[k]), int(status.tmpw[k]), 3))

        tmp[status.dy[k] - 1:status.edy[k], status.dx[k] - 1:status.edx[k], :] = \
            img[status.y[k] - 1:status.ey[k], status.x[k] - 1:status.ex[k], :]

        # accept a valid crop (both dimensions positive) or a degenerate 0x0 crop
        if (tmp.shape[0] > 0 and tmp.shape[1] > 0) or (tmp.shape[0] == 0 and tmp.shape[1] == 0):
            tempimg[:, :, :, k] = cv2.resize(tmp, (48, 48), interpolation=cv2.INTER_AREA)
        else:
            return np.empty(shape=(0,)), np.empty(shape=(0,))

    tempimg = (tempimg - 127.5) * 0.0078125
    tempimg1 = np.transpose(tempimg, (3, 1, 0, 2))

    out = self.__onet.feed(tempimg1)
    out0 = np.transpose(out[0])
    out1 = np.transpose(out[1])
    out2 = np.transpose(out[2])

    # ONet emits box regressions (out0), landmarks (out1) and scores (out2)
    score = out2[1, :]

    points = out1

    ipass = np.where(score > self.__steps_threshold[2])

    points = points[:, ipass[0]]

    total_boxes = np.hstack([total_boxes[ipass[0], 0:4].copy(), np.expand_dims(score[ipass].copy(), 1)])

    mv = out0[:, ipass[0]]

    w = total_boxes[:, 2] - total_boxes[:, 0] + 1
    h = total_boxes[:, 3] - total_boxes[:, 1] + 1

    # map the normalized landmarks back to absolute image coordinates
    points[0:5, :] = np.tile(w, (5, 1)) * points[0:5, :] + np.tile(total_boxes[:, 0], (5, 1)) - 1
    points[5:10, :] = np.tile(h, (5, 1)) * points[5:10, :] + np.tile(total_boxes[:, 1], (5, 1)) - 1

    if total_boxes.shape[0] > 0:
        total_boxes = self.__bbreg(total_boxes.copy(), np.transpose(mv))
        pick = self.__nms(total_boxes.copy(), 0.7, 'Min')
        total_boxes = total_boxes[pick, :]
        points = points[:, pick]

    return total_boxes, points
Third stage of the MTCNN: crops each refined candidate, resizes it to 48x48 and lets the output network (ONet) produce the final scores, box regressions and the five facial landmarks.
:param img: RGB image to process.
:param total_boxes: bounding boxes surviving the second stage.
:param stage_status: StageStatus with the image dimensions.
:return: tuple of the final boxes and a 10xN array of landmark coordinates.
train
https://github.com/ipazc/mtcnn/blob/17029fe453a435f50c472ae2fd1c493341b5ede3/mtcnn/mtcnn.py#L549-L613
null
class MTCNN(object): """ Allows to perform MTCNN Detection -> a) Detection of faces (with the confidence probability) b) Detection of keypoints (left eye, right eye, nose, mouth_left, mouth_right) """ def __init__(self, weights_file: str=None, min_face_size: int=20, steps_threshold: list=None, scale_factor: float=0.709): """ Initializes the MTCNN. :param weights_file: file uri with the weights of the P, R and O networks from MTCNN. By default it will load the ones bundled with the package. :param min_face_size: minimum size of the face to detect :param steps_threshold: step's thresholds values :param scale_factor: scale factor """ if steps_threshold is None: steps_threshold = [0.6, 0.7, 0.7] if weights_file is None: weights_file = pkg_resources.resource_stream('mtcnn', 'data/mtcnn_weights.npy') self.__min_face_size = min_face_size self.__steps_threshold = steps_threshold self.__scale_factor = scale_factor config = tf.ConfigProto(log_device_placement=False) config.gpu_options.allow_growth = True self.__graph = tf.Graph() with self.__graph.as_default(): self.__session = tf.Session(config=config, graph=self.__graph) weights = np.load(weights_file).item() self.__pnet = PNet(self.__session, False) self.__pnet.set_weights(weights['PNet']) self.__rnet = RNet(self.__session, False) self.__rnet.set_weights(weights['RNet']) self.__onet = ONet(self.__session, False) self.__onet.set_weights(weights['ONet']) weights_file.close() @property def min_face_size(self): return self.__min_face_size @min_face_size.setter def min_face_size(self, mfc=20): try: self.__min_face_size = int(mfc) except ValueError: self.__min_face_size = 20 def __compute_scale_pyramid(self, m, min_layer): scales = [] factor_count = 0 while min_layer >= 12: scales += [m * np.power(self.__scale_factor, factor_count)] min_layer = min_layer * self.__scale_factor factor_count += 1 return scales @staticmethod def __scale_image(image, scale: float): """ Scales the image to a given scale. :param image: :param scale: :return: """ height, width, _ = image.shape width_scaled = int(np.ceil(width * scale)) height_scaled = int(np.ceil(height * scale)) im_data = cv2.resize(image, (width_scaled, height_scaled), interpolation=cv2.INTER_AREA) # Normalize the image's pixels im_data_normalized = (im_data - 127.5) * 0.0078125 return im_data_normalized @staticmethod def __generate_bounding_box(imap, reg, scale, t): # use heatmap to generate bounding boxes stride = 2 cellsize = 12 imap = np.transpose(imap) dx1 = np.transpose(reg[:, :, 0]) dy1 = np.transpose(reg[:, :, 1]) dx2 = np.transpose(reg[:, :, 2]) dy2 = np.transpose(reg[:, :, 3]) y, x = np.where(imap >= t) if y.shape[0] == 1: dx1 = np.flipud(dx1) dy1 = np.flipud(dy1) dx2 = np.flipud(dx2) dy2 = np.flipud(dy2) score = imap[(y, x)] reg = np.transpose(np.vstack([dx1[(y, x)], dy1[(y, x)], dx2[(y, x)], dy2[(y, x)]])) if reg.size == 0: reg = np.empty(shape=(0, 3)) bb = np.transpose(np.vstack([y, x])) q1 = np.fix((stride * bb + 1)/scale) q2 = np.fix((stride * bb + cellsize)/scale) boundingbox = np.hstack([q1, q2, np.expand_dims(score, 1), reg]) return boundingbox, reg @staticmethod def __nms(boxes, threshold, method): """ Non Maximum Suppression. :param boxes: np array with bounding boxes. :param threshold: :param method: NMS method to apply. 
Available values ('Min', 'Union') :return: """ if boxes.size == 0: return np.empty((0, 3)) x1 = boxes[:, 0] y1 = boxes[:, 1] x2 = boxes[:, 2] y2 = boxes[:, 3] s = boxes[:, 4] area = (x2 - x1 + 1) * (y2 - y1 + 1) sorted_s = np.argsort(s) pick = np.zeros_like(s, dtype=np.int16) counter = 0 while sorted_s.size > 0: i = sorted_s[-1] pick[counter] = i counter += 1 idx = sorted_s[0:-1] xx1 = np.maximum(x1[i], x1[idx]) yy1 = np.maximum(y1[i], y1[idx]) xx2 = np.minimum(x2[i], x2[idx]) yy2 = np.minimum(y2[i], y2[idx]) w = np.maximum(0.0, xx2 - xx1 + 1) h = np.maximum(0.0, yy2 - yy1 + 1) inter = w * h if method is 'Min': o = inter / np.minimum(area[i], area[idx]) else: o = inter / (area[i] + area[idx] - inter) sorted_s = sorted_s[np.where(o <= threshold)] pick = pick[0:counter] return pick @staticmethod def __pad(total_boxes, w, h): # compute the padding coordinates (pad the bounding boxes to square) tmpw = (total_boxes[:, 2] - total_boxes[:, 0] + 1).astype(np.int32) tmph = (total_boxes[:, 3] - total_boxes[:, 1] + 1).astype(np.int32) numbox = total_boxes.shape[0] dx = np.ones(numbox, dtype=np.int32) dy = np.ones(numbox, dtype=np.int32) edx = tmpw.copy().astype(np.int32) edy = tmph.copy().astype(np.int32) x = total_boxes[:, 0].copy().astype(np.int32) y = total_boxes[:, 1].copy().astype(np.int32) ex = total_boxes[:, 2].copy().astype(np.int32) ey = total_boxes[:, 3].copy().astype(np.int32) tmp = np.where(ex > w) edx.flat[tmp] = np.expand_dims(-ex[tmp] + w + tmpw[tmp], 1) ex[tmp] = w tmp = np.where(ey > h) edy.flat[tmp] = np.expand_dims(-ey[tmp] + h + tmph[tmp], 1) ey[tmp] = h tmp = np.where(x < 1) dx.flat[tmp] = np.expand_dims(2 - x[tmp], 1) x[tmp] = 1 tmp = np.where(y < 1) dy.flat[tmp] = np.expand_dims(2 - y[tmp], 1) y[tmp] = 1 return dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph @staticmethod def __rerec(bbox): # convert bbox to square h = bbox[:, 3] - bbox[:, 1] w = bbox[:, 2] - bbox[:, 0] l = np.maximum(w, h) bbox[:, 0] = bbox[:, 0] + w * 0.5 - l * 0.5 bbox[:, 1] = bbox[:, 1] + h * 0.5 - l * 0.5 bbox[:, 2:4] = bbox[:, 0:2] + np.transpose(np.tile(l, (2, 1))) return bbox @staticmethod def __bbreg(boundingbox, reg): # calibrate bounding boxes if reg.shape[1] == 1: reg = np.reshape(reg, (reg.shape[2], reg.shape[3])) w = boundingbox[:, 2] - boundingbox[:, 0] + 1 h = boundingbox[:, 3] - boundingbox[:, 1] + 1 b1 = boundingbox[:, 0] + reg[:, 0] * w b2 = boundingbox[:, 1] + reg[:, 1] * h b3 = boundingbox[:, 2] + reg[:, 2] * w b4 = boundingbox[:, 3] + reg[:, 3] * h boundingbox[:, 0:4] = np.transpose(np.vstack([b1, b2, b3, b4])) return boundingbox def detect_faces(self, img) -> list: """ Detects bounding boxes from the specified image. :param img: image to process :return: list containing all the bounding boxes detected with their keypoints. 
""" if img is None or not hasattr(img, "shape"): raise InvalidImage("Image not valid.") height, width, _ = img.shape stage_status = StageStatus(width=width, height=height) m = 12 / self.__min_face_size min_layer = np.amin([height, width]) * m scales = self.__compute_scale_pyramid(m, min_layer) stages = [self.__stage1, self.__stage2, self.__stage3] result = [scales, stage_status] # We pipe here each of the stages for stage in stages: result = stage(img, result[0], result[1]) [total_boxes, points] = result bounding_boxes = [] for bounding_box, keypoints in zip(total_boxes, points.T): bounding_boxes.append({ 'box': [int(bounding_box[0]), int(bounding_box[1]), int(bounding_box[2]-bounding_box[0]), int(bounding_box[3]-bounding_box[1])], 'confidence': bounding_box[-1], 'keypoints': { 'left_eye': (int(keypoints[0]), int(keypoints[5])), 'right_eye': (int(keypoints[1]), int(keypoints[6])), 'nose': (int(keypoints[2]), int(keypoints[7])), 'mouth_left': (int(keypoints[3]), int(keypoints[8])), 'mouth_right': (int(keypoints[4]), int(keypoints[9])), } } ) return bounding_boxes def __stage1(self, image, scales: list, stage_status: StageStatus): """ First stage of the MTCNN. :param image: :param scales: :param stage_status: :return: """ total_boxes = np.empty((0, 9)) status = stage_status for scale in scales: scaled_image = self.__scale_image(image, scale) img_x = np.expand_dims(scaled_image, 0) img_y = np.transpose(img_x, (0, 2, 1, 3)) out = self.__pnet.feed(img_y) out0 = np.transpose(out[0], (0, 2, 1, 3)) out1 = np.transpose(out[1], (0, 2, 1, 3)) boxes, _ = self.__generate_bounding_box(out1[0, :, :, 1].copy(), out0[0, :, :, :].copy(), scale, self.__steps_threshold[0]) # inter-scale nms pick = self.__nms(boxes.copy(), 0.5, 'Union') if boxes.size > 0 and pick.size > 0: boxes = boxes[pick, :] total_boxes = np.append(total_boxes, boxes, axis=0) numboxes = total_boxes.shape[0] if numboxes > 0: pick = self.__nms(total_boxes.copy(), 0.7, 'Union') total_boxes = total_boxes[pick, :] regw = total_boxes[:, 2] - total_boxes[:, 0] regh = total_boxes[:, 3] - total_boxes[:, 1] qq1 = total_boxes[:, 0] + total_boxes[:, 5] * regw qq2 = total_boxes[:, 1] + total_boxes[:, 6] * regh qq3 = total_boxes[:, 2] + total_boxes[:, 7] * regw qq4 = total_boxes[:, 3] + total_boxes[:, 8] * regh total_boxes = np.transpose(np.vstack([qq1, qq2, qq3, qq4, total_boxes[:, 4]])) total_boxes = self.__rerec(total_boxes.copy()) total_boxes[:, 0:4] = np.fix(total_boxes[:, 0:4]).astype(np.int32) status = StageStatus(self.__pad(total_boxes.copy(), stage_status.width, stage_status.height), width=stage_status.width, height=stage_status.height) return total_boxes, status def __stage2(self, img, total_boxes, stage_status:StageStatus): """ Second stage of the MTCNN. 
:param img: :param total_boxes: :param stage_status: :return: """ num_boxes = total_boxes.shape[0] if num_boxes == 0: return total_boxes, stage_status # second stage tempimg = np.zeros(shape=(24, 24, 3, num_boxes)) for k in range(0, num_boxes): tmp = np.zeros((int(stage_status.tmph[k]), int(stage_status.tmpw[k]), 3)) tmp[stage_status.dy[k] - 1:stage_status.edy[k], stage_status.dx[k] - 1:stage_status.edx[k], :] = \ img[stage_status.y[k] - 1:stage_status.ey[k], stage_status.x[k] - 1:stage_status.ex[k], :] if tmp.shape[0] > 0 and tmp.shape[1] > 0 or tmp.shape[0] == 0 and tmp.shape[1] == 0: tempimg[:, :, :, k] = cv2.resize(tmp, (24, 24), interpolation=cv2.INTER_AREA) else: return np.empty(shape=(0,)), stage_status tempimg = (tempimg - 127.5) * 0.0078125 tempimg1 = np.transpose(tempimg, (3, 1, 0, 2)) out = self.__rnet.feed(tempimg1) out0 = np.transpose(out[0]) out1 = np.transpose(out[1]) score = out1[1, :] ipass = np.where(score > self.__steps_threshold[1]) total_boxes = np.hstack([total_boxes[ipass[0], 0:4].copy(), np.expand_dims(score[ipass].copy(), 1)]) mv = out0[:, ipass[0]] if total_boxes.shape[0] > 0: pick = self.__nms(total_boxes, 0.7, 'Union') total_boxes = total_boxes[pick, :] total_boxes = self.__bbreg(total_boxes.copy(), np.transpose(mv[:, pick])) total_boxes = self.__rerec(total_boxes.copy()) return total_boxes, stage_status def __del__(self): self.__session.close()
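The landmark denormalization in stage 3 (box width times the normalized coordinate, shifted by the box corner) can be sanity-checked with made-up numbers:

import numpy as np

# ONet emits 10 values per box: x for the five landmarks, then y for the
# five landmarks, each relative to the (square) candidate box.
box = np.array([100, 120, 160, 180])        # x1, y1, x2, y2 (illustrative)
pts = np.array([0.3, 0.7, 0.5, 0.35, 0.65,  # normalized x
                0.4, 0.4, 0.6, 0.8, 0.8])   # normalized y
w = box[2] - box[0] + 1
h = box[3] - box[1] + 1
xs = w * pts[0:5] + box[0] - 1
ys = h * pts[5:10] + box[1] - 1
print(list(zip(xs.astype(int), ys.astype(int))))  # left_eye, right_eye, nose, mouth_left, mouth_right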
ipazc/mtcnn
mtcnn/layer_factory.py
LayerFactory.__make_var
python
def __make_var(self, name: str, shape: list): return tf.get_variable(name, shape, trainable=self.__network.is_trainable())
Creates a tensorflow variable with the given name and shape. :param name: name to set for the variable. :param shape: list defining the shape of the variable. :return: created TF variable.
train
https://github.com/ipazc/mtcnn/blob/17029fe453a435f50c472ae2fd1c493341b5ede3/mtcnn/layer_factory.py#L72-L79
null
class LayerFactory(object): """ Allows to create stack layers for a given network. """ AVAILABLE_PADDINGS = ('SAME', 'VALID') def __init__(self, network): self.__network = network @staticmethod def __validate_padding(padding): if padding not in LayerFactory.AVAILABLE_PADDINGS: raise Exception("Padding {} not valid".format(padding)) @staticmethod def __validate_grouping(channels_input: int, channels_output: int, group: int): if channels_input % group != 0: raise Exception("The number of channels in the input does not match the group") if channels_output % group != 0: raise Exception("The number of channels in the output does not match the group") @staticmethod def vectorize_input(input_layer): input_shape = input_layer.get_shape() if input_shape.ndims == 4: # Spatial input, must be vectorized. dim = 1 for x in input_shape[1:].as_list(): dim *= int(x) #dim = operator.mul(*(input_shape[1:].as_list())) vectorized_input = tf.reshape(input_layer, [-1, dim]) else: vectorized_input, dim = (input_layer, input_shape[-1].value) return vectorized_input, dim def new_feed(self, name: str, layer_shape: tuple): """ Creates a feed layer. This is usually the first layer in the network. :param name: name of the layer :return: """ feed_data = tf.placeholder(tf.float32, layer_shape, 'input') self.__network.add_layer(name, layer_output=feed_data) def new_conv(self, name: str, kernel_size: tuple, channels_output: int, stride_size: tuple, padding: str='SAME', group: int=1, biased: bool=True, relu: bool=True, input_layer_name: str=None): """ Creates a convolution layer for the network. :param name: name for the layer :param kernel_size: tuple containing the size of the kernel (Width, Height) :param channels_output: ¿? Perhaps number of channels in the output? it is used as the bias size. :param stride_size: tuple containing the size of the stride (Width, Height) :param padding: Type of padding. Available values are: ('SAME', 'VALID') :param group: groups for the kernel operation. More info required. :param biased: boolean flag to set if biased or not. :param relu: boolean flag to set if ReLu should be applied at the end of the layer or not. :param input_layer_name: name of the input layer for this layer. If None, it will take the last added layer of the network. """ # Verify that the padding is acceptable self.__validate_padding(padding) input_layer = self.__network.get_layer(input_layer_name) # Get the number of channels in the input channels_input = int(input_layer.get_shape()[-1]) # Verify that the grouping parameter is valid self.__validate_grouping(channels_input, channels_output, group) # Convolution for a given input and kernel convolve = lambda input_val, kernel: tf.nn.conv2d(input_val, kernel, [1, stride_size[1], stride_size[0], 1], padding=padding) with tf.variable_scope(name) as scope: kernel = self.__make_var('weights', shape=[kernel_size[1], kernel_size[0], channels_input // group, channels_output]) output = convolve(input_layer, kernel) # Add the biases, if required if biased: biases = self.__make_var('biases', [channels_output]) output = tf.nn.bias_add(output, biases) # Apply ReLU non-linearity, if required if relu: output = tf.nn.relu(output, name=scope.name) self.__network.add_layer(name, layer_output=output) def new_prelu(self, name: str, input_layer_name: str=None): """ Creates a new prelu layer with the given name and input. :param name: name for this layer. :param input_layer_name: name of the layer that serves as input for this one. 
""" input_layer = self.__network.get_layer(input_layer_name) with tf.variable_scope(name): channels_input = int(input_layer.get_shape()[-1]) alpha = self.__make_var('alpha', shape=[channels_input]) output = tf.nn.relu(input_layer) + tf.multiply(alpha, -tf.nn.relu(-input_layer)) self.__network.add_layer(name, layer_output=output) def new_max_pool(self, name:str, kernel_size: tuple, stride_size: tuple, padding='SAME', input_layer_name: str=None): """ Creates a new max pooling layer. :param name: name for the layer. :param kernel_size: tuple containing the size of the kernel (Width, Height) :param stride_size: tuple containing the size of the stride (Width, Height) :param padding: Type of padding. Available values are: ('SAME', 'VALID') :param input_layer_name: name of the input layer for this layer. If None, it will take the last added layer of the network. """ self.__validate_padding(padding) input_layer = self.__network.get_layer(input_layer_name) output = tf.nn.max_pool(input_layer, ksize=[1, kernel_size[1], kernel_size[0], 1], strides=[1, stride_size[1], stride_size[0], 1], padding=padding, name=name) self.__network.add_layer(name, layer_output=output) def new_fully_connected(self, name: str, output_count: int, relu=True, input_layer_name: str=None): """ Creates a new fully connected layer. :param name: name for the layer. :param output_count: number of outputs of the fully connected layer. :param relu: boolean flag to set if ReLu should be applied at the end of this layer. :param input_layer_name: name of the input layer for this layer. If None, it will take the last added layer of the network. """ with tf.variable_scope(name): input_layer = self.__network.get_layer(input_layer_name) vectorized_input, dimension = self.vectorize_input(input_layer) weights = self.__make_var('weights', shape=[dimension, output_count]) biases = self.__make_var('biases', shape=[output_count]) operation = tf.nn.relu_layer if relu else tf.nn.xw_plus_b fc = operation(vectorized_input, weights, biases, name=name) self.__network.add_layer(name, layer_output=fc) def new_softmax(self, name, axis, input_layer_name: str=None): """ Creates a new softmax layer :param name: name to set for the layer :param axis: :param input_layer_name: name of the input layer for this layer. If None, it will take the last added layer of the network. """ input_layer = self.__network.get_layer(input_layer_name) if LooseVersion(tf.__version__) < LooseVersion("1.5.0"): max_axis = tf.reduce_max(input_layer, axis, keep_dims=True) target_exp = tf.exp(input_layer - max_axis) normalize = tf.reduce_sum(target_exp, axis, keep_dims=True) else: max_axis = tf.reduce_max(input_layer, axis, keepdims=True) target_exp = tf.exp(input_layer - max_axis) normalize = tf.reduce_sum(target_exp, axis, keepdims=True) softmax = tf.div(target_exp, normalize, name) self.__network.add_layer(name, layer_output=softmax)
ipazc/mtcnn
mtcnn/layer_factory.py
LayerFactory.new_feed
python
def new_feed(self, name: str, layer_shape: tuple): feed_data = tf.placeholder(tf.float32, layer_shape, 'input') self.__network.add_layer(name, layer_output=feed_data)
Creates a feed (placeholder) layer. This is usually the first layer in the network.
:param name: name of the layer.
:param layer_shape: shape of the input placeholder, e.g. (None, height, width, channels).
train
https://github.com/ipazc/mtcnn/blob/17029fe453a435f50c472ae2fd1c493341b5ede3/mtcnn/layer_factory.py#L81-L89
null
class LayerFactory(object): """ Allows to create stack layers for a given network. """ AVAILABLE_PADDINGS = ('SAME', 'VALID') def __init__(self, network): self.__network = network @staticmethod def __validate_padding(padding): if padding not in LayerFactory.AVAILABLE_PADDINGS: raise Exception("Padding {} not valid".format(padding)) @staticmethod def __validate_grouping(channels_input: int, channels_output: int, group: int): if channels_input % group != 0: raise Exception("The number of channels in the input does not match the group") if channels_output % group != 0: raise Exception("The number of channels in the output does not match the group") @staticmethod def vectorize_input(input_layer): input_shape = input_layer.get_shape() if input_shape.ndims == 4: # Spatial input, must be vectorized. dim = 1 for x in input_shape[1:].as_list(): dim *= int(x) #dim = operator.mul(*(input_shape[1:].as_list())) vectorized_input = tf.reshape(input_layer, [-1, dim]) else: vectorized_input, dim = (input_layer, input_shape[-1].value) return vectorized_input, dim def __make_var(self, name: str, shape: list): """ Creates a tensorflow variable with the given name and shape. :param name: name to set for the variable. :param shape: list defining the shape of the variable. :return: created TF variable. """ return tf.get_variable(name, shape, trainable=self.__network.is_trainable()) def new_conv(self, name: str, kernel_size: tuple, channels_output: int, stride_size: tuple, padding: str='SAME', group: int=1, biased: bool=True, relu: bool=True, input_layer_name: str=None): """ Creates a convolution layer for the network. :param name: name for the layer :param kernel_size: tuple containing the size of the kernel (Width, Height) :param channels_output: ¿? Perhaps number of channels in the output? it is used as the bias size. :param stride_size: tuple containing the size of the stride (Width, Height) :param padding: Type of padding. Available values are: ('SAME', 'VALID') :param group: groups for the kernel operation. More info required. :param biased: boolean flag to set if biased or not. :param relu: boolean flag to set if ReLu should be applied at the end of the layer or not. :param input_layer_name: name of the input layer for this layer. If None, it will take the last added layer of the network. """ # Verify that the padding is acceptable self.__validate_padding(padding) input_layer = self.__network.get_layer(input_layer_name) # Get the number of channels in the input channels_input = int(input_layer.get_shape()[-1]) # Verify that the grouping parameter is valid self.__validate_grouping(channels_input, channels_output, group) # Convolution for a given input and kernel convolve = lambda input_val, kernel: tf.nn.conv2d(input_val, kernel, [1, stride_size[1], stride_size[0], 1], padding=padding) with tf.variable_scope(name) as scope: kernel = self.__make_var('weights', shape=[kernel_size[1], kernel_size[0], channels_input // group, channels_output]) output = convolve(input_layer, kernel) # Add the biases, if required if biased: biases = self.__make_var('biases', [channels_output]) output = tf.nn.bias_add(output, biases) # Apply ReLU non-linearity, if required if relu: output = tf.nn.relu(output, name=scope.name) self.__network.add_layer(name, layer_output=output) def new_prelu(self, name: str, input_layer_name: str=None): """ Creates a new prelu layer with the given name and input. :param name: name for this layer. :param input_layer_name: name of the layer that serves as input for this one. 
""" input_layer = self.__network.get_layer(input_layer_name) with tf.variable_scope(name): channels_input = int(input_layer.get_shape()[-1]) alpha = self.__make_var('alpha', shape=[channels_input]) output = tf.nn.relu(input_layer) + tf.multiply(alpha, -tf.nn.relu(-input_layer)) self.__network.add_layer(name, layer_output=output) def new_max_pool(self, name:str, kernel_size: tuple, stride_size: tuple, padding='SAME', input_layer_name: str=None): """ Creates a new max pooling layer. :param name: name for the layer. :param kernel_size: tuple containing the size of the kernel (Width, Height) :param stride_size: tuple containing the size of the stride (Width, Height) :param padding: Type of padding. Available values are: ('SAME', 'VALID') :param input_layer_name: name of the input layer for this layer. If None, it will take the last added layer of the network. """ self.__validate_padding(padding) input_layer = self.__network.get_layer(input_layer_name) output = tf.nn.max_pool(input_layer, ksize=[1, kernel_size[1], kernel_size[0], 1], strides=[1, stride_size[1], stride_size[0], 1], padding=padding, name=name) self.__network.add_layer(name, layer_output=output) def new_fully_connected(self, name: str, output_count: int, relu=True, input_layer_name: str=None): """ Creates a new fully connected layer. :param name: name for the layer. :param output_count: number of outputs of the fully connected layer. :param relu: boolean flag to set if ReLu should be applied at the end of this layer. :param input_layer_name: name of the input layer for this layer. If None, it will take the last added layer of the network. """ with tf.variable_scope(name): input_layer = self.__network.get_layer(input_layer_name) vectorized_input, dimension = self.vectorize_input(input_layer) weights = self.__make_var('weights', shape=[dimension, output_count]) biases = self.__make_var('biases', shape=[output_count]) operation = tf.nn.relu_layer if relu else tf.nn.xw_plus_b fc = operation(vectorized_input, weights, biases, name=name) self.__network.add_layer(name, layer_output=fc) def new_softmax(self, name, axis, input_layer_name: str=None): """ Creates a new softmax layer :param name: name to set for the layer :param axis: :param input_layer_name: name of the input layer for this layer. If None, it will take the last added layer of the network. """ input_layer = self.__network.get_layer(input_layer_name) if LooseVersion(tf.__version__) < LooseVersion("1.5.0"): max_axis = tf.reduce_max(input_layer, axis, keep_dims=True) target_exp = tf.exp(input_layer - max_axis) normalize = tf.reduce_sum(target_exp, axis, keep_dims=True) else: max_axis = tf.reduce_max(input_layer, axis, keepdims=True) target_exp = tf.exp(input_layer - max_axis) normalize = tf.reduce_sum(target_exp, axis, keepdims=True) softmax = tf.div(target_exp, normalize, name) self.__network.add_layer(name, layer_output=softmax)
ipazc/mtcnn
mtcnn/layer_factory.py
LayerFactory.new_conv
python
def new_conv(self, name: str, kernel_size: tuple, channels_output: int, stride_size: tuple, padding: str='SAME', group: int=1, biased: bool=True, relu: bool=True, input_layer_name: str=None): # Verify that the padding is acceptable self.__validate_padding(padding) input_layer = self.__network.get_layer(input_layer_name) # Get the number of channels in the input channels_input = int(input_layer.get_shape()[-1]) # Verify that the grouping parameter is valid self.__validate_grouping(channels_input, channels_output, group) # Convolution for a given input and kernel convolve = lambda input_val, kernel: tf.nn.conv2d(input_val, kernel, [1, stride_size[1], stride_size[0], 1], padding=padding) with tf.variable_scope(name) as scope: kernel = self.__make_var('weights', shape=[kernel_size[1], kernel_size[0], channels_input // group, channels_output]) output = convolve(input_layer, kernel) # Add the biases, if required if biased: biases = self.__make_var('biases', [channels_output]) output = tf.nn.bias_add(output, biases) # Apply ReLU non-linearity, if required if relu: output = tf.nn.relu(output, name=scope.name) self.__network.add_layer(name, layer_output=output)
Creates a convolution layer for the network.
:param name: name for the layer
:param kernel_size: tuple containing the size of the kernel (Width, Height)
:param channels_output: number of channels in the output feature map; also used as the size of the bias vector.
:param stride_size: tuple containing the size of the stride (Width, Height)
:param padding: Type of padding. Available values are: ('SAME', 'VALID')
:param group: number of groups for a grouped convolution; both the input and output channel counts must be divisible by it.
:param biased: whether to add a bias term after the convolution.
:param relu: whether to apply ReLU at the end of the layer.
:param input_layer_name: name of the input layer for this layer. If None, it will take the last added layer of the network.
train
https://github.com/ipazc/mtcnn/blob/17029fe453a435f50c472ae2fd1c493341b5ede3/mtcnn/layer_factory.py#L91-L138
[ "def __validate_padding(padding):\n if padding not in LayerFactory.AVAILABLE_PADDINGS:\n raise Exception(\"Padding {} not valid\".format(padding))\n", "def __validate_grouping(channels_input: int, channels_output: int, group: int):\n if channels_input % group != 0:\n raise Exception(\"The number of channels in the input does not match the group\")\n\n if channels_output % group != 0:\n raise Exception(\"The number of channels in the output does not match the group\")\n", "def __make_var(self, name: str, shape: list):\n \"\"\"\n Creates a tensorflow variable with the given name and shape.\n :param name: name to set for the variable.\n :param shape: list defining the shape of the variable.\n :return: created TF variable.\n \"\"\"\n return tf.get_variable(name, shape, trainable=self.__network.is_trainable())\n", "convolve = lambda input_val, kernel: tf.nn.conv2d(input_val, kernel, [1, stride_size[1], stride_size[0], 1],\n padding=padding)\n" ]
class LayerFactory(object): """ Allows to create stack layers for a given network. """ AVAILABLE_PADDINGS = ('SAME', 'VALID') def __init__(self, network): self.__network = network @staticmethod def __validate_padding(padding): if padding not in LayerFactory.AVAILABLE_PADDINGS: raise Exception("Padding {} not valid".format(padding)) @staticmethod def __validate_grouping(channels_input: int, channels_output: int, group: int): if channels_input % group != 0: raise Exception("The number of channels in the input does not match the group") if channels_output % group != 0: raise Exception("The number of channels in the output does not match the group") @staticmethod def vectorize_input(input_layer): input_shape = input_layer.get_shape() if input_shape.ndims == 4: # Spatial input, must be vectorized. dim = 1 for x in input_shape[1:].as_list(): dim *= int(x) #dim = operator.mul(*(input_shape[1:].as_list())) vectorized_input = tf.reshape(input_layer, [-1, dim]) else: vectorized_input, dim = (input_layer, input_shape[-1].value) return vectorized_input, dim def __make_var(self, name: str, shape: list): """ Creates a tensorflow variable with the given name and shape. :param name: name to set for the variable. :param shape: list defining the shape of the variable. :return: created TF variable. """ return tf.get_variable(name, shape, trainable=self.__network.is_trainable()) def new_feed(self, name: str, layer_shape: tuple): """ Creates a feed layer. This is usually the first layer in the network. :param name: name of the layer :return: """ feed_data = tf.placeholder(tf.float32, layer_shape, 'input') self.__network.add_layer(name, layer_output=feed_data) def new_prelu(self, name: str, input_layer_name: str=None): """ Creates a new prelu layer with the given name and input. :param name: name for this layer. :param input_layer_name: name of the layer that serves as input for this one. """ input_layer = self.__network.get_layer(input_layer_name) with tf.variable_scope(name): channels_input = int(input_layer.get_shape()[-1]) alpha = self.__make_var('alpha', shape=[channels_input]) output = tf.nn.relu(input_layer) + tf.multiply(alpha, -tf.nn.relu(-input_layer)) self.__network.add_layer(name, layer_output=output) def new_max_pool(self, name:str, kernel_size: tuple, stride_size: tuple, padding='SAME', input_layer_name: str=None): """ Creates a new max pooling layer. :param name: name for the layer. :param kernel_size: tuple containing the size of the kernel (Width, Height) :param stride_size: tuple containing the size of the stride (Width, Height) :param padding: Type of padding. Available values are: ('SAME', 'VALID') :param input_layer_name: name of the input layer for this layer. If None, it will take the last added layer of the network. """ self.__validate_padding(padding) input_layer = self.__network.get_layer(input_layer_name) output = tf.nn.max_pool(input_layer, ksize=[1, kernel_size[1], kernel_size[0], 1], strides=[1, stride_size[1], stride_size[0], 1], padding=padding, name=name) self.__network.add_layer(name, layer_output=output) def new_fully_connected(self, name: str, output_count: int, relu=True, input_layer_name: str=None): """ Creates a new fully connected layer. :param name: name for the layer. :param output_count: number of outputs of the fully connected layer. :param relu: boolean flag to set if ReLu should be applied at the end of this layer. :param input_layer_name: name of the input layer for this layer. If None, it will take the last added layer of the network. 
""" with tf.variable_scope(name): input_layer = self.__network.get_layer(input_layer_name) vectorized_input, dimension = self.vectorize_input(input_layer) weights = self.__make_var('weights', shape=[dimension, output_count]) biases = self.__make_var('biases', shape=[output_count]) operation = tf.nn.relu_layer if relu else tf.nn.xw_plus_b fc = operation(vectorized_input, weights, biases, name=name) self.__network.add_layer(name, layer_output=fc) def new_softmax(self, name, axis, input_layer_name: str=None): """ Creates a new softmax layer :param name: name to set for the layer :param axis: :param input_layer_name: name of the input layer for this layer. If None, it will take the last added layer of the network. """ input_layer = self.__network.get_layer(input_layer_name) if LooseVersion(tf.__version__) < LooseVersion("1.5.0"): max_axis = tf.reduce_max(input_layer, axis, keep_dims=True) target_exp = tf.exp(input_layer - max_axis) normalize = tf.reduce_sum(target_exp, axis, keep_dims=True) else: max_axis = tf.reduce_max(input_layer, axis, keepdims=True) target_exp = tf.exp(input_layer - max_axis) normalize = tf.reduce_sum(target_exp, axis, keepdims=True) softmax = tf.div(target_exp, normalize, name) self.__network.add_layer(name, layer_output=softmax)
ipazc/mtcnn
mtcnn/layer_factory.py
LayerFactory.new_prelu
python
def new_prelu(self, name: str, input_layer_name: str=None): input_layer = self.__network.get_layer(input_layer_name) with tf.variable_scope(name): channels_input = int(input_layer.get_shape()[-1]) alpha = self.__make_var('alpha', shape=[channels_input]) output = tf.nn.relu(input_layer) + tf.multiply(alpha, -tf.nn.relu(-input_layer)) self.__network.add_layer(name, layer_output=output)
Creates a new PReLU (parametric ReLU) layer with the given name and input. :param name: name for this layer. :param input_layer_name: name of the layer that serves as input for this one.
train
https://github.com/ipazc/mtcnn/blob/17029fe453a435f50c472ae2fd1c493341b5ede3/mtcnn/layer_factory.py#L140-L153
[ "def __make_var(self, name: str, shape: list):\n \"\"\"\n Creates a tensorflow variable with the given name and shape.\n :param name: name to set for the variable.\n :param shape: list defining the shape of the variable.\n :return: created TF variable.\n \"\"\"\n return tf.get_variable(name, shape, trainable=self.__network.is_trainable())\n" ]
class LayerFactory(object): """ Allows to create stack layers for a given network. """ AVAILABLE_PADDINGS = ('SAME', 'VALID') def __init__(self, network): self.__network = network @staticmethod def __validate_padding(padding): if padding not in LayerFactory.AVAILABLE_PADDINGS: raise Exception("Padding {} not valid".format(padding)) @staticmethod def __validate_grouping(channels_input: int, channels_output: int, group: int): if channels_input % group != 0: raise Exception("The number of channels in the input does not match the group") if channels_output % group != 0: raise Exception("The number of channels in the output does not match the group") @staticmethod def vectorize_input(input_layer): input_shape = input_layer.get_shape() if input_shape.ndims == 4: # Spatial input, must be vectorized. dim = 1 for x in input_shape[1:].as_list(): dim *= int(x) #dim = operator.mul(*(input_shape[1:].as_list())) vectorized_input = tf.reshape(input_layer, [-1, dim]) else: vectorized_input, dim = (input_layer, input_shape[-1].value) return vectorized_input, dim def __make_var(self, name: str, shape: list): """ Creates a tensorflow variable with the given name and shape. :param name: name to set for the variable. :param shape: list defining the shape of the variable. :return: created TF variable. """ return tf.get_variable(name, shape, trainable=self.__network.is_trainable()) def new_feed(self, name: str, layer_shape: tuple): """ Creates a feed layer. This is usually the first layer in the network. :param name: name of the layer :return: """ feed_data = tf.placeholder(tf.float32, layer_shape, 'input') self.__network.add_layer(name, layer_output=feed_data) def new_conv(self, name: str, kernel_size: tuple, channels_output: int, stride_size: tuple, padding: str='SAME', group: int=1, biased: bool=True, relu: bool=True, input_layer_name: str=None): """ Creates a convolution layer for the network. :param name: name for the layer :param kernel_size: tuple containing the size of the kernel (Width, Height) :param channels_output: ¿? Perhaps number of channels in the output? it is used as the bias size. :param stride_size: tuple containing the size of the stride (Width, Height) :param padding: Type of padding. Available values are: ('SAME', 'VALID') :param group: groups for the kernel operation. More info required. :param biased: boolean flag to set if biased or not. :param relu: boolean flag to set if ReLu should be applied at the end of the layer or not. :param input_layer_name: name of the input layer for this layer. If None, it will take the last added layer of the network. 
""" # Verify that the padding is acceptable self.__validate_padding(padding) input_layer = self.__network.get_layer(input_layer_name) # Get the number of channels in the input channels_input = int(input_layer.get_shape()[-1]) # Verify that the grouping parameter is valid self.__validate_grouping(channels_input, channels_output, group) # Convolution for a given input and kernel convolve = lambda input_val, kernel: tf.nn.conv2d(input_val, kernel, [1, stride_size[1], stride_size[0], 1], padding=padding) with tf.variable_scope(name) as scope: kernel = self.__make_var('weights', shape=[kernel_size[1], kernel_size[0], channels_input // group, channels_output]) output = convolve(input_layer, kernel) # Add the biases, if required if biased: biases = self.__make_var('biases', [channels_output]) output = tf.nn.bias_add(output, biases) # Apply ReLU non-linearity, if required if relu: output = tf.nn.relu(output, name=scope.name) self.__network.add_layer(name, layer_output=output) def new_max_pool(self, name:str, kernel_size: tuple, stride_size: tuple, padding='SAME', input_layer_name: str=None): """ Creates a new max pooling layer. :param name: name for the layer. :param kernel_size: tuple containing the size of the kernel (Width, Height) :param stride_size: tuple containing the size of the stride (Width, Height) :param padding: Type of padding. Available values are: ('SAME', 'VALID') :param input_layer_name: name of the input layer for this layer. If None, it will take the last added layer of the network. """ self.__validate_padding(padding) input_layer = self.__network.get_layer(input_layer_name) output = tf.nn.max_pool(input_layer, ksize=[1, kernel_size[1], kernel_size[0], 1], strides=[1, stride_size[1], stride_size[0], 1], padding=padding, name=name) self.__network.add_layer(name, layer_output=output) def new_fully_connected(self, name: str, output_count: int, relu=True, input_layer_name: str=None): """ Creates a new fully connected layer. :param name: name for the layer. :param output_count: number of outputs of the fully connected layer. :param relu: boolean flag to set if ReLu should be applied at the end of this layer. :param input_layer_name: name of the input layer for this layer. If None, it will take the last added layer of the network. """ with tf.variable_scope(name): input_layer = self.__network.get_layer(input_layer_name) vectorized_input, dimension = self.vectorize_input(input_layer) weights = self.__make_var('weights', shape=[dimension, output_count]) biases = self.__make_var('biases', shape=[output_count]) operation = tf.nn.relu_layer if relu else tf.nn.xw_plus_b fc = operation(vectorized_input, weights, biases, name=name) self.__network.add_layer(name, layer_output=fc) def new_softmax(self, name, axis, input_layer_name: str=None): """ Creates a new softmax layer :param name: name to set for the layer :param axis: :param input_layer_name: name of the input layer for this layer. If None, it will take the last added layer of the network. """ input_layer = self.__network.get_layer(input_layer_name) if LooseVersion(tf.__version__) < LooseVersion("1.5.0"): max_axis = tf.reduce_max(input_layer, axis, keep_dims=True) target_exp = tf.exp(input_layer - max_axis) normalize = tf.reduce_sum(target_exp, axis, keep_dims=True) else: max_axis = tf.reduce_max(input_layer, axis, keepdims=True) target_exp = tf.exp(input_layer - max_axis) normalize = tf.reduce_sum(target_exp, axis, keepdims=True) softmax = tf.div(target_exp, normalize, name) self.__network.add_layer(name, layer_output=softmax)
ipazc/mtcnn
mtcnn/layer_factory.py
LayerFactory.new_max_pool
python
def new_max_pool(self, name:str, kernel_size: tuple, stride_size: tuple, padding='SAME', input_layer_name: str=None): self.__validate_padding(padding) input_layer = self.__network.get_layer(input_layer_name) output = tf.nn.max_pool(input_layer, ksize=[1, kernel_size[1], kernel_size[0], 1], strides=[1, stride_size[1], stride_size[0], 1], padding=padding, name=name) self.__network.add_layer(name, layer_output=output)
Creates a new max pooling layer. :param name: name for the layer. :param kernel_size: tuple containing the size of the kernel (Width, Height) :param stride_size: tuple containing the size of the stride (Width, Height) :param padding: Type of padding. Available values are: ('SAME', 'VALID') :param input_layer_name: name of the input layer for this layer. If None, it will take the last added layer of the network.
train
https://github.com/ipazc/mtcnn/blob/17029fe453a435f50c472ae2fd1c493341b5ede3/mtcnn/layer_factory.py#L155-L177
[ "def __validate_padding(padding):\n if padding not in LayerFactory.AVAILABLE_PADDINGS:\n raise Exception(\"Padding {} not valid\".format(padding))\n" ]
class LayerFactory(object): """ Allows to create stack layers for a given network. """ AVAILABLE_PADDINGS = ('SAME', 'VALID') def __init__(self, network): self.__network = network @staticmethod def __validate_padding(padding): if padding not in LayerFactory.AVAILABLE_PADDINGS: raise Exception("Padding {} not valid".format(padding)) @staticmethod def __validate_grouping(channels_input: int, channels_output: int, group: int): if channels_input % group != 0: raise Exception("The number of channels in the input does not match the group") if channels_output % group != 0: raise Exception("The number of channels in the output does not match the group") @staticmethod def vectorize_input(input_layer): input_shape = input_layer.get_shape() if input_shape.ndims == 4: # Spatial input, must be vectorized. dim = 1 for x in input_shape[1:].as_list(): dim *= int(x) #dim = operator.mul(*(input_shape[1:].as_list())) vectorized_input = tf.reshape(input_layer, [-1, dim]) else: vectorized_input, dim = (input_layer, input_shape[-1].value) return vectorized_input, dim def __make_var(self, name: str, shape: list): """ Creates a tensorflow variable with the given name and shape. :param name: name to set for the variable. :param shape: list defining the shape of the variable. :return: created TF variable. """ return tf.get_variable(name, shape, trainable=self.__network.is_trainable()) def new_feed(self, name: str, layer_shape: tuple): """ Creates a feed layer. This is usually the first layer in the network. :param name: name of the layer :return: """ feed_data = tf.placeholder(tf.float32, layer_shape, 'input') self.__network.add_layer(name, layer_output=feed_data) def new_conv(self, name: str, kernel_size: tuple, channels_output: int, stride_size: tuple, padding: str='SAME', group: int=1, biased: bool=True, relu: bool=True, input_layer_name: str=None): """ Creates a convolution layer for the network. :param name: name for the layer :param kernel_size: tuple containing the size of the kernel (Width, Height) :param channels_output: ¿? Perhaps number of channels in the output? it is used as the bias size. :param stride_size: tuple containing the size of the stride (Width, Height) :param padding: Type of padding. Available values are: ('SAME', 'VALID') :param group: groups for the kernel operation. More info required. :param biased: boolean flag to set if biased or not. :param relu: boolean flag to set if ReLu should be applied at the end of the layer or not. :param input_layer_name: name of the input layer for this layer. If None, it will take the last added layer of the network. 
""" # Verify that the padding is acceptable self.__validate_padding(padding) input_layer = self.__network.get_layer(input_layer_name) # Get the number of channels in the input channels_input = int(input_layer.get_shape()[-1]) # Verify that the grouping parameter is valid self.__validate_grouping(channels_input, channels_output, group) # Convolution for a given input and kernel convolve = lambda input_val, kernel: tf.nn.conv2d(input_val, kernel, [1, stride_size[1], stride_size[0], 1], padding=padding) with tf.variable_scope(name) as scope: kernel = self.__make_var('weights', shape=[kernel_size[1], kernel_size[0], channels_input // group, channels_output]) output = convolve(input_layer, kernel) # Add the biases, if required if biased: biases = self.__make_var('biases', [channels_output]) output = tf.nn.bias_add(output, biases) # Apply ReLU non-linearity, if required if relu: output = tf.nn.relu(output, name=scope.name) self.__network.add_layer(name, layer_output=output) def new_prelu(self, name: str, input_layer_name: str=None): """ Creates a new prelu layer with the given name and input. :param name: name for this layer. :param input_layer_name: name of the layer that serves as input for this one. """ input_layer = self.__network.get_layer(input_layer_name) with tf.variable_scope(name): channels_input = int(input_layer.get_shape()[-1]) alpha = self.__make_var('alpha', shape=[channels_input]) output = tf.nn.relu(input_layer) + tf.multiply(alpha, -tf.nn.relu(-input_layer)) self.__network.add_layer(name, layer_output=output) def new_fully_connected(self, name: str, output_count: int, relu=True, input_layer_name: str=None): """ Creates a new fully connected layer. :param name: name for the layer. :param output_count: number of outputs of the fully connected layer. :param relu: boolean flag to set if ReLu should be applied at the end of this layer. :param input_layer_name: name of the input layer for this layer. If None, it will take the last added layer of the network. """ with tf.variable_scope(name): input_layer = self.__network.get_layer(input_layer_name) vectorized_input, dimension = self.vectorize_input(input_layer) weights = self.__make_var('weights', shape=[dimension, output_count]) biases = self.__make_var('biases', shape=[output_count]) operation = tf.nn.relu_layer if relu else tf.nn.xw_plus_b fc = operation(vectorized_input, weights, biases, name=name) self.__network.add_layer(name, layer_output=fc) def new_softmax(self, name, axis, input_layer_name: str=None): """ Creates a new softmax layer :param name: name to set for the layer :param axis: :param input_layer_name: name of the input layer for this layer. If None, it will take the last added layer of the network. """ input_layer = self.__network.get_layer(input_layer_name) if LooseVersion(tf.__version__) < LooseVersion("1.5.0"): max_axis = tf.reduce_max(input_layer, axis, keep_dims=True) target_exp = tf.exp(input_layer - max_axis) normalize = tf.reduce_sum(target_exp, axis, keep_dims=True) else: max_axis = tf.reduce_max(input_layer, axis, keepdims=True) target_exp = tf.exp(input_layer - max_axis) normalize = tf.reduce_sum(target_exp, axis, keepdims=True) softmax = tf.div(target_exp, normalize, name) self.__network.add_layer(name, layer_output=softmax)
ipazc/mtcnn
mtcnn/layer_factory.py
LayerFactory.new_fully_connected
python
def new_fully_connected(self, name: str, output_count: int, relu=True, input_layer_name: str=None): with tf.variable_scope(name): input_layer = self.__network.get_layer(input_layer_name) vectorized_input, dimension = self.vectorize_input(input_layer) weights = self.__make_var('weights', shape=[dimension, output_count]) biases = self.__make_var('biases', shape=[output_count]) operation = tf.nn.relu_layer if relu else tf.nn.xw_plus_b fc = operation(vectorized_input, weights, biases, name=name) self.__network.add_layer(name, layer_output=fc)
Creates a new fully connected layer. :param name: name for the layer. :param output_count: number of outputs of the fully connected layer. :param relu: boolean flag to set if ReLU should be applied at the end of this layer. :param input_layer_name: name of the input layer for this layer. If None, it will take the last added layer of the network.
train
https://github.com/ipazc/mtcnn/blob/17029fe453a435f50c472ae2fd1c493341b5ede3/mtcnn/layer_factory.py#L179-L200
[ "def vectorize_input(input_layer):\n input_shape = input_layer.get_shape()\n\n if input_shape.ndims == 4:\n # Spatial input, must be vectorized.\n dim = 1\n for x in input_shape[1:].as_list():\n dim *= int(x)\n\n #dim = operator.mul(*(input_shape[1:].as_list()))\n vectorized_input = tf.reshape(input_layer, [-1, dim])\n else:\n vectorized_input, dim = (input_layer, input_shape[-1].value)\n\n return vectorized_input, dim\n", "def __make_var(self, name: str, shape: list):\n \"\"\"\n Creates a tensorflow variable with the given name and shape.\n :param name: name to set for the variable.\n :param shape: list defining the shape of the variable.\n :return: created TF variable.\n \"\"\"\n return tf.get_variable(name, shape, trainable=self.__network.is_trainable())\n" ]
class LayerFactory(object): """ Allows to create stack layers for a given network. """ AVAILABLE_PADDINGS = ('SAME', 'VALID') def __init__(self, network): self.__network = network @staticmethod def __validate_padding(padding): if padding not in LayerFactory.AVAILABLE_PADDINGS: raise Exception("Padding {} not valid".format(padding)) @staticmethod def __validate_grouping(channels_input: int, channels_output: int, group: int): if channels_input % group != 0: raise Exception("The number of channels in the input does not match the group") if channels_output % group != 0: raise Exception("The number of channels in the output does not match the group") @staticmethod def vectorize_input(input_layer): input_shape = input_layer.get_shape() if input_shape.ndims == 4: # Spatial input, must be vectorized. dim = 1 for x in input_shape[1:].as_list(): dim *= int(x) #dim = operator.mul(*(input_shape[1:].as_list())) vectorized_input = tf.reshape(input_layer, [-1, dim]) else: vectorized_input, dim = (input_layer, input_shape[-1].value) return vectorized_input, dim def __make_var(self, name: str, shape: list): """ Creates a tensorflow variable with the given name and shape. :param name: name to set for the variable. :param shape: list defining the shape of the variable. :return: created TF variable. """ return tf.get_variable(name, shape, trainable=self.__network.is_trainable()) def new_feed(self, name: str, layer_shape: tuple): """ Creates a feed layer. This is usually the first layer in the network. :param name: name of the layer :return: """ feed_data = tf.placeholder(tf.float32, layer_shape, 'input') self.__network.add_layer(name, layer_output=feed_data) def new_conv(self, name: str, kernel_size: tuple, channels_output: int, stride_size: tuple, padding: str='SAME', group: int=1, biased: bool=True, relu: bool=True, input_layer_name: str=None): """ Creates a convolution layer for the network. :param name: name for the layer :param kernel_size: tuple containing the size of the kernel (Width, Height) :param channels_output: ¿? Perhaps number of channels in the output? it is used as the bias size. :param stride_size: tuple containing the size of the stride (Width, Height) :param padding: Type of padding. Available values are: ('SAME', 'VALID') :param group: groups for the kernel operation. More info required. :param biased: boolean flag to set if biased or not. :param relu: boolean flag to set if ReLu should be applied at the end of the layer or not. :param input_layer_name: name of the input layer for this layer. If None, it will take the last added layer of the network. 
""" # Verify that the padding is acceptable self.__validate_padding(padding) input_layer = self.__network.get_layer(input_layer_name) # Get the number of channels in the input channels_input = int(input_layer.get_shape()[-1]) # Verify that the grouping parameter is valid self.__validate_grouping(channels_input, channels_output, group) # Convolution for a given input and kernel convolve = lambda input_val, kernel: tf.nn.conv2d(input_val, kernel, [1, stride_size[1], stride_size[0], 1], padding=padding) with tf.variable_scope(name) as scope: kernel = self.__make_var('weights', shape=[kernel_size[1], kernel_size[0], channels_input // group, channels_output]) output = convolve(input_layer, kernel) # Add the biases, if required if biased: biases = self.__make_var('biases', [channels_output]) output = tf.nn.bias_add(output, biases) # Apply ReLU non-linearity, if required if relu: output = tf.nn.relu(output, name=scope.name) self.__network.add_layer(name, layer_output=output) def new_prelu(self, name: str, input_layer_name: str=None): """ Creates a new prelu layer with the given name and input. :param name: name for this layer. :param input_layer_name: name of the layer that serves as input for this one. """ input_layer = self.__network.get_layer(input_layer_name) with tf.variable_scope(name): channels_input = int(input_layer.get_shape()[-1]) alpha = self.__make_var('alpha', shape=[channels_input]) output = tf.nn.relu(input_layer) + tf.multiply(alpha, -tf.nn.relu(-input_layer)) self.__network.add_layer(name, layer_output=output) def new_max_pool(self, name:str, kernel_size: tuple, stride_size: tuple, padding='SAME', input_layer_name: str=None): """ Creates a new max pooling layer. :param name: name for the layer. :param kernel_size: tuple containing the size of the kernel (Width, Height) :param stride_size: tuple containing the size of the stride (Width, Height) :param padding: Type of padding. Available values are: ('SAME', 'VALID') :param input_layer_name: name of the input layer for this layer. If None, it will take the last added layer of the network. """ self.__validate_padding(padding) input_layer = self.__network.get_layer(input_layer_name) output = tf.nn.max_pool(input_layer, ksize=[1, kernel_size[1], kernel_size[0], 1], strides=[1, stride_size[1], stride_size[0], 1], padding=padding, name=name) self.__network.add_layer(name, layer_output=output) def new_softmax(self, name, axis, input_layer_name: str=None): """ Creates a new softmax layer :param name: name to set for the layer :param axis: :param input_layer_name: name of the input layer for this layer. If None, it will take the last added layer of the network. """ input_layer = self.__network.get_layer(input_layer_name) if LooseVersion(tf.__version__) < LooseVersion("1.5.0"): max_axis = tf.reduce_max(input_layer, axis, keep_dims=True) target_exp = tf.exp(input_layer - max_axis) normalize = tf.reduce_sum(target_exp, axis, keep_dims=True) else: max_axis = tf.reduce_max(input_layer, axis, keepdims=True) target_exp = tf.exp(input_layer - max_axis) normalize = tf.reduce_sum(target_exp, axis, keepdims=True) softmax = tf.div(target_exp, normalize, name) self.__network.add_layer(name, layer_output=softmax)
ipazc/mtcnn
mtcnn/layer_factory.py
LayerFactory.new_softmax
python
def new_softmax(self, name, axis, input_layer_name: str=None): input_layer = self.__network.get_layer(input_layer_name) if LooseVersion(tf.__version__) < LooseVersion("1.5.0"): max_axis = tf.reduce_max(input_layer, axis, keep_dims=True) target_exp = tf.exp(input_layer - max_axis) normalize = tf.reduce_sum(target_exp, axis, keep_dims=True) else: max_axis = tf.reduce_max(input_layer, axis, keepdims=True) target_exp = tf.exp(input_layer - max_axis) normalize = tf.reduce_sum(target_exp, axis, keepdims=True) softmax = tf.div(target_exp, normalize, name) self.__network.add_layer(name, layer_output=softmax)
Creates a new softmax layer :param name: name to set for the layer :param axis: axis along which the softmax is computed :param input_layer_name: name of the input layer for this layer. If None, it will take the last added layer of the network.
train
https://github.com/ipazc/mtcnn/blob/17029fe453a435f50c472ae2fd1c493341b5ede3/mtcnn/layer_factory.py#L202-L223
null
class LayerFactory(object): """ Allows to create stack layers for a given network. """ AVAILABLE_PADDINGS = ('SAME', 'VALID') def __init__(self, network): self.__network = network @staticmethod def __validate_padding(padding): if padding not in LayerFactory.AVAILABLE_PADDINGS: raise Exception("Padding {} not valid".format(padding)) @staticmethod def __validate_grouping(channels_input: int, channels_output: int, group: int): if channels_input % group != 0: raise Exception("The number of channels in the input does not match the group") if channels_output % group != 0: raise Exception("The number of channels in the output does not match the group") @staticmethod def vectorize_input(input_layer): input_shape = input_layer.get_shape() if input_shape.ndims == 4: # Spatial input, must be vectorized. dim = 1 for x in input_shape[1:].as_list(): dim *= int(x) #dim = operator.mul(*(input_shape[1:].as_list())) vectorized_input = tf.reshape(input_layer, [-1, dim]) else: vectorized_input, dim = (input_layer, input_shape[-1].value) return vectorized_input, dim def __make_var(self, name: str, shape: list): """ Creates a tensorflow variable with the given name and shape. :param name: name to set for the variable. :param shape: list defining the shape of the variable. :return: created TF variable. """ return tf.get_variable(name, shape, trainable=self.__network.is_trainable()) def new_feed(self, name: str, layer_shape: tuple): """ Creates a feed layer. This is usually the first layer in the network. :param name: name of the layer :return: """ feed_data = tf.placeholder(tf.float32, layer_shape, 'input') self.__network.add_layer(name, layer_output=feed_data) def new_conv(self, name: str, kernel_size: tuple, channels_output: int, stride_size: tuple, padding: str='SAME', group: int=1, biased: bool=True, relu: bool=True, input_layer_name: str=None): """ Creates a convolution layer for the network. :param name: name for the layer :param kernel_size: tuple containing the size of the kernel (Width, Height) :param channels_output: ¿? Perhaps number of channels in the output? it is used as the bias size. :param stride_size: tuple containing the size of the stride (Width, Height) :param padding: Type of padding. Available values are: ('SAME', 'VALID') :param group: groups for the kernel operation. More info required. :param biased: boolean flag to set if biased or not. :param relu: boolean flag to set if ReLu should be applied at the end of the layer or not. :param input_layer_name: name of the input layer for this layer. If None, it will take the last added layer of the network. 
""" # Verify that the padding is acceptable self.__validate_padding(padding) input_layer = self.__network.get_layer(input_layer_name) # Get the number of channels in the input channels_input = int(input_layer.get_shape()[-1]) # Verify that the grouping parameter is valid self.__validate_grouping(channels_input, channels_output, group) # Convolution for a given input and kernel convolve = lambda input_val, kernel: tf.nn.conv2d(input_val, kernel, [1, stride_size[1], stride_size[0], 1], padding=padding) with tf.variable_scope(name) as scope: kernel = self.__make_var('weights', shape=[kernel_size[1], kernel_size[0], channels_input // group, channels_output]) output = convolve(input_layer, kernel) # Add the biases, if required if biased: biases = self.__make_var('biases', [channels_output]) output = tf.nn.bias_add(output, biases) # Apply ReLU non-linearity, if required if relu: output = tf.nn.relu(output, name=scope.name) self.__network.add_layer(name, layer_output=output) def new_prelu(self, name: str, input_layer_name: str=None): """ Creates a new prelu layer with the given name and input. :param name: name for this layer. :param input_layer_name: name of the layer that serves as input for this one. """ input_layer = self.__network.get_layer(input_layer_name) with tf.variable_scope(name): channels_input = int(input_layer.get_shape()[-1]) alpha = self.__make_var('alpha', shape=[channels_input]) output = tf.nn.relu(input_layer) + tf.multiply(alpha, -tf.nn.relu(-input_layer)) self.__network.add_layer(name, layer_output=output) def new_max_pool(self, name:str, kernel_size: tuple, stride_size: tuple, padding='SAME', input_layer_name: str=None): """ Creates a new max pooling layer. :param name: name for the layer. :param kernel_size: tuple containing the size of the kernel (Width, Height) :param stride_size: tuple containing the size of the stride (Width, Height) :param padding: Type of padding. Available values are: ('SAME', 'VALID') :param input_layer_name: name of the input layer for this layer. If None, it will take the last added layer of the network. """ self.__validate_padding(padding) input_layer = self.__network.get_layer(input_layer_name) output = tf.nn.max_pool(input_layer, ksize=[1, kernel_size[1], kernel_size[0], 1], strides=[1, stride_size[1], stride_size[0], 1], padding=padding, name=name) self.__network.add_layer(name, layer_output=output) def new_fully_connected(self, name: str, output_count: int, relu=True, input_layer_name: str=None): """ Creates a new fully connected layer. :param name: name for the layer. :param output_count: number of outputs of the fully connected layer. :param relu: boolean flag to set if ReLu should be applied at the end of this layer. :param input_layer_name: name of the input layer for this layer. If None, it will take the last added layer of the network. """ with tf.variable_scope(name): input_layer = self.__network.get_layer(input_layer_name) vectorized_input, dimension = self.vectorize_input(input_layer) weights = self.__make_var('weights', shape=[dimension, output_count]) biases = self.__make_var('biases', shape=[output_count]) operation = tf.nn.relu_layer if relu else tf.nn.xw_plus_b fc = operation(vectorized_input, weights, biases, name=name) self.__network.add_layer(name, layer_output=fc)
ipazc/mtcnn
mtcnn/network.py
Network.add_layer
python
def add_layer(self, name: str, layer_output): self.__layers[name] = layer_output self.__last_layer_name = name
Adds a layer to the network. :param name: name of the layer to add :param layer_output: output tensor of the layer.
train
https://github.com/ipazc/mtcnn/blob/17029fe453a435f50c472ae2fd1c493341b5ede3/mtcnn/network.py#L53-L60
null
class Network(object): def __init__(self, session, trainable: bool=True): """ Initializes the network. :param trainable: flag to determine if this network should be trainable or not. """ self._session = session self.__trainable = trainable self.__layers = {} self.__last_layer_name = None with tf.variable_scope(self.__class__.__name__.lower()): self._config() def _config(self): """ Configures the network layers. It is usually done using the LayerFactory() class. """ raise NotImplementedError("This method must be implemented by the network.") def get_layer(self, name: str=None): """ Retrieves the layer by its name. :param name: name of the layer to retrieve. If name is None, it will retrieve the last added layer to the network. :return: layer output """ if name is None: name = self.__last_layer_name return self.__layers[name] def is_trainable(self): """ Getter for the trainable flag. """ return self.__trainable def set_weights(self, weights_values: dict, ignore_missing=False): """ Sets the weights values of the network. :param weights_values: dictionary with weights for each layer """ network_name = self.__class__.__name__.lower() with tf.variable_scope(network_name): for layer_name in weights_values: with tf.variable_scope(layer_name, reuse=True): for param_name, data in weights_values[layer_name].items(): try: var = tf.get_variable(param_name) self._session.run(var.assign(data)) except ValueError: if not ignore_missing: raise def feed(self, image): """ Feeds the network with an image :param image: image (perhaps loaded with CV2) :return: network result """ network_name = self.__class__.__name__.lower() with tf.variable_scope(network_name): return self._feed(image) def _feed(self, image): raise NotImplementedError("Method not implemented.")
ipazc/mtcnn
mtcnn/network.py
Network.get_layer
python
def get_layer(self, name: str=None): if name is None: name = self.__last_layer_name return self.__layers[name]
Retrieves the layer by its name. :param name: name of the layer to retrieve. If name is None, it will retrieve the last added layer to the network. :return: layer output
train
https://github.com/ipazc/mtcnn/blob/17029fe453a435f50c472ae2fd1c493341b5ede3/mtcnn/network.py#L62-L72
null
class Network(object): def __init__(self, session, trainable: bool=True): """ Initializes the network. :param trainable: flag to determine if this network should be trainable or not. """ self._session = session self.__trainable = trainable self.__layers = {} self.__last_layer_name = None with tf.variable_scope(self.__class__.__name__.lower()): self._config() def _config(self): """ Configures the network layers. It is usually done using the LayerFactory() class. """ raise NotImplementedError("This method must be implemented by the network.") def add_layer(self, name: str, layer_output): """ Adds a layer to the network. :param name: name of the layer to add :param layer_output: output layer. """ self.__layers[name] = layer_output self.__last_layer_name = name def is_trainable(self): """ Getter for the trainable flag. """ return self.__trainable def set_weights(self, weights_values: dict, ignore_missing=False): """ Sets the weights values of the network. :param weights_values: dictionary with weights for each layer """ network_name = self.__class__.__name__.lower() with tf.variable_scope(network_name): for layer_name in weights_values: with tf.variable_scope(layer_name, reuse=True): for param_name, data in weights_values[layer_name].items(): try: var = tf.get_variable(param_name) self._session.run(var.assign(data)) except ValueError: if not ignore_missing: raise def feed(self, image): """ Feeds the network with an image :param image: image (perhaps loaded with CV2) :return: network result """ network_name = self.__class__.__name__.lower() with tf.variable_scope(network_name): return self._feed(image) def _feed(self, image): raise NotImplementedError("Method not implemented.")
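Together, add_layer and get_layer implement the implicit chaining used by every LayerFactory record above: add_layer records the last-added name, and get_layer(None) returns that layer, so a _config() written without input_layer_name arguments reads as a linear pipeline. A standalone mock of that contract, with no TensorFlow dependency (class and values are illustrative):

class LastLayerStore:
    def __init__(self):
        self._layers, self._last = {}, None
    def add_layer(self, name, layer_output):
        self._layers[name] = layer_output
        self._last = name                     # remembered for implicit lookups
    def get_layer(self, name=None):
        return self._layers[self._last if name is None else name]

store = LastLayerStore()
store.add_layer('conv1', 'conv1-tensor')
store.add_layer('prelu1', 'prelu1-tensor')
assert store.get_layer() == 'prelu1-tensor'        # implicit: last added layer
assert store.get_layer('conv1') == 'conv1-tensor'  # explicit lookup by name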
ipazc/mtcnn
mtcnn/network.py
Network.set_weights
python
def set_weights(self, weights_values: dict, ignore_missing=False): network_name = self.__class__.__name__.lower() with tf.variable_scope(network_name): for layer_name in weights_values: with tf.variable_scope(layer_name, reuse=True): for param_name, data in weights_values[layer_name].items(): try: var = tf.get_variable(param_name) self._session.run(var.assign(data)) except ValueError: if not ignore_missing: raise
Sets the weights values of the network. :param weights_values: dictionary with weights for each layer :param ignore_missing: if True, variables missing from the graph are skipped instead of raising ValueError
train
https://github.com/ipazc/mtcnn/blob/17029fe453a435f50c472ae2fd1c493341b5ede3/mtcnn/network.py#L80-L97
null
class Network(object): def __init__(self, session, trainable: bool=True): """ Initializes the network. :param trainable: flag to determine if this network should be trainable or not. """ self._session = session self.__trainable = trainable self.__layers = {} self.__last_layer_name = None with tf.variable_scope(self.__class__.__name__.lower()): self._config() def _config(self): """ Configures the network layers. It is usually done using the LayerFactory() class. """ raise NotImplementedError("This method must be implemented by the network.") def add_layer(self, name: str, layer_output): """ Adds a layer to the network. :param name: name of the layer to add :param layer_output: output layer. """ self.__layers[name] = layer_output self.__last_layer_name = name def get_layer(self, name: str=None): """ Retrieves the layer by its name. :param name: name of the layer to retrieve. If name is None, it will retrieve the last added layer to the network. :return: layer output """ if name is None: name = self.__last_layer_name return self.__layers[name] def is_trainable(self): """ Getter for the trainable flag. """ return self.__trainable def feed(self, image): """ Feeds the network with an image :param image: image (perhaps loaded with CV2) :return: network result """ network_name = self.__class__.__name__.lower() with tf.variable_scope(network_name): return self._feed(image) def _feed(self, image): raise NotImplementedError("Method not implemented.")
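The weights_values argument maps layer names to per-variable arrays whose names match those created by LayerFactory ('weights', 'biases', 'alpha'). A plausible dictionary for a single conv layer, with illustrative shapes matching a 3x3 kernel, 3 input channels, and 10 output channels:

import numpy as np

weights_values = {
    'conv1': {
        'weights': np.zeros((3, 3, 3, 10), dtype=np.float32),
        'biases':  np.zeros(10, dtype=np.float32),
    },
}
# net.set_weights(weights_values)                       # strict: raises on missing variables
# net.set_weights(weights_values, ignore_missing=True)  # skips variables absent from the graph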
ipazc/mtcnn
mtcnn/network.py
Network.feed
python
def feed(self, image): network_name = self.__class__.__name__.lower() with tf.variable_scope(network_name): return self._feed(image)
Feeds the network with an image :param image: image (perhaps loaded with CV2) :return: network result
train
https://github.com/ipazc/mtcnn/blob/17029fe453a435f50c472ae2fd1c493341b5ede3/mtcnn/network.py#L99-L108
[ "def _feed(self, image):\n raise NotImplementedError(\"Method not implemented.\")" ]
class Network(object): def __init__(self, session, trainable: bool=True): """ Initializes the network. :param trainable: flag to determine if this network should be trainable or not. """ self._session = session self.__trainable = trainable self.__layers = {} self.__last_layer_name = None with tf.variable_scope(self.__class__.__name__.lower()): self._config() def _config(self): """ Configures the network layers. It is usually done using the LayerFactory() class. """ raise NotImplementedError("This method must be implemented by the network.") def add_layer(self, name: str, layer_output): """ Adds a layer to the network. :param name: name of the layer to add :param layer_output: output layer. """ self.__layers[name] = layer_output self.__last_layer_name = name def get_layer(self, name: str=None): """ Retrieves the layer by its name. :param name: name of the layer to retrieve. If name is None, it will retrieve the last added layer to the network. :return: layer output """ if name is None: name = self.__last_layer_name return self.__layers[name] def is_trainable(self): """ Getter for the trainable flag. """ return self.__trainable def set_weights(self, weights_values: dict, ignore_missing=False): """ Sets the weights values of the network. :param weights_values: dictionary with weights for each layer """ network_name = self.__class__.__name__.lower() with tf.variable_scope(network_name): for layer_name in weights_values: with tf.variable_scope(layer_name, reuse=True): for param_name, data in weights_values[layer_name].items(): try: var = tf.get_variable(param_name) self._session.run(var.assign(data)) except ValueError: if not ignore_missing: raise def _feed(self, image): raise NotImplementedError("Method not implemented.")
h2oai/datatable
datatable/xls.py
_parse_row
python
def _parse_row(rowvalues, rowtypes): n = len(rowvalues) assert n == len(rowtypes) if not n: return [] range_start = None ranges = [] for i in range(n): ctype = rowtypes[i] cval = rowvalues[i] # Check whether the cell is empty or not. If it is empty, and there is # an active range being tracked - terminate it. On the other hand, if # the cell is not empty and there isn't an active range, then start it. if ctype == 0 or ctype == 6 or (ctype == 1 and (cval == "" or cval.isspace())): if range_start is not None: ranges.append((range_start, i)) range_start = None else: if range_start is None: range_start = i if range_start is not None: ranges.append((range_start, n)) return ranges
Scan a single row from an Excel file, and return the list of ranges corresponding to each consecutive span of non-empty cells in this row. If all cells are empty, return an empty list. Each "range" in the list is a tuple of the form `(startcol, endcol)`. For example, if the row is the following: [ ][ 1.0 ][ 23 ][ "foo" ][ ][ "hello" ][ ] then the returned list of ranges will be: [(1, 4), (5, 6)] This algorithm considers a cell to be empty if its type is 0 (XL_EMPTY), or 6 (XL_BLANK), or if it's a text cell containing empty string, or a whitespace-only string. Numeric `0` is not considered empty.
train
https://github.com/h2oai/datatable/blob/dd5fba74d2ca85b66f82ae3c1e0b6ea2fd792564/datatable/xls.py#L106-L147
null
#!/usr/bin/env python3 #------------------------------------------------------------------------------- # Copyright 2018 H2O.ai # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #------------------------------------------------------------------------------- import datatable as dt import re from datatable.utils.typechecks import TValueError def read_xls_workbook(filename, subpath): try: import xlrd except ImportError: raise TValueError("Module `xlrd` is required in order to read " "Excel file '%s'" % filename) if subpath: wb = xlrd.open_workbook(filename, on_demand=True, ragged_rows=True) range2d = None if subpath in wb.sheet_names(): sheetname = subpath else: if "/" in subpath: sheetname, xlsrange = subpath.rsplit('/', 1) range2d = _excel_coords_to_range2d(xlsrange) if not(sheetname in wb.sheet_names() and range2d is not None): raise TValueError("Sheet `%s` is not found in the XLS file" % subpath) ws = wb.sheet_by_name(sheetname) result = read_xls_worksheet(ws, range2d) else: wb = xlrd.open_workbook(filename, ragged_rows=True) result = {} for ws in wb.sheets(): out = read_xls_worksheet(ws) if out is None: continue for i, frame in out.items(): result["%s/%s" % (ws.name, i)] = frame if len(result) == 0: return None elif len(result) == 1: for v in result.values(): return v else: return result def read_xls_worksheet(ws, subrange=None): # Get the worksheet's internal data arrays directly, for efficienct values = ws._cell_values types = ws._cell_types assert len(values) == len(types) # If the worksheet is empty, skip it if not values: return None if subrange is None: ranges2d = _combine_ranges([_parse_row(values[i], types[i]) for i in range(len(values))]) _process_merged_cells(ranges2d, ws.merged_cells) ranges2d.sort(key=lambda x: -(x[1] - x[0]) * (x[3] - x[2])) else: ranges2d = [subrange] results = {} for range2d in ranges2d: row0, row1, col0, col1 = range2d ncols = col1 - col0 if row0 < len(values): colnames = [str(n) for n in values[row0][col0:col1]] if len(colnames) < ncols: colnames += [None] * (ncols - len(colnames)) else: colnames = [None] * ncols rowdata = [] if row1 > len(values): values += [[]] * (row1 - len(values)) for irow in range(row0 + 1, row1): vv = values[irow] row = tuple(vv[col0:col1]) if len(row) < ncols: row += (None,) * (ncols - len(row)) rowdata.append(row) frame = dt.Frame(rowdata, names=colnames) results[_range2d_to_excel_coords(range2d)] = frame return results def _combine_ranges(ranges): """ This function takes a list of row-ranges (as returned by `_parse_row`) ordered by rows, and produces a list of distinct rectangular ranges within this grid. Within this function we define a 2d-range as a rectangular set of cells such that: - there are no empty rows / columns within this rectangle; - the rectangle is surrounded by empty rows / columns on all sides; - no subset of this rectangle comprises a valid 2d-range; - separate 2d-ranges are allowed to touch at a corner. 
""" ranges2d = [] for irow, rowranges in enumerate(ranges): ja = 0 jb = 0 while jb < len(rowranges): bcol0, bcol1 = rowranges[jb] if ja < len(ranges2d): _, arow1, acol0, acol1 = ranges2d[ja] if arow1 < irow: ja += 1 continue assert arow1 == irow or arow1 == irow + 1 else: acol0 = acol1 = 1000000000 if bcol0 == acol0 and bcol1 == acol1: ranges2d[ja][1] = irow + 1 ja += 1 jb += 1 elif bcol1 <= acol0: ranges2d.insert(ja, [irow, irow + 1, bcol0, bcol1]) ja += 1 jb += 1 elif bcol0 >= acol1: ja += 1 else: assert ja < len(ranges2d) ranges2d[ja][1] = irow + 1 if bcol0 < acol0: ranges2d[ja][2] = bcol0 if bcol1 > acol1: ranges2d[ja][3] = acol1 = bcol1 ja = _collapse_ranges(ranges2d, ja) jb += 1 return ranges2d def _collapse_ranges(ranges, ja): """ Within the `ranges` list find those 2d-ranges that overlap with `ranges[ja]` and merge them into `ranges[ja]`. Finally, return the new index of the ja-th range within the `ranges` list. """ arow0, _, acol0, acol1 = ranges[ja] jb = 0 while jb < len(ranges): if jb == ja: jb += 1 continue brow0, brow1, bcol0, bcol1 = ranges[jb] if bcol0 <= acol1 and brow1 >= arow0 and \ not(bcol0 == acol1 and brow1 == arow0): ranges[ja][0] = arow0 = min(arow0, brow0) ranges[ja][3] = acol1 = max(acol1, bcol1) del ranges[jb] if jb < ja: ja -= 1 else: jb += 1 return ja def _process_merged_cells(ranges, merged_cells): for mc in merged_cells: mrow0, mrow1, mcol0, mcol1 = mc for j, rng in enumerate(ranges): jrow0, jrow1, jcol0, jcol1 = rng if mrow0 > jrow1 or mrow1 < jrow0: continue if mcol0 > jcol1 or mcol1 < jcol0: continue if mrow0 >= jrow0 and mrow1 <= jrow1 and \ mcol0 >= jcol0 and mcol1 <= jcol1: continue if mrow0 < jrow0: rng[0] = mrow0 if mrow1 > jrow1: rng[1] = mrow1 if mcol0 < jcol0: rng[2] = mcol0 if mcol1 > jcol1: rng[3] = mcol1 _collapse_ranges(ranges, j) break def _range2d_to_excel_coords(range2d): def colname(i): r = "" while i >= 0: r = chr(ord('A') + i % 26) + r i = (i // 26) - 1 return r row0, row1, col0, col1 = range2d return "%s%d:%s%d" % (colname(col0), row0 + 1, colname(col1 - 1), row1) def _excel_coords_to_range2d(ec): def colindex(n): i = 0 for c in n: i = i * 26 + (ord(c) - ord('A')) + 1 return i - 1 mm = re.match(r"([A-Z]+)(\d+):([A-Z]+)(\d+)", ec) if not mm: return None row0 = int(mm.group(2)) - 1 row1 = int(mm.group(4)) col0 = colindex(mm.group(1)) col1 = colindex(mm.group(3)) + 1 if row0 > row1: row0, row1 = row1, row0 if col0 > col1: col0, col1 = col1, col0 return (row0, row1, col0, col1)
h2oai/datatable
datatable/xls.py
_combine_ranges
python
def _combine_ranges(ranges): ranges2d = [] for irow, rowranges in enumerate(ranges): ja = 0 jb = 0 while jb < len(rowranges): bcol0, bcol1 = rowranges[jb] if ja < len(ranges2d): _, arow1, acol0, acol1 = ranges2d[ja] if arow1 < irow: ja += 1 continue assert arow1 == irow or arow1 == irow + 1 else: acol0 = acol1 = 1000000000 if bcol0 == acol0 and bcol1 == acol1: ranges2d[ja][1] = irow + 1 ja += 1 jb += 1 elif bcol1 <= acol0: ranges2d.insert(ja, [irow, irow + 1, bcol0, bcol1]) ja += 1 jb += 1 elif bcol0 >= acol1: ja += 1 else: assert ja < len(ranges2d) ranges2d[ja][1] = irow + 1 if bcol0 < acol0: ranges2d[ja][2] = bcol0 if bcol1 > acol1: ranges2d[ja][3] = acol1 = bcol1 ja = _collapse_ranges(ranges2d, ja) jb += 1 return ranges2d
This function takes a list of row-ranges (as returned by `_parse_row`) ordered by rows, and produces a list of distinct rectangular ranges within this grid. Within this function we define a 2d-range as a rectangular set of cells such that: - there are no empty rows / columns within this rectangle; - the rectangle is surrounded by empty rows / columns on all sides; - no subset of this rectangle comprises a valid 2d-range; - separate 2d-ranges are allowed to touch at a corner.
train
https://github.com/h2oai/datatable/blob/dd5fba74d2ca85b66f82ae3c1e0b6ea2fd792564/datatable/xls.py#L150-L197
[ "def _collapse_ranges(ranges, ja):\n \"\"\"\n Within the `ranges` list find those 2d-ranges that overlap with `ranges[ja]`\n and merge them into `ranges[ja]`. Finally, return the new index of the\n ja-th range within the `ranges` list.\n \"\"\"\n arow0, _, acol0, acol1 = ranges[ja]\n jb = 0\n while jb < len(ranges):\n if jb == ja:\n jb += 1\n continue\n brow0, brow1, bcol0, bcol1 = ranges[jb]\n if bcol0 <= acol1 and brow1 >= arow0 and \\\n not(bcol0 == acol1 and brow1 == arow0):\n ranges[ja][0] = arow0 = min(arow0, brow0)\n ranges[ja][3] = acol1 = max(acol1, bcol1)\n del ranges[jb]\n if jb < ja:\n ja -= 1\n else:\n jb += 1\n return ja\n" ]
#!/usr/bin/env python3 #------------------------------------------------------------------------------- # Copyright 2018 H2O.ai # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #------------------------------------------------------------------------------- import datatable as dt import re from datatable.utils.typechecks import TValueError def read_xls_workbook(filename, subpath): try: import xlrd except ImportError: raise TValueError("Module `xlrd` is required in order to read " "Excel file '%s'" % filename) if subpath: wb = xlrd.open_workbook(filename, on_demand=True, ragged_rows=True) range2d = None if subpath in wb.sheet_names(): sheetname = subpath else: if "/" in subpath: sheetname, xlsrange = subpath.rsplit('/', 1) range2d = _excel_coords_to_range2d(xlsrange) if not(sheetname in wb.sheet_names() and range2d is not None): raise TValueError("Sheet `%s` is not found in the XLS file" % subpath) ws = wb.sheet_by_name(sheetname) result = read_xls_worksheet(ws, range2d) else: wb = xlrd.open_workbook(filename, ragged_rows=True) result = {} for ws in wb.sheets(): out = read_xls_worksheet(ws) if out is None: continue for i, frame in out.items(): result["%s/%s" % (ws.name, i)] = frame if len(result) == 0: return None elif len(result) == 1: for v in result.values(): return v else: return result def read_xls_worksheet(ws, subrange=None): # Get the worksheet's internal data arrays directly, for efficienct values = ws._cell_values types = ws._cell_types assert len(values) == len(types) # If the worksheet is empty, skip it if not values: return None if subrange is None: ranges2d = _combine_ranges([_parse_row(values[i], types[i]) for i in range(len(values))]) _process_merged_cells(ranges2d, ws.merged_cells) ranges2d.sort(key=lambda x: -(x[1] - x[0]) * (x[3] - x[2])) else: ranges2d = [subrange] results = {} for range2d in ranges2d: row0, row1, col0, col1 = range2d ncols = col1 - col0 if row0 < len(values): colnames = [str(n) for n in values[row0][col0:col1]] if len(colnames) < ncols: colnames += [None] * (ncols - len(colnames)) else: colnames = [None] * ncols rowdata = [] if row1 > len(values): values += [[]] * (row1 - len(values)) for irow in range(row0 + 1, row1): vv = values[irow] row = tuple(vv[col0:col1]) if len(row) < ncols: row += (None,) * (ncols - len(row)) rowdata.append(row) frame = dt.Frame(rowdata, names=colnames) results[_range2d_to_excel_coords(range2d)] = frame return results def _parse_row(rowvalues, rowtypes): """ Scan a single row from an Excel file, and return the list of ranges corresponding to each consecutive span of non-empty cells in this row. If all cells are empty, return an empty list. Each "range" in the list is a tuple of the form `(startcol, endcol)`. For example, if the row is the following: [ ][ 1.0 ][ 23 ][ "foo" ][ ][ "hello" ][ ] then the returned list of ranges will be: [(1, 4), (5, 6)] This algorithm considers a cell to be empty if its type is 0 (XL_EMPTY), or 6 (XL_BLANK), or if it's a text cell containing empty string, or a whitespace-only string. 
Numeric `0` is not considered empty. """ n = len(rowvalues) assert n == len(rowtypes) if not n: return [] range_start = None ranges = [] for i in range(n): ctype = rowtypes[i] cval = rowvalues[i] # Check whether the cell is empty or not. If it is empty, and there is # an active range being tracked - terminate it. On the other hand, if # the cell is not empty and there isn't an active range, then start it. if ctype == 0 or ctype == 6 or (ctype == 1 and (cval == "" or cval.isspace())): if range_start is not None: ranges.append((range_start, i)) range_start = None else: if range_start is None: range_start = i if range_start is not None: ranges.append((range_start, n)) return ranges def _collapse_ranges(ranges, ja): """ Within the `ranges` list find those 2d-ranges that overlap with `ranges[ja]` and merge them into `ranges[ja]`. Finally, return the new index of the ja-th range within the `ranges` list. """ arow0, _, acol0, acol1 = ranges[ja] jb = 0 while jb < len(ranges): if jb == ja: jb += 1 continue brow0, brow1, bcol0, bcol1 = ranges[jb] if bcol0 <= acol1 and brow1 >= arow0 and \ not(bcol0 == acol1 and brow1 == arow0): ranges[ja][0] = arow0 = min(arow0, brow0) ranges[ja][3] = acol1 = max(acol1, bcol1) del ranges[jb] if jb < ja: ja -= 1 else: jb += 1 return ja def _process_merged_cells(ranges, merged_cells): for mc in merged_cells: mrow0, mrow1, mcol0, mcol1 = mc for j, rng in enumerate(ranges): jrow0, jrow1, jcol0, jcol1 = rng if mrow0 > jrow1 or mrow1 < jrow0: continue if mcol0 > jcol1 or mcol1 < jcol0: continue if mrow0 >= jrow0 and mrow1 <= jrow1 and \ mcol0 >= jcol0 and mcol1 <= jcol1: continue if mrow0 < jrow0: rng[0] = mrow0 if mrow1 > jrow1: rng[1] = mrow1 if mcol0 < jcol0: rng[2] = mcol0 if mcol1 > jcol1: rng[3] = mcol1 _collapse_ranges(ranges, j) break def _range2d_to_excel_coords(range2d): def colname(i): r = "" while i >= 0: r = chr(ord('A') + i % 26) + r i = (i // 26) - 1 return r row0, row1, col0, col1 = range2d return "%s%d:%s%d" % (colname(col0), row0 + 1, colname(col1 - 1), row1) def _excel_coords_to_range2d(ec): def colindex(n): i = 0 for c in n: i = i * 26 + (ord(c) - ord('A')) + 1 return i - 1 mm = re.match(r"([A-Z]+)(\d+):([A-Z]+)(\d+)", ec) if not mm: return None row0 = int(mm.group(2)) - 1 row1 = int(mm.group(4)) col0 = colindex(mm.group(1)) col1 = colindex(mm.group(3)) + 1 if row0 > row1: row0, row1 = row1, row0 if col0 > col1: col0, col1 = col1, col0 return (row0, row1, col0, col1)
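Worked inputs for _combine_ranges above: each inner list holds one row's (startcol, endcol) spans as produced by _parse_row, and the output is a list of [row0, row1, col0, col1] rectangles, half-open on both axes. Illustrative values:

rows = [
    [(0, 2), (4, 6)],   # row 0: two spans separated by an empty column
    [(0, 2), (4, 6)],   # row 1: same spans, so both rectangles grow downward
    [],                 # row 2 is empty; the rectangles stop at row index 2 (exclusive)
]
assert _combine_ranges(rows) == [[0, 2, 0, 2], [0, 2, 4, 6]]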
h2oai/datatable
datatable/xls.py
_collapse_ranges
python
def _collapse_ranges(ranges, ja): arow0, _, acol0, acol1 = ranges[ja] jb = 0 while jb < len(ranges): if jb == ja: jb += 1 continue brow0, brow1, bcol0, bcol1 = ranges[jb] if bcol0 <= acol1 and brow1 >= arow0 and \ not(bcol0 == acol1 and brow1 == arow0): ranges[ja][0] = arow0 = min(arow0, brow0) ranges[ja][3] = acol1 = max(acol1, bcol1) del ranges[jb] if jb < ja: ja -= 1 else: jb += 1 return ja
Within the `ranges` list, find those 2d-ranges that overlap with `ranges[ja]` and merge them into `ranges[ja]`. Finally, return the new index of the ja-th range within the `ranges` list.
train
https://github.com/h2oai/datatable/blob/dd5fba74d2ca85b66f82ae3c1e0b6ea2fd792564/datatable/xls.py#L200-L222
null
#!/usr/bin/env python3 #------------------------------------------------------------------------------- # Copyright 2018 H2O.ai # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #------------------------------------------------------------------------------- import datatable as dt import re from datatable.utils.typechecks import TValueError def read_xls_workbook(filename, subpath): try: import xlrd except ImportError: raise TValueError("Module `xlrd` is required in order to read " "Excel file '%s'" % filename) if subpath: wb = xlrd.open_workbook(filename, on_demand=True, ragged_rows=True) range2d = None if subpath in wb.sheet_names(): sheetname = subpath else: if "/" in subpath: sheetname, xlsrange = subpath.rsplit('/', 1) range2d = _excel_coords_to_range2d(xlsrange) if not(sheetname in wb.sheet_names() and range2d is not None): raise TValueError("Sheet `%s` is not found in the XLS file" % subpath) ws = wb.sheet_by_name(sheetname) result = read_xls_worksheet(ws, range2d) else: wb = xlrd.open_workbook(filename, ragged_rows=True) result = {} for ws in wb.sheets(): out = read_xls_worksheet(ws) if out is None: continue for i, frame in out.items(): result["%s/%s" % (ws.name, i)] = frame if len(result) == 0: return None elif len(result) == 1: for v in result.values(): return v else: return result def read_xls_worksheet(ws, subrange=None): # Get the worksheet's internal data arrays directly, for efficienct values = ws._cell_values types = ws._cell_types assert len(values) == len(types) # If the worksheet is empty, skip it if not values: return None if subrange is None: ranges2d = _combine_ranges([_parse_row(values[i], types[i]) for i in range(len(values))]) _process_merged_cells(ranges2d, ws.merged_cells) ranges2d.sort(key=lambda x: -(x[1] - x[0]) * (x[3] - x[2])) else: ranges2d = [subrange] results = {} for range2d in ranges2d: row0, row1, col0, col1 = range2d ncols = col1 - col0 if row0 < len(values): colnames = [str(n) for n in values[row0][col0:col1]] if len(colnames) < ncols: colnames += [None] * (ncols - len(colnames)) else: colnames = [None] * ncols rowdata = [] if row1 > len(values): values += [[]] * (row1 - len(values)) for irow in range(row0 + 1, row1): vv = values[irow] row = tuple(vv[col0:col1]) if len(row) < ncols: row += (None,) * (ncols - len(row)) rowdata.append(row) frame = dt.Frame(rowdata, names=colnames) results[_range2d_to_excel_coords(range2d)] = frame return results def _parse_row(rowvalues, rowtypes): """ Scan a single row from an Excel file, and return the list of ranges corresponding to each consecutive span of non-empty cells in this row. If all cells are empty, return an empty list. Each "range" in the list is a tuple of the form `(startcol, endcol)`. For example, if the row is the following: [ ][ 1.0 ][ 23 ][ "foo" ][ ][ "hello" ][ ] then the returned list of ranges will be: [(1, 4), (5, 6)] This algorithm considers a cell to be empty if its type is 0 (XL_EMPTY), or 6 (XL_BLANK), or if it's a text cell containing empty string, or a whitespace-only string. 
Numeric `0` is not considered empty. """ n = len(rowvalues) assert n == len(rowtypes) if not n: return [] range_start = None ranges = [] for i in range(n): ctype = rowtypes[i] cval = rowvalues[i] # Check whether the cell is empty or not. If it is empty, and there is # an active range being tracked - terminate it. On the other hand, if # the cell is not empty and there isn't an active range, then start it. if ctype == 0 or ctype == 6 or (ctype == 1 and (cval == "" or cval.isspace())): if range_start is not None: ranges.append((range_start, i)) range_start = None else: if range_start is None: range_start = i if range_start is not None: ranges.append((range_start, n)) return ranges def _combine_ranges(ranges): """ This function takes a list of row-ranges (as returned by `_parse_row`) ordered by rows, and produces a list of distinct rectangular ranges within this grid. Within this function we define a 2d-range as a rectangular set of cells such that: - there are no empty rows / columns within this rectangle; - the rectangle is surrounded by empty rows / columns on all sides; - no subset of this rectangle comprises a valid 2d-range; - separate 2d-ranges are allowed to touch at a corner. """ ranges2d = [] for irow, rowranges in enumerate(ranges): ja = 0 jb = 0 while jb < len(rowranges): bcol0, bcol1 = rowranges[jb] if ja < len(ranges2d): _, arow1, acol0, acol1 = ranges2d[ja] if arow1 < irow: ja += 1 continue assert arow1 == irow or arow1 == irow + 1 else: acol0 = acol1 = 1000000000 if bcol0 == acol0 and bcol1 == acol1: ranges2d[ja][1] = irow + 1 ja += 1 jb += 1 elif bcol1 <= acol0: ranges2d.insert(ja, [irow, irow + 1, bcol0, bcol1]) ja += 1 jb += 1 elif bcol0 >= acol1: ja += 1 else: assert ja < len(ranges2d) ranges2d[ja][1] = irow + 1 if bcol0 < acol0: ranges2d[ja][2] = bcol0 if bcol1 > acol1: ranges2d[ja][3] = acol1 = bcol1 ja = _collapse_ranges(ranges2d, ja) jb += 1 return ranges2d def _process_merged_cells(ranges, merged_cells): for mc in merged_cells: mrow0, mrow1, mcol0, mcol1 = mc for j, rng in enumerate(ranges): jrow0, jrow1, jcol0, jcol1 = rng if mrow0 > jrow1 or mrow1 < jrow0: continue if mcol0 > jcol1 or mcol1 < jcol0: continue if mrow0 >= jrow0 and mrow1 <= jrow1 and \ mcol0 >= jcol0 and mcol1 <= jcol1: continue if mrow0 < jrow0: rng[0] = mrow0 if mrow1 > jrow1: rng[1] = mrow1 if mcol0 < jcol0: rng[2] = mcol0 if mcol1 > jcol1: rng[3] = mcol1 _collapse_ranges(ranges, j) break def _range2d_to_excel_coords(range2d): def colname(i): r = "" while i >= 0: r = chr(ord('A') + i % 26) + r i = (i // 26) - 1 return r row0, row1, col0, col1 = range2d return "%s%d:%s%d" % (colname(col0), row0 + 1, colname(col1 - 1), row1) def _excel_coords_to_range2d(ec): def colindex(n): i = 0 for c in n: i = i * 26 + (ord(c) - ord('A')) + 1 return i - 1 mm = re.match(r"([A-Z]+)(\d+):([A-Z]+)(\d+)", ec) if not mm: return None row0 = int(mm.group(2)) - 1 row1 = int(mm.group(4)) col0 = colindex(mm.group(1)) col1 = colindex(mm.group(3)) + 1 if row0 > row1: row0, row1 = row1, row0 if col0 > col1: col0, col1 = col1, col0 return (row0, row1, col0, col1)
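A hand-traced sketch of `_collapse_ranges` on two overlapping rectangles (same hypothetical import caveat as above). Note that the helper only extends `row0` upward and `col1` rightward on the surviving range; `row1` and `col0` are maintained by the caller, `_combine_ranges`, before the collapse.

from datatable.xls import _collapse_ranges

ranges = [[0, 2, 0, 3], [1, 4, 2, 5]]   # each entry is [row0, row1, col0, col1]
ja = _collapse_ranges(ranges, 1)        # merge overlapping ranges into ranges[1]
print(ja, ranges)                       # 0 [[0, 4, 2, 5]]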
h2oai/datatable
ci/setup_utils.py
find_linked_dynamic_libraries
python
def find_linked_dynamic_libraries(): with TaskContext("Find the required dynamic libraries") as log: llvm = get_llvm() libs = required_link_libraries() resolved = [] for libname in libs: if llvm: fullpath = os.path.join(llvm, "lib", libname) if os.path.isfile(fullpath): resolved.append(fullpath) log.info("Library `%s` found at %s" % (libname, fullpath)) continue else: log.info("%s does not exist" % fullpath) # Rely on the shell `locate` command to find the dynamic libraries. proc = subprocess.Popen(["locate", libname], stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = proc.communicate() if proc.returncode == 0: results = stdout.decode().strip().split("\n") results = [r for r in results if r] if results: results.sort(key=len) fullpath = results[0] assert os.path.isfile(fullpath), "Invalid path: %r" % (fullpath,) resolved.append(fullpath) log.info("Library `%s` found at %s" % (libname, fullpath)) continue else: log.fatal("Cannot locate dynamic library `%s`" % libname) else: log.fatal("`locate` command returned the following error:\n%s" % stderr.decode()) return resolved
This function attempts to locate the required link libraries and returns them as a list of absolute paths.
train
https://github.com/h2oai/datatable/blob/dd5fba74d2ca85b66f82ae3c1e0b6ea2fd792564/ci/setup_utils.py#L589-L626
[ "def get_llvm(with_version=False):\n global g_llvmdir, g_llvmver\n if g_llvmdir is Ellipsis:\n with TaskContext(\"Find an LLVM installation\") as log:\n g_llvmdir = None\n g_llvmver = None\n for LLVMX in [\"LLVM\", \"LLVM7\", \"LLVM6\", \"LLVM5\", \"LLVM4\"]:\n g_llvmdir = os.environ.get(LLVMX)\n if g_llvmdir:\n log.info(\"Environment variable %s = %s\"\n % (LLVMX, g_llvmdir))\n if not os.path.isdir(g_llvmdir):\n log.fatal(\"Environment variable %s = %r is not a \"\n \"valid directory\" % (LLVMX, g_llvmdir))\n g_llvmver = LLVMX\n break\n else:\n log.info(\"Environment variable %s is not set\" % LLVMX)\n if not g_llvmdir:\n candidate_dirs = [\"/usr/local/opt/llvm\"]\n for cdir in candidate_dirs:\n if os.path.isdir(cdir):\n log.info(\"Directory `%s` found\" % cdir)\n g_llvmdir = cdir\n break\n else:\n log.info(\"Candidate directory `%s` not found\" % cdir)\n if g_llvmdir:\n if not g_llvmver or g_llvmver == \"LLVM\":\n try:\n llc = os.path.join(g_llvmdir, \"bin/llvm-config\")\n if os.path.exists(llc):\n out = subprocess.check_output([llc, \"--version\"])\n version = out.decode().strip()\n g_llvmver = \"LLVM\" + version.split('.')[0]\n except Exception as e:\n log.info(\"%s when running llvm-config\" % str(e))\n g_llvmver = \"LLVM\"\n log.info(\"Llvm directory: %s\" % g_llvmdir)\n log.info(\"Version: %s\" % g_llvmver.lower())\n else:\n log.info(\"The build will proceed without Llvm support\")\n if with_version:\n return g_llvmdir, g_llvmver\n else:\n return g_llvmdir\n", "def required_link_libraries():\n # GCC on Ubuntu18.04 links to the following libraries (`ldd`):\n # linux-vdso.so.1\n # /lib64/ld-linux-x86-64.so.2\n # libstdc++.so.6 => /usr/lib/x86_64-linux-gnu/libstdc++.so.6\n # libgcc_s.so.1 => /lib/x86_64-linux-gnu/libgcc_s.so.1\n # libpthread.so.0 => /lib/x86_64-linux-gnu/libpthread.so.0\n # libc.so.6 => /lib/x86_64-linux-gnu/libc.so.6\n # libm.so.6 => /lib/x86_64-linux-gnu/libm.so.6\n # libdl.so.2 => /lib/x86_64-linux-gnu/libdl.so.2\n # These are all standard system libraries, so there is no need to bundle\n # them.\n if is_gcc():\n return []\n\n # Clang on MacOS links to the following libraries (`otool -L`):\n # @rpath/libc++.1.dylib\n # /usr/lib/libSystem.B.dylib\n # In addition, `libc++abi.1.dylib` (or `libc++abi.dylib`) is referenced\n # from `libc++.1.dylib`.\n # The @rpath- libraries have to be bundled into the datatable package.\n #\n if is_clang():\n if ismacos():\n return [\"libc++.1.dylib\", \"libc++abi.dylib\"]\n if islinux():\n return [\"libc++.so.1\", \"libc++abi.so.1\"]\n return []\n" ]
#!/usr/bin/env python # -*- coding: utf-8 -*- #------------------------------------------------------------------------------- # Copyright 2018 H2O.ai # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. #------------------------------------------------------------------------------- import os import re import subprocess import sys import sysconfig import tempfile from functools import lru_cache as memoize from distutils.errors import DistutilsExecError, CompileError __all__ = ( "find_linked_dynamic_libraries", "get_compiler", "get_datatable_version", "get_default_compile_flags", "get_extra_compile_flags", "get_extra_link_args", "get_llvm", "get_rpath", "islinux", "ismacos", "iswindows", "make_git_version_file", "monkey_patch_compiler", "required_link_libraries", "TaskContext", ) verbose = True colored = sys.stdout.isatty() #------------------------------------------------------------------------------- # Output helpers #------------------------------------------------------------------------------- class TaskContext: """ Use this clas for pretty-printing every step of the build process: with TaskContext("Important step") as log: log.info("step1") log.warn("something not right") log.fatal("cannot proceed...") """ def __init__(self, head): if colored: self._msgs = "\x1B[38;5;111m" + head + "\x1B[0m\n" else: self._msgs = head + "\n" def __enter__(self): return self def __exit__(self, ttype, value, traceback): self.emit() def info(self, msg): if colored: self._msgs += "\x1B[38;5;240m " + msg + "\x1B[0m\n" else: self._msgs += " %s\n" % msg def warn(self, msg): if colored: self._msgs += "\x1B[38;5;220m Warning: " + msg + "\x1B[0m\n" else: self._msgs += " Warning: " + msg + "\n" def emit(self): if verbose: print(self._msgs) self._msgs = "" def fatal(self, msg): # The SystemExit exception causes the error message to print without # any traceback. Also add some red color for dramatic effect. 
raise SystemExit("\x1B[91m\nSystemExit: %s\n\x1B[39m" % msg) #------------------------------------------------------------------------------- # Build process helpers #------------------------------------------------------------------------------- def dtroot(): return os.path.abspath(os.path.join(os.path.basename(__file__), "..")) def islinux(): return sys.platform == "linux" def ismacos(): return sys.platform == "darwin" def iswindows(): return sys.platform == "win32" def get_datatable_version(): with TaskContext("Determine datatable version") as log: version = None filename = os.path.join(dtroot(), "datatable", "__version__.py") if not os.path.isfile(filename): log.fatal("The version file %s does not exist" % filename) log.info("Reading file " + filename) with open(filename, encoding="utf-8") as f: rx = re.compile(r"version\s*=\s*['\"]([\d.]*)['\"]\s*") for line in f: mm = re.match(rx, line) if mm is not None: version = mm.group(1) log.info("Detected version: " + version) break if version is None: log.fatal("Could not detect project version from the " "__version__.py file") # Append build suffix if necessary suffix = os.environ.get("CI_VERSION_SUFFIX") if suffix: # See https://www.python.org/dev/peps/pep-0440/ for the acceptable # versioning schemes. log.info("... appending version suffix " + suffix) mm = re.match(r"(?:master|dev)[.+_-]?(\d+)", suffix) if mm: suffix = "dev" + str(mm.group(1)) version += "." + suffix log.info("Final version = " + version) return version def make_git_version_file(force): import subprocess with TaskContext("Generate git version file" + " (force)" * force) as log: filename = os.path.join(dtroot(), "datatable", "__git__.py") # Try to read git revision from env. githash = os.getenv('DTBL_GIT_HASH', None) if githash: log.info("Environment variable DTBL_GIT_HASH = " + githash) else: log.info("Environment variable DTBL_GIT_HASH not present") # Need to get git revision from git cmd. Fail if .git dir is not # accessible. gitdir = os.path.join(dtroot(), ".git") if not os.path.isdir(gitdir): # Check whether the file below is present. If not, this must # be a source distribution, and it will not be possible to # rebuild the __git__.py file. testfile = os.path.join(dtroot(), "ci", "Jenkinsfile.groovy") if not os.path.isfile(testfile) and os.path.isfile(filename): log.info("Source distribution detected, file __git__.py " "cannot be rebuilt") return if force: log.fatal("Cannot determine git revision of the package " "because folder `%s` is missing and environment " "variable DTBL_GIT_HASH is not set." % gitdir) log.info("Directory .git not found") log.warn("Could not generate __git__.py") return # Read git revision using cmd. out = subprocess.check_output(["git", "rev-parse", "HEAD"]) githash = out.decode("ascii").strip() log.info("`git rev-parse HEAD` = " + githash) log.info("Generating file " + filename) with open(filename, "w", encoding="utf-8") as o: o.write( "#!/usr/bin/env python3\n" "# © H2O.ai 2018; -*- encoding: utf-8 -*-\n" "# This Source Code Form is subject to the terms of the\n" "# Mozilla Public License, v2.0. If a copy of the MPL was\n" "# not distributed with this file, You can obtain one at\n" "# http://mozilla.org/MPL/2.0/.\n" "# ----------------------------------------------------------\n" "# This file was auto-generated from ci/setup_utils.py\n\n" "__git_revision__ = \'%s\'\n" % githash) g_llvmdir = ... g_llvmver = ... 
def get_llvm(with_version=False): global g_llvmdir, g_llvmver if g_llvmdir is Ellipsis: with TaskContext("Find an LLVM installation") as log: g_llvmdir = None g_llvmver = None for LLVMX in ["LLVM", "LLVM7", "LLVM6", "LLVM5", "LLVM4"]: g_llvmdir = os.environ.get(LLVMX) if g_llvmdir: log.info("Environment variable %s = %s" % (LLVMX, g_llvmdir)) if not os.path.isdir(g_llvmdir): log.fatal("Environment variable %s = %r is not a " "valid directory" % (LLVMX, g_llvmdir)) g_llvmver = LLVMX break else: log.info("Environment variable %s is not set" % LLVMX) if not g_llvmdir: candidate_dirs = ["/usr/local/opt/llvm"] for cdir in candidate_dirs: if os.path.isdir(cdir): log.info("Directory `%s` found" % cdir) g_llvmdir = cdir break else: log.info("Candidate directory `%s` not found" % cdir) if g_llvmdir: if not g_llvmver or g_llvmver == "LLVM": try: llc = os.path.join(g_llvmdir, "bin/llvm-config") if os.path.exists(llc): out = subprocess.check_output([llc, "--version"]) version = out.decode().strip() g_llvmver = "LLVM" + version.split('.')[0] except Exception as e: log.info("%s when running llvm-config" % str(e)) g_llvmver = "LLVM" log.info("Llvm directory: %s" % g_llvmdir) log.info("Version: %s" % g_llvmver.lower()) else: log.info("The build will proceed without Llvm support") if with_version: return g_llvmdir, g_llvmver else: return g_llvmdir def get_rpath(): if ismacos(): return "@loader_path/." else: return "$ORIGIN/." def print_compiler_version(log, cc): cmd = [cc, "--version"] proc = subprocess.Popen(cmd, stdout=subprocess.PIPE) stdout, _ = proc.communicate() stdout = stdout.decode().strip() log.info(" ".join(cmd) + " :") for line in stdout.split("\n"): log.info(" " + line.strip()) @memoize() def get_compiler(): with TaskContext("Determine the compiler") as log: for envvar in ["CXX", "CC"]: cc = os.environ.get(envvar, None) if cc: log.info("Using compiler from environment variable `%s`: %s" % (envvar, cc)) print_compiler_version(log, cc) return cc else: log.info("Environment variable `%s` is not set" % envvar) llvm = get_llvm() if llvm: cc = os.path.join(llvm, "bin", "clang++") if iswindows(): cc += ".exe" if os.path.isfile(cc): log.info("Found Clang compiler %s" % cc) print_compiler_version(log, cc) return cc else: log.info("Cannot find Clang compiler at %s" % cc) cc = None else: log.info("Llvm installation not found, cannot search for the " "clang++ compiler") fname = None outname = None try: fd, fname = tempfile.mkstemp(suffix=".c") outname = fname + ".out" os.close(fd) assert os.path.isfile(fname) candidate_compilers = ["clang++", "gcc"] try: import distutils.ccompiler cc = distutils.ccompiler.new_compiler() ccname = cc.executables["compiler_cxx"][0] candidate_compilers.append(ccname) log.info("distutils.ccompiler reports the default compiler to " "be `%s`" % (ccname, )) except Exception as e: log.info(str(e)) for cc in candidate_compilers: if iswindows() and not cc.endswith(".exe"): cc += ".exe" try: cmd = [cc, "-c", fname, "-o", outname] proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = proc.communicate() stdout = stdout.decode().strip() stderr = stderr.decode().strip() if proc.returncode == 0: log.info("Compiler `%s` will be used" % cc) print_compiler_version(log, cc) return cc else: log.info("Compiler `%s` returned an error when " "compiling a blank file: <%s>" % (cc, stderr)) except FileNotFoundError: log.info("Compiler `%s` is not found" % cc) finally: if fname and os.path.isfile(fname): os.remove(fname) if outname and os.path.isfile(outname): 
os.remove(outname) log.fatal("Suitable C++ compiler cannot be determined. Please " "specify a compiler executable in the `CXX` environment " "variable.") @memoize() def is_gcc(): cc = get_compiler() return ("gcc" in cc or "g++" in cc) and ("clang" not in cc) @memoize() def is_clang(): return "clang" in get_compiler() #------------------------------------------------------------------------------- # Determine compiler settings #------------------------------------------------------------------------------- @memoize() def get_compile_includes(): includes = set() with TaskContext("Find compile include directories") as log: includes.add("c") log.info("`c` is the main C++ source directory") confincludepy = sysconfig.get_config_var("CONFINCLUDEPY") if confincludepy: includes.add(confincludepy) log.info("`%s` added from CONFINCLUDEPY" % confincludepy) sysprefixinclude = os.path.join(sys.prefix, "include") includes.add(sysprefixinclude) log.info("`%s` added from sys.prefix" % sysprefixinclude) # Include path to C++ header files llvmdir = get_llvm() if llvmdir: dir1 = os.path.join(llvmdir, "include") dir2 = os.path.join(llvmdir, "include/c++/v1") includes.add(dir1) includes.add(dir2) log.info("`%s` added from Llvm package" % dir1) log.info("`%s` added from Llvm package" % dir2) includes = list(includes) for i, d in enumerate(includes): if not os.path.isdir(d): includes[i] = None log.info("Directory `%s` not found, ignoring" % d) return sorted(i for i in includes if i is not None) def get_default_compile_flags(): flags = sysconfig.get_config_var("PY_CFLAGS") # remove -arch XXX flags, and add "-m64" to force 64-bit only builds flags = re.sub(r"-arch \w+\s*", " ", flags) + " -m64" # remove -WXXX flags, because we set up all warnings manually afterwards flags = re.sub(r"\s*-W[a-zA-Z\-]+\s*", " ", flags) # remove -O3 flag since we'll be setting it manually to either -O0 or -O3 # depending on the debug mode flags = re.sub(r"\s*-O\d\s*", " ", flags) # remove -DNDEBUG so that the program can use asserts if needed flags = re.sub(r"\s*-DNDEBUG\s*", " ", flags) # remove '=format-security' because this is not even a real flag... flags = re.sub(r"=format-security", "", flags) # Clear additional flags not recognized by Clang flags = re.sub(r"-fuse-linker-plugin", "", flags) flags = re.sub(r"-ffat-lto-objects", "", flags) # Add the python include dir as '-isystem' to prevent warnings in Python.h if sysconfig.get_config_var("CONFINCLUDEPY"): flags += " -isystem %s" % sysconfig.get_config_var("CONFINCLUDEPY") # Squash spaces flags = re.sub(r"\s+", " ", flags) return flags @memoize() def get_extra_compile_flags(): flags = [] with TaskContext("Determine the extra compiler flags") as log: flags += ["-std=c++11"] if is_clang(): flags += ["-stdlib=libc++"] # Path to source files / Python include files flags += ["-Ic"] # Generate 'Position-independent code'. This is required for any # dynamically-linked library. 
flags += ["-fPIC"] if "DTDEBUG" in os.environ: flags += ["-g3", "-ggdb", "-O0"] flags += ["-DDTTEST", "-DDTDEBUG"] elif "DTASAN" in os.environ: flags += ["-g3", "-ggdb", "-O0", "-DDTDEBUG", "-fsanitize=address", "-fno-omit-frame-pointer", "-fsanitize-address-use-after-scope", "-shared-libasan"] elif "DTCOVERAGE" in os.environ: flags += ["-g2", "--coverage", "-O0"] flags += ["-DDTTEST", "-DDTDEBUG"] else: # Optimize at best level, but still include some debug information flags += ["-g2", "-O3"] if "CI_EXTRA_COMPILE_ARGS" in os.environ: flags += [os.environ["CI_EXTRA_COMPILE_ARGS"]] if iswindows(): flags += ["/W4"] elif is_clang(): # Ignored warnings: # -Wc++98-compat-pedantic: # -Wc99-extensions: since we're targeting C++11, there is no need # to worry about compatibility with earlier C++ versions. # -Wfloat-equal: this warning is just plain wrong... # Comparing x == 0 or x == 1 is always safe. # -Wswitch-enum: generates spurious warnings about missing # cases even if `default` clause is present. -Wswitch # does not suffer from this drawback. # -Wweak-template-vtables: this waning's purpose is unclear, and # it is also unclear how to prevent it... # -Wglobal-constructors, -Wexit-time-destructors: having static # global objects is not only legal, but also unavoidable since # this is the only kind of object that can be passed to a # template... flags += [ "-Weverything", "-Wno-c++98-compat-pedantic", "-Wno-c99-extensions", "-Wno-exit-time-destructors", "-Wno-float-equal", "-Wno-global-constructors", "-Wno-switch-enum", "-Wno-weak-template-vtables", ] elif is_gcc(): # Ignored warnings: # -Wunused-value: generates spurious warnings for OMP code. # -Wunknown-pragmas: do not warn about clang-specific macros, # ignoring them is just fine... flags += [ "-Wall", "-Wno-unused-value", "-Wno-unknown-pragmas" ] for d in get_compile_includes(): flags += ["-I" + d] if d != "c": flags += ["-isystem", d] i = 0 while i < len(flags): flag = flags[i] if i + 1 < len(flags) and not flags[i + 1].startswith("-"): flag += " " + flags[i + 1] i += 1 log.info(flag) i += 1 return flags def get_default_link_flags(): # No need for TaskContext here, since this is executed in non-verbose # mode only. 
flags = sysconfig.get_config_var("LDSHARED") # remove the name of the linker program flags = re.sub(r"^\w+[\w.\-]+\s+", "", flags) # remove -arch XXX flags, and add "-m64" to force 64-bit only builds flags = re.sub(r"-arch \w+\s*", "", flags) + " -m64" return flags @memoize() def get_extra_link_args(): flags = [] with TaskContext("Determine the extra linker flags") as log: flags += ["-Wl,-rpath,%s" % get_rpath()] if islinux() and is_clang(): flags += ["-lc++"] if islinux(): # On linux we need to pass -shared flag to clang linker which # is not used for some reason flags += ["-shared"] if is_gcc(): flags += ["-lstdc++", "-lm"] if "DTASAN" in os.environ: flags += ["-fsanitize=address", "-shared-libasan"] if "DTCOVERAGE" in os.environ: flags += ["--coverage", "-O0"] libs = sorted(set(os.path.dirname(lib) for lib in find_linked_dynamic_libraries())) for lib in libs: flags += ["-L%s" % lib] for flag in flags: log.info(flag) return flags def required_link_libraries(): # GCC on Ubuntu18.04 links to the following libraries (`ldd`): # linux-vdso.so.1 # /lib64/ld-linux-x86-64.so.2 # libstdc++.so.6 => /usr/lib/x86_64-linux-gnu/libstdc++.so.6 # libgcc_s.so.1 => /lib/x86_64-linux-gnu/libgcc_s.so.1 # libpthread.so.0 => /lib/x86_64-linux-gnu/libpthread.so.0 # libc.so.6 => /lib/x86_64-linux-gnu/libc.so.6 # libm.so.6 => /lib/x86_64-linux-gnu/libm.so.6 # libdl.so.2 => /lib/x86_64-linux-gnu/libdl.so.2 # These are all standard system libraries, so there is no need to bundle # them. if is_gcc(): return [] # Clang on MacOS links to the following libraries (`otool -L`): # @rpath/libc++.1.dylib # /usr/lib/libSystem.B.dylib # In addition, `libc++abi.1.dylib` (or `libc++abi.dylib`) is referenced # from `libc++.1.dylib`. # The @rpath- libraries have to be bundled into the datatable package. # if is_clang(): if ismacos(): return ["libc++.1.dylib", "libc++abi.dylib"] if islinux(): return ["libc++.so.1", "libc++abi.so.1"] return [] @memoize() #------------------------------------------------------------------------------- # Augmented compiler #------------------------------------------------------------------------------- def monkey_patch_compiler(): from distutils.ccompiler import new_compiler from subprocess import check_output as run cc = new_compiler().__class__ class NewCC(cc): """ Extension of the standard compiler from distutils. This class adds a post-link stage where the dependencies of the dynamic library are verified, and fixed if necessary. 
""" def get_load_dylib_entries(self, executable, log): otool_result = run(["otool", "-L", executable]).decode() log.info("Checking dependencies of %s" % os.path.basename(executable)) log.info(" $ otool -L %s" % executable) execname = os.path.basename(executable) dylibs = [] for libinfo in otool_result.split('\n')[1:]: lib = libinfo.strip().split(' ', 1)[0] if lib and os.path.basename(lib) != execname: dylibs.append(lib) log.info(" %s" % lib) return dylibs def find_recursive_dependencies(self, out, executable, log): dylibs = self.get_load_dylib_entries(executable, log) for lib in dylibs: if lib in out: continue out.append(lib) if lib.startswith("/usr/lib/"): continue if lib.startswith("@rpath/"): resolved_name = os.path.join("datatable", "lib", lib[len("@rpath/"):]) if not os.path.isfile(resolved_name): raise SystemExit("Dependency %s does not exist" % resolved_name) else: resolved_name = lib if resolved_name == executable: continue self.find_recursive_dependencies(out, resolved_name, log) def relocate_dependencies(self, executable, log): dylibs = self.get_load_dylib_entries(executable, log) for lib in dylibs: if lib.startswith("/usr/lib/") or lib.startswith("@rpath/"): continue libname = os.path.basename(lib) newname = "@rpath/" + libname log.info("Relocating dependency %s" % os.path.basename(libname)) log.info(" $ install_name_tool -change %s %s %s" % (lib, newname, executable)) run(["install_name_tool", "-change", lib, newname, executable]) destname = os.path.join("datatable", "lib", libname) if not os.path.exists(destname): log.info("Copying %s -> %s" % (lib, destname)) shutil.copyfile(lib, destname) def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts): if cc.__name__ == "UnixCCompiler": compiler_so = self.fixup_compiler(self.compiler_so, cc_args + extra_postargs) try: self.spawn(compiler_so + cc_args + [src, '-o', obj] + extra_postargs) except DistutilsExecError as msg: raise CompileError(msg) else: cc._compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts) def fixup_compiler(self, compiler_so, cc_args): if ismacos(): import _osx_support compiler_so = _osx_support.compiler_fixup(compiler_so, cc_args) for token in ["-Wstrict-prototypes", "-O2"]: if token in compiler_so: del compiler_so[compiler_so.index(token)] return compiler_so def link(self, *args, **kwargs): super().link(*args, **kwargs) outname = args[2] if len(args) >= 3 else kwargs["output_filename"] outdir = args[3] if len(args) >= 4 else kwargs["output_dir"] if outdir is not None: outname = os.path.join(outdir, outname) if ismacos(): self.postlink(outname) def postlink(self, outname): print() with TaskContext("Post-link processing") as log: log.info("Output file: %s" % outname) self.relocate_dependencies(outname, log) dylibs = [] self.find_recursive_dependencies(dylibs, outname, log) log.info("Resolved list of dependencies:") for lib in sorted(dylibs): log.info(" " + lib) vars(sys.modules[cc.__module__])[cc.__name__] = NewCC #------------------------------------------------------------------------------- # Run as a script #------------------------------------------------------------------------------- def usage(): print("Usage: \n" " python setup_utils.py CMD\n\n" "where CMD can be one of:\n" " ccflags\n" " compiler\n" " ext_suffix\n" " ldflags\n" " version\n" ) if __name__ == "__main__": verbose = False if len(sys.argv) == 2: cmd = sys.argv[1] if cmd == "--help": usage() elif cmd == "ccflags": os.environ["DTDEBUG"] = "1" # Force the debug flag flags = [get_default_compile_flags()] + 
get_extra_compile_flags() print(" ".join(flags)) elif cmd == "compiler": print(get_compiler()) elif cmd == "ext_suffix": print(sysconfig.get_config_var("EXT_SUFFIX")) elif cmd == "ldflags": os.environ["DTDEBUG"] = "1" # Force the debug flag flags = [get_default_link_flags()] + get_extra_link_args() print(" ".join(flags)) elif cmd == "version": make_git_version_file(True) print(get_datatable_version()) else: print("Unknown command: %s" % cmd) sys.exit(1) else: usage() sys.exit(1)
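The enclosing scope above doubles as a small CLI (see its `usage()` function, which accepts ccflags, compiler, ext_suffix, ldflags, and version). A hypothetical way to query it from another build script — the exact invocation depends on where `ci/setup_utils.py` sits relative to the working directory:

import subprocess

# Prints the chosen C++ compiler, e.g. a clang++ located via the LLVM* env vars
out = subprocess.check_output(["python", "ci/setup_utils.py", "compiler"])
print(out.decode().strip())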
h2oai/datatable
datatable/utils/terminal.py
Terminal.wait_for_keypresses
python
def wait_for_keypresses(self, refresh_rate=1): if not self._enable_keyboard: return with self._blessed_term.cbreak(): while True: yield self._blessed_term.inkey(timeout=refresh_rate)
Listen to the user's keystrokes and return them to the caller one at a time. The produced values are instances of the blessed.keyboard.Keystroke class. If the user did not press anything within the last `refresh_rate` seconds, the generator will yield `None`, allowing the caller to perform any updates necessary. This generator is infinite, and thus needs to be stopped explicitly.
train
https://github.com/h2oai/datatable/blob/dd5fba74d2ca85b66f82ae3c1e0b6ea2fd792564/datatable/utils/terminal.py#L144-L159
null
class Terminal: def __init__(self): self.jupyter = False self.ipython = False if sys.__stdin__ and sys.__stdout__: import blessed import _locale # Save current locale settings try: _lls = [] for i in range(100): ll = _locale.setlocale(i) _lls.append(ll) except _locale.Error: pass self._blessed_term = blessed.Terminal() # Restore previous locale settings for i, ll in enumerate(_lls): _locale.setlocale(i, ll) self._allow_unicode = False self._enable_keyboard = True self._enable_colors = True self._enable_terminal_codes = True self._encoding = self._blessed_term._encoding enc = self._encoding.upper() if enc == "UTF8" or enc == "UTF-8": self._allow_unicode = True self.is_a_tty = sys.__stdin__.isatty() and sys.__stdout__.isatty() self._width = 0 self._height = 0 self._check_ipython() else: self._enable_keyboard = False self._enable_colors = False self._enable_terminal_codes = False self._encoding = "UTF8" self._allow_unicode = True self.is_a_tty = False self._width = 80 self._height = 25 @property def width(self): return self._width or self._blessed_term.width @property def height(self): return self._height or self._blessed_term.height @property def is_utf8(self): return self._allow_unicode def length(self, x): return self._blessed_term.length(x) def clear_line(self, end=""): if self._enable_terminal_codes: print("\x1B[1G\x1B[K", end=end) def rewrite_lines(self, lines, nold, end=""): if self._enable_terminal_codes and nold: print("\x1B[1G\x1B[%dA" % nold + "\x1B[K\n".join(lines) + "\x1B[K", end=end) else: print("\n".join(lines), end=end) def _check_ipython(self): # When running inside a Jupyter notebook, IPython and ipykernel will # already be preloaded (in sys.modules). We don't want to try to # import them, because it adds unnecessary startup delays. if "IPython" in sys.modules: ipy = sys.modules["IPython"].get_ipython() ipy_type = str(type(ipy)) if "ZMQInteractiveShell" in ipy_type: self._encoding = "UTF8" self.jupyter = True elif "TerminalInteractiveShell" in ipy_type: self.ipython = True def using_colors(self): return self._enable_colors def use_colors(self, f): self._enable_colors = f def use_keyboard(self, f): self._enable_keyboard = f def use_terminal_codes(self, f): self._enable_terminal_codes = f def set_allow_unicode(self, v): self._allow_unicode = bool(v) def color(self, color, text): if self._enable_colors: return _default_palette[color] + text + "\x1B[m" else: return text def initialize_options(self, options): options.register_option( "display.use_colors", True, xtype=bool, onchange=self.use_colors, doc="Whether to use colors when printing various messages into\n" "the console. Turn this off if your terminal is unable to\n" "display ANSI escape sequences, or if the colors make output\n" "not legible.") options.register_option( "display.allow_unicode", self.is_utf8, xtype=bool, onchange=self.set_allow_unicode, doc="If True, datatable will allow unicode characters (encoded as\n" "UTF-8) to be printed into the output.\n" "If False, then unicode characters will either be avoided, or\n" "hex-escaped as necessary.")
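A hedged sketch of the polling loop the docstring describes, assuming `blessed` is installed and stdin/stdout are attached to a real terminal:

term = Terminal()                      # the class shown above
for key in term.wait_for_keypresses(refresh_rate=0.5):
    if not key:                        # timed out: no keypress within 0.5s
        continue                       # a good place to redraw the screen
    if key == "q":                     # Keystroke subclasses str, so == works
        break                          # the generator is infinite; stop explicitly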
h2oai/datatable
datatable/utils/misc.py
normalize_slice
python
def normalize_slice(e, n): if n == 0: return (0, 0, 1) step = e.step if step is None: step = 1 if step == 0: start = e.start count = e.stop if isinstance(start, int) and isinstance(count, int) and count >= 0: if start < 0: start += n if start < 0: return (0, 0, 0) return (start, count, 0) else: raise ValueError("Invalid slice %r" % e) assert isinstance(step, int) and step != 0 if e.start is None: start = 0 if step > 0 else n - 1 else: start = e.start if start < 0: start += n if (start < 0 and step < 0) or (start >= n and step > 0): return (0, 0, 0) start = min(max(0, start), n - 1) assert isinstance(start, int) and 0 <= start < n, \ "Invalid start: %r" % start if e.stop is None: if step > 0: count = (n - 1 - start) // step + 1 else: count = (start // -step) + 1 else: stop = e.stop if stop < 0: stop += n if step > 0: if stop > start: count = (min(n, stop) - 1 - start) // step + 1 else: count = 0 else: if stop < start: count = (start - max(stop, -1) - 1) // -step + 1 else: count = 0 assert isinstance(count, int) and count >= 0 assert count == 0 or 0 <= start + step * (count - 1) < n, \ "Wrong tuple: (%d, %d, %d)" % (start, count, step) return (start, count, step)
Return the slice tuple normalized for an ``n``-element object. :param e: a slice object representing a selector :param n: number of elements in a sequence to which ``e`` is applied :returns: tuple ``(start, count, step)`` derived from ``e``.
train
https://github.com/h2oai/datatable/blob/dd5fba74d2ca85b66f82ae3c1e0b6ea2fd792564/datatable/utils/misc.py#L65-L127
null
#!/usr/bin/env python3 # © H2O.ai 2018; -*- encoding: utf-8 -*- # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. #------------------------------------------------------------------------------- import importlib from .typechecks import TImportError __all__ = ("clamp", "normalize_slice", "normalize_range", "plural_form", "load_module", "humanize_bytes") def plural_form(n, singular=None, plural=None): nabs = abs(n) if nabs == 1: if singular: return "%d %s" % (n, singular) else: return str(n) else: if nabs < 100000: nstr = str(n) else: nstr = "" while nabs: if nabs < 1000: nstr = str(nabs) + nstr break else: nstr = ",%03d%s" % (nabs % 1000, nstr) nabs = nabs // 1000 if n < 0: nstr = "-" + nstr if not singular: return nstr if not plural: last_letter = singular[-1] prev_letter = singular[-2] if len(singular) >= 2 else "" if last_letter == "s" or last_letter == "x": plural = singular + "es" elif last_letter == "y": plural = singular[:-1] + "ies" elif last_letter == "f" and prev_letter != "f": plural = singular[:-1] + "ves" elif last_letter == "e" and prev_letter == "f": plural = singular[:-2] + "ves" elif last_letter == "h" and prev_letter in "sc": # Note: words ending in 'ch' which is pronounced as /k/ # are exception to this rule: monarch -> monarchs plural = singular + "es" else: plural = singular + "s" return "%s %s" % (nstr, plural) def clamp(x, lb, ub): """Return the value of ``x`` clamped to the range ``[lb, ub]``.""" return min(max(x, lb), ub) def normalize_range(e, n): """ Return the range tuple normalized for an ``n``-element object. The semantics of a range is slightly different than that of a slice. In particular, a range is similar to a list in meaning (and on Py2 it was eagerly expanded into a list). Thus we do not allow the range to generate indices that would be invalid for an ``n``-array. Furthermore, we restrict the range to produce only positive or only negative indices. For example, ``range(2, -2, -1)`` expands into ``[2, 1, 0, -1]``, and it is confusing to treat the last "-1" as the last element in the list. :param e: a range object representing a selector :param n: number of elements in a sequence to which ``e`` is applied :returns: tuple ``(start, count, step)`` derived from ``e``, or None if the range is invalid. """ if e.step > 0: count = max(0, (e.stop - e.start - 1) // e.step + 1) else: count = max(0, (e.start - e.stop - 1) // -e.step + 1) if count == 0: return (0, 0, e.step) start = e.start finish = e.start + (count - 1) * e.step if start >= 0: if start >= n or finish < 0 or finish >= n: return None else: start += n finish += n if start < 0 or start >= n or finish < 0 or finish >= n: return None assert count >= 0 return (start, count, e.step) def load_module(module): """ Import and return the requested module. """ try: m = importlib.import_module(module) return m except ModuleNotFoundError: # pragma: no cover raise TImportError("Module `%s` is not installed. It is required for " "running this function." % module) def humanize_bytes(size): """ Convert given number of bytes into a human readable representation, i.e. add prefix such as KB, MB, GB, etc. The `size` argument must be a non-negative integer. 
:param size: integer representing byte size of something :return: string representation of the size, in human-readable form """ if size == 0: return "0" if size is None: return "" assert size >= 0, "`size` cannot be negative, got %d" % size suffixes = "TGMK" maxl = len(suffixes) for i in range(maxl + 1): shift = (maxl - i) * 10 if size >> shift == 0: continue ndigits = 0 for nd in [3, 2, 1]: if size >> (shift + 12 - nd * 3) == 0: ndigits = nd break if ndigits == 0 or size == (size >> shift) << shift: rounded_val = str(size >> shift) else: rounded_val = "%.*f" % (ndigits, size / (1 << shift)) return "%s%sB" % (rounded_val, suffixes[i] if i < maxl else "")
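A few hand-checked calls illustrating the normalized ``(start, count, step)`` tuples that `normalize_slice` produces:

from datatable.utils.misc import normalize_slice

assert normalize_slice(slice(2, 8, 2), 10) == (2, 3, 2)        # picks 2, 4, 6
assert normalize_slice(slice(None, None, -1), 10) == (9, 10, -1)
assert normalize_slice(slice(15, 20), 10) == (0, 0, 0)         # out of range -> empty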
h2oai/datatable
datatable/utils/misc.py
normalize_range
python
def normalize_range(e, n): if e.step > 0: count = max(0, (e.stop - e.start - 1) // e.step + 1) else: count = max(0, (e.start - e.stop - 1) // -e.step + 1) if count == 0: return (0, 0, e.step) start = e.start finish = e.start + (count - 1) * e.step if start >= 0: if start >= n or finish < 0 or finish >= n: return None else: start += n finish += n if start < 0 or start >= n or finish < 0 or finish >= n: return None assert count >= 0 return (start, count, e.step)
Return the range tuple normalized for an ``n``-element object. The semantics of a range are slightly different from those of a slice. In particular, a range is similar to a list in meaning (and on Py2 it was eagerly expanded into a list). Thus we do not allow the range to generate indices that would be invalid for an ``n``-array. Furthermore, we restrict the range to produce only positive or only negative indices. For example, ``range(2, -2, -1)`` expands into ``[2, 1, 0, -1]``, and it is confusing to treat the last "-1" as the last element in the list. :param e: a range object representing a selector :param n: number of elements in a sequence to which ``e`` is applied :returns: tuple ``(start, count, step)`` derived from ``e``, or None if the range is invalid.
train
https://github.com/h2oai/datatable/blob/dd5fba74d2ca85b66f82ae3c1e0b6ea2fd792564/datatable/utils/misc.py#L130-L166
null
#!/usr/bin/env python3 # © H2O.ai 2018; -*- encoding: utf-8 -*- # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. #------------------------------------------------------------------------------- import importlib from .typechecks import TImportError __all__ = ("clamp", "normalize_slice", "normalize_range", "plural_form", "load_module", "humanize_bytes") def plural_form(n, singular=None, plural=None): nabs = abs(n) if nabs == 1: if singular: return "%d %s" % (n, singular) else: return str(n) else: if nabs < 100000: nstr = str(n) else: nstr = "" while nabs: if nabs < 1000: nstr = str(nabs) + nstr break else: nstr = ",%03d%s" % (nabs % 1000, nstr) nabs = nabs // 1000 if n < 0: nstr = "-" + nstr if not singular: return nstr if not plural: last_letter = singular[-1] prev_letter = singular[-2] if len(singular) >= 2 else "" if last_letter == "s" or last_letter == "x": plural = singular + "es" elif last_letter == "y": plural = singular[:-1] + "ies" elif last_letter == "f" and prev_letter != "f": plural = singular[:-1] + "ves" elif last_letter == "e" and prev_letter == "f": plural = singular[:-2] + "ves" elif last_letter == "h" and prev_letter in "sc": # Note: words ending in 'ch' which is pronounced as /k/ # are exception to this rule: monarch -> monarchs plural = singular + "es" else: plural = singular + "s" return "%s %s" % (nstr, plural) def clamp(x, lb, ub): """Return the value of ``x`` clamped to the range ``[lb, ub]``.""" return min(max(x, lb), ub) def normalize_slice(e, n): """ Return the slice tuple normalized for an ``n``-element object. :param e: a slice object representing a selector :param n: number of elements in a sequence to which ``e`` is applied :returns: tuple ``(start, count, step)`` derived from ``e``. """ if n == 0: return (0, 0, 1) step = e.step if step is None: step = 1 if step == 0: start = e.start count = e.stop if isinstance(start, int) and isinstance(count, int) and count >= 0: if start < 0: start += n if start < 0: return (0, 0, 0) return (start, count, 0) else: raise ValueError("Invalid slice %r" % e) assert isinstance(step, int) and step != 0 if e.start is None: start = 0 if step > 0 else n - 1 else: start = e.start if start < 0: start += n if (start < 0 and step < 0) or (start >= n and step > 0): return (0, 0, 0) start = min(max(0, start), n - 1) assert isinstance(start, int) and 0 <= start < n, \ "Invalid start: %r" % start if e.stop is None: if step > 0: count = (n - 1 - start) // step + 1 else: count = (start // -step) + 1 else: stop = e.stop if stop < 0: stop += n if step > 0: if stop > start: count = (min(n, stop) - 1 - start) // step + 1 else: count = 0 else: if stop < start: count = (start - max(stop, -1) - 1) // -step + 1 else: count = 0 assert isinstance(count, int) and count >= 0 assert count == 0 or 0 <= start + step * (count - 1) < n, \ "Wrong tuple: (%d, %d, %d)" % (start, count, step) return (start, count, step) def load_module(module): """ Import and return the requested module. """ try: m = importlib.import_module(module) return m except ModuleNotFoundError: # pragma: no cover raise TImportError("Module `%s` is not installed. It is required for " "running this function." % module) def humanize_bytes(size): """ Convert given number of bytes into a human readable representation, i.e. add prefix such as KB, MB, GB, etc. The `size` argument must be a non-negative integer. 
:param size: integer representing byte size of something :return: string representation of the size, in human-readable form """ if size == 0: return "0" if size is None: return "" assert size >= 0, "`size` cannot be negative, got %d" % size suffixes = "TGMK" maxl = len(suffixes) for i in range(maxl + 1): shift = (maxl - i) * 10 if size >> shift == 0: continue ndigits = 0 for nd in [3, 2, 1]: if size >> (shift + 12 - nd * 3) == 0: ndigits = nd break if ndigits == 0 or size == (size >> shift) << shift: rounded_val = str(size >> shift) else: rounded_val = "%.*f" % (ndigits, size / (1 << shift)) return "%s%sB" % (rounded_val, suffixes[i] if i < maxl else "")
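The same treatment for ranges, hand-traced against the function body above, including the docstring's own mixed-sign example:

from datatable.utils.misc import normalize_range

assert normalize_range(range(2, 8, 2), 10) == (2, 3, 2)
assert normalize_range(range(-3, 0), 10) == (7, 3, 1)    # all-negative indices ok
assert normalize_range(range(2, -2, -1), 10) is None     # mixed signs rejected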
h2oai/datatable
datatable/utils/misc.py
load_module
python
def load_module(module): try: m = importlib.import_module(module) return m except ModuleNotFoundError: # pragma: no cover raise TImportError("Module `%s` is not installed. It is required for " "running this function." % module)
Import and return the requested module.
train
https://github.com/h2oai/datatable/blob/dd5fba74d2ca85b66f82ae3c1e0b6ea2fd792564/datatable/utils/misc.py#L170-L179
null
#!/usr/bin/env python3 # © H2O.ai 2018; -*- encoding: utf-8 -*- # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. #------------------------------------------------------------------------------- import importlib from .typechecks import TImportError __all__ = ("clamp", "normalize_slice", "normalize_range", "plural_form", "load_module", "humanize_bytes") def plural_form(n, singular=None, plural=None): nabs = abs(n) if nabs == 1: if singular: return "%d %s" % (n, singular) else: return str(n) else: if nabs < 100000: nstr = str(n) else: nstr = "" while nabs: if nabs < 1000: nstr = str(nabs) + nstr break else: nstr = ",%03d%s" % (nabs % 1000, nstr) nabs = nabs // 1000 if n < 0: nstr = "-" + nstr if not singular: return nstr if not plural: last_letter = singular[-1] prev_letter = singular[-2] if len(singular) >= 2 else "" if last_letter == "s" or last_letter == "x": plural = singular + "es" elif last_letter == "y": plural = singular[:-1] + "ies" elif last_letter == "f" and prev_letter != "f": plural = singular[:-1] + "ves" elif last_letter == "e" and prev_letter == "f": plural = singular[:-2] + "ves" elif last_letter == "h" and prev_letter in "sc": # Note: words ending in 'ch' which is pronounced as /k/ # are exception to this rule: monarch -> monarchs plural = singular + "es" else: plural = singular + "s" return "%s %s" % (nstr, plural) def clamp(x, lb, ub): """Return the value of ``x`` clamped to the range ``[lb, ub]``.""" return min(max(x, lb), ub) def normalize_slice(e, n): """ Return the slice tuple normalized for an ``n``-element object. :param e: a slice object representing a selector :param n: number of elements in a sequence to which ``e`` is applied :returns: tuple ``(start, count, step)`` derived from ``e``. """ if n == 0: return (0, 0, 1) step = e.step if step is None: step = 1 if step == 0: start = e.start count = e.stop if isinstance(start, int) and isinstance(count, int) and count >= 0: if start < 0: start += n if start < 0: return (0, 0, 0) return (start, count, 0) else: raise ValueError("Invalid slice %r" % e) assert isinstance(step, int) and step != 0 if e.start is None: start = 0 if step > 0 else n - 1 else: start = e.start if start < 0: start += n if (start < 0 and step < 0) or (start >= n and step > 0): return (0, 0, 0) start = min(max(0, start), n - 1) assert isinstance(start, int) and 0 <= start < n, \ "Invalid start: %r" % start if e.stop is None: if step > 0: count = (n - 1 - start) // step + 1 else: count = (start // -step) + 1 else: stop = e.stop if stop < 0: stop += n if step > 0: if stop > start: count = (min(n, stop) - 1 - start) // step + 1 else: count = 0 else: if stop < start: count = (start - max(stop, -1) - 1) // -step + 1 else: count = 0 assert isinstance(count, int) and count >= 0 assert count == 0 or 0 <= start + step * (count - 1) < n, \ "Wrong tuple: (%d, %d, %d)" % (start, count, step) return (start, count, step) def normalize_range(e, n): """ Return the range tuple normalized for an ``n``-element object. The semantics of a range is slightly different than that of a slice. In particular, a range is similar to a list in meaning (and on Py2 it was eagerly expanded into a list). Thus we do not allow the range to generate indices that would be invalid for an ``n``-array. Furthermore, we restrict the range to produce only positive or only negative indices. 
For example, ``range(2, -2, -1)`` expands into ``[2, 1, 0, -1]``, and it is confusing to treat the last "-1" as the last element in the list. :param e: a range object representing a selector :param n: number of elements in a sequence to which ``e`` is applied :returns: tuple ``(start, count, step)`` derived from ``e``, or None if the range is invalid. """ if e.step > 0: count = max(0, (e.stop - e.start - 1) // e.step + 1) else: count = max(0, (e.start - e.stop - 1) // -e.step + 1) if count == 0: return (0, 0, e.step) start = e.start finish = e.start + (count - 1) * e.step if start >= 0: if start >= n or finish < 0 or finish >= n: return None else: start += n finish += n if start < 0 or start >= n or finish < 0 or finish >= n: return None assert count >= 0 return (start, count, e.step) def humanize_bytes(size): """ Convert given number of bytes into a human readable representation, i.e. add prefix such as KB, MB, GB, etc. The `size` argument must be a non-negative integer. :param size: integer representing byte size of something :return: string representation of the size, in human-readable form """ if size == 0: return "0" if size is None: return "" assert size >= 0, "`size` cannot be negative, got %d" % size suffixes = "TGMK" maxl = len(suffixes) for i in range(maxl + 1): shift = (maxl - i) * 10 if size >> shift == 0: continue ndigits = 0 for nd in [3, 2, 1]: if size >> (shift + 12 - nd * 3) == 0: ndigits = nd break if ndigits == 0 or size == (size >> shift) << shift: rounded_val = str(size >> shift) else: rounded_val = "%.*f" % (ndigits, size / (1 << shift)) return "%s%sB" % (rounded_val, suffixes[i] if i < maxl else "")
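A small sketch: the helper behaves like `importlib.import_module`, but converts a missing dependency into datatable's own `TImportError` with a friendlier message.

from datatable.utils.misc import load_module

re_mod = load_module("re")            # returns the module object
assert re_mod.match(r"\d+", "42")
# load_module("no_such_module")       # would raise TImportError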
h2oai/datatable
datatable/utils/misc.py
humanize_bytes
python
def humanize_bytes(size): if size == 0: return "0" if size is None: return "" assert size >= 0, "`size` cannot be negative, got %d" % size suffixes = "TGMK" maxl = len(suffixes) for i in range(maxl + 1): shift = (maxl - i) * 10 if size >> shift == 0: continue ndigits = 0 for nd in [3, 2, 1]: if size >> (shift + 12 - nd * 3) == 0: ndigits = nd break if ndigits == 0 or size == (size >> shift) << shift: rounded_val = str(size >> shift) else: rounded_val = "%.*f" % (ndigits, size / (1 << shift)) return "%s%sB" % (rounded_val, suffixes[i] if i < maxl else "")
Convert the given number of bytes into a human-readable representation, i.e. add a prefix such as KB, MB, GB, etc. The `size` argument must be a non-negative integer. :param size: integer representing the byte size of something :return: string representation of the size, in human-readable form
train
https://github.com/h2oai/datatable/blob/dd5fba74d2ca85b66f82ae3c1e0b6ea2fd792564/datatable/utils/misc.py#L182-L208
null
#!/usr/bin/env python3 # © H2O.ai 2018; -*- encoding: utf-8 -*- # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. #------------------------------------------------------------------------------- import importlib from .typechecks import TImportError __all__ = ("clamp", "normalize_slice", "normalize_range", "plural_form", "load_module", "humanize_bytes") def plural_form(n, singular=None, plural=None): nabs = abs(n) if nabs == 1: if singular: return "%d %s" % (n, singular) else: return str(n) else: if nabs < 100000: nstr = str(n) else: nstr = "" while nabs: if nabs < 1000: nstr = str(nabs) + nstr break else: nstr = ",%03d%s" % (nabs % 1000, nstr) nabs = nabs // 1000 if n < 0: nstr = "-" + nstr if not singular: return nstr if not plural: last_letter = singular[-1] prev_letter = singular[-2] if len(singular) >= 2 else "" if last_letter == "s" or last_letter == "x": plural = singular + "es" elif last_letter == "y": plural = singular[:-1] + "ies" elif last_letter == "f" and prev_letter != "f": plural = singular[:-1] + "ves" elif last_letter == "e" and prev_letter == "f": plural = singular[:-2] + "ves" elif last_letter == "h" and prev_letter in "sc": # Note: words ending in 'ch' which is pronounced as /k/ # are exception to this rule: monarch -> monarchs plural = singular + "es" else: plural = singular + "s" return "%s %s" % (nstr, plural) def clamp(x, lb, ub): """Return the value of ``x`` clamped to the range ``[lb, ub]``.""" return min(max(x, lb), ub) def normalize_slice(e, n): """ Return the slice tuple normalized for an ``n``-element object. :param e: a slice object representing a selector :param n: number of elements in a sequence to which ``e`` is applied :returns: tuple ``(start, count, step)`` derived from ``e``. """ if n == 0: return (0, 0, 1) step = e.step if step is None: step = 1 if step == 0: start = e.start count = e.stop if isinstance(start, int) and isinstance(count, int) and count >= 0: if start < 0: start += n if start < 0: return (0, 0, 0) return (start, count, 0) else: raise ValueError("Invalid slice %r" % e) assert isinstance(step, int) and step != 0 if e.start is None: start = 0 if step > 0 else n - 1 else: start = e.start if start < 0: start += n if (start < 0 and step < 0) or (start >= n and step > 0): return (0, 0, 0) start = min(max(0, start), n - 1) assert isinstance(start, int) and 0 <= start < n, \ "Invalid start: %r" % start if e.stop is None: if step > 0: count = (n - 1 - start) // step + 1 else: count = (start // -step) + 1 else: stop = e.stop if stop < 0: stop += n if step > 0: if stop > start: count = (min(n, stop) - 1 - start) // step + 1 else: count = 0 else: if stop < start: count = (start - max(stop, -1) - 1) // -step + 1 else: count = 0 assert isinstance(count, int) and count >= 0 assert count == 0 or 0 <= start + step * (count - 1) < n, \ "Wrong tuple: (%d, %d, %d)" % (start, count, step) return (start, count, step) def normalize_range(e, n): """ Return the range tuple normalized for an ``n``-element object. The semantics of a range is slightly different than that of a slice. In particular, a range is similar to a list in meaning (and on Py2 it was eagerly expanded into a list). Thus we do not allow the range to generate indices that would be invalid for an ``n``-array. Furthermore, we restrict the range to produce only positive or only negative indices. 
For example, ``range(2, -2, -1)`` expands into ``[2, 1, 0, -1]``, and it is confusing to treat the last "-1" as the last element in the list. :param e: a range object representing a selector :param n: number of elements in a sequence to which ``e`` is applied :returns: tuple ``(start, count, step)`` derived from ``e``, or None if the range is invalid. """ if e.step > 0: count = max(0, (e.stop - e.start - 1) // e.step + 1) else: count = max(0, (e.start - e.stop - 1) // -e.step + 1) if count == 0: return (0, 0, e.step) start = e.start finish = e.start + (count - 1) * e.step if start >= 0: if start >= n or finish < 0 or finish >= n: return None else: start += n finish += n if start < 0 or start >= n or finish < 0 or finish >= n: return None assert count >= 0 return (start, count, e.step) def load_module(module): """ Import and return the requested module. """ try: m = importlib.import_module(module) return m except ModuleNotFoundError: # pragma: no cover raise TImportError("Module `%s` is not installed. It is required for " "running this function." % module)
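Before the next record, a hedged sanity check of `normalize_slice` from the misc.py scope above: it assumes that function is in scope (pasted or imported), and the `expand` helper is hypothetical, written only for the comparison.

# Assumes normalize_slice() from datatable/utils/misc.py (shown above) is in
# scope. `expand` is a made-up helper, defined only for this check.
def expand(start, count, step):
    return [start + i * step for i in range(count)]

n = 10
for s in [slice(None), slice(2, 8), slice(8, 2, -1),
          slice(None, None, 3), slice(-3, None)]:
    start, count, step = normalize_slice(s, n)
    assert expand(start, count, step) == list(range(n))[s], s
print("normalize_slice agrees with Python's native slicing")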
h2oai/datatable
datatable/widget.py
DataFrameWidget._fetch_data
python
def _fetch_data(self):
    self._view_col0 = clamp(self._view_col0, 0, self._max_col0)
    self._view_row0 = clamp(self._view_row0, 0, self._max_row0)
    self._view_ncols = clamp(self._view_ncols, 0,
                             self._conn.frame_ncols - self._view_col0)
    self._view_nrows = clamp(self._view_nrows, 0,
                             self._conn.frame_nrows - self._view_row0)
    self._conn.fetch_data(self._view_row0,
                          self._view_row0 + self._view_nrows,
                          self._view_col0,
                          self._view_col0 + self._view_ncols)
Retrieve frame data within the current view window. This method will adjust the view window if it goes out of bounds.
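To make the clamping concrete, here is a minimal, hedged sketch of the same arithmetic with the widget and its connector stripped away; the frame size (100 rows x 50 columns) and the requested 30x30 window at (90, 45) are invented for illustration.

# Both requested coordinates are out of bounds; watch the window shrink.
def clamp(x, lb, ub):
    return min(max(x, lb), ub)

frame_nrows, frame_ncols = 100, 50
max_row0, max_col0 = 70, 49              # analogous to _max_row0 / _max_col0
view_row0 = clamp(90, 0, max_row0)       # -> 70
view_col0 = clamp(45, 0, max_col0)       # -> 45
view_nrows = clamp(30, 0, frame_nrows - view_row0)   # -> 30
view_ncols = clamp(30, 0, frame_ncols - view_col0)   # -> 5
print(view_row0, view_col0, view_nrows, view_ncols)  # 70 45 30 5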
train
https://github.com/h2oai/datatable/blob/dd5fba74d2ca85b66f82ae3c1e0b6ea2fd792564/datatable/widget.py#L323-L339
null
class DataFrameWidget(object): """ Widget for displaying frame's data interactively. This widget will show the data in form of the table, and then (if necessary) enter into the "interactive" mode, responding to user's input in order to move the viewport. Along the Y dimension, we will display at most `VIEW_NROWS_MAX` rows. Less if the terminal doesn't fit that many. However if the terminal cannot fit at least `VIEW_NROWS_MIN` rows then the interactive mode will be disabled. The position of the viewport along Y will be restricted so that the fixed amount of rows is always displayed (for example if frame_nrows=100 and view_nrows=30, then view_row0 cannot exceed 70). Along dimension X, everything is slightly more complicated since all columns may have different width. """ VIEW_NROWS_MIN = 10 VIEW_NROWS_MAX = 30 RIGHT_MARGIN = 2 def __init__(self, obj, interactive=None): if interactive is None: interactive = options.display.interactive if term.jupyter: interactive = False if isinstance(obj, coreFrame): self._conn = DatatableFrameConnector(obj) else: raise TypeError("Unknown object of type %s" % type(obj)) # Coordinates of the data window within the frame self._view_col0 = 0 self._view_row0 = 0 self._view_ncols = 30 self._view_nrows = DataFrameWidget.VIEW_NROWS_MAX # Largest allowed values for ``self._view_(col|row)0`` self._max_row0 = 0 self._max_col0 = max(0, self._conn.frame_ncols - 1) self._adjust_viewport() # Display state self._n_displayed_lines = 0 self._show_types = False self._show_navbar = options.display.interactive_hint and interactive self._interactive = interactive self._colwidths = {} self._term_width = term.width self._term_height = term.height self._jump_string = None def render(self): self._draw() if self._interactive: self._interact() def as_string(self): self._interactive = False self._show_navbar = False colors = term.using_colors() term.use_colors(False) out = self._draw(to_string=True) term.use_colors(colors) return out #--------------------------------------------------------------------------- # Private #--------------------------------------------------------------------------- def _draw(self, to_string=False): self._fetch_data() columns = [] # Process key columns keynames = self._conn.key_names keydata = self._conn.key_data keyltypes = self._conn.key_ltypes keystypes = self._conn.key_stypes if keydata: for i, colname in enumerate(keynames): if self._show_types and keystypes[i]: colname = term.color("cyan", keystypes[i].name) oldwidth = self._colwidths.get(colname, 2) col = _Column(name=colname, ctype=keyltypes[i], data=keydata[i], color="bright_black", minwidth=oldwidth) self._colwidths[colname] = col.width columns.append(col) if keynames[0]: columns[-1].margin = "" columns.append(_Divider()) # Process data columns viewnames = self._conn.view_names viewltypes = self._conn.view_ltypes viewstypes = self._conn.view_stypes viewdata = self._conn.view_data if viewdata: for i, colname in enumerate(viewnames): if self._show_types: colname = term.color("cyan", viewstypes[i].name) oldwidth = self._colwidths.get(i + self._view_col0, 2) col = _Column(name=colname, ctype=viewltypes[i], data=viewdata[i], minwidth=oldwidth) if self._view_ncols < self._conn.frame_ncols: col.width = min(col.width, _Column.MAX_WIDTH) self._colwidths[i + self._view_col0] = col.width columns.append(col) columns[-1].margin = "" # Adjust widths of columns total_width = sum(col.width + len(col.margin) for col in columns) extra_space = term.width - total_width - DataFrameWidget.RIGHT_MARGIN if extra_space > 
0: if self._view_col0 + self._view_ncols < self._conn.frame_ncols: self._view_ncols += max(1, extra_space // 8) return self._draw(to_string) elif self._view_col0 > 0: w = self._fetch_column_width(self._view_col0 - 1) if w + 2 <= extra_space: self._view_col0 -= 1 self._view_ncols += 1 self._max_col0 = self._view_col0 return self._draw(to_string) else: if self._max_col0 == self._view_col0: self._max_col0 += 1 available_width = term.width - DataFrameWidget.RIGHT_MARGIN for i, col in enumerate(columns): col.width = min(col.width, available_width) available_width -= col.width + len(col.margin) if available_width <= 0: available_width = 0 col.margin = "" else: self._view_ncols = i + 1 # Generate the elements of the display at_last_row = (self._view_row0 + self._view_nrows == self._conn.frame_nrows) grey = lambda s: term.color("bright_black", s) header = ["".join(col.header for col in columns), grey("".join(col.divider for col in columns))] rows = ["".join(col.value(j) for col in columns) for j in range(self._view_nrows)] srows = plural_form(self._conn.frame_nrows, "row") scols = plural_form(self._conn.frame_ncols, "column") footer = ["" if at_last_row else grey("..."), "[%s x %s]" % (srows, scols), ""] # Display hint about navigation keys if self._show_navbar: remaining_width = term.width if self._jump_string is None: nav_elements = [grey("Press") + " q " + grey("to quit"), " ↑←↓→ " + grey("to move"), " wasd " + grey("to page"), " t " + grey("to toggle types"), " g " + grey("to jump")] for elem in nav_elements: l = term.length(elem) if l > remaining_width: break remaining_width -= l footer[2] += elem else: footer[2] = grey("Go to (row:col): ") + self._jump_string # Render the table if term.ipython: # In IPython, we insert an extra newline in front, because IPython # prints "Out [3]: " in front of the output value, which causes all # column headers to become misaligned. # Likewise, IPython tends to insert an extra newline at the end of # the output, so we remove our own extra newline. 
header.insert(0, "") if not footer[2]: footer.pop() lines = header + rows + footer if to_string: return "\n".join(lines) term.rewrite_lines(lines, self._n_displayed_lines) self._n_displayed_lines = len(lines) - 1 def _interact(self): old_handler = register_onresize(self._onresize) try: for ch in term.wait_for_keypresses(0.5): if not ch: # Signal handler could have invalidated interactive mode # of the widget -- in which case we need to stop rendering if not self._interactive: break else: continue uch = ch.name if ch.is_sequence else ch.upper() if self._jump_string is None: if uch == "Q" or uch == "KEY_ESCAPE": break if uch in DataFrameWidget._MOVES: DataFrameWidget._MOVES[uch](self) else: if uch in {"Q", "KEY_ESCAPE", "KEY_ENTER"}: self._jump_string = None self._draw() elif uch == "KEY_DELETE" or uch == "KEY_BACKSPACE": self._jump_to(self._jump_string[:-1]) elif uch in "0123456789:": self._jump_to(self._jump_string + uch) except KeyboardInterrupt: pass register_onresize(old_handler) # Clear the interactive prompt if self._show_navbar: term.clear_line(end="\n") _MOVES = { "KEY_LEFT": lambda self: self._move_viewport(dx=-1), "KEY_RIGHT": lambda self: self._move_viewport(dx=1), "KEY_UP": lambda self: self._move_viewport(dy=-1), "KEY_DOWN": lambda self: self._move_viewport(dy=1), "KEY_SLEFT": lambda self: self._move_viewport(x=0), "KEY_SRIGHT": lambda self: self._move_viewport(x=self._max_col0), "KEY_SUP": lambda self: self._move_viewport(y=0), "KEY_SDOWN": lambda self: self._move_viewport(y=self._max_row0), "W": lambda self: self._move_viewport(dy=-self._view_nrows), "S": lambda self: self._move_viewport(dy=self._view_nrows), "A": lambda self: self._move_viewport(dx=-self._view_ncols), "D": lambda self: self._move_viewport(dx=max(1, self._view_ncols - 1)), "T": lambda self: self._toggle_types(), "G": lambda self: self._toggle_jump_mode(), } def _fetch_column_width(self, icol): if icol in self._colwidths: return self._colwidths[icol] else: self._conn.fetch_data( self._view_row0, self._view_row0 + self._view_nrows, icol, icol + 1) col = _Column(name=self._conn.view_names[0], ctype=self._conn.view_ltypes[0], data=self._conn.data[0]) w = min(col.width, _Column.MAX_WIDTH) self._colwidths[icol] = w return w def _move_viewport(self, dx=0, dy=0, x=None, y=None, force_draw=False): if x is None: x = self._view_col0 + dx if y is None: y = self._view_row0 + dy ncol0 = clamp(x, 0, self._max_col0) nrow0 = clamp(y, 0, self._max_row0) if ncol0 != self._view_col0 or nrow0 != self._view_row0 or force_draw: self._view_col0 = ncol0 self._view_row0 = nrow0 self._draw() def _toggle_types(self): self._show_types = not self._show_types self._draw() def _toggle_jump_mode(self): assert self._jump_string is None self._jump_string = "" self._draw() def _jump_to(self, newloc): parts = newloc.split(":") if parts[0]: newy = int(parts[0]) - self._view_nrows // 2 else: newy = None if len(parts) >= 2 and parts[1]: newx = int(parts[1]) else: newx = None self._jump_string = ":".join(parts[:2]) self._move_viewport(x=newx, y=newy, force_draw=True) def _adjust_viewport(self): # Adjust Y position new_nrows = min(DataFrameWidget.VIEW_NROWS_MAX, max(term.height - 6, DataFrameWidget.VIEW_NROWS_MIN), self._conn.frame_nrows) self._view_nrows = new_nrows self._max_row0 = self._conn.frame_nrows - new_nrows self._view_row0 = min(self._view_row0, self._max_row0) def _onresize(self, signum, stkfrm): if term.width < self._term_width: self._interactive = False else: self._adjust_viewport() self._term_width = term.width self._term_height = 
term.height self._draw()
h2oai/datatable
ci/make_fast.py
get_files
python
def get_files():
    sources = []
    headers = ["datatable/include/datatable.h"]
    assert os.path.isfile(headers[0])
    for dirpath, _, filenames in os.walk("c"):
        for f in filenames:
            fullname = os.path.join(dirpath, f)
            if f.endswith(".h") or f.endswith(".inc"):
                headers.append(fullname)
            elif f.endswith(".c") or f.endswith(".cc"):
                sources.append(fullname)
    return (sources, headers)
Return the list of all source/header files in the `c/` directory. The files will have pathnames relative to the current folder, for example "c/csv/reader_utils.cc".
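As a hedged illustration of the walk-and-filter logic (not the function as shipped, which hardcodes the `c/` and `datatable/include/` paths), the same scan can be exercised against a throwaway tree:

import os
import tempfile

# Build a throwaway tree; the file names are made up for the demo.
root = tempfile.mkdtemp()
os.makedirs(os.path.join(root, "csv"))
for name in ("column.cc", "column.h", "csv/reader.cc", "csv/reader.h", "notes.txt"):
    open(os.path.join(root, name), "w").close()

sources, headers = [], []
for dirpath, _, filenames in os.walk(root):
    for f in filenames:
        fullname = os.path.join(dirpath, f)
        if f.endswith(".h") or f.endswith(".inc"):
            headers.append(fullname)
        elif f.endswith(".c") or f.endswith(".cc"):
            sources.append(fullname)
print(sorted(os.path.relpath(p, root) for p in sources))
# ['column.cc', 'csv/reader.cc']  (with OS-specific path separators)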
train
https://github.com/h2oai/datatable/blob/dd5fba74d2ca85b66f82ae3c1e0b6ea2fd792564/ci/make_fast.py#L15-L32
null
#!/usr/bin/env python3 # © H2O.ai 2018; -*- encoding: utf-8 -*- # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. #------------------------------------------------------------------------------- import os import re import sys rx_include = re.compile(r'#include\s+"(.*?)"') rx_targeth = re.compile(r'^([/\w]+\.h)\s*:\s*(.*)') def find_includes(filename): """ Find user includes (no system includes) requested from given source file. All .h files will be given relative to the current folder, e.g. ["c/rowindex.h", "c/column.h"]. """ includes = [] with open(filename, "r", encoding="utf-8") as inp: for line in inp: line = line.strip() if not line or line.startswith("//"): continue if line.startswith("#"): mm = re.match(rx_include, line) if mm: includename = os.path.join("c", mm.group(1)) includes.append(includename) return includes def build_headermap(headers): """ Construct dictionary {header_file : set_of_included_files}. This function operates on "real" set of includes, in the sense that it parses each header file to check which files are included from there. """ # TODO: what happens if some headers are circularly dependent? headermap = {} for hfile in headers: headermap[hfile] = None for hfile in headers: assert (hfile.startswith("c/") or hfile.startswith("datatable/include/")) inc = find_includes(hfile) for f in inc: assert f != hfile, "File %s includes itself?" % f assert f.startswith("c/") if f not in headers: raise ValueError("Unknown header \"%s\" included from %s" % (f, hfile)) headermap[hfile] = set(inc) return headermap def build_sourcemap(sources): """ Similar to build_headermap(), but builds a dictionary of includes from the "source" files (i.e. ".c/.cc" files). """ sourcemap = {} for sfile in sources: inc = find_includes(sfile) sourcemap[sfile] = set(inc) return sourcemap def write_header(out): out.write("#" + "-" * 79 + "\n") out.write("""# Copyright 2018 H2O.ai # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. 
""") out.write("#" + "-" * 79 + "\n") out.write("# This file is auto-generated from ci/make_fast.py\n") out.write("#" + "-" * 79 + "\n") def write_headers_to_makefile(headermap, out): out.write("\n\n") out.write("#" + "-" * 79 + "\n") out.write("# Header files\n") out.write("#" + "-" * 79 + "\n\n") for hfile in sorted(headermap.keys()): if hfile.startswith("datatable"): target = "$(BUILDDIR)/" + hfile dependencies = "" else: target = "$(BUILDDIR)/" + hfile[2:] dependencies = " ".join("$(BUILDDIR)/%s" % d[2:] for d in sorted(headermap[hfile])) hdir = os.path.dirname(target) out.write("%s: %s %s | %s\n" % (target, hfile, dependencies, hdir)) out.write("\t@echo • Refreshing %s\n" % hfile) out.write("\t@cp %s $@\n" % hfile) out.write("\n") def write_sources_to_makefile(sourcemap, out): def header_file(d): if d.startswith("c/"): d = d[2:] if d.startswith("../"): d = d[3:] return d out.write("\n\n") out.write("#" + "-" * 79 + "\n") out.write("# Object files\n") out.write("#" + "-" * 79 + "\n\n") for ccfile in sorted(sourcemap.keys()): assert ccfile.endswith(".cc") target = "$(BUILDDIR)/" + ccfile[2:-3] + ".o" odir = os.path.dirname(target) dependencies = " ".join("$(BUILDDIR)/%s" % header_file(d) for d in sorted(sourcemap[ccfile])) out.write("%s: %s %s | %s\n" % (target, ccfile, dependencies, odir)) out.write("\t@echo • Compiling %s\n" % ccfile) out.write("\t@$(CC) -c %s $(CCFLAGS) -o $@\n" % ccfile) out.write("\n") def write_objects_list(sourcemap, out): ml = max(len(c) for c in sourcemap) - 2 out.write("\n\n") out.write("fast_objects = %s\\\n" % (" " * (ml + 1))) for ccfile in sorted(sourcemap.keys()): ofile = ccfile[2:-3] + ".o" out.write("\t$(BUILDDIR)/%s%s\\\n" % (ofile, " " * (ml - len(ofile)))) def write_build_directories(realhdrs, realsrcs, out): def clean_dir_name(inp): if inp.startswith("c/"): inp = inp[2:] return inp out.write("\n\n") out.write("#" + "-" * 79 + "\n") out.write("# Build directories\n") out.write("#" + "-" * 79 + "\n") out.write("\n") inputs = list(realhdrs) + list(realsrcs) alldirs = set(os.path.dirname("$(BUILDDIR)/" + clean_dir_name(inp)) for inp in inputs) for target in alldirs: out.write("%s:\n" % target) out.write("\t@echo • Making directory $@\n") out.write("\t@mkdir -p $@\n") out.write("\n") def get_setup(cmd): import subprocess out = subprocess.check_output(["python", "ci/setup_utils.py", cmd]) out = out.decode() return out.strip().replace("$", "\\$") def write_make_targets(out): out.write("\n\n") out.write("#" + "-" * 79 + "\n") out.write("# Main\n") out.write("#" + "-" * 79 + "\n") out.write(".PHONY: fast main-fast\n") out.write("\n") out.write("fast:\n") out.write("\t$(eval DTDEBUG := 1)\n") out.write("\t$(eval export DTDEBUG)\n") out.write("\t$(eval CC := %s)\n" % get_setup("compiler")) out.write("\t$(eval CCFLAGS := %s)\n" % get_setup("ccflags")) out.write("\t$(eval LDFLAGS := %s)\n" % get_setup("ldflags")) out.write("\t$(eval EXTEXT := %s)\n" % get_setup("ext_suffix")) out.write("\t$(eval export CC CCFLAGS LDFLAGS EXTEXT)\n") out.write("\t@$(MAKE) main-fast\n") out.write("\n") out.write("main-fast: $(BUILDDIR)/_datatable.so\n") out.write("\t@echo • Done.\n") out.write("\n") out.write("$(BUILDDIR)/_datatable.so: $(fast_objects)\n") out.write("\t@echo • Linking object files into _datatable.so\n") out.write("\t@$(CC) $(LDFLAGS) -o $@ $+\n") out.write("\t@echo • Copying _datatable.so into ``datatable/lib/_datatable$(EXTEXT)``\n") out.write("\t@cp $(BUILDDIR)/_datatable.so datatable/lib/_datatable$(EXTEXT)\n") out.write("\n") def main(): sources, headers = 
get_files() realhdrs = build_headermap(headers) realsrcs = build_sourcemap(sources) with open("ci/fast.mk", "wt", encoding="utf-8") as out: write_header(out) write_build_directories(realhdrs, realsrcs, out) write_headers_to_makefile(realhdrs, out) write_sources_to_makefile(realsrcs, out) write_objects_list(realsrcs, out) write_make_targets(out) if __name__ == "__main__": try: main() except ValueError as e: print("\n Error: %s" % e) sys.exit(1)
h2oai/datatable
ci/make_fast.py
find_includes
python
def find_includes(filename):
    includes = []
    with open(filename, "r", encoding="utf-8") as inp:
        for line in inp:
            line = line.strip()
            if not line or line.startswith("//"):
                continue
            if line.startswith("#"):
                mm = re.match(rx_include, line)
                if mm:
                    includename = os.path.join("c", mm.group(1))
                    includes.append(includename)
    return includes
Find user includes (no system includes) requested from a given source file. All .h files will be given relative to the current folder, e.g. ["c/rowindex.h", "c/column.h"].
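A quick runnable check of the scanner, assuming `find_includes` and `rx_include` from the ci/make_fast.py scope shown in this record are importable or pasted alongside; the sample source text is made up.

import tempfile

src = (b'#include <vector>\n'          # system include: no quotes, ignored
       b'#include "rowindex.h"\n'      # user include: kept
       b'// #include "commented.h"\n'  # commented out: skipped
       b'#include "csv/reader.h"\n')
with tempfile.NamedTemporaryFile(mode="wb", suffix=".cc", delete=False) as f:
    f.write(src)
    path = f.name
print(find_includes(path))  # ['c/rowindex.h', 'c/csv/reader.h'] on POSIX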
train
https://github.com/h2oai/datatable/blob/dd5fba74d2ca85b66f82ae3c1e0b6ea2fd792564/ci/make_fast.py#L35-L53
null
#!/usr/bin/env python3 # © H2O.ai 2018; -*- encoding: utf-8 -*- # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. #------------------------------------------------------------------------------- import os import re import sys rx_include = re.compile(r'#include\s+"(.*?)"') rx_targeth = re.compile(r'^([/\w]+\.h)\s*:\s*(.*)') def get_files(): """ Return the list of all source/header files in `c/` directory. The files will have pathnames relative to the current folder, for example "c/csv/reader_utils.cc". """ sources = [] headers = ["datatable/include/datatable.h"] assert os.path.isfile(headers[0]) for dirpath, _, filenames in os.walk("c"): for f in filenames: fullname = os.path.join(dirpath, f) if f.endswith(".h") or f.endswith(".inc"): headers.append(fullname) elif f.endswith(".c") or f.endswith(".cc"): sources.append(fullname) return (sources, headers) def build_headermap(headers): """ Construct dictionary {header_file : set_of_included_files}. This function operates on "real" set of includes, in the sense that it parses each header file to check which files are included from there. """ # TODO: what happens if some headers are circularly dependent? headermap = {} for hfile in headers: headermap[hfile] = None for hfile in headers: assert (hfile.startswith("c/") or hfile.startswith("datatable/include/")) inc = find_includes(hfile) for f in inc: assert f != hfile, "File %s includes itself?" % f assert f.startswith("c/") if f not in headers: raise ValueError("Unknown header \"%s\" included from %s" % (f, hfile)) headermap[hfile] = set(inc) return headermap def build_sourcemap(sources): """ Similar to build_headermap(), but builds a dictionary of includes from the "source" files (i.e. ".c/.cc" files). """ sourcemap = {} for sfile in sources: inc = find_includes(sfile) sourcemap[sfile] = set(inc) return sourcemap def write_header(out): out.write("#" + "-" * 79 + "\n") out.write("""# Copyright 2018 H2O.ai # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. 
""") out.write("#" + "-" * 79 + "\n") out.write("# This file is auto-generated from ci/make_fast.py\n") out.write("#" + "-" * 79 + "\n") def write_headers_to_makefile(headermap, out): out.write("\n\n") out.write("#" + "-" * 79 + "\n") out.write("# Header files\n") out.write("#" + "-" * 79 + "\n\n") for hfile in sorted(headermap.keys()): if hfile.startswith("datatable"): target = "$(BUILDDIR)/" + hfile dependencies = "" else: target = "$(BUILDDIR)/" + hfile[2:] dependencies = " ".join("$(BUILDDIR)/%s" % d[2:] for d in sorted(headermap[hfile])) hdir = os.path.dirname(target) out.write("%s: %s %s | %s\n" % (target, hfile, dependencies, hdir)) out.write("\t@echo • Refreshing %s\n" % hfile) out.write("\t@cp %s $@\n" % hfile) out.write("\n") def write_sources_to_makefile(sourcemap, out): def header_file(d): if d.startswith("c/"): d = d[2:] if d.startswith("../"): d = d[3:] return d out.write("\n\n") out.write("#" + "-" * 79 + "\n") out.write("# Object files\n") out.write("#" + "-" * 79 + "\n\n") for ccfile in sorted(sourcemap.keys()): assert ccfile.endswith(".cc") target = "$(BUILDDIR)/" + ccfile[2:-3] + ".o" odir = os.path.dirname(target) dependencies = " ".join("$(BUILDDIR)/%s" % header_file(d) for d in sorted(sourcemap[ccfile])) out.write("%s: %s %s | %s\n" % (target, ccfile, dependencies, odir)) out.write("\t@echo • Compiling %s\n" % ccfile) out.write("\t@$(CC) -c %s $(CCFLAGS) -o $@\n" % ccfile) out.write("\n") def write_objects_list(sourcemap, out): ml = max(len(c) for c in sourcemap) - 2 out.write("\n\n") out.write("fast_objects = %s\\\n" % (" " * (ml + 1))) for ccfile in sorted(sourcemap.keys()): ofile = ccfile[2:-3] + ".o" out.write("\t$(BUILDDIR)/%s%s\\\n" % (ofile, " " * (ml - len(ofile)))) def write_build_directories(realhdrs, realsrcs, out): def clean_dir_name(inp): if inp.startswith("c/"): inp = inp[2:] return inp out.write("\n\n") out.write("#" + "-" * 79 + "\n") out.write("# Build directories\n") out.write("#" + "-" * 79 + "\n") out.write("\n") inputs = list(realhdrs) + list(realsrcs) alldirs = set(os.path.dirname("$(BUILDDIR)/" + clean_dir_name(inp)) for inp in inputs) for target in alldirs: out.write("%s:\n" % target) out.write("\t@echo • Making directory $@\n") out.write("\t@mkdir -p $@\n") out.write("\n") def get_setup(cmd): import subprocess out = subprocess.check_output(["python", "ci/setup_utils.py", cmd]) out = out.decode() return out.strip().replace("$", "\\$") def write_make_targets(out): out.write("\n\n") out.write("#" + "-" * 79 + "\n") out.write("# Main\n") out.write("#" + "-" * 79 + "\n") out.write(".PHONY: fast main-fast\n") out.write("\n") out.write("fast:\n") out.write("\t$(eval DTDEBUG := 1)\n") out.write("\t$(eval export DTDEBUG)\n") out.write("\t$(eval CC := %s)\n" % get_setup("compiler")) out.write("\t$(eval CCFLAGS := %s)\n" % get_setup("ccflags")) out.write("\t$(eval LDFLAGS := %s)\n" % get_setup("ldflags")) out.write("\t$(eval EXTEXT := %s)\n" % get_setup("ext_suffix")) out.write("\t$(eval export CC CCFLAGS LDFLAGS EXTEXT)\n") out.write("\t@$(MAKE) main-fast\n") out.write("\n") out.write("main-fast: $(BUILDDIR)/_datatable.so\n") out.write("\t@echo • Done.\n") out.write("\n") out.write("$(BUILDDIR)/_datatable.so: $(fast_objects)\n") out.write("\t@echo • Linking object files into _datatable.so\n") out.write("\t@$(CC) $(LDFLAGS) -o $@ $+\n") out.write("\t@echo • Copying _datatable.so into ``datatable/lib/_datatable$(EXTEXT)``\n") out.write("\t@cp $(BUILDDIR)/_datatable.so datatable/lib/_datatable$(EXTEXT)\n") out.write("\n") def main(): sources, headers = 
get_files() realhdrs = build_headermap(headers) realsrcs = build_sourcemap(sources) with open("ci/fast.mk", "wt", encoding="utf-8") as out: write_header(out) write_build_directories(realhdrs, realsrcs, out) write_headers_to_makefile(realhdrs, out) write_sources_to_makefile(realsrcs, out) write_objects_list(realsrcs, out) write_make_targets(out) if __name__ == "__main__": try: main() except ValueError as e: print("\n Error: %s" % e) sys.exit(1)
h2oai/datatable
ci/make_fast.py
build_headermap
python
def build_headermap(headers):
    # TODO: what happens if some headers are circularly dependent?
    headermap = {}
    for hfile in headers:
        headermap[hfile] = None
    for hfile in headers:
        assert (hfile.startswith("c/") or
                hfile.startswith("datatable/include/"))
        inc = find_includes(hfile)
        for f in inc:
            assert f != hfile, "File %s includes itself?" % f
            assert f.startswith("c/")
            if f not in headers:
                raise ValueError("Unknown header \"%s\" included from %s"
                                 % (f, hfile))
        headermap[hfile] = set(inc)
    return headermap
Construct a dictionary {header_file: set_of_included_files}. This function operates on the "real" set of includes, in the sense that it parses each header file to check which files are included from it.
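Since build_headermap() wants real files on disk, here is a hedged sketch of the same {header: set_of_includes} shape on an in-memory toy graph; the `closure` helper is hypothetical, added only to show one thing such a map enables (its `seen` guard also tolerates the circular case flagged in the TODO).

# Toy include graph in the shape that build_headermap() returns;
# the file names are invented.
toy_headermap = {
    "c/types.h":    set(),
    "c/column.h":   {"c/types.h"},
    "c/rowindex.h": {"c/column.h", "c/types.h"},
}

def closure(h, graph, seen=None):
    # Everything `h` ultimately depends on; safe even if the graph has cycles.
    seen = set() if seen is None else seen
    for dep in graph[h]:
        if dep not in seen:
            seen.add(dep)
            closure(dep, graph, seen)
    return seen

print(sorted(closure("c/rowindex.h", toy_headermap)))
# ['c/column.h', 'c/types.h']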
train
https://github.com/h2oai/datatable/blob/dd5fba74d2ca85b66f82ae3c1e0b6ea2fd792564/ci/make_fast.py#L56-L78
[ "def find_includes(filename):\n \"\"\"\n Find user includes (no system includes) requested from given source file.\n\n All .h files will be given relative to the current folder, e.g.\n [\"c/rowindex.h\", \"c/column.h\"].\n \"\"\"\n includes = []\n with open(filename, \"r\", encoding=\"utf-8\") as inp:\n for line in inp:\n line = line.strip()\n if not line or line.startswith(\"//\"):\n continue\n if line.startswith(\"#\"):\n mm = re.match(rx_include, line)\n if mm:\n includename = os.path.join(\"c\", mm.group(1))\n includes.append(includename)\n return includes\n" ]
#!/usr/bin/env python3 # © H2O.ai 2018; -*- encoding: utf-8 -*- # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. #------------------------------------------------------------------------------- import os import re import sys rx_include = re.compile(r'#include\s+"(.*?)"') rx_targeth = re.compile(r'^([/\w]+\.h)\s*:\s*(.*)') def get_files(): """ Return the list of all source/header files in `c/` directory. The files will have pathnames relative to the current folder, for example "c/csv/reader_utils.cc". """ sources = [] headers = ["datatable/include/datatable.h"] assert os.path.isfile(headers[0]) for dirpath, _, filenames in os.walk("c"): for f in filenames: fullname = os.path.join(dirpath, f) if f.endswith(".h") or f.endswith(".inc"): headers.append(fullname) elif f.endswith(".c") or f.endswith(".cc"): sources.append(fullname) return (sources, headers) def find_includes(filename): """ Find user includes (no system includes) requested from given source file. All .h files will be given relative to the current folder, e.g. ["c/rowindex.h", "c/column.h"]. """ includes = [] with open(filename, "r", encoding="utf-8") as inp: for line in inp: line = line.strip() if not line or line.startswith("//"): continue if line.startswith("#"): mm = re.match(rx_include, line) if mm: includename = os.path.join("c", mm.group(1)) includes.append(includename) return includes def build_sourcemap(sources): """ Similar to build_headermap(), but builds a dictionary of includes from the "source" files (i.e. ".c/.cc" files). """ sourcemap = {} for sfile in sources: inc = find_includes(sfile) sourcemap[sfile] = set(inc) return sourcemap def write_header(out): out.write("#" + "-" * 79 + "\n") out.write("""# Copyright 2018 H2O.ai # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. 
""") out.write("#" + "-" * 79 + "\n") out.write("# This file is auto-generated from ci/make_fast.py\n") out.write("#" + "-" * 79 + "\n") def write_headers_to_makefile(headermap, out): out.write("\n\n") out.write("#" + "-" * 79 + "\n") out.write("# Header files\n") out.write("#" + "-" * 79 + "\n\n") for hfile in sorted(headermap.keys()): if hfile.startswith("datatable"): target = "$(BUILDDIR)/" + hfile dependencies = "" else: target = "$(BUILDDIR)/" + hfile[2:] dependencies = " ".join("$(BUILDDIR)/%s" % d[2:] for d in sorted(headermap[hfile])) hdir = os.path.dirname(target) out.write("%s: %s %s | %s\n" % (target, hfile, dependencies, hdir)) out.write("\t@echo • Refreshing %s\n" % hfile) out.write("\t@cp %s $@\n" % hfile) out.write("\n") def write_sources_to_makefile(sourcemap, out): def header_file(d): if d.startswith("c/"): d = d[2:] if d.startswith("../"): d = d[3:] return d out.write("\n\n") out.write("#" + "-" * 79 + "\n") out.write("# Object files\n") out.write("#" + "-" * 79 + "\n\n") for ccfile in sorted(sourcemap.keys()): assert ccfile.endswith(".cc") target = "$(BUILDDIR)/" + ccfile[2:-3] + ".o" odir = os.path.dirname(target) dependencies = " ".join("$(BUILDDIR)/%s" % header_file(d) for d in sorted(sourcemap[ccfile])) out.write("%s: %s %s | %s\n" % (target, ccfile, dependencies, odir)) out.write("\t@echo • Compiling %s\n" % ccfile) out.write("\t@$(CC) -c %s $(CCFLAGS) -o $@\n" % ccfile) out.write("\n") def write_objects_list(sourcemap, out): ml = max(len(c) for c in sourcemap) - 2 out.write("\n\n") out.write("fast_objects = %s\\\n" % (" " * (ml + 1))) for ccfile in sorted(sourcemap.keys()): ofile = ccfile[2:-3] + ".o" out.write("\t$(BUILDDIR)/%s%s\\\n" % (ofile, " " * (ml - len(ofile)))) def write_build_directories(realhdrs, realsrcs, out): def clean_dir_name(inp): if inp.startswith("c/"): inp = inp[2:] return inp out.write("\n\n") out.write("#" + "-" * 79 + "\n") out.write("# Build directories\n") out.write("#" + "-" * 79 + "\n") out.write("\n") inputs = list(realhdrs) + list(realsrcs) alldirs = set(os.path.dirname("$(BUILDDIR)/" + clean_dir_name(inp)) for inp in inputs) for target in alldirs: out.write("%s:\n" % target) out.write("\t@echo • Making directory $@\n") out.write("\t@mkdir -p $@\n") out.write("\n") def get_setup(cmd): import subprocess out = subprocess.check_output(["python", "ci/setup_utils.py", cmd]) out = out.decode() return out.strip().replace("$", "\\$") def write_make_targets(out): out.write("\n\n") out.write("#" + "-" * 79 + "\n") out.write("# Main\n") out.write("#" + "-" * 79 + "\n") out.write(".PHONY: fast main-fast\n") out.write("\n") out.write("fast:\n") out.write("\t$(eval DTDEBUG := 1)\n") out.write("\t$(eval export DTDEBUG)\n") out.write("\t$(eval CC := %s)\n" % get_setup("compiler")) out.write("\t$(eval CCFLAGS := %s)\n" % get_setup("ccflags")) out.write("\t$(eval LDFLAGS := %s)\n" % get_setup("ldflags")) out.write("\t$(eval EXTEXT := %s)\n" % get_setup("ext_suffix")) out.write("\t$(eval export CC CCFLAGS LDFLAGS EXTEXT)\n") out.write("\t@$(MAKE) main-fast\n") out.write("\n") out.write("main-fast: $(BUILDDIR)/_datatable.so\n") out.write("\t@echo • Done.\n") out.write("\n") out.write("$(BUILDDIR)/_datatable.so: $(fast_objects)\n") out.write("\t@echo • Linking object files into _datatable.so\n") out.write("\t@$(CC) $(LDFLAGS) -o $@ $+\n") out.write("\t@echo • Copying _datatable.so into ``datatable/lib/_datatable$(EXTEXT)``\n") out.write("\t@cp $(BUILDDIR)/_datatable.so datatable/lib/_datatable$(EXTEXT)\n") out.write("\n") def main(): sources, headers = 
get_files() realhdrs = build_headermap(headers) realsrcs = build_sourcemap(sources) with open("ci/fast.mk", "wt", encoding="utf-8") as out: write_header(out) write_build_directories(realhdrs, realsrcs, out) write_headers_to_makefile(realhdrs, out) write_sources_to_makefile(realsrcs, out) write_objects_list(realsrcs, out) write_make_targets(out) if __name__ == "__main__": try: main() except ValueError as e: print("\n Error: %s" % e) sys.exit(1)
h2oai/datatable
ci/make_fast.py
build_sourcemap
python
def build_sourcemap(sources):
    sourcemap = {}
    for sfile in sources:
        inc = find_includes(sfile)
        sourcemap[sfile] = set(inc)
    return sourcemap
Similar to build_headermap(), but builds a dictionary of includes from the "source" files (i.e. ".c/.cc" files).
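For context on why this map exists: a hedged illustration of turning {source: includes} into Makefile-style dependency lines, in the spirit of write_sources_to_makefile() in the scope below (the paths are made up).

toy_sourcemap = {"c/csv/reader.cc": {"c/csv/reader.h", "c/column.h"}}
for ccfile, deps in sorted(toy_sourcemap.items()):
    target = "$(BUILDDIR)/" + ccfile[2:-3] + ".o"          # strip "c/", ".cc"
    dep_str = " ".join("$(BUILDDIR)/" + d[2:] for d in sorted(deps))
    print("%s: %s %s" % (target, ccfile, dep_str))
# $(BUILDDIR)/csv/reader.o: c/csv/reader.cc $(BUILDDIR)/column.h $(BUILDDIR)/csv/reader.h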
train
https://github.com/h2oai/datatable/blob/dd5fba74d2ca85b66f82ae3c1e0b6ea2fd792564/ci/make_fast.py#L81-L90
[ "def find_includes(filename):\n \"\"\"\n Find user includes (no system includes) requested from given source file.\n\n All .h files will be given relative to the current folder, e.g.\n [\"c/rowindex.h\", \"c/column.h\"].\n \"\"\"\n includes = []\n with open(filename, \"r\", encoding=\"utf-8\") as inp:\n for line in inp:\n line = line.strip()\n if not line or line.startswith(\"//\"):\n continue\n if line.startswith(\"#\"):\n mm = re.match(rx_include, line)\n if mm:\n includename = os.path.join(\"c\", mm.group(1))\n includes.append(includename)\n return includes\n" ]
#!/usr/bin/env python3 # © H2O.ai 2018; -*- encoding: utf-8 -*- # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. #------------------------------------------------------------------------------- import os import re import sys rx_include = re.compile(r'#include\s+"(.*?)"') rx_targeth = re.compile(r'^([/\w]+\.h)\s*:\s*(.*)') def get_files(): """ Return the list of all source/header files in `c/` directory. The files will have pathnames relative to the current folder, for example "c/csv/reader_utils.cc". """ sources = [] headers = ["datatable/include/datatable.h"] assert os.path.isfile(headers[0]) for dirpath, _, filenames in os.walk("c"): for f in filenames: fullname = os.path.join(dirpath, f) if f.endswith(".h") or f.endswith(".inc"): headers.append(fullname) elif f.endswith(".c") or f.endswith(".cc"): sources.append(fullname) return (sources, headers) def find_includes(filename): """ Find user includes (no system includes) requested from given source file. All .h files will be given relative to the current folder, e.g. ["c/rowindex.h", "c/column.h"]. """ includes = [] with open(filename, "r", encoding="utf-8") as inp: for line in inp: line = line.strip() if not line or line.startswith("//"): continue if line.startswith("#"): mm = re.match(rx_include, line) if mm: includename = os.path.join("c", mm.group(1)) includes.append(includename) return includes def build_headermap(headers): """ Construct dictionary {header_file : set_of_included_files}. This function operates on "real" set of includes, in the sense that it parses each header file to check which files are included from there. """ # TODO: what happens if some headers are circularly dependent? headermap = {} for hfile in headers: headermap[hfile] = None for hfile in headers: assert (hfile.startswith("c/") or hfile.startswith("datatable/include/")) inc = find_includes(hfile) for f in inc: assert f != hfile, "File %s includes itself?" % f assert f.startswith("c/") if f not in headers: raise ValueError("Unknown header \"%s\" included from %s" % (f, hfile)) headermap[hfile] = set(inc) return headermap def write_header(out): out.write("#" + "-" * 79 + "\n") out.write("""# Copyright 2018 H2O.ai # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. 
""") out.write("#" + "-" * 79 + "\n") out.write("# This file is auto-generated from ci/make_fast.py\n") out.write("#" + "-" * 79 + "\n") def write_headers_to_makefile(headermap, out): out.write("\n\n") out.write("#" + "-" * 79 + "\n") out.write("# Header files\n") out.write("#" + "-" * 79 + "\n\n") for hfile in sorted(headermap.keys()): if hfile.startswith("datatable"): target = "$(BUILDDIR)/" + hfile dependencies = "" else: target = "$(BUILDDIR)/" + hfile[2:] dependencies = " ".join("$(BUILDDIR)/%s" % d[2:] for d in sorted(headermap[hfile])) hdir = os.path.dirname(target) out.write("%s: %s %s | %s\n" % (target, hfile, dependencies, hdir)) out.write("\t@echo • Refreshing %s\n" % hfile) out.write("\t@cp %s $@\n" % hfile) out.write("\n") def write_sources_to_makefile(sourcemap, out): def header_file(d): if d.startswith("c/"): d = d[2:] if d.startswith("../"): d = d[3:] return d out.write("\n\n") out.write("#" + "-" * 79 + "\n") out.write("# Object files\n") out.write("#" + "-" * 79 + "\n\n") for ccfile in sorted(sourcemap.keys()): assert ccfile.endswith(".cc") target = "$(BUILDDIR)/" + ccfile[2:-3] + ".o" odir = os.path.dirname(target) dependencies = " ".join("$(BUILDDIR)/%s" % header_file(d) for d in sorted(sourcemap[ccfile])) out.write("%s: %s %s | %s\n" % (target, ccfile, dependencies, odir)) out.write("\t@echo • Compiling %s\n" % ccfile) out.write("\t@$(CC) -c %s $(CCFLAGS) -o $@\n" % ccfile) out.write("\n") def write_objects_list(sourcemap, out): ml = max(len(c) for c in sourcemap) - 2 out.write("\n\n") out.write("fast_objects = %s\\\n" % (" " * (ml + 1))) for ccfile in sorted(sourcemap.keys()): ofile = ccfile[2:-3] + ".o" out.write("\t$(BUILDDIR)/%s%s\\\n" % (ofile, " " * (ml - len(ofile)))) def write_build_directories(realhdrs, realsrcs, out): def clean_dir_name(inp): if inp.startswith("c/"): inp = inp[2:] return inp out.write("\n\n") out.write("#" + "-" * 79 + "\n") out.write("# Build directories\n") out.write("#" + "-" * 79 + "\n") out.write("\n") inputs = list(realhdrs) + list(realsrcs) alldirs = set(os.path.dirname("$(BUILDDIR)/" + clean_dir_name(inp)) for inp in inputs) for target in alldirs: out.write("%s:\n" % target) out.write("\t@echo • Making directory $@\n") out.write("\t@mkdir -p $@\n") out.write("\n") def get_setup(cmd): import subprocess out = subprocess.check_output(["python", "ci/setup_utils.py", cmd]) out = out.decode() return out.strip().replace("$", "\\$") def write_make_targets(out): out.write("\n\n") out.write("#" + "-" * 79 + "\n") out.write("# Main\n") out.write("#" + "-" * 79 + "\n") out.write(".PHONY: fast main-fast\n") out.write("\n") out.write("fast:\n") out.write("\t$(eval DTDEBUG := 1)\n") out.write("\t$(eval export DTDEBUG)\n") out.write("\t$(eval CC := %s)\n" % get_setup("compiler")) out.write("\t$(eval CCFLAGS := %s)\n" % get_setup("ccflags")) out.write("\t$(eval LDFLAGS := %s)\n" % get_setup("ldflags")) out.write("\t$(eval EXTEXT := %s)\n" % get_setup("ext_suffix")) out.write("\t$(eval export CC CCFLAGS LDFLAGS EXTEXT)\n") out.write("\t@$(MAKE) main-fast\n") out.write("\n") out.write("main-fast: $(BUILDDIR)/_datatable.so\n") out.write("\t@echo • Done.\n") out.write("\n") out.write("$(BUILDDIR)/_datatable.so: $(fast_objects)\n") out.write("\t@echo • Linking object files into _datatable.so\n") out.write("\t@$(CC) $(LDFLAGS) -o $@ $+\n") out.write("\t@echo • Copying _datatable.so into ``datatable/lib/_datatable$(EXTEXT)``\n") out.write("\t@cp $(BUILDDIR)/_datatable.so datatable/lib/_datatable$(EXTEXT)\n") out.write("\n") def main(): sources, headers = 
get_files() realhdrs = build_headermap(headers) realsrcs = build_sourcemap(sources) with open("ci/fast.mk", "wt", encoding="utf-8") as out: write_header(out) write_build_directories(realhdrs, realsrcs, out) write_headers_to_makefile(realhdrs, out) write_sources_to_makefile(realsrcs, out) write_objects_list(realsrcs, out) write_make_targets(out) if __name__ == "__main__": try: main() except ValueError as e: print("\n Error: %s" % e) sys.exit(1)
h2oai/datatable
setup.py
get_c_sources
python
def get_c_sources(folder, include_headers=False):
    allowed_extensions = [".c", ".C", ".cc", ".cpp", ".cxx", ".c++"]
    if include_headers:
        allowed_extensions += [".h", ".hpp"]
    sources = []
    for root, _, files in os.walk(folder):
        for name in files:
            ext = os.path.splitext(name)[1]
            if name == "types.cc":
                # Make sure `types.cc` is compiled first, as it has multiple
                # useful static assertions.
                sources.insert(0, os.path.join(root, name))
            elif ext in allowed_extensions:
                sources.append(os.path.join(root, name))
    return sources
Find all C/C++ source files in the `folder` directory.
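A hedged check of the "types.cc first" ordering rule against a throwaway directory, assuming get_c_sources from this setup.py is in scope; only the first element's position is guaranteed, the rest follow os.walk order.

import os
import tempfile

folder = tempfile.mkdtemp()
for name in ("alpha.cc", "types.cc", "zeta.cpp", "header.h"):
    open(os.path.join(folder, name), "w").close()

found = get_c_sources(folder)
assert os.path.basename(found[0]) == "types.cc"   # always forced to the front
assert not any(p.endswith(".h") for p in found)   # headers excluded by default
print([os.path.basename(p) for p in found])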
train
https://github.com/h2oai/datatable/blob/dd5fba74d2ca85b66f82ae3c1e0b6ea2fd792564/setup.py#L61-L76
null
#!/usr/bin/env python # -*- coding: utf-8 -*- #------------------------------------------------------------------------------- # Copyright 2018 H2O.ai # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. #------------------------------------------------------------------------------- """ Build script for the `datatable` module. $ python setup.py sdist $ python setup.py bdist_wheel $ twine upload dist/* """ import os import shutil import sys if sys.version_info < (3, 5): # Check python version here, otherwise the import from ci.setup_utils # below will fail in Python 2.7 with confusing error message raise SystemExit("\x1B[91m\nSystemExit: datatable requires Python 3.5+, " "whereas your Python is %s\n\x1B[39m" % ".".join(str(d) for d in sys.version_info)) from setuptools import setup, find_packages, Extension from ci.setup_utils import (get_datatable_version, make_git_version_file, get_compiler, get_extra_compile_flags, get_extra_link_args, find_linked_dynamic_libraries, TaskContext, islinux, ismacos, iswindows, monkey_patch_compiler) print() cmd = "" with TaskContext("Start setup.py") as log: if len(sys.argv) > 1: cmd = sys.argv[1] log.info("command = `%s`" % cmd) #------------------------------------------------------------------------------- # Generic helpers #------------------------------------------------------------------------------- def get_py_sources(): """Find python source directories.""" packages = find_packages(exclude=["tests", "tests.munging", "temp", "c"]) return packages def get_main_dependencies(): deps = ["typesentry>=0.2.6", "blessed"] # If there is an active LLVM installation, then also require the # `llvmlite` module. 
# llvmdir, llvmver = get_llvm(True) # if llvmdir: # llvmlite_req = (">=0.20.0,<0.21.0" if llvmver == "LLVM4" else # ">=0.21.0,<0.23.0" if llvmver == "LLVM5" else # ">=0.23.0,<0.27.0" if llvmver == "LLVM6" else # ">=0.27.0") # deps += ["llvmlite" + llvmlite_req] # # If we need to install llvmlite, this can help # if not os.environ.get("LLVM_CONFIG"): # os.environ["LLVM_CONFIG"] = \ # os.path.join(llvmdir, "bin", "llvm-config") return deps def get_test_dependencies(): # Test dependencies are exposed as extras, see # https://stackoverflow.com/questions/29870629 return [ "pytest>=3.1", "pytest-cov", "pytest-benchmark>=3.1", ] #------------------------------------------------------------------------------- # Prepare the environment #------------------------------------------------------------------------------- cpp_files = [] extra_compile_args = [] extra_link_args = [] if cmd in ("build", "bdist_wheel", "build_ext", "install"): with TaskContext("Prepare the environment") as log: # Check whether the environment is sane... if not(islinux() or ismacos() or iswindows()): log.warn("Unknown platform=%s os=%s" % (sys.platform, os.name)) # Compiler os.environ["CC"] = os.environ["CXX"] = get_compiler() if ismacos() and not os.environ.get("MACOSX_DEPLOYMENT_TARGET"): os.environ["MACOSX_DEPLOYMENT_TARGET"] = "10.13" # Force to build for a 64-bit platform only os.environ["ARCHFLAGS"] = "-m64" for n in ["CC", "CXX", "LDFLAGS", "ARCHFLAGS", "LLVM_CONFIG", "MACOSX_DEPLOYMENT_TARGET"]: log.info("%s = %s" % (n, os.environ.get(n, ""))) extra_compile_args = get_extra_compile_flags() extra_link_args = get_extra_link_args() cpp_files = get_c_sources("c") with TaskContext("Copy dynamic libraries") as log: # Copy system libraries into the datatable/lib folder, so that they can # be packaged with the wheel libs = find_linked_dynamic_libraries() for libpath in libs: trgfile = os.path.join("datatable", "lib", os.path.basename(libpath)) if os.path.exists(trgfile): log.info("File %s already exists, skipped" % trgfile) else: log.info("Copying %s to %s" % (libpath, trgfile)) shutil.copy(libpath, trgfile) monkey_patch_compiler() # Create the git version file if cmd in ("build", "sdist", "bdist_wheel", "install"): make_git_version_file(True) #------------------------------------------------------------------------------- # Main setup #------------------------------------------------------------------------------- setup( name="datatable", version=get_datatable_version(), description="Python library for fast multi-threaded data manipulation and " "munging.", long_description=""" This is a Python package for manipulating 2-dimensional tabular data structures (aka data frames). It is close in spirit to pandas or SFrame; however we put specific emphasis on speed and big data support. As the name suggests, the package is closely related to R's data.table and attempts to mimic its core algorithms and API. See https://github.com/h2oai/datatable for more details. 
""", # The homepage url="https://github.com/h2oai/datatable", # Author details author="Pasha Stetsenko", author_email="pasha@h2o.ai", license="Mozilla Public License v2.0", classifiers=[ "Development Status :: 4 - Beta", "Intended Audience :: Developers", "Intended Audience :: Science/Research", "License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)", "Operating System :: MacOS", "Operating System :: Unix", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Topic :: Scientific/Engineering :: Information Analysis", ], keywords=["datatable", "data", "dataframe", "frame", "data.table", "munging", "numpy", "pandas", "data processing", "ETL"], packages=get_py_sources(), # Runtime dependencies install_requires=get_main_dependencies(), python_requires=">=3.5", tests_require=get_test_dependencies(), extras_require={ "testing": get_test_dependencies() }, zip_safe=True, ext_modules=[ Extension( "datatable/lib/_datatable", include_dirs=["c", "include"], sources=cpp_files, extra_compile_args=extra_compile_args, extra_link_args=extra_link_args, language="c++", ), ], package_dir={"datatable": "datatable"}, package_data={"datatable": ["lib/*.*", "include/*.h"]}, )
h2oai/datatable
datatable/fread.py
GenericReader._get_destination
python
def _get_destination(self, estimated_size):
    global _psutil_load_attempted
    if not _psutil_load_attempted:
        _psutil_load_attempted = True
        try:
            import psutil
        except ImportError:
            psutil = None
    if self.verbose and estimated_size > 1:
        self.logger.debug("The Frame is estimated to require %s bytes"
                          % humanize_bytes(estimated_size))
    if estimated_size < 1024 or psutil is None:
        return None
    vm = psutil.virtual_memory()
    if self.verbose:
        self.logger.debug("Memory available = %s (out of %s)"
                          % (humanize_bytes(vm.available),
                             humanize_bytes(vm.total)))
    if (estimated_size < vm.available and self._save_to is None or
            self._save_to == "memory"):
        if self.verbose:
            self.logger.debug("Frame will be loaded into memory")
        return None
    else:
        if self._save_to:
            tmpdir = self._save_to
            os.makedirs(tmpdir)
        else:
            tmpdir = tempfile.mkdtemp()
        du = psutil.disk_usage(tmpdir)
        if self.verbose:
            self.logger.debug("Free disk space on drive %s = %s"
                              % (os.path.splitdrive(tmpdir)[0] or "/",
                                 humanize_bytes(du.free)))
        if du.free > estimated_size or self._save_to:
            if self.verbose:
                self.logger.debug("Frame will be stored in %s" % tmpdir)
            return tmpdir
    raise RuntimeError("The Frame is estimated to require at least %s "
                       "of memory, and you don't have that much available "
                       "either in RAM or on a hard drive."
                       % humanize_bytes(estimated_size))
Invoked from the C level, this function will return either the name of the folder where the datatable is to be saved, or None, indicating that the datatable should be read into RAM. This function may also raise an exception if it determines that it cannot find a good strategy for handling a dataset of the requested size.
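A hedged distillation of the decision tree above, with the reader plumbing stripped out; `pick_destination` is a made-up name and, as in fread.py, psutil is treated as optional.

import tempfile

try:
    import psutil
except ImportError:
    psutil = None

def pick_destination(estimated_size):
    # None means "load into RAM"; a path means "spill to that folder".
    if estimated_size < 1024 or psutil is None:
        return None
    if estimated_size < psutil.virtual_memory().available:
        return None
    tmpdir = tempfile.mkdtemp()
    if psutil.disk_usage(tmpdir).free > estimated_size:
        return tmpdir
    raise RuntimeError("not enough memory or disk space")

print(pick_destination(10))   # None: tiny data always goes to RAM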
train
https://github.com/h2oai/datatable/blob/dd5fba74d2ca85b66f82ae3c1e0b6ea2fd792564/datatable/fread.py#L685-L735
null
class GenericReader(object): """ Parser object for reading CSV files. """ def __init__(self, anysource=None, *, file=None, text=None, url=None, cmd=None, columns=None, sep=None, max_nrows=None, header=None, na_strings=None, verbose=False, fill=False, encoding=None, dec=".", skip_to_string=None, skip_to_line=None, save_to=None, nthreads=None, logger=None, skip_blank_lines=True, strip_whitespace=True, quotechar='"', **args): self._src = None # type: str self._file = None # type: str self._files = None # type: List[str] self._fileno = None # type: int self._tempfiles = [] # type: List[str] self._tempdir = None # type: str self._tempdir_own = False # type: bool self._text = None # type: Union[str, bytes] self._sep = None # type: str self._dec = None # type: str self._maxnrows = None # type: int self._header = None # type: bool self._nastrings = [] # type: List[str] self._verbose = False # type: bool self._fill = False # type: bool self._encoding = encoding # type: str self._quotechar = None # type: str self._skip_to_line = None self._skip_blank_lines = True self._skip_to_string = None self._strip_whitespace = True self._columns = None self._save_to = save_to self._nthreads = nthreads self._logger = None self._colnames = None self._bar_ends = None self._bar_symbols = None self._result = None if na_strings is None: na_strings = ["NA"] if "_tempdir" in args: self.tempdir = args.pop("_tempdir") self.verbose = verbose self.logger = logger if verbose: self.logger.debug("[1] Prepare for reading") self._resolve_source(anysource, file, text, cmd, url) self.columns = columns self.sep = sep self.dec = dec self.max_nrows = max_nrows self.header = header self.na_strings = na_strings self.fill = fill self.skip_to_string = skip_to_string self.skip_to_line = skip_to_line self.skip_blank_lines = skip_blank_lines self.strip_whitespace = strip_whitespace self.quotechar = quotechar if "separator" in args: self.sep = args.pop("separator") if "show_progress" in args: dtwarn("Parameter `show_progress` is ignored") args.pop("show_progress") if "progress_fn" in args: dtwarn("Parameter `progress_fn` is ignored") args.pop("progress_fn") if args: raise TTypeError("Unknown argument(s) %r in FReader(...)" % list(args.keys())) #--------------------------------------------------------------------------- # Resolve from various sources #--------------------------------------------------------------------------- def _resolve_source(self, anysource, file, text, cmd, url): args = (["any"] * (anysource is not None) + ["file"] * (file is not None) + ["text"] * (text is not None) + ["cmd"] * (cmd is not None) + ["url"] * (url is not None)) if len(args) == 0: raise TValueError( "No input source for `fread` was given. Please specify one of " "the parameters `file`, `text`, `url`, or `cmd`") if len(args) > 1: if anysource is None: raise TValueError( "Both parameters `%s` and `%s` cannot be passed to fread " "simultaneously." % (args[0], args[1])) else: args.remove("any") raise TValueError( "When an unnamed argument is passed, it is invalid to also " "provide the `%s` parameter." % (args[0], )) self._resolve_source_any(anysource) self._resolve_source_text(text) self._resolve_source_file(file) self._resolve_source_cmd(cmd) self._resolve_source_url(url) def _resolve_source_any(self, src): if src is None: return is_str = isinstance(src, str) if is_str or isinstance(src, bytes): # If there are any control characters (such as \n or \r) in the # text of `src`, then its type is "text". 
if len(src) >= 4096: if self.verbose: self.logger.debug("Input is a string of length %d, " "treating it as raw text" % len(src)) self._resolve_source_text(src) else: fn = ord if is_str else int for ch in src: ccode = fn(ch) if ccode < 0x20: if self.verbose: self.logger.debug("Input contains '\\x%02X', " "treating it as raw text" % ccode) self._resolve_source_text(src) return if is_str and re.match(_url_regex, src): if self.verbose: self.logger.debug("Input is a URL.") self._resolve_source_url(src) elif is_str and re.search(_glob_regex, src): if self.verbose: self.logger.debug("Input is a glob pattern.") self._resolve_source_list_of_files(glob.glob(src)) else: if self.verbose: self.logger.debug("Input is assumed to be a " "file name.") self._resolve_source_file(src) elif isinstance(src, _pathlike) or hasattr(src, "read"): self._resolve_source_file(src) elif isinstance(src, (list, tuple)): self._resolve_source_list_of_files(src) else: raise TTypeError("Unknown type for the first argument in fread: %r" % type(src)) def _resolve_source_text(self, text): if text is None: return if not isinstance(text, (str, bytes)): raise TTypeError("Invalid parameter `text` in fread: expected " "str or bytes, got %r" % type(text)) self._text = text self._src = "<text>" def _resolve_source_file(self, file): if file is None: return if isinstance(file, _pathlike): # `_pathlike` contains (str, bytes), and on Python 3.6 also # os.PathLike interface file = os.path.expanduser(file) file = os.fsdecode(file) elif isinstance(file, pathlib.Path): # This is only for Python 3.5; in Python 3.6 pathlib.Path implements # os.PathLike interface and is included in `_pathlike`. file = file.expanduser() file = str(file) elif hasattr(file, "read") and callable(file.read): # A builtin `file` object, or something similar. We check for the # presence of `fileno` attribute, which will allow us to provide a # more direct access to the underlying file. # noinspection PyBroadException try: # .fileno can be either a method, or a property # The implementation of .fileno may raise an exception too # (indicating that no file descriptor is available) fd = file.fileno if callable(fd): fd = fd() if not isinstance(fd, int) or fd <= 0: raise Exception self._fileno = fd except Exception: # Catching if: file.fileno is not defined, or is not an integer, # or raises an error, or returns a closed file descriptor rawtxt = file.read() self._text = rawtxt file = getattr(file, "name", None) if not isinstance(file, (str, bytes)): self._src = "<file>" elif isinstance(file, bytes): self._src = os.fsdecode(file) else: self._src = file return else: raise TTypeError("Invalid parameter `file` in fread: expected a " "str/bytes/PathLike, got %r" % type(file)) # if `file` is not str, then `os.path.join(file, "..")` below will fail assert isinstance(file, str) if not os.path.exists(file): # File does not exist -- search up the tree for the first file that # does. This will allow us to provide a better error message to the # user; also if the first path component that exists is a file (not # a folder), then the user probably tries to specify a file within # an archive -- and this is not an error at all! 
xpath = os.path.abspath(file) ypath = xpath while not os.path.exists(xpath): xpath = os.path.abspath(os.path.join(xpath, "..")) ypath = ypath[len(xpath):] if os.path.isfile(xpath): self._resolve_archive(xpath, ypath) return else: raise TValueError("File %s`%s` does not exist" % (xpath, ypath)) if not os.path.isfile(file): raise TValueError("Path `%s` is not a file" % file) self._src = file self._resolve_archive(file) def _resolve_source_list_of_files(self, files_list): self._files = [] for s in files_list: self._resolve_source_file(s) entry = (self._src, self._file, self._fileno, self._text) self._files.append(entry) def _resolve_source_cmd(self, cmd): if cmd is None: return if not isinstance(cmd, str): raise TTypeError("Invalid parameter `cmd` in fread: expected str, " "got %r" % type(cmd)) result = os.popen(cmd) self._text = result.read() self._src = cmd def _resolve_source_url(self, url): if url is not None: import urllib.request targetfile = tempfile.mktemp(dir=self.tempdir) urllib.request.urlretrieve(url, filename=targetfile) self._tempfiles.append(targetfile) self._file = targetfile self._src = url def _resolve_archive(self, filename, subpath=None): ext = os.path.splitext(filename)[1] if subpath and subpath[0] == "/": subpath = subpath[1:] if ext == ".zip": import zipfile zf = zipfile.ZipFile(filename) # MacOS is found guilty of adding extra files into the Zip archives # it creates. The files are hidden, and in the directory __MACOSX/. # We remove those files from the list, since they are not real user # files, and have an unknown binary format. zff = [name for name in zf.namelist() if not(name.startswith("__MACOSX/") or name.endswith("/"))] if subpath: if subpath in zff: zff = [subpath] else: raise TValueError("File `%s` does not exist in archive " "`%s`" % (subpath, filename)) if len(zff) > 1: self.logger.warning("Zip file %s contains multiple compressed " "files: %r. Only the first of them will be " "used." % (filename, zff)) if len(zff) == 0: raise TValueError("Zip file %s is empty" % filename) self._tempdir = tempfile.mkdtemp() if self._verbose: self.logger.debug("Extracting %s to temporary directory %s" % (filename, self._tempdir)) self._tempfiles.append(zf.extract(zff[0], path=self._tempdir)) self._file = self._tempfiles[-1] elif ext == ".gz": import gzip zf = gzip.GzipFile(filename, mode="rb") if self._verbose: self.logger.debug("Extracting %s into memory" % filename) self._text = zf.read() if self._verbose: self.logger.debug("Extracted: size = %d" % len(self._text)) elif ext == ".bz2": import bz2 zf = bz2.open(filename, mode="rb") if self._verbose: self.logger.debug("Extracting %s into memory" % filename) self._text = zf.read() if self._verbose: self.logger.debug("Extracted: size = %d" % len(self._text)) elif ext == ".xz": import lzma zf = lzma.open(filename, mode="rb") if self._verbose: self.logger.debug("Extracting %s into memory" % filename) self._text = zf.read() if self._verbose: self.logger.debug("Extracted: size = %d" % len(self._text)) elif ext == ".xlsx" or ext == ".xls": self._result = read_xls_workbook(filename, subpath) else: self._file = filename #--------------------------------------------------------------------------- # Properties #--------------------------------------------------------------------------- @property def src(self) -> str: """ Name of the source of the data. This is a "portmanteau" value, intended mostly for displaying in error messages or verbose output. 
This value contains one of: - the name of the file requested by the user (possibly with minor modifications such as user/glob expansion). This never gives the name of a temporary file created by FRead internally. - URL text, if the user provided a url to fread. - special token "<file>" if an open file object was provided, but its file name is not known. - "<text>" if the input was a raw text. In order to determine the actual data source, the caller should query properties `.file`, `.text` and `.fileno`. One and only one of them will be non-None. """ return self._src @property def file(self) -> Optional[str]: """ Name of the file to be read. This always refers to the actual file, on a file system, that the underlying C code is expected to open and read. In particular, if the "original" source (as provided by the user) required processing the content and saving it into a temporary file, then this property will return the name of that temporary file. On the other hand, if the source is not a file, this property will return None. The returned value is always a string, even if the user passed a `bytes` object as `file=` argument to the constructor. """ return self._file @property def text(self) -> Union[str, bytes, None]: """ String/bytes object with the content to read. The returned value is None if the content should be read from file or some other source. """ return self._text @property def fileno(self) -> Optional[int]: """ File descriptor of an open file that should be read. This property is an equivalent way of specifying a file source. However instead of providing a file name, this property gives a file descriptor of a file that was already opened. The caller should not attempt to close this file. """ return self._fileno @property def tempdir(self): if self._tempdir is None: self._tempdir = tempfile.mkdtemp() self._tempdir_own = True return self._tempdir @tempdir.setter @typed(tempdir=str) def tempdir(self, tempdir): self._tempdir = tempdir self._tempdir_own = False @property def columns(self): return self._columns @columns.setter def columns(self, columns): self._columns = columns or None @property def sep(self): return self._sep @sep.setter @typed(sep=U(str, None)) def sep(self, sep): if sep == "": self._sep = "\n" elif not sep: self._sep = None else: if len(sep) > 1: raise TValueError("Multi-character separator %r not supported" % sep) if ord(sep) > 127: raise TValueError("The separator should be an ASCII character, " "got %r" % sep) self._sep = sep @property def dec(self): return self._dec @dec.setter def dec(self, v): if v == "." or v == ",": self._dec = v else: raise ValueError("Only dec='.' 
or ',' are allowed") @property def max_nrows(self): return self._maxnrows @max_nrows.setter @typed(max_nrows=U(int, None)) def max_nrows(self, max_nrows): if max_nrows is None or max_nrows < 0: max_nrows = -1 self._maxnrows = max_nrows @property def header(self): return self._header @header.setter @typed(header=U(bool, None)) def header(self, header): self._header = header @property def na_strings(self): return self._nastrings @na_strings.setter @typed() def na_strings(self, na_strings: List[str]): self._nastrings = na_strings @property def verbose(self): return self._verbose @verbose.setter @typed(verbose=bool) def verbose(self, verbose): self._verbose = verbose @property def fill(self): return self._fill @fill.setter @typed(fill=bool) def fill(self, fill): self._fill = fill @property def skip_to_string(self): return self._skip_to_string @skip_to_string.setter @typed(s=U(str, None)) def skip_to_string(self, s): self._skip_to_string = s or None @property def skip_to_line(self): return self._skip_to_line @skip_to_line.setter @typed(n=U(int, None)) def skip_to_line(self, n): self._skip_to_line = n @property def skip_blank_lines(self) -> bool: return self._skip_blank_lines @skip_blank_lines.setter @typed() def skip_blank_lines(self, v: bool): self._skip_blank_lines = v @property def strip_whitespace(self) -> bool: return self._strip_whitespace @strip_whitespace.setter @typed() def strip_whitespace(self, v: bool): self._strip_whitespace = v @property def quotechar(self): return self._quotechar @quotechar.setter @typed() def quotechar(self, v: Optional[str]): if v not in {None, "", "'", '"', "`"}: raise ValueError("quotechar should be one of [\"'`] or '' or None") self._quotechar = v @property def nthreads(self): """Number of threads to use when reading the file.""" return self._nthreads @nthreads.setter @typed(nth=U(int, None)) def nthreads(self, nth): self._nthreads = nth @property def logger(self): return self._logger @logger.setter def logger(self, l): if l is None: # reset to the default logger l = _DefaultLogger() else: # If custom logger is provided, turn on the verbose mode self.verbose = True if not(hasattr(l, "debug") and callable(l.debug) and (hasattr(l.debug, "__func__") and l.debug.__func__.__code__.co_argcount >= 2 or isinstance(l, type) and hasattr(l.debug, "__code__") and l.debug.__code__.co_argcount >= 1)): # Allow either an instance of a class with .debug(self, msg) method, # or the class itself, with static `.debug(msg)` method. 
raise TTypeError("`logger` parameter must be a class with method "
                             ".debug() taking at least one argument")
        self._logger = l


    #---------------------------------------------------------------------------

    def read(self):
        try:
            if self._result:
                return self._result
            if self._files:
                res = {}
                for src, filename, fileno, txt in self._files:
                    self._src = src
                    self._file = filename
                    self._fileno = fileno
                    self._text = txt
                    self._colnames = None
                    try:
                        res[src] = core.gread(self)
                    except Exception as e:
                        res[src] = e
                return res
            else:
                return core.gread(self)
        finally:
            self._clear_temporary_files()


    #---------------------------------------------------------------------------

    def _clear_temporary_files(self):
        for f in self._tempfiles:
            try:
                if self._verbose:
                    self.logger.debug("Removing temporary file %s" % f)
                os.remove(f)
            except OSError as e:
                self.logger.warning("Failed to remove a temporary file: %r" % e)
        if self._tempdir_own:
            shutil.rmtree(self._tempdir, ignore_errors=True)


    #---------------------------------------------------------------------------
    # Process `columns` argument
    #---------------------------------------------------------------------------

    def _set_column_names(self, colnames):
        """
        Invoked by `gread` from C++ to inform the class about the detected
        column names. This method is a simplified version of
        `_override_columns`, and will only be invoked if `self._columns` is
        None.
        """
        self._colnames = colnames

    def _override_columns0(self, coldescs):
        return self._override_columns1(self._columns, coldescs)

    def _override_columns1(self, colspec, coldescs):
        if isinstance(colspec, (slice, range)):
            return self._apply_columns_slice(colspec, coldescs)
        if isinstance(colspec, set):
            return self._apply_columns_set(colspec, coldescs)
        if isinstance(colspec, (list, tuple)):
            return self._apply_columns_list(colspec, coldescs)
        if isinstance(colspec, dict):
            return self._apply_columns_dict(colspec, coldescs)
        if isinstance(colspec, (type, stype, ltype)):
            newcs = {colspec: slice(None)}
            return self._apply_columns_dict(newcs, coldescs)
        if callable(colspec):
            return self._apply_columns_function(colspec, coldescs)
        raise RuntimeError("Unknown colspec: %r"  # pragma: no cover
                           % colspec)

    def _apply_columns_slice(self, colslice, colsdesc):
        n = len(colsdesc)
        if isinstance(colslice, slice):
            start, count, step = normalize_slice(colslice, n)
        else:
            t = normalize_range(colslice, n)
            if t is None:
                raise TValueError("Invalid range iterator for a file with "
                                  "%d columns: %r" % (n, colslice))
            start, count, step = t
        if step <= 0:
            raise TValueError("Cannot use slice/range with negative step "
                              "for column filter: %r" % colslice)
        colnames = [None] * count
        coltypes = [rtype.rdrop.value] * n
        for j in range(count):
            i = start + j * step
            colnames[j] = colsdesc[i].name
            coltypes[i] = rtype.rauto.value
        self._colnames = colnames
        return coltypes

    def _apply_columns_set(self, colset, colsdesc):
        n = len(colsdesc)
        # Make a copy of the `colset` in order to check whether all the
        # columns requested by the user were found, and issue a warning
        # otherwise.
requested_cols = colset.copy() colnames = [] coltypes = [rtype.rdrop.value] * n for i in range(n): colname = colsdesc[i][0] if colname in colset: requested_cols.discard(colname) colnames.append(colname) coltypes[i] = rtype.rauto.value if requested_cols: self.logger.warning("Column(s) %r not found in the input file" % list(requested_cols)) self._colnames = colnames return coltypes def _apply_columns_list(self, collist, colsdesc): n = len(colsdesc) nn = len(collist) if n != nn: raise TValueError("Input contains %s, whereas `columns` " "parameter specifies only %s" % (plural(n, "column"), plural(nn, "column"))) colnames = [] coltypes = [rtype.rdrop.value] * n for i in range(n): entry = collist[i] if entry is None or entry is False: pass elif entry is True or entry is Ellipsis: colnames.append(colsdesc[i].name) coltypes[i] = rtype.rauto.value elif isinstance(entry, str): colnames.append(entry) coltypes[i] = rtype.rauto.value elif isinstance(entry, (stype, ltype, type)): colnames.append(colsdesc[i].name) coltypes[i] = _rtypes_map[entry].value elif isinstance(entry, tuple): newname, newtype = entry if newtype not in _rtypes_map: raise TValueError("Unknown type %r used as an override " "for column %r" % (newtype, newname)) colnames.append(newname) coltypes[i] = _rtypes_map[newtype].value else: raise TTypeError("Entry `columns[%d]` has invalid type %r" % (i, entry.__class__.__name__)) self._colnames = colnames return coltypes def _apply_columns_dict(self, colsdict, colsdesc): default_entry = colsdict.get(..., ...) colnames = [] coltypes = [rtype.rdrop.value] * len(colsdesc) new_entries = {} for key, val in colsdict.items(): if isinstance(key, (type, stype, ltype)): if isinstance(val, str): val = [val] if isinstance(val, slice): val = [colsdesc[i].name for i in range(*val.indices(len(colsdesc)))] if isinstance(val, range): val = [colsdesc[i].name for i in val] if isinstance(val, (list, tuple, set)): for entry in val: if not isinstance(entry, str): raise TTypeError( "Type %s in the `columns` parameter should map" " to a string or list of strings (column names)" "; however it contains an entry %r" % (key, entry)) if entry in colsdict: continue new_entries[entry] = key else: raise TTypeError( "Unknown entry %r for %s in `columns`" % (val, key)) if new_entries: colsdict = {**colsdict, **new_entries} for i, desc in enumerate(colsdesc): name = desc.name entry = colsdict.get(name, default_entry) if entry is None: pass # coltype is already "drop" elif entry is Ellipsis: colnames.append(name) coltypes[i] = rtype.rauto.value elif isinstance(entry, str): colnames.append(entry) coltypes[i] = rtype.rauto.value elif isinstance(entry, (stype, ltype, type)): colnames.append(name) coltypes[i] = _rtypes_map[entry].value elif isinstance(entry, tuple): newname, newtype = entry colnames.append(newname) coltypes[i] = _rtypes_map[newtype].value assert isinstance(newname, str) if not coltypes[i]: raise TValueError("Unknown type %r used as an override " "for column %r" % (newtype, newname)) else: raise TTypeError("Unknown value %r for column '%s' in " "columns descriptor" % (entry, name)) self._colnames = colnames return coltypes def _apply_columns_function(self, colsfn, colsdesc): res = colsfn(colsdesc) return self._override_columns1(res, colsdesc)
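The `_override_columns1` dispatch above accepts several shapes for the `columns` argument. A minimal sketch of those shapes, under the assumption that this reader backs datatable's `fread` frontend; the file name "data.csv" and the column names are hypothetical:

import datatable as dt

# slice / range: keep a positional subset of the detected columns
f1 = dt.fread("data.csv", columns=slice(0, 3))

# set of names: keep only the named columns; a warning is issued for
# requested names that are absent from the file
f2 = dt.fread("data.csv", columns={"id", "price"})

# dict: retype individual columns; the Ellipsis entry keeps the rest as-is
f3 = dt.fread("data.csv", columns={"id": dt.stype.int64, ...: ...})

# callable: receives the detected column descriptors and may return any of
# the specs above -- here a boolean list that selects a single column
f4 = dt.fread("data.csv", columns=lambda cols: [c.name == "id" for c in cols])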
h2oai/datatable
datatable/nff.py
save_nff
python
def save_nff(self, dest, _strategy="auto"): if _strategy not in ("auto", "write", "mmap"): raise TValueError("Invalid parameter _strategy: only 'write' / 'mmap' " "/ 'auto' are allowed") dest = os.path.expanduser(dest) if not os.path.exists(dest): os.makedirs(dest) self.materialize() mins = self.min().to_list() maxs = self.max().to_list() metafile = os.path.join(dest, "_meta.nff") with _builtin_open(metafile, "w", encoding="utf-8") as out: out.write("# NFF2\n") out.write("# nrows = %d\n" % self.nrows) out.write('filename,stype,meta,colname,min,max\n') l = len(str(self.ncols)) for i in range(self.ncols): filename = "c%0*d" % (l, i + 1) colname = self.names[i].replace('"', '""') stype = self.stypes[i] if stype == dt.stype.obj64: dtwarn("Column %r of type obj64 was not saved" % self.names[i]) continue smin = _stringify(mins[i][0]) smax = _stringify(maxs[i][0]) out.write('%s,%s,,"%s",%s,%s\n' % (filename, stype.code, colname, smin, smax)) filename = os.path.join(dest, filename) core._column_save_to_disk(self, i, filename, _strategy)
Save Frame in binary NFF/Jay format. :param dest: destination directory where the Frame should be saved. :param _strategy: one of "mmap", "write" or "auto"
train
https://github.com/h2oai/datatable/blob/dd5fba74d2ca85b66f82ae3c1e0b6ea2fd792564/datatable/nff.py#L24-L61
[ "def dtwarn(message):\n warnings.warn(message, category=DatatableWarning)\n", "def _stringify(x):\n if x is None:\n return \"\"\n if isinstance(x, bool):\n return str(int(x))\n return str(x)\n" ]
#!/usr/bin/env python3 # © H2O.ai 2018; -*- encoding: utf-8 -*- # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. #------------------------------------------------------------------------------- import os import re import datatable as dt from datatable.lib import core from datatable.utils.typechecks import TTypeError, TValueError, dtwarn _builtin_open = open def _stringify(x): if x is None: return "" if isinstance(x, bool): return str(int(x)) return str(x) def open(path): if isinstance(path, bytes): return core.open_jay(path) if not isinstance(path, str): raise TTypeError("Parameter `path` should be a string") path = os.path.expanduser(path) if not os.path.exists(path): msg = "Path %s does not exist" % path if not path.startswith("/"): msg += " (current directory = %s)" % os.getcwd() raise ValueError(msg) if not os.path.isdir(path): return core.open_jay(path) nff_version = None nrows = 0 metafile = os.path.join(path, "_meta.nff") with _builtin_open(metafile, encoding="utf-8") as inp: info = [] for line in inp: if line.startswith("#"): info.append(line[1:].strip()) else: break if not (info and info[0].startswith("NFF")): raise ValueError("File _meta.nff has invalid format") if info[0] == "NFF1": nff_version = 1 elif info[0] == "NFF1+": nff_version = 1.5 elif info[0] == "NFF2": nff_version = 2 if nff_version: assert len(info) == 2 mm = re.match(r"nrows\s*=\s*(\d+)", info[1]) if mm: nrows = int(mm.group(1)) else: raise ValueError("nrows info not found in line %r" % info[1]) else: raise ValueError("Unknown NFF format: %s" % info[0]) coltypes = [dt.stype.str32] * 4 if nff_version > 1: coltypes += [None] * 2 f0 = dt.fread(metafile, sep=",", columns=coltypes) f1 = f0[:, ["filename", "stype"]] colnames = f0[:, "colname"].to_list()[0] df = core.open_nff(f1, nrows, path, nff_version < 2, colnames) assert df.nrows == nrows, "Wrong number of rows read: %d" % df.nrows return df
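A hedged round-trip sketch for the NFF format implemented above. It assumes that `save_nff` is bound as a `Frame` method and that the module-level `open` is exposed as `dt.open`; the directory name "mydata.nff" is hypothetical:

import datatable as dt

df = dt.Frame(A=[1, 2, 3], B=["x", "y", "z"])
df.save_nff("mydata.nff")     # writes _meta.nff plus one binary file per column
df2 = dt.open("mydata.nff")   # parses _meta.nff and loads the columns back
assert df2.nrows == df.nrows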
constverum/ProxyBroker
proxybroker/api.py
Broker.grab
python
async def grab(self, *, countries=None, limit=0): self._countries = countries self._limit = limit task = asyncio.ensure_future(self._grab(check=False)) self._all_tasks.append(task)
Gather proxies from the providers without checking them. :param list countries: (optional) List of ISO country codes in which the proxies should be located :param int limit: (optional) The maximum number of proxies :ref:`Example of usage <proxybroker-examples-grab>`.
train
https://github.com/constverum/ProxyBroker/blob/d21aae8575fc3a95493233ecfd2c7cf47b36b069/proxybroker/api.py#L112-L124
[ "async def _grab(self, types=None, check=False):\n def _get_tasks(by=MAX_CONCURRENT_PROVIDERS):\n providers = [\n pr\n for pr in self._providers\n if not types or not pr.proto or bool(pr.proto & types.keys())\n ]\n while providers:\n tasks = [\n asyncio.ensure_future(pr.get_proxies())\n for pr in providers[:by]\n ]\n del providers[:by]\n self._all_tasks.extend(tasks)\n yield tasks\n\n log.debug('Start grabbing proxies')\n while True:\n for tasks in _get_tasks():\n for task in asyncio.as_completed(tasks):\n proxies = await task\n for proxy in proxies:\n await self._handle(proxy, check=check)\n log.debug('Grab cycle is complete')\n if self._server:\n log.debug('fall asleep for %d seconds' % GRAB_PAUSE)\n await asyncio.sleep(GRAB_PAUSE)\n log.debug('awaked')\n else:\n break\n await self._on_check.join()\n self._done()\n" ]
class Broker: """The Broker. | One broker to rule them all, one broker to find them, | One broker to bring them all and in the darkness bind them. :param asyncio.Queue queue: (optional) Queue of found/checked proxies :param int timeout: (optional) Timeout of a request in seconds :param int max_conn: (optional) The maximum number of concurrent checks of proxies :param int max_tries: (optional) The maximum number of attempts to check a proxy :param list judges: (optional) Urls of pages that show HTTP headers and IP address. Or :class:`~proxybroker.judge.Judge` objects :param list providers: (optional) Urls of pages where to find proxies. Or :class:`~proxybroker.providers.Provider` objects :param bool verify_ssl: (optional) Flag indicating whether to check the SSL certificates. Set to True to check ssl certifications :param loop: (optional) asyncio compatible event loop .. deprecated:: 0.2.0 Use :attr:`max_conn` and :attr:`max_tries` instead of :attr:`max_concurrent_conn` and :attr:`attempts_conn`. """ def __init__( self, queue=None, timeout=8, max_conn=200, max_tries=3, judges=None, providers=None, verify_ssl=False, loop=None, **kwargs ): self._loop = loop or asyncio.get_event_loop() self._proxies = queue or asyncio.Queue(loop=self._loop) self._resolver = Resolver(loop=self._loop) self._timeout = timeout self._verify_ssl = verify_ssl self.unique_proxies = {} self._all_tasks = [] self._checker = None self._server = None self._limit = 0 # not limited self._countries = None max_concurrent_conn = kwargs.get('max_concurrent_conn') if max_concurrent_conn: warnings.warn( '`max_concurrent_conn` is deprecated, use `max_conn` instead', DeprecationWarning, ) if isinstance(max_concurrent_conn, asyncio.Semaphore): max_conn = max_concurrent_conn._value else: max_conn = max_concurrent_conn attempts_conn = kwargs.get('attempts_conn') if attempts_conn: warnings.warn( '`attempts_conn` is deprecated, use `max_tries` instead', DeprecationWarning, ) max_tries = attempts_conn # The maximum number of concurrent checking proxies self._on_check = asyncio.Queue(maxsize=max_conn, loop=self._loop) self._max_tries = max_tries self._judges = judges self._providers = [ p if isinstance(p, Provider) else Provider(p) for p in (providers or PROVIDERS) ] try: self._loop.add_signal_handler(signal.SIGINT, self.stop) # add_signal_handler() is not implemented on Win # https://docs.python.org/3.5/library/asyncio-eventloops.html#windows except NotImplementedError: pass async def find( self, *, types=None, data=None, countries=None, post=False, strict=False, dnsbl=None, limit=0, **kwargs ): """Gather and check proxies from providers or from a passed data. :ref:`Example of usage <proxybroker-examples-find>`. :param list types: Types (protocols) that need to be check on support by proxy. Supported: HTTP, HTTPS, SOCKS4, SOCKS5, CONNECT:80, CONNECT:25 And levels of anonymity (HTTP only): Transparent, Anonymous, High :param data: (optional) String or list with proxies. Also can be a file-like object supports `read()` method. Used instead of providers :param list countries: (optional) List of ISO country codes where should be located proxies :param bool post: (optional) Flag indicating use POST instead of GET for requests when checking proxies :param bool strict: (optional) Flag indicating that anonymity levels of types (protocols) supported by a proxy must be equal to the requested types and levels of anonymity. 
By default, strict mode is off and for a successful check is enough to satisfy any one of the requested types :param list dnsbl: (optional) Spam databases for proxy checking. `Wiki <https://en.wikipedia.org/wiki/DNSBL>`_ :param int limit: (optional) The maximum number of proxies :raises ValueError: If :attr:`types` not given. .. versionchanged:: 0.2.0 Added: :attr:`post`, :attr:`strict`, :attr:`dnsbl`. Changed: :attr:`types` is required. """ ip = await self._resolver.get_real_ext_ip() types = _update_types(types) if not types: raise ValueError('`types` is required') self._checker = Checker( judges=self._judges, timeout=self._timeout, verify_ssl=self._verify_ssl, max_tries=self._max_tries, real_ext_ip=ip, types=types, post=post, strict=strict, dnsbl=dnsbl, loop=self._loop, ) self._countries = countries self._limit = limit tasks = [asyncio.ensure_future(self._checker.check_judges())] if data: task = asyncio.ensure_future(self._load(data, check=True)) else: task = asyncio.ensure_future(self._grab(types, check=True)) tasks.append(task) self._all_tasks.extend(tasks) def serve(self, host='127.0.0.1', port=8888, limit=100, **kwargs): """Start a local proxy server. The server distributes incoming requests to a pool of found proxies. When the server receives an incoming request, it chooses the optimal proxy (based on the percentage of errors and average response time) and passes to it the incoming request. In addition to the parameters listed below are also accept all the parameters of the :meth:`.find` method and passed it to gather proxies to a pool. :ref:`Example of usage <proxybroker-examples-server>`. :param str host: (optional) Host of local proxy server :param int port: (optional) Port of local proxy server :param int limit: (optional) When will be found a requested number of working proxies, checking of new proxies will be lazily paused. Checking will be resumed if all the found proxies will be discarded in the process of working with them (see :attr:`max_error_rate`, :attr:`max_resp_time`). And will continue until it finds one working proxy and paused again. The default value is 100 :param int max_tries: (optional) The maximum number of attempts to handle an incoming request. If not specified, it will use the value specified during the creation of the :class:`Broker` object. Attempts can be made with different proxies. The default value is 3 :param int min_req_proxy: (optional) The minimum number of processed requests to estimate the quality of proxy (in accordance with :attr:`max_error_rate` and :attr:`max_resp_time`). The default value is 5 :param int max_error_rate: (optional) The maximum percentage of requests that ended with an error. For example: 0.5 = 50%. If proxy.error_rate exceeds this value, proxy will be removed from the pool. The default value is 0.5 :param int max_resp_time: (optional) The maximum response time in seconds. If proxy.avg_resp_time exceeds this value, proxy will be removed from the pool. The default value is 8 :param bool prefer_connect: (optional) Flag that indicates whether to use the CONNECT method if possible. For example: If is set to True and a proxy supports HTTP proto (GET or POST requests) and CONNECT method, the server will try to use CONNECT method and only after that send the original request. The default value is False :param list http_allowed_codes: (optional) Acceptable HTTP codes returned by proxy on requests. If a proxy return code, not included in this list, it will be considered as a proxy error, not a wrong/unavailable address. 
For example, if a proxy will return a ``404 Not Found`` response - this will be considered as an error of a proxy. Checks only for HTTP protocol, HTTPS not supported at the moment. By default the list is empty and the response code is not verified :param int backlog: (optional) The maximum number of queued connections passed to listen. The default value is 100 :raises ValueError: If :attr:`limit` is less than or equal to zero. Because a parsing of providers will be endless .. versionadded:: 0.2.0 """ if limit <= 0: raise ValueError( 'In serve mode value of the limit cannot be less than or ' 'equal to zero. Otherwise, a parsing of providers will be ' 'endless' ) self._server = Server( host=host, port=port, proxies=self._proxies, timeout=self._timeout, max_tries=kwargs.pop('max_tries', self._max_tries), loop=self._loop, **kwargs ) self._server.start() task = asyncio.ensure_future(self.find(limit=limit, **kwargs)) self._all_tasks.append(task) async def _load(self, data, check=True): """Looking for proxies in the passed data. Transform the passed data from [raw string | file-like object | list] to set {(host, port), ...}: {('192.168.0.1', '80'), } """ log.debug('Load proxies from the raw data') if isinstance(data, io.TextIOWrapper): data = data.read() if isinstance(data, str): data = IPPortPatternLine.findall(data) proxies = set(data) for proxy in proxies: await self._handle(proxy, check=check) await self._on_check.join() self._done() async def _grab(self, types=None, check=False): def _get_tasks(by=MAX_CONCURRENT_PROVIDERS): providers = [ pr for pr in self._providers if not types or not pr.proto or bool(pr.proto & types.keys()) ] while providers: tasks = [ asyncio.ensure_future(pr.get_proxies()) for pr in providers[:by] ] del providers[:by] self._all_tasks.extend(tasks) yield tasks log.debug('Start grabbing proxies') while True: for tasks in _get_tasks(): for task in asyncio.as_completed(tasks): proxies = await task for proxy in proxies: await self._handle(proxy, check=check) log.debug('Grab cycle is complete') if self._server: log.debug('fall asleep for %d seconds' % GRAB_PAUSE) await asyncio.sleep(GRAB_PAUSE) log.debug('awaked') else: break await self._on_check.join() self._done() async def _handle(self, proxy, check=False): try: proxy = await Proxy.create( *proxy, timeout=self._timeout, resolver=self._resolver, verify_ssl=self._verify_ssl, loop=self._loop, ) except (ResolveError, ValueError): return if not self._is_unique(proxy) or not self._geo_passed(proxy): return if check: await self._push_to_check(proxy) else: self._push_to_result(proxy) def _is_unique(self, proxy): if (proxy.host, proxy.port) not in self.unique_proxies: self.unique_proxies[(proxy.host, proxy.port)] = proxy return True else: return False def _geo_passed(self, proxy): if self._countries and (proxy.geo.code not in self._countries): proxy.log('Location of proxy is outside the given countries list') return False else: return True async def _push_to_check(self, proxy): def _task_done(proxy, f): self._on_check.task_done() if not self._on_check.empty(): self._on_check.get_nowait() try: if f.result(): # proxy is working and its types is equal to the requested self._push_to_result(proxy) except asyncio.CancelledError: pass if self._server and not self._proxies.empty() and self._limit <= 0: log.debug( 'pause. proxies: %s; limit: %s' % (self._proxies.qsize(), self._limit) ) await self._proxies.join() log.debug('unpause. 
proxies: %s' % self._proxies.qsize()) await self._on_check.put(None) task = asyncio.ensure_future(self._checker.check(proxy)) task.add_done_callback(partial(_task_done, proxy)) self._all_tasks.append(task) def _push_to_result(self, proxy): log.debug('push to result: %r' % proxy) self._proxies.put_nowait(proxy) self._update_limit() def _update_limit(self): self._limit -= 1 if self._limit == 0 and not self._server: self._done() def stop(self): """Stop all tasks, and the local proxy server if it's running.""" self._done() if self._server: self._server.stop() self._server = None log.info('Stop!') def _done(self): log.debug('called done') while self._all_tasks: task = self._all_tasks.pop() if not task.done(): task.cancel() self._push_to_result(None) log.info('Done! Total found proxies: %d' % len(self.unique_proxies)) def show_stats(self, verbose=False, **kwargs): """Show statistics on the found proxies. Useful for debugging, but you can also use if you're interested. :param verbose: Flag indicating whether to print verbose stats .. deprecated:: 0.2.0 Use :attr:`verbose` instead of :attr:`full`. """ if kwargs: verbose = True warnings.warn( '`full` in `show_stats` is deprecated, ' 'use `verbose` instead.', DeprecationWarning, ) found_proxies = self.unique_proxies.values() num_working_proxies = len([p for p in found_proxies if p.is_working]) if not found_proxies: print('Proxy not found') return errors = Counter() for p in found_proxies: errors.update(p.stat['errors']) proxies_by_type = { 'SOCKS5': [], 'SOCKS4': [], 'HTTPS': [], 'HTTP': [], 'CONNECT:80': [], 'CONNECT:25': [], } stat = { 'Wrong country': [], 'Wrong protocol/anonymity lvl': [], 'Connection success': [], 'Connection timeout': [], 'Connection failed': [], } for p in found_proxies: msgs = ' '.join([l[1] for l in p.get_log()]) full_log = [p] for proto in p.types: proxies_by_type[proto].append(p) if 'Location of proxy' in msgs: stat['Wrong country'].append(p) elif 'Connection: success' in msgs: if 'Protocol or the level' in msgs: stat['Wrong protocol/anonymity lvl'].append(p) stat['Connection success'].append(p) if not verbose: continue events_by_ngtr = defaultdict(list) for ngtr, event, runtime in p.get_log(): events_by_ngtr[ngtr].append((event, runtime)) for ngtr, events in sorted( events_by_ngtr.items(), key=lambda item: item[0] ): full_log.append('\t%s' % ngtr) for event, runtime in events: if event.startswith('Initial connection'): full_log.append('\t\t-------------------') else: full_log.append( '\t\t{:<66} Runtime: {:.2f}'.format( event, runtime ) ) for row in full_log: print(row) elif 'Connection: failed' in msgs: stat['Connection failed'].append(p) else: stat['Connection timeout'].append(p) if verbose: print('Stats:') pprint(stat) print('The number of working proxies: %d' % num_working_proxies) for proto, proxies in proxies_by_type.items(): print('%s (%s): %s' % (proto, len(proxies), proxies)) print('Errors:', errors)
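A minimal usage sketch for `grab`. It follows the queue-plus-consumer pattern implied by the class above (note that `_done` pushes `None` into the result queue as an end-of-stream sentinel); the consumer coroutine `show` is hypothetical:

import asyncio
from proxybroker import Broker

async def show(proxies):
    while True:
        proxy = await proxies.get()
        if proxy is None:   # sentinel pushed by Broker._done()
            break
        print('Found proxy: %s' % proxy)

proxies = asyncio.Queue()
broker = Broker(proxies)
tasks = asyncio.gather(broker.grab(countries=['US', 'GB'], limit=10),
                       show(proxies))
asyncio.get_event_loop().run_until_complete(tasks)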
constverum/ProxyBroker
proxybroker/api.py
Broker.find
python
async def find( self, *, types=None, data=None, countries=None, post=False, strict=False, dnsbl=None, limit=0, **kwargs ): ip = await self._resolver.get_real_ext_ip() types = _update_types(types) if not types: raise ValueError('`types` is required') self._checker = Checker( judges=self._judges, timeout=self._timeout, verify_ssl=self._verify_ssl, max_tries=self._max_tries, real_ext_ip=ip, types=types, post=post, strict=strict, dnsbl=dnsbl, loop=self._loop, ) self._countries = countries self._limit = limit tasks = [asyncio.ensure_future(self._checker.check_judges())] if data: task = asyncio.ensure_future(self._load(data, check=True)) else: task = asyncio.ensure_future(self._grab(types, check=True)) tasks.append(task) self._all_tasks.extend(tasks)
Gather and check proxies from providers or from passed data. :ref:`Example of usage <proxybroker-examples-find>`. :param list types: Types (protocols) that proxies must be checked for support of. Supported: HTTP, HTTPS, SOCKS4, SOCKS5, CONNECT:80, CONNECT:25. And levels of anonymity (HTTP only): Transparent, Anonymous, High :param data: (optional) String or list with proxies. Can also be a file-like object that supports the `read()` method. Used instead of providers :param list countries: (optional) List of ISO country codes in which the proxies should be located :param bool post: (optional) Flag indicating whether to use POST instead of GET for requests when checking proxies :param bool strict: (optional) Flag indicating that the anonymity levels of the types (protocols) supported by a proxy must exactly match the requested types and levels of anonymity. By default, strict mode is off, and satisfying any one of the requested types is enough for a successful check :param list dnsbl: (optional) Spam databases for proxy checking. `Wiki <https://en.wikipedia.org/wiki/DNSBL>`_ :param int limit: (optional) The maximum number of proxies :raises ValueError: If :attr:`types` is not given. .. versionchanged:: 0.2.0 Added: :attr:`post`, :attr:`strict`, :attr:`dnsbl`. Changed: :attr:`types` is required.
train
https://github.com/constverum/ProxyBroker/blob/d21aae8575fc3a95493233ecfd2c7cf47b36b069/proxybroker/api.py#L126-L200
[ "def _update_types(types):\n _types = {}\n if not types:\n return _types\n elif isinstance(types, dict):\n return types\n for tp in types:\n lvl = None\n if isinstance(tp, (list, tuple, set)):\n tp, lvl = tp[0], tp[1]\n if isinstance(lvl, str):\n lvl = lvl.split()\n _types[tp] = lvl\n return _types\n", "async def _load(self, data, check=True):\n \"\"\"Looking for proxies in the passed data.\n\n Transform the passed data from [raw string | file-like object | list]\n to set {(host, port), ...}: {('192.168.0.1', '80'), }\n \"\"\"\n log.debug('Load proxies from the raw data')\n if isinstance(data, io.TextIOWrapper):\n data = data.read()\n if isinstance(data, str):\n data = IPPortPatternLine.findall(data)\n proxies = set(data)\n for proxy in proxies:\n await self._handle(proxy, check=check)\n await self._on_check.join()\n self._done()\n", "async def _grab(self, types=None, check=False):\n def _get_tasks(by=MAX_CONCURRENT_PROVIDERS):\n providers = [\n pr\n for pr in self._providers\n if not types or not pr.proto or bool(pr.proto & types.keys())\n ]\n while providers:\n tasks = [\n asyncio.ensure_future(pr.get_proxies())\n for pr in providers[:by]\n ]\n del providers[:by]\n self._all_tasks.extend(tasks)\n yield tasks\n\n log.debug('Start grabbing proxies')\n while True:\n for tasks in _get_tasks():\n for task in asyncio.as_completed(tasks):\n proxies = await task\n for proxy in proxies:\n await self._handle(proxy, check=check)\n log.debug('Grab cycle is complete')\n if self._server:\n log.debug('fall asleep for %d seconds' % GRAB_PAUSE)\n await asyncio.sleep(GRAB_PAUSE)\n log.debug('awaked')\n else:\n break\n await self._on_check.join()\n self._done()\n", "async def check_judges(self):\n # TODO: need refactoring\n log.debug('Start check judges')\n stime = time.time()\n await asyncio.gather(\n *[j.check(real_ext_ip=self._real_ext_ip) for j in self._judges]\n )\n\n self._judges = [j for j in self._judges if j.is_working]\n log.debug(\n '%d judges added. 
Runtime: %.4f;'\n % (len(self._judges), time.time() - stime)\n )\n\n nojudges = []\n disable_protocols = []\n\n if len(Judge.available['HTTP']) == 0:\n nojudges.append('HTTP')\n disable_protocols.extend(['HTTP', 'CONNECT:80', 'SOCKS4', 'SOCKS5'])\n self._req_http_proto = False\n # for coroutines, which is already waiting\n Judge.ev['HTTP'].set()\n if len(Judge.available['HTTPS']) == 0:\n nojudges.append('HTTPS')\n disable_protocols.append('HTTPS')\n self._req_https_proto = False\n # for coroutines, which is already waiting\n Judge.ev['HTTPS'].set()\n if len(Judge.available['SMTP']) == 0:\n # nojudges.append('SMTP')\n disable_protocols.append('SMTP')\n self._req_smtp_proto = False\n # for coroutines, which is already waiting\n Judge.ev['SMTP'].set()\n\n for proto in disable_protocols:\n if proto in self._ngtrs:\n self._ngtrs.remove(proto)\n\n if nojudges:\n warnings.warn(\n 'Not found judges for the {nojudges} protocol.\\n'\n 'Checking proxy on protocols {disp} is disabled.'.format(\n nojudges=nojudges, disp=disable_protocols\n ),\n UserWarning,\n )\n if self._judges:\n log.debug('Loaded: %d proxy judges' % len(set(self._judges)))\n else:\n RuntimeError('Not found judges')\n", "async def get_real_ext_ip(self):\n \"\"\"Return real external IP address.\"\"\"\n while self._ip_hosts:\n try:\n timeout = aiohttp.ClientTimeout(total=self._timeout)\n async with aiohttp.ClientSession(\n timeout=timeout, loop=self._loop\n ) as session, session.get(self._pop_random_ip_host()) as resp:\n ip = await resp.text()\n except asyncio.TimeoutError:\n pass\n else:\n ip = ip.strip()\n if self.host_is_ip(ip):\n log.debug('Real external IP: %s', ip)\n break\n else:\n raise RuntimeError('Could not get the external IP')\n return ip\n" ]
class Broker: """The Broker. | One broker to rule them all, one broker to find them, | One broker to bring them all and in the darkness bind them. :param asyncio.Queue queue: (optional) Queue of found/checked proxies :param int timeout: (optional) Timeout of a request in seconds :param int max_conn: (optional) The maximum number of concurrent checks of proxies :param int max_tries: (optional) The maximum number of attempts to check a proxy :param list judges: (optional) Urls of pages that show HTTP headers and IP address. Or :class:`~proxybroker.judge.Judge` objects :param list providers: (optional) Urls of pages where to find proxies. Or :class:`~proxybroker.providers.Provider` objects :param bool verify_ssl: (optional) Flag indicating whether to check the SSL certificates. Set to True to check ssl certifications :param loop: (optional) asyncio compatible event loop .. deprecated:: 0.2.0 Use :attr:`max_conn` and :attr:`max_tries` instead of :attr:`max_concurrent_conn` and :attr:`attempts_conn`. """ def __init__( self, queue=None, timeout=8, max_conn=200, max_tries=3, judges=None, providers=None, verify_ssl=False, loop=None, **kwargs ): self._loop = loop or asyncio.get_event_loop() self._proxies = queue or asyncio.Queue(loop=self._loop) self._resolver = Resolver(loop=self._loop) self._timeout = timeout self._verify_ssl = verify_ssl self.unique_proxies = {} self._all_tasks = [] self._checker = None self._server = None self._limit = 0 # not limited self._countries = None max_concurrent_conn = kwargs.get('max_concurrent_conn') if max_concurrent_conn: warnings.warn( '`max_concurrent_conn` is deprecated, use `max_conn` instead', DeprecationWarning, ) if isinstance(max_concurrent_conn, asyncio.Semaphore): max_conn = max_concurrent_conn._value else: max_conn = max_concurrent_conn attempts_conn = kwargs.get('attempts_conn') if attempts_conn: warnings.warn( '`attempts_conn` is deprecated, use `max_tries` instead', DeprecationWarning, ) max_tries = attempts_conn # The maximum number of concurrent checking proxies self._on_check = asyncio.Queue(maxsize=max_conn, loop=self._loop) self._max_tries = max_tries self._judges = judges self._providers = [ p if isinstance(p, Provider) else Provider(p) for p in (providers or PROVIDERS) ] try: self._loop.add_signal_handler(signal.SIGINT, self.stop) # add_signal_handler() is not implemented on Win # https://docs.python.org/3.5/library/asyncio-eventloops.html#windows except NotImplementedError: pass async def grab(self, *, countries=None, limit=0): """Gather proxies from the providers without checking. :param list countries: (optional) List of ISO country codes where should be located proxies :param int limit: (optional) The maximum number of proxies :ref:`Example of usage <proxybroker-examples-grab>`. """ self._countries = countries self._limit = limit task = asyncio.ensure_future(self._grab(check=False)) self._all_tasks.append(task) def serve(self, host='127.0.0.1', port=8888, limit=100, **kwargs): """Start a local proxy server. The server distributes incoming requests to a pool of found proxies. When the server receives an incoming request, it chooses the optimal proxy (based on the percentage of errors and average response time) and passes to it the incoming request. In addition to the parameters listed below are also accept all the parameters of the :meth:`.find` method and passed it to gather proxies to a pool. :ref:`Example of usage <proxybroker-examples-server>`. 
:param str host: (optional) Host of local proxy server :param int port: (optional) Port of local proxy server :param int limit: (optional) When will be found a requested number of working proxies, checking of new proxies will be lazily paused. Checking will be resumed if all the found proxies will be discarded in the process of working with them (see :attr:`max_error_rate`, :attr:`max_resp_time`). And will continue until it finds one working proxy and paused again. The default value is 100 :param int max_tries: (optional) The maximum number of attempts to handle an incoming request. If not specified, it will use the value specified during the creation of the :class:`Broker` object. Attempts can be made with different proxies. The default value is 3 :param int min_req_proxy: (optional) The minimum number of processed requests to estimate the quality of proxy (in accordance with :attr:`max_error_rate` and :attr:`max_resp_time`). The default value is 5 :param int max_error_rate: (optional) The maximum percentage of requests that ended with an error. For example: 0.5 = 50%. If proxy.error_rate exceeds this value, proxy will be removed from the pool. The default value is 0.5 :param int max_resp_time: (optional) The maximum response time in seconds. If proxy.avg_resp_time exceeds this value, proxy will be removed from the pool. The default value is 8 :param bool prefer_connect: (optional) Flag that indicates whether to use the CONNECT method if possible. For example: If is set to True and a proxy supports HTTP proto (GET or POST requests) and CONNECT method, the server will try to use CONNECT method and only after that send the original request. The default value is False :param list http_allowed_codes: (optional) Acceptable HTTP codes returned by proxy on requests. If a proxy return code, not included in this list, it will be considered as a proxy error, not a wrong/unavailable address. For example, if a proxy will return a ``404 Not Found`` response - this will be considered as an error of a proxy. Checks only for HTTP protocol, HTTPS not supported at the moment. By default the list is empty and the response code is not verified :param int backlog: (optional) The maximum number of queued connections passed to listen. The default value is 100 :raises ValueError: If :attr:`limit` is less than or equal to zero. Because a parsing of providers will be endless .. versionadded:: 0.2.0 """ if limit <= 0: raise ValueError( 'In serve mode value of the limit cannot be less than or ' 'equal to zero. Otherwise, a parsing of providers will be ' 'endless' ) self._server = Server( host=host, port=port, proxies=self._proxies, timeout=self._timeout, max_tries=kwargs.pop('max_tries', self._max_tries), loop=self._loop, **kwargs ) self._server.start() task = asyncio.ensure_future(self.find(limit=limit, **kwargs)) self._all_tasks.append(task) async def _load(self, data, check=True): """Looking for proxies in the passed data. 
Transform the passed data from [raw string | file-like object | list] to set {(host, port), ...}: {('192.168.0.1', '80'), } """ log.debug('Load proxies from the raw data') if isinstance(data, io.TextIOWrapper): data = data.read() if isinstance(data, str): data = IPPortPatternLine.findall(data) proxies = set(data) for proxy in proxies: await self._handle(proxy, check=check) await self._on_check.join() self._done() async def _grab(self, types=None, check=False): def _get_tasks(by=MAX_CONCURRENT_PROVIDERS): providers = [ pr for pr in self._providers if not types or not pr.proto or bool(pr.proto & types.keys()) ] while providers: tasks = [ asyncio.ensure_future(pr.get_proxies()) for pr in providers[:by] ] del providers[:by] self._all_tasks.extend(tasks) yield tasks log.debug('Start grabbing proxies') while True: for tasks in _get_tasks(): for task in asyncio.as_completed(tasks): proxies = await task for proxy in proxies: await self._handle(proxy, check=check) log.debug('Grab cycle is complete') if self._server: log.debug('fall asleep for %d seconds' % GRAB_PAUSE) await asyncio.sleep(GRAB_PAUSE) log.debug('awaked') else: break await self._on_check.join() self._done() async def _handle(self, proxy, check=False): try: proxy = await Proxy.create( *proxy, timeout=self._timeout, resolver=self._resolver, verify_ssl=self._verify_ssl, loop=self._loop, ) except (ResolveError, ValueError): return if not self._is_unique(proxy) or not self._geo_passed(proxy): return if check: await self._push_to_check(proxy) else: self._push_to_result(proxy) def _is_unique(self, proxy): if (proxy.host, proxy.port) not in self.unique_proxies: self.unique_proxies[(proxy.host, proxy.port)] = proxy return True else: return False def _geo_passed(self, proxy): if self._countries and (proxy.geo.code not in self._countries): proxy.log('Location of proxy is outside the given countries list') return False else: return True async def _push_to_check(self, proxy): def _task_done(proxy, f): self._on_check.task_done() if not self._on_check.empty(): self._on_check.get_nowait() try: if f.result(): # proxy is working and its types is equal to the requested self._push_to_result(proxy) except asyncio.CancelledError: pass if self._server and not self._proxies.empty() and self._limit <= 0: log.debug( 'pause. proxies: %s; limit: %s' % (self._proxies.qsize(), self._limit) ) await self._proxies.join() log.debug('unpause. proxies: %s' % self._proxies.qsize()) await self._on_check.put(None) task = asyncio.ensure_future(self._checker.check(proxy)) task.add_done_callback(partial(_task_done, proxy)) self._all_tasks.append(task) def _push_to_result(self, proxy): log.debug('push to result: %r' % proxy) self._proxies.put_nowait(proxy) self._update_limit() def _update_limit(self): self._limit -= 1 if self._limit == 0 and not self._server: self._done() def stop(self): """Stop all tasks, and the local proxy server if it's running.""" self._done() if self._server: self._server.stop() self._server = None log.info('Stop!') def _done(self): log.debug('called done') while self._all_tasks: task = self._all_tasks.pop() if not task.done(): task.cancel() self._push_to_result(None) log.info('Done! Total found proxies: %d' % len(self.unique_proxies)) def show_stats(self, verbose=False, **kwargs): """Show statistics on the found proxies. Useful for debugging, but you can also use if you're interested. :param verbose: Flag indicating whether to print verbose stats .. deprecated:: 0.2.0 Use :attr:`verbose` instead of :attr:`full`. 
""" if kwargs: verbose = True warnings.warn( '`full` in `show_stats` is deprecated, ' 'use `verbose` instead.', DeprecationWarning, ) found_proxies = self.unique_proxies.values() num_working_proxies = len([p for p in found_proxies if p.is_working]) if not found_proxies: print('Proxy not found') return errors = Counter() for p in found_proxies: errors.update(p.stat['errors']) proxies_by_type = { 'SOCKS5': [], 'SOCKS4': [], 'HTTPS': [], 'HTTP': [], 'CONNECT:80': [], 'CONNECT:25': [], } stat = { 'Wrong country': [], 'Wrong protocol/anonymity lvl': [], 'Connection success': [], 'Connection timeout': [], 'Connection failed': [], } for p in found_proxies: msgs = ' '.join([l[1] for l in p.get_log()]) full_log = [p] for proto in p.types: proxies_by_type[proto].append(p) if 'Location of proxy' in msgs: stat['Wrong country'].append(p) elif 'Connection: success' in msgs: if 'Protocol or the level' in msgs: stat['Wrong protocol/anonymity lvl'].append(p) stat['Connection success'].append(p) if not verbose: continue events_by_ngtr = defaultdict(list) for ngtr, event, runtime in p.get_log(): events_by_ngtr[ngtr].append((event, runtime)) for ngtr, events in sorted( events_by_ngtr.items(), key=lambda item: item[0] ): full_log.append('\t%s' % ngtr) for event, runtime in events: if event.startswith('Initial connection'): full_log.append('\t\t-------------------') else: full_log.append( '\t\t{:<66} Runtime: {:.2f}'.format( event, runtime ) ) for row in full_log: print(row) elif 'Connection: failed' in msgs: stat['Connection failed'].append(p) else: stat['Connection timeout'].append(p) if verbose: print('Stats:') pprint(stat) print('The number of working proxies: %d' % num_working_proxies) for proto, proxies in proxies_by_type.items(): print('%s (%s): %s' % (proto, len(proxies), proxies)) print('Errors:', errors)
constverum/ProxyBroker
proxybroker/api.py
Broker.serve
python
def serve(self, host='127.0.0.1', port=8888, limit=100, **kwargs): if limit <= 0: raise ValueError( 'In serve mode value of the limit cannot be less than or ' 'equal to zero. Otherwise, a parsing of providers will be ' 'endless' ) self._server = Server( host=host, port=port, proxies=self._proxies, timeout=self._timeout, max_tries=kwargs.pop('max_tries', self._max_tries), loop=self._loop, **kwargs ) self._server.start() task = asyncio.ensure_future(self.find(limit=limit, **kwargs)) self._all_tasks.append(task)
Start a local proxy server. The server distributes incoming requests to a pool of found proxies. When the server receives an incoming request, it chooses the optimal proxy (based on the percentage of errors and the average response time) and forwards the request to it. In addition to the parameters listed below, it also accepts all the parameters of the :meth:`.find` method and passes them on to gather proxies into the pool. :ref:`Example of usage <proxybroker-examples-server>`. :param str host: (optional) Host of the local proxy server :param int port: (optional) Port of the local proxy server :param int limit: (optional) When the requested number of working proxies has been found, the checking of new proxies is lazily paused. Checking resumes if all the found proxies are discarded while working with them (see :attr:`max_error_rate`, :attr:`max_resp_time`), and continues until one more working proxy is found, after which it pauses again. The default value is 100 :param int max_tries: (optional) The maximum number of attempts to handle an incoming request. If not specified, the value given when the :class:`Broker` object was created is used. Attempts can be made with different proxies. The default value is 3 :param int min_req_proxy: (optional) The minimum number of processed requests needed to estimate the quality of a proxy (in accordance with :attr:`max_error_rate` and :attr:`max_resp_time`). The default value is 5 :param int max_error_rate: (optional) The maximum fraction of requests that ended with an error. For example: 0.5 = 50%. If proxy.error_rate exceeds this value, the proxy is removed from the pool. The default value is 0.5 :param int max_resp_time: (optional) The maximum response time in seconds. If proxy.avg_resp_time exceeds this value, the proxy is removed from the pool. The default value is 8 :param bool prefer_connect: (optional) Flag that indicates whether to use the CONNECT method if possible. For example: if set to True and a proxy supports the HTTP protocol (GET or POST requests) as well as the CONNECT method, the server will try the CONNECT method first and only then send the original request. The default value is False :param list http_allowed_codes: (optional) Acceptable HTTP codes returned by a proxy on requests. If a proxy returns a code not included in this list, it is treated as a proxy error, not as a wrong/unavailable address. For example, a ``404 Not Found`` response from a proxy is treated as an error of the proxy. Checked only for the HTTP protocol; HTTPS is not supported at the moment. By default the list is empty and the response code is not verified :param int backlog: (optional) The maximum number of queued connections passed to listen. The default value is 100 :raises ValueError: If :attr:`limit` is less than or equal to zero, because the parsing of providers would then be endless .. versionadded:: 0.2.0
train
https://github.com/constverum/ProxyBroker/blob/d21aae8575fc3a95493233ecfd2c7cf47b36b069/proxybroker/api.py#L202-L288
[ "async def find(\n self,\n *,\n types=None,\n data=None,\n countries=None,\n post=False,\n strict=False,\n dnsbl=None,\n limit=0,\n **kwargs\n):\n \"\"\"Gather and check proxies from providers or from a passed data.\n\n :ref:`Example of usage <proxybroker-examples-find>`.\n\n :param list types:\n Types (protocols) that need to be check on support by proxy.\n Supported: HTTP, HTTPS, SOCKS4, SOCKS5, CONNECT:80, CONNECT:25\n And levels of anonymity (HTTP only): Transparent, Anonymous, High\n :param data:\n (optional) String or list with proxies. Also can be a file-like\n object supports `read()` method. Used instead of providers\n :param list countries:\n (optional) List of ISO country codes where should be located\n proxies\n :param bool post:\n (optional) Flag indicating use POST instead of GET for requests\n when checking proxies\n :param bool strict:\n (optional) Flag indicating that anonymity levels of types\n (protocols) supported by a proxy must be equal to the requested\n types and levels of anonymity. By default, strict mode is off and\n for a successful check is enough to satisfy any one of the\n requested types\n :param list dnsbl:\n (optional) Spam databases for proxy checking.\n `Wiki <https://en.wikipedia.org/wiki/DNSBL>`_\n :param int limit: (optional) The maximum number of proxies\n\n :raises ValueError:\n If :attr:`types` not given.\n\n .. versionchanged:: 0.2.0\n Added: :attr:`post`, :attr:`strict`, :attr:`dnsbl`.\n Changed: :attr:`types` is required.\n \"\"\"\n ip = await self._resolver.get_real_ext_ip()\n types = _update_types(types)\n\n if not types:\n raise ValueError('`types` is required')\n\n self._checker = Checker(\n judges=self._judges,\n timeout=self._timeout,\n verify_ssl=self._verify_ssl,\n max_tries=self._max_tries,\n real_ext_ip=ip,\n types=types,\n post=post,\n strict=strict,\n dnsbl=dnsbl,\n loop=self._loop,\n )\n self._countries = countries\n self._limit = limit\n\n tasks = [asyncio.ensure_future(self._checker.check_judges())]\n if data:\n task = asyncio.ensure_future(self._load(data, check=True))\n else:\n task = asyncio.ensure_future(self._grab(types, check=True))\n tasks.append(task)\n self._all_tasks.extend(tasks)\n", "def start(self):\n srv = asyncio.start_server(\n self._accept,\n host=self.host,\n port=self.port,\n backlog=self._backlog,\n loop=self._loop,\n )\n self._server = self._loop.run_until_complete(srv)\n\n log.info(\n 'Listening established on {0}'.format(\n self._server.sockets[0].getsockname()\n )\n )\n" ]
class Broker: """The Broker. | One broker to rule them all, one broker to find them, | One broker to bring them all and in the darkness bind them. :param asyncio.Queue queue: (optional) Queue of found/checked proxies :param int timeout: (optional) Timeout of a request in seconds :param int max_conn: (optional) The maximum number of concurrent checks of proxies :param int max_tries: (optional) The maximum number of attempts to check a proxy :param list judges: (optional) Urls of pages that show HTTP headers and IP address. Or :class:`~proxybroker.judge.Judge` objects :param list providers: (optional) Urls of pages where to find proxies. Or :class:`~proxybroker.providers.Provider` objects :param bool verify_ssl: (optional) Flag indicating whether to check the SSL certificates. Set to True to check ssl certifications :param loop: (optional) asyncio compatible event loop .. deprecated:: 0.2.0 Use :attr:`max_conn` and :attr:`max_tries` instead of :attr:`max_concurrent_conn` and :attr:`attempts_conn`. """ def __init__( self, queue=None, timeout=8, max_conn=200, max_tries=3, judges=None, providers=None, verify_ssl=False, loop=None, **kwargs ): self._loop = loop or asyncio.get_event_loop() self._proxies = queue or asyncio.Queue(loop=self._loop) self._resolver = Resolver(loop=self._loop) self._timeout = timeout self._verify_ssl = verify_ssl self.unique_proxies = {} self._all_tasks = [] self._checker = None self._server = None self._limit = 0 # not limited self._countries = None max_concurrent_conn = kwargs.get('max_concurrent_conn') if max_concurrent_conn: warnings.warn( '`max_concurrent_conn` is deprecated, use `max_conn` instead', DeprecationWarning, ) if isinstance(max_concurrent_conn, asyncio.Semaphore): max_conn = max_concurrent_conn._value else: max_conn = max_concurrent_conn attempts_conn = kwargs.get('attempts_conn') if attempts_conn: warnings.warn( '`attempts_conn` is deprecated, use `max_tries` instead', DeprecationWarning, ) max_tries = attempts_conn # The maximum number of concurrent checking proxies self._on_check = asyncio.Queue(maxsize=max_conn, loop=self._loop) self._max_tries = max_tries self._judges = judges self._providers = [ p if isinstance(p, Provider) else Provider(p) for p in (providers or PROVIDERS) ] try: self._loop.add_signal_handler(signal.SIGINT, self.stop) # add_signal_handler() is not implemented on Win # https://docs.python.org/3.5/library/asyncio-eventloops.html#windows except NotImplementedError: pass async def grab(self, *, countries=None, limit=0): """Gather proxies from the providers without checking. :param list countries: (optional) List of ISO country codes where should be located proxies :param int limit: (optional) The maximum number of proxies :ref:`Example of usage <proxybroker-examples-grab>`. """ self._countries = countries self._limit = limit task = asyncio.ensure_future(self._grab(check=False)) self._all_tasks.append(task) async def find( self, *, types=None, data=None, countries=None, post=False, strict=False, dnsbl=None, limit=0, **kwargs ): """Gather and check proxies from providers or from a passed data. :ref:`Example of usage <proxybroker-examples-find>`. :param list types: Types (protocols) that need to be check on support by proxy. Supported: HTTP, HTTPS, SOCKS4, SOCKS5, CONNECT:80, CONNECT:25 And levels of anonymity (HTTP only): Transparent, Anonymous, High :param data: (optional) String or list with proxies. Also can be a file-like object supports `read()` method. 
Used instead of providers :param list countries: (optional) List of ISO country codes where should be located proxies :param bool post: (optional) Flag indicating use POST instead of GET for requests when checking proxies :param bool strict: (optional) Flag indicating that anonymity levels of types (protocols) supported by a proxy must be equal to the requested types and levels of anonymity. By default, strict mode is off and for a successful check is enough to satisfy any one of the requested types :param list dnsbl: (optional) Spam databases for proxy checking. `Wiki <https://en.wikipedia.org/wiki/DNSBL>`_ :param int limit: (optional) The maximum number of proxies :raises ValueError: If :attr:`types` not given. .. versionchanged:: 0.2.0 Added: :attr:`post`, :attr:`strict`, :attr:`dnsbl`. Changed: :attr:`types` is required. """ ip = await self._resolver.get_real_ext_ip() types = _update_types(types) if not types: raise ValueError('`types` is required') self._checker = Checker( judges=self._judges, timeout=self._timeout, verify_ssl=self._verify_ssl, max_tries=self._max_tries, real_ext_ip=ip, types=types, post=post, strict=strict, dnsbl=dnsbl, loop=self._loop, ) self._countries = countries self._limit = limit tasks = [asyncio.ensure_future(self._checker.check_judges())] if data: task = asyncio.ensure_future(self._load(data, check=True)) else: task = asyncio.ensure_future(self._grab(types, check=True)) tasks.append(task) self._all_tasks.extend(tasks) async def _load(self, data, check=True): """Looking for proxies in the passed data. Transform the passed data from [raw string | file-like object | list] to set {(host, port), ...}: {('192.168.0.1', '80'), } """ log.debug('Load proxies from the raw data') if isinstance(data, io.TextIOWrapper): data = data.read() if isinstance(data, str): data = IPPortPatternLine.findall(data) proxies = set(data) for proxy in proxies: await self._handle(proxy, check=check) await self._on_check.join() self._done() async def _grab(self, types=None, check=False): def _get_tasks(by=MAX_CONCURRENT_PROVIDERS): providers = [ pr for pr in self._providers if not types or not pr.proto or bool(pr.proto & types.keys()) ] while providers: tasks = [ asyncio.ensure_future(pr.get_proxies()) for pr in providers[:by] ] del providers[:by] self._all_tasks.extend(tasks) yield tasks log.debug('Start grabbing proxies') while True: for tasks in _get_tasks(): for task in asyncio.as_completed(tasks): proxies = await task for proxy in proxies: await self._handle(proxy, check=check) log.debug('Grab cycle is complete') if self._server: log.debug('fall asleep for %d seconds' % GRAB_PAUSE) await asyncio.sleep(GRAB_PAUSE) log.debug('awaked') else: break await self._on_check.join() self._done() async def _handle(self, proxy, check=False): try: proxy = await Proxy.create( *proxy, timeout=self._timeout, resolver=self._resolver, verify_ssl=self._verify_ssl, loop=self._loop, ) except (ResolveError, ValueError): return if not self._is_unique(proxy) or not self._geo_passed(proxy): return if check: await self._push_to_check(proxy) else: self._push_to_result(proxy) def _is_unique(self, proxy): if (proxy.host, proxy.port) not in self.unique_proxies: self.unique_proxies[(proxy.host, proxy.port)] = proxy return True else: return False def _geo_passed(self, proxy): if self._countries and (proxy.geo.code not in self._countries): proxy.log('Location of proxy is outside the given countries list') return False else: return True async def _push_to_check(self, proxy): def _task_done(proxy, f): 
self._on_check.task_done() if not self._on_check.empty(): self._on_check.get_nowait() try: if f.result(): # proxy is working and its types is equal to the requested self._push_to_result(proxy) except asyncio.CancelledError: pass if self._server and not self._proxies.empty() and self._limit <= 0: log.debug( 'pause. proxies: %s; limit: %s' % (self._proxies.qsize(), self._limit) ) await self._proxies.join() log.debug('unpause. proxies: %s' % self._proxies.qsize()) await self._on_check.put(None) task = asyncio.ensure_future(self._checker.check(proxy)) task.add_done_callback(partial(_task_done, proxy)) self._all_tasks.append(task) def _push_to_result(self, proxy): log.debug('push to result: %r' % proxy) self._proxies.put_nowait(proxy) self._update_limit() def _update_limit(self): self._limit -= 1 if self._limit == 0 and not self._server: self._done() def stop(self): """Stop all tasks, and the local proxy server if it's running.""" self._done() if self._server: self._server.stop() self._server = None log.info('Stop!') def _done(self): log.debug('called done') while self._all_tasks: task = self._all_tasks.pop() if not task.done(): task.cancel() self._push_to_result(None) log.info('Done! Total found proxies: %d' % len(self.unique_proxies)) def show_stats(self, verbose=False, **kwargs): """Show statistics on the found proxies. Useful for debugging, but you can also use if you're interested. :param verbose: Flag indicating whether to print verbose stats .. deprecated:: 0.2.0 Use :attr:`verbose` instead of :attr:`full`. """ if kwargs: verbose = True warnings.warn( '`full` in `show_stats` is deprecated, ' 'use `verbose` instead.', DeprecationWarning, ) found_proxies = self.unique_proxies.values() num_working_proxies = len([p for p in found_proxies if p.is_working]) if not found_proxies: print('Proxy not found') return errors = Counter() for p in found_proxies: errors.update(p.stat['errors']) proxies_by_type = { 'SOCKS5': [], 'SOCKS4': [], 'HTTPS': [], 'HTTP': [], 'CONNECT:80': [], 'CONNECT:25': [], } stat = { 'Wrong country': [], 'Wrong protocol/anonymity lvl': [], 'Connection success': [], 'Connection timeout': [], 'Connection failed': [], } for p in found_proxies: msgs = ' '.join([l[1] for l in p.get_log()]) full_log = [p] for proto in p.types: proxies_by_type[proto].append(p) if 'Location of proxy' in msgs: stat['Wrong country'].append(p) elif 'Connection: success' in msgs: if 'Protocol or the level' in msgs: stat['Wrong protocol/anonymity lvl'].append(p) stat['Connection success'].append(p) if not verbose: continue events_by_ngtr = defaultdict(list) for ngtr, event, runtime in p.get_log(): events_by_ngtr[ngtr].append((event, runtime)) for ngtr, events in sorted( events_by_ngtr.items(), key=lambda item: item[0] ): full_log.append('\t%s' % ngtr) for event, runtime in events: if event.startswith('Initial connection'): full_log.append('\t\t-------------------') else: full_log.append( '\t\t{:<66} Runtime: {:.2f}'.format( event, runtime ) ) for row in full_log: print(row) elif 'Connection: failed' in msgs: stat['Connection failed'].append(p) else: stat['Connection timeout'].append(p) if verbose: print('Stats:') pprint(stat) print('The number of working proxies: %d' % num_working_proxies) for proto, proxies in proxies_by_type.items(): print('%s (%s): %s' % (proto, len(proxies), proxies)) print('Errors:', errors)
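A hedged usage sketch for the `Broker.serve` record above. It only uses names documented in the record (`Broker`, `serve`, `stop`, and the `types`/`limit` parameters forwarded to `find`); the port, types, limit, and the 60-second time budget are arbitrary demo choices, not values from the source.

import asyncio

from proxybroker import Broker


def main():
    broker = Broker()
    # serve() starts the local rotating-proxy server and internally
    # schedules find() to keep the pool filled (per the record above).
    broker.serve(host='127.0.0.1', port=8888, types=['HTTP', 'HTTPS'], limit=10)

    loop = asyncio.get_event_loop()
    loop.call_later(60, loop.stop)  # arbitrary demo budget (assumption)
    try:
        loop.run_forever()
    finally:
        broker.stop()  # cancels all tasks and stops the server


if __name__ == '__main__':
    main()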
constverum/ProxyBroker
proxybroker/api.py
Broker._load
python
async def _load(self, data, check=True):
    log.debug('Load proxies from the raw data')
    if isinstance(data, io.TextIOWrapper):
        data = data.read()
    if isinstance(data, str):
        data = IPPortPatternLine.findall(data)
    proxies = set(data)
    for proxy in proxies:
        await self._handle(proxy, check=check)
    await self._on_check.join()
    self._done()
Look for proxies in the passed data.

Transform the passed data from [raw string | file-like object | list] to a set of (host, port) tuples: {('192.168.0.1', '80'), ...}
train
https://github.com/constverum/ProxyBroker/blob/d21aae8575fc3a95493233ecfd2c7cf47b36b069/proxybroker/api.py#L290-L305
null
class Broker: """The Broker. | One broker to rule them all, one broker to find them, | One broker to bring them all and in the darkness bind them. :param asyncio.Queue queue: (optional) Queue of found/checked proxies :param int timeout: (optional) Timeout of a request in seconds :param int max_conn: (optional) The maximum number of concurrent checks of proxies :param int max_tries: (optional) The maximum number of attempts to check a proxy :param list judges: (optional) Urls of pages that show HTTP headers and IP address. Or :class:`~proxybroker.judge.Judge` objects :param list providers: (optional) Urls of pages where to find proxies. Or :class:`~proxybroker.providers.Provider` objects :param bool verify_ssl: (optional) Flag indicating whether to check the SSL certificates. Set to True to check ssl certifications :param loop: (optional) asyncio compatible event loop .. deprecated:: 0.2.0 Use :attr:`max_conn` and :attr:`max_tries` instead of :attr:`max_concurrent_conn` and :attr:`attempts_conn`. """ def __init__( self, queue=None, timeout=8, max_conn=200, max_tries=3, judges=None, providers=None, verify_ssl=False, loop=None, **kwargs ): self._loop = loop or asyncio.get_event_loop() self._proxies = queue or asyncio.Queue(loop=self._loop) self._resolver = Resolver(loop=self._loop) self._timeout = timeout self._verify_ssl = verify_ssl self.unique_proxies = {} self._all_tasks = [] self._checker = None self._server = None self._limit = 0 # not limited self._countries = None max_concurrent_conn = kwargs.get('max_concurrent_conn') if max_concurrent_conn: warnings.warn( '`max_concurrent_conn` is deprecated, use `max_conn` instead', DeprecationWarning, ) if isinstance(max_concurrent_conn, asyncio.Semaphore): max_conn = max_concurrent_conn._value else: max_conn = max_concurrent_conn attempts_conn = kwargs.get('attempts_conn') if attempts_conn: warnings.warn( '`attempts_conn` is deprecated, use `max_tries` instead', DeprecationWarning, ) max_tries = attempts_conn # The maximum number of concurrent checking proxies self._on_check = asyncio.Queue(maxsize=max_conn, loop=self._loop) self._max_tries = max_tries self._judges = judges self._providers = [ p if isinstance(p, Provider) else Provider(p) for p in (providers or PROVIDERS) ] try: self._loop.add_signal_handler(signal.SIGINT, self.stop) # add_signal_handler() is not implemented on Win # https://docs.python.org/3.5/library/asyncio-eventloops.html#windows except NotImplementedError: pass async def grab(self, *, countries=None, limit=0): """Gather proxies from the providers without checking. :param list countries: (optional) List of ISO country codes where should be located proxies :param int limit: (optional) The maximum number of proxies :ref:`Example of usage <proxybroker-examples-grab>`. """ self._countries = countries self._limit = limit task = asyncio.ensure_future(self._grab(check=False)) self._all_tasks.append(task) async def find( self, *, types=None, data=None, countries=None, post=False, strict=False, dnsbl=None, limit=0, **kwargs ): """Gather and check proxies from providers or from a passed data. :ref:`Example of usage <proxybroker-examples-find>`. :param list types: Types (protocols) that need to be check on support by proxy. Supported: HTTP, HTTPS, SOCKS4, SOCKS5, CONNECT:80, CONNECT:25 And levels of anonymity (HTTP only): Transparent, Anonymous, High :param data: (optional) String or list with proxies. Also can be a file-like object supports `read()` method. 
Used instead of providers :param list countries: (optional) List of ISO country codes where should be located proxies :param bool post: (optional) Flag indicating use POST instead of GET for requests when checking proxies :param bool strict: (optional) Flag indicating that anonymity levels of types (protocols) supported by a proxy must be equal to the requested types and levels of anonymity. By default, strict mode is off and for a successful check is enough to satisfy any one of the requested types :param list dnsbl: (optional) Spam databases for proxy checking. `Wiki <https://en.wikipedia.org/wiki/DNSBL>`_ :param int limit: (optional) The maximum number of proxies :raises ValueError: If :attr:`types` not given. .. versionchanged:: 0.2.0 Added: :attr:`post`, :attr:`strict`, :attr:`dnsbl`. Changed: :attr:`types` is required. """ ip = await self._resolver.get_real_ext_ip() types = _update_types(types) if not types: raise ValueError('`types` is required') self._checker = Checker( judges=self._judges, timeout=self._timeout, verify_ssl=self._verify_ssl, max_tries=self._max_tries, real_ext_ip=ip, types=types, post=post, strict=strict, dnsbl=dnsbl, loop=self._loop, ) self._countries = countries self._limit = limit tasks = [asyncio.ensure_future(self._checker.check_judges())] if data: task = asyncio.ensure_future(self._load(data, check=True)) else: task = asyncio.ensure_future(self._grab(types, check=True)) tasks.append(task) self._all_tasks.extend(tasks) def serve(self, host='127.0.0.1', port=8888, limit=100, **kwargs): """Start a local proxy server. The server distributes incoming requests to a pool of found proxies. When the server receives an incoming request, it chooses the optimal proxy (based on the percentage of errors and average response time) and passes to it the incoming request. In addition to the parameters listed below are also accept all the parameters of the :meth:`.find` method and passed it to gather proxies to a pool. :ref:`Example of usage <proxybroker-examples-server>`. :param str host: (optional) Host of local proxy server :param int port: (optional) Port of local proxy server :param int limit: (optional) When will be found a requested number of working proxies, checking of new proxies will be lazily paused. Checking will be resumed if all the found proxies will be discarded in the process of working with them (see :attr:`max_error_rate`, :attr:`max_resp_time`). And will continue until it finds one working proxy and paused again. The default value is 100 :param int max_tries: (optional) The maximum number of attempts to handle an incoming request. If not specified, it will use the value specified during the creation of the :class:`Broker` object. Attempts can be made with different proxies. The default value is 3 :param int min_req_proxy: (optional) The minimum number of processed requests to estimate the quality of proxy (in accordance with :attr:`max_error_rate` and :attr:`max_resp_time`). The default value is 5 :param int max_error_rate: (optional) The maximum percentage of requests that ended with an error. For example: 0.5 = 50%. If proxy.error_rate exceeds this value, proxy will be removed from the pool. The default value is 0.5 :param int max_resp_time: (optional) The maximum response time in seconds. If proxy.avg_resp_time exceeds this value, proxy will be removed from the pool. The default value is 8 :param bool prefer_connect: (optional) Flag that indicates whether to use the CONNECT method if possible. 
For example: If is set to True and a proxy supports HTTP proto (GET or POST requests) and CONNECT method, the server will try to use CONNECT method and only after that send the original request. The default value is False :param list http_allowed_codes: (optional) Acceptable HTTP codes returned by proxy on requests. If a proxy return code, not included in this list, it will be considered as a proxy error, not a wrong/unavailable address. For example, if a proxy will return a ``404 Not Found`` response - this will be considered as an error of a proxy. Checks only for HTTP protocol, HTTPS not supported at the moment. By default the list is empty and the response code is not verified :param int backlog: (optional) The maximum number of queued connections passed to listen. The default value is 100 :raises ValueError: If :attr:`limit` is less than or equal to zero. Because a parsing of providers will be endless .. versionadded:: 0.2.0 """ if limit <= 0: raise ValueError( 'In serve mode value of the limit cannot be less than or ' 'equal to zero. Otherwise, a parsing of providers will be ' 'endless' ) self._server = Server( host=host, port=port, proxies=self._proxies, timeout=self._timeout, max_tries=kwargs.pop('max_tries', self._max_tries), loop=self._loop, **kwargs ) self._server.start() task = asyncio.ensure_future(self.find(limit=limit, **kwargs)) self._all_tasks.append(task) async def _grab(self, types=None, check=False): def _get_tasks(by=MAX_CONCURRENT_PROVIDERS): providers = [ pr for pr in self._providers if not types or not pr.proto or bool(pr.proto & types.keys()) ] while providers: tasks = [ asyncio.ensure_future(pr.get_proxies()) for pr in providers[:by] ] del providers[:by] self._all_tasks.extend(tasks) yield tasks log.debug('Start grabbing proxies') while True: for tasks in _get_tasks(): for task in asyncio.as_completed(tasks): proxies = await task for proxy in proxies: await self._handle(proxy, check=check) log.debug('Grab cycle is complete') if self._server: log.debug('fall asleep for %d seconds' % GRAB_PAUSE) await asyncio.sleep(GRAB_PAUSE) log.debug('awaked') else: break await self._on_check.join() self._done() async def _handle(self, proxy, check=False): try: proxy = await Proxy.create( *proxy, timeout=self._timeout, resolver=self._resolver, verify_ssl=self._verify_ssl, loop=self._loop, ) except (ResolveError, ValueError): return if not self._is_unique(proxy) or not self._geo_passed(proxy): return if check: await self._push_to_check(proxy) else: self._push_to_result(proxy) def _is_unique(self, proxy): if (proxy.host, proxy.port) not in self.unique_proxies: self.unique_proxies[(proxy.host, proxy.port)] = proxy return True else: return False def _geo_passed(self, proxy): if self._countries and (proxy.geo.code not in self._countries): proxy.log('Location of proxy is outside the given countries list') return False else: return True async def _push_to_check(self, proxy): def _task_done(proxy, f): self._on_check.task_done() if not self._on_check.empty(): self._on_check.get_nowait() try: if f.result(): # proxy is working and its types is equal to the requested self._push_to_result(proxy) except asyncio.CancelledError: pass if self._server and not self._proxies.empty() and self._limit <= 0: log.debug( 'pause. proxies: %s; limit: %s' % (self._proxies.qsize(), self._limit) ) await self._proxies.join() log.debug('unpause. 
proxies: %s' % self._proxies.qsize()) await self._on_check.put(None) task = asyncio.ensure_future(self._checker.check(proxy)) task.add_done_callback(partial(_task_done, proxy)) self._all_tasks.append(task) def _push_to_result(self, proxy): log.debug('push to result: %r' % proxy) self._proxies.put_nowait(proxy) self._update_limit() def _update_limit(self): self._limit -= 1 if self._limit == 0 and not self._server: self._done() def stop(self): """Stop all tasks, and the local proxy server if it's running.""" self._done() if self._server: self._server.stop() self._server = None log.info('Stop!') def _done(self): log.debug('called done') while self._all_tasks: task = self._all_tasks.pop() if not task.done(): task.cancel() self._push_to_result(None) log.info('Done! Total found proxies: %d' % len(self.unique_proxies)) def show_stats(self, verbose=False, **kwargs): """Show statistics on the found proxies. Useful for debugging, but you can also use if you're interested. :param verbose: Flag indicating whether to print verbose stats .. deprecated:: 0.2.0 Use :attr:`verbose` instead of :attr:`full`. """ if kwargs: verbose = True warnings.warn( '`full` in `show_stats` is deprecated, ' 'use `verbose` instead.', DeprecationWarning, ) found_proxies = self.unique_proxies.values() num_working_proxies = len([p for p in found_proxies if p.is_working]) if not found_proxies: print('Proxy not found') return errors = Counter() for p in found_proxies: errors.update(p.stat['errors']) proxies_by_type = { 'SOCKS5': [], 'SOCKS4': [], 'HTTPS': [], 'HTTP': [], 'CONNECT:80': [], 'CONNECT:25': [], } stat = { 'Wrong country': [], 'Wrong protocol/anonymity lvl': [], 'Connection success': [], 'Connection timeout': [], 'Connection failed': [], } for p in found_proxies: msgs = ' '.join([l[1] for l in p.get_log()]) full_log = [p] for proto in p.types: proxies_by_type[proto].append(p) if 'Location of proxy' in msgs: stat['Wrong country'].append(p) elif 'Connection: success' in msgs: if 'Protocol or the level' in msgs: stat['Wrong protocol/anonymity lvl'].append(p) stat['Connection success'].append(p) if not verbose: continue events_by_ngtr = defaultdict(list) for ngtr, event, runtime in p.get_log(): events_by_ngtr[ngtr].append((event, runtime)) for ngtr, events in sorted( events_by_ngtr.items(), key=lambda item: item[0] ): full_log.append('\t%s' % ngtr) for event, runtime in events: if event.startswith('Initial connection'): full_log.append('\t\t-------------------') else: full_log.append( '\t\t{:<66} Runtime: {:.2f}'.format( event, runtime ) ) for row in full_log: print(row) elif 'Connection: failed' in msgs: stat['Connection failed'].append(p) else: stat['Connection timeout'].append(p) if verbose: print('Stats:') pprint(stat) print('The number of working proxies: %d' % num_working_proxies) for proto, proxies in proxies_by_type.items(): print('%s (%s): %s' % (proto, len(proxies), proxies)) print('Errors:', errors)
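A side note on the raw-string branch of `_load`: `IPPortPatternLine` itself is not included in this record, so the sketch below uses a simplified stand-in regex (the real pattern is stricter about octet and port ranges) to show the string-to-{(host, port), ...} transformation the docstring describes.

import re

# Hypothetical stand-in for proxybroker's IPPortPatternLine.
IP_PORT = re.compile(r'(\d{1,3}(?:\.\d{1,3}){3})[:\s]+(\d{1,5})')

raw = """
192.168.0.1:80
10.0.0.2 3128
garbage line
192.168.0.1:80
"""

# findall() with two groups yields (host, port) tuples; set() deduplicates,
# mirroring the `proxies = set(data)` step in Broker._load.
proxies = set(IP_PORT.findall(raw))
print(proxies)  # {('192.168.0.1', '80'), ('10.0.0.2', '3128')}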
constverum/ProxyBroker
proxybroker/api.py
Broker.stop
python
def stop(self):
    self._done()
    if self._server:
        self._server.stop()
        self._server = None
    log.info('Stop!')
Stop all tasks, and the local proxy server if it's running.
train
https://github.com/constverum/ProxyBroker/blob/d21aae8575fc3a95493233ecfd2c7cf47b36b069/proxybroker/api.py#L409-L415
[ "def _done(self):\n log.debug('called done')\n while self._all_tasks:\n task = self._all_tasks.pop()\n if not task.done():\n task.cancel()\n self._push_to_result(None)\n log.info('Done! Total found proxies: %d' % len(self.unique_proxies))\n" ]
class Broker: """The Broker. | One broker to rule them all, one broker to find them, | One broker to bring them all and in the darkness bind them. :param asyncio.Queue queue: (optional) Queue of found/checked proxies :param int timeout: (optional) Timeout of a request in seconds :param int max_conn: (optional) The maximum number of concurrent checks of proxies :param int max_tries: (optional) The maximum number of attempts to check a proxy :param list judges: (optional) Urls of pages that show HTTP headers and IP address. Or :class:`~proxybroker.judge.Judge` objects :param list providers: (optional) Urls of pages where to find proxies. Or :class:`~proxybroker.providers.Provider` objects :param bool verify_ssl: (optional) Flag indicating whether to check the SSL certificates. Set to True to check ssl certifications :param loop: (optional) asyncio compatible event loop .. deprecated:: 0.2.0 Use :attr:`max_conn` and :attr:`max_tries` instead of :attr:`max_concurrent_conn` and :attr:`attempts_conn`. """ def __init__( self, queue=None, timeout=8, max_conn=200, max_tries=3, judges=None, providers=None, verify_ssl=False, loop=None, **kwargs ): self._loop = loop or asyncio.get_event_loop() self._proxies = queue or asyncio.Queue(loop=self._loop) self._resolver = Resolver(loop=self._loop) self._timeout = timeout self._verify_ssl = verify_ssl self.unique_proxies = {} self._all_tasks = [] self._checker = None self._server = None self._limit = 0 # not limited self._countries = None max_concurrent_conn = kwargs.get('max_concurrent_conn') if max_concurrent_conn: warnings.warn( '`max_concurrent_conn` is deprecated, use `max_conn` instead', DeprecationWarning, ) if isinstance(max_concurrent_conn, asyncio.Semaphore): max_conn = max_concurrent_conn._value else: max_conn = max_concurrent_conn attempts_conn = kwargs.get('attempts_conn') if attempts_conn: warnings.warn( '`attempts_conn` is deprecated, use `max_tries` instead', DeprecationWarning, ) max_tries = attempts_conn # The maximum number of concurrent checking proxies self._on_check = asyncio.Queue(maxsize=max_conn, loop=self._loop) self._max_tries = max_tries self._judges = judges self._providers = [ p if isinstance(p, Provider) else Provider(p) for p in (providers or PROVIDERS) ] try: self._loop.add_signal_handler(signal.SIGINT, self.stop) # add_signal_handler() is not implemented on Win # https://docs.python.org/3.5/library/asyncio-eventloops.html#windows except NotImplementedError: pass async def grab(self, *, countries=None, limit=0): """Gather proxies from the providers without checking. :param list countries: (optional) List of ISO country codes where should be located proxies :param int limit: (optional) The maximum number of proxies :ref:`Example of usage <proxybroker-examples-grab>`. """ self._countries = countries self._limit = limit task = asyncio.ensure_future(self._grab(check=False)) self._all_tasks.append(task) async def find( self, *, types=None, data=None, countries=None, post=False, strict=False, dnsbl=None, limit=0, **kwargs ): """Gather and check proxies from providers or from a passed data. :ref:`Example of usage <proxybroker-examples-find>`. :param list types: Types (protocols) that need to be check on support by proxy. Supported: HTTP, HTTPS, SOCKS4, SOCKS5, CONNECT:80, CONNECT:25 And levels of anonymity (HTTP only): Transparent, Anonymous, High :param data: (optional) String or list with proxies. Also can be a file-like object supports `read()` method. 
Used instead of providers :param list countries: (optional) List of ISO country codes where should be located proxies :param bool post: (optional) Flag indicating use POST instead of GET for requests when checking proxies :param bool strict: (optional) Flag indicating that anonymity levels of types (protocols) supported by a proxy must be equal to the requested types and levels of anonymity. By default, strict mode is off and for a successful check is enough to satisfy any one of the requested types :param list dnsbl: (optional) Spam databases for proxy checking. `Wiki <https://en.wikipedia.org/wiki/DNSBL>`_ :param int limit: (optional) The maximum number of proxies :raises ValueError: If :attr:`types` not given. .. versionchanged:: 0.2.0 Added: :attr:`post`, :attr:`strict`, :attr:`dnsbl`. Changed: :attr:`types` is required. """ ip = await self._resolver.get_real_ext_ip() types = _update_types(types) if not types: raise ValueError('`types` is required') self._checker = Checker( judges=self._judges, timeout=self._timeout, verify_ssl=self._verify_ssl, max_tries=self._max_tries, real_ext_ip=ip, types=types, post=post, strict=strict, dnsbl=dnsbl, loop=self._loop, ) self._countries = countries self._limit = limit tasks = [asyncio.ensure_future(self._checker.check_judges())] if data: task = asyncio.ensure_future(self._load(data, check=True)) else: task = asyncio.ensure_future(self._grab(types, check=True)) tasks.append(task) self._all_tasks.extend(tasks) def serve(self, host='127.0.0.1', port=8888, limit=100, **kwargs): """Start a local proxy server. The server distributes incoming requests to a pool of found proxies. When the server receives an incoming request, it chooses the optimal proxy (based on the percentage of errors and average response time) and passes to it the incoming request. In addition to the parameters listed below are also accept all the parameters of the :meth:`.find` method and passed it to gather proxies to a pool. :ref:`Example of usage <proxybroker-examples-server>`. :param str host: (optional) Host of local proxy server :param int port: (optional) Port of local proxy server :param int limit: (optional) When will be found a requested number of working proxies, checking of new proxies will be lazily paused. Checking will be resumed if all the found proxies will be discarded in the process of working with them (see :attr:`max_error_rate`, :attr:`max_resp_time`). And will continue until it finds one working proxy and paused again. The default value is 100 :param int max_tries: (optional) The maximum number of attempts to handle an incoming request. If not specified, it will use the value specified during the creation of the :class:`Broker` object. Attempts can be made with different proxies. The default value is 3 :param int min_req_proxy: (optional) The minimum number of processed requests to estimate the quality of proxy (in accordance with :attr:`max_error_rate` and :attr:`max_resp_time`). The default value is 5 :param int max_error_rate: (optional) The maximum percentage of requests that ended with an error. For example: 0.5 = 50%. If proxy.error_rate exceeds this value, proxy will be removed from the pool. The default value is 0.5 :param int max_resp_time: (optional) The maximum response time in seconds. If proxy.avg_resp_time exceeds this value, proxy will be removed from the pool. The default value is 8 :param bool prefer_connect: (optional) Flag that indicates whether to use the CONNECT method if possible. 
For example: If is set to True and a proxy supports HTTP proto (GET or POST requests) and CONNECT method, the server will try to use CONNECT method and only after that send the original request. The default value is False :param list http_allowed_codes: (optional) Acceptable HTTP codes returned by proxy on requests. If a proxy return code, not included in this list, it will be considered as a proxy error, not a wrong/unavailable address. For example, if a proxy will return a ``404 Not Found`` response - this will be considered as an error of a proxy. Checks only for HTTP protocol, HTTPS not supported at the moment. By default the list is empty and the response code is not verified :param int backlog: (optional) The maximum number of queued connections passed to listen. The default value is 100 :raises ValueError: If :attr:`limit` is less than or equal to zero. Because a parsing of providers will be endless .. versionadded:: 0.2.0 """ if limit <= 0: raise ValueError( 'In serve mode value of the limit cannot be less than or ' 'equal to zero. Otherwise, a parsing of providers will be ' 'endless' ) self._server = Server( host=host, port=port, proxies=self._proxies, timeout=self._timeout, max_tries=kwargs.pop('max_tries', self._max_tries), loop=self._loop, **kwargs ) self._server.start() task = asyncio.ensure_future(self.find(limit=limit, **kwargs)) self._all_tasks.append(task) async def _load(self, data, check=True): """Looking for proxies in the passed data. Transform the passed data from [raw string | file-like object | list] to set {(host, port), ...}: {('192.168.0.1', '80'), } """ log.debug('Load proxies from the raw data') if isinstance(data, io.TextIOWrapper): data = data.read() if isinstance(data, str): data = IPPortPatternLine.findall(data) proxies = set(data) for proxy in proxies: await self._handle(proxy, check=check) await self._on_check.join() self._done() async def _grab(self, types=None, check=False): def _get_tasks(by=MAX_CONCURRENT_PROVIDERS): providers = [ pr for pr in self._providers if not types or not pr.proto or bool(pr.proto & types.keys()) ] while providers: tasks = [ asyncio.ensure_future(pr.get_proxies()) for pr in providers[:by] ] del providers[:by] self._all_tasks.extend(tasks) yield tasks log.debug('Start grabbing proxies') while True: for tasks in _get_tasks(): for task in asyncio.as_completed(tasks): proxies = await task for proxy in proxies: await self._handle(proxy, check=check) log.debug('Grab cycle is complete') if self._server: log.debug('fall asleep for %d seconds' % GRAB_PAUSE) await asyncio.sleep(GRAB_PAUSE) log.debug('awaked') else: break await self._on_check.join() self._done() async def _handle(self, proxy, check=False): try: proxy = await Proxy.create( *proxy, timeout=self._timeout, resolver=self._resolver, verify_ssl=self._verify_ssl, loop=self._loop, ) except (ResolveError, ValueError): return if not self._is_unique(proxy) or not self._geo_passed(proxy): return if check: await self._push_to_check(proxy) else: self._push_to_result(proxy) def _is_unique(self, proxy): if (proxy.host, proxy.port) not in self.unique_proxies: self.unique_proxies[(proxy.host, proxy.port)] = proxy return True else: return False def _geo_passed(self, proxy): if self._countries and (proxy.geo.code not in self._countries): proxy.log('Location of proxy is outside the given countries list') return False else: return True async def _push_to_check(self, proxy): def _task_done(proxy, f): self._on_check.task_done() if not self._on_check.empty(): self._on_check.get_nowait() try: if 
f.result(): # proxy is working and its types is equal to the requested self._push_to_result(proxy) except asyncio.CancelledError: pass if self._server and not self._proxies.empty() and self._limit <= 0: log.debug( 'pause. proxies: %s; limit: %s' % (self._proxies.qsize(), self._limit) ) await self._proxies.join() log.debug('unpause. proxies: %s' % self._proxies.qsize()) await self._on_check.put(None) task = asyncio.ensure_future(self._checker.check(proxy)) task.add_done_callback(partial(_task_done, proxy)) self._all_tasks.append(task) def _push_to_result(self, proxy): log.debug('push to result: %r' % proxy) self._proxies.put_nowait(proxy) self._update_limit() def _update_limit(self): self._limit -= 1 if self._limit == 0 and not self._server: self._done() def _done(self): log.debug('called done') while self._all_tasks: task = self._all_tasks.pop() if not task.done(): task.cancel() self._push_to_result(None) log.info('Done! Total found proxies: %d' % len(self.unique_proxies)) def show_stats(self, verbose=False, **kwargs): """Show statistics on the found proxies. Useful for debugging, but you can also use if you're interested. :param verbose: Flag indicating whether to print verbose stats .. deprecated:: 0.2.0 Use :attr:`verbose` instead of :attr:`full`. """ if kwargs: verbose = True warnings.warn( '`full` in `show_stats` is deprecated, ' 'use `verbose` instead.', DeprecationWarning, ) found_proxies = self.unique_proxies.values() num_working_proxies = len([p for p in found_proxies if p.is_working]) if not found_proxies: print('Proxy not found') return errors = Counter() for p in found_proxies: errors.update(p.stat['errors']) proxies_by_type = { 'SOCKS5': [], 'SOCKS4': [], 'HTTPS': [], 'HTTP': [], 'CONNECT:80': [], 'CONNECT:25': [], } stat = { 'Wrong country': [], 'Wrong protocol/anonymity lvl': [], 'Connection success': [], 'Connection timeout': [], 'Connection failed': [], } for p in found_proxies: msgs = ' '.join([l[1] for l in p.get_log()]) full_log = [p] for proto in p.types: proxies_by_type[proto].append(p) if 'Location of proxy' in msgs: stat['Wrong country'].append(p) elif 'Connection: success' in msgs: if 'Protocol or the level' in msgs: stat['Wrong protocol/anonymity lvl'].append(p) stat['Connection success'].append(p) if not verbose: continue events_by_ngtr = defaultdict(list) for ngtr, event, runtime in p.get_log(): events_by_ngtr[ngtr].append((event, runtime)) for ngtr, events in sorted( events_by_ngtr.items(), key=lambda item: item[0] ): full_log.append('\t%s' % ngtr) for event, runtime in events: if event.startswith('Initial connection'): full_log.append('\t\t-------------------') else: full_log.append( '\t\t{:<66} Runtime: {:.2f}'.format( event, runtime ) ) for row in full_log: print(row) elif 'Connection: failed' in msgs: stat['Connection failed'].append(p) else: stat['Connection timeout'].append(p) if verbose: print('Stats:') pprint(stat) print('The number of working proxies: %d' % num_working_proxies) for proto, proxies in proxies_by_type.items(): print('%s (%s): %s' % (proto, len(proxies), proxies)) print('Errors:', errors)
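A hedged sketch of reaching `stop` programmatically. The `__init__` shown in the scope above already wires `stop` to SIGINT where supported; the variant below bounds a `find` run with a timer instead. The 30-second budget and the consumer coroutine are illustrative assumptions, not part of the source.

import asyncio

from proxybroker import Broker


async def consume(proxies):
    while True:
        proxy = await proxies.get()
        if proxy is None:  # sentinel pushed by _done() when stop() runs
            break
        print('Found proxy: %s' % proxy)


proxies = asyncio.Queue()
broker = Broker(proxies)
loop = asyncio.get_event_loop()
loop.call_later(30, broker.stop)  # arbitrary 30 s budget for the sketch
loop.run_until_complete(
    asyncio.gather(broker.find(types=['HTTP'], limit=0), consume(proxies))
)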
constverum/ProxyBroker
proxybroker/api.py
Broker.show_stats
python
def show_stats(self, verbose=False, **kwargs):
    if kwargs:
        verbose = True
        warnings.warn(
            '`full` in `show_stats` is deprecated, '
            'use `verbose` instead.',
            DeprecationWarning,
        )

    found_proxies = self.unique_proxies.values()
    num_working_proxies = len([p for p in found_proxies if p.is_working])

    if not found_proxies:
        print('Proxy not found')
        return

    errors = Counter()
    for p in found_proxies:
        errors.update(p.stat['errors'])

    proxies_by_type = {
        'SOCKS5': [],
        'SOCKS4': [],
        'HTTPS': [],
        'HTTP': [],
        'CONNECT:80': [],
        'CONNECT:25': [],
    }

    stat = {
        'Wrong country': [],
        'Wrong protocol/anonymity lvl': [],
        'Connection success': [],
        'Connection timeout': [],
        'Connection failed': [],
    }

    for p in found_proxies:
        msgs = ' '.join([l[1] for l in p.get_log()])
        full_log = [p]
        for proto in p.types:
            proxies_by_type[proto].append(p)
        if 'Location of proxy' in msgs:
            stat['Wrong country'].append(p)
        elif 'Connection: success' in msgs:
            if 'Protocol or the level' in msgs:
                stat['Wrong protocol/anonymity lvl'].append(p)
            stat['Connection success'].append(p)
            if not verbose:
                continue
            events_by_ngtr = defaultdict(list)
            for ngtr, event, runtime in p.get_log():
                events_by_ngtr[ngtr].append((event, runtime))
            for ngtr, events in sorted(
                events_by_ngtr.items(), key=lambda item: item[0]
            ):
                full_log.append('\t%s' % ngtr)
                for event, runtime in events:
                    if event.startswith('Initial connection'):
                        full_log.append('\t\t-------------------')
                    else:
                        full_log.append(
                            '\t\t{:<66} Runtime: {:.2f}'.format(
                                event, runtime
                            )
                        )
            for row in full_log:
                print(row)
        elif 'Connection: failed' in msgs:
            stat['Connection failed'].append(p)
        else:
            stat['Connection timeout'].append(p)

    if verbose:
        print('Stats:')
        pprint(stat)

    print('The number of working proxies: %d' % num_working_proxies)
    for proto, proxies in proxies_by_type.items():
        print('%s (%s): %s' % (proto, len(proxies), proxies))
    print('Errors:', errors)
Show statistics on the found proxies.

Useful for debugging, but you can also use it if you're simply curious.

:param verbose: Flag indicating whether to print verbose stats

.. deprecated:: 0.2.0
    Use :attr:`verbose` instead of :attr:`full`.
train
https://github.com/constverum/ProxyBroker/blob/d21aae8575fc3a95493233ecfd2c7cf47b36b069/proxybroker/api.py#L426-L514
null
class Broker: """The Broker. | One broker to rule them all, one broker to find them, | One broker to bring them all and in the darkness bind them. :param asyncio.Queue queue: (optional) Queue of found/checked proxies :param int timeout: (optional) Timeout of a request in seconds :param int max_conn: (optional) The maximum number of concurrent checks of proxies :param int max_tries: (optional) The maximum number of attempts to check a proxy :param list judges: (optional) Urls of pages that show HTTP headers and IP address. Or :class:`~proxybroker.judge.Judge` objects :param list providers: (optional) Urls of pages where to find proxies. Or :class:`~proxybroker.providers.Provider` objects :param bool verify_ssl: (optional) Flag indicating whether to check the SSL certificates. Set to True to check ssl certifications :param loop: (optional) asyncio compatible event loop .. deprecated:: 0.2.0 Use :attr:`max_conn` and :attr:`max_tries` instead of :attr:`max_concurrent_conn` and :attr:`attempts_conn`. """ def __init__( self, queue=None, timeout=8, max_conn=200, max_tries=3, judges=None, providers=None, verify_ssl=False, loop=None, **kwargs ): self._loop = loop or asyncio.get_event_loop() self._proxies = queue or asyncio.Queue(loop=self._loop) self._resolver = Resolver(loop=self._loop) self._timeout = timeout self._verify_ssl = verify_ssl self.unique_proxies = {} self._all_tasks = [] self._checker = None self._server = None self._limit = 0 # not limited self._countries = None max_concurrent_conn = kwargs.get('max_concurrent_conn') if max_concurrent_conn: warnings.warn( '`max_concurrent_conn` is deprecated, use `max_conn` instead', DeprecationWarning, ) if isinstance(max_concurrent_conn, asyncio.Semaphore): max_conn = max_concurrent_conn._value else: max_conn = max_concurrent_conn attempts_conn = kwargs.get('attempts_conn') if attempts_conn: warnings.warn( '`attempts_conn` is deprecated, use `max_tries` instead', DeprecationWarning, ) max_tries = attempts_conn # The maximum number of concurrent checking proxies self._on_check = asyncio.Queue(maxsize=max_conn, loop=self._loop) self._max_tries = max_tries self._judges = judges self._providers = [ p if isinstance(p, Provider) else Provider(p) for p in (providers or PROVIDERS) ] try: self._loop.add_signal_handler(signal.SIGINT, self.stop) # add_signal_handler() is not implemented on Win # https://docs.python.org/3.5/library/asyncio-eventloops.html#windows except NotImplementedError: pass async def grab(self, *, countries=None, limit=0): """Gather proxies from the providers without checking. :param list countries: (optional) List of ISO country codes where should be located proxies :param int limit: (optional) The maximum number of proxies :ref:`Example of usage <proxybroker-examples-grab>`. """ self._countries = countries self._limit = limit task = asyncio.ensure_future(self._grab(check=False)) self._all_tasks.append(task) async def find( self, *, types=None, data=None, countries=None, post=False, strict=False, dnsbl=None, limit=0, **kwargs ): """Gather and check proxies from providers or from a passed data. :ref:`Example of usage <proxybroker-examples-find>`. :param list types: Types (protocols) that need to be check on support by proxy. Supported: HTTP, HTTPS, SOCKS4, SOCKS5, CONNECT:80, CONNECT:25 And levels of anonymity (HTTP only): Transparent, Anonymous, High :param data: (optional) String or list with proxies. Also can be a file-like object supports `read()` method. 
Used instead of providers :param list countries: (optional) List of ISO country codes where should be located proxies :param bool post: (optional) Flag indicating use POST instead of GET for requests when checking proxies :param bool strict: (optional) Flag indicating that anonymity levels of types (protocols) supported by a proxy must be equal to the requested types and levels of anonymity. By default, strict mode is off and for a successful check is enough to satisfy any one of the requested types :param list dnsbl: (optional) Spam databases for proxy checking. `Wiki <https://en.wikipedia.org/wiki/DNSBL>`_ :param int limit: (optional) The maximum number of proxies :raises ValueError: If :attr:`types` not given. .. versionchanged:: 0.2.0 Added: :attr:`post`, :attr:`strict`, :attr:`dnsbl`. Changed: :attr:`types` is required. """ ip = await self._resolver.get_real_ext_ip() types = _update_types(types) if not types: raise ValueError('`types` is required') self._checker = Checker( judges=self._judges, timeout=self._timeout, verify_ssl=self._verify_ssl, max_tries=self._max_tries, real_ext_ip=ip, types=types, post=post, strict=strict, dnsbl=dnsbl, loop=self._loop, ) self._countries = countries self._limit = limit tasks = [asyncio.ensure_future(self._checker.check_judges())] if data: task = asyncio.ensure_future(self._load(data, check=True)) else: task = asyncio.ensure_future(self._grab(types, check=True)) tasks.append(task) self._all_tasks.extend(tasks) def serve(self, host='127.0.0.1', port=8888, limit=100, **kwargs): """Start a local proxy server. The server distributes incoming requests to a pool of found proxies. When the server receives an incoming request, it chooses the optimal proxy (based on the percentage of errors and average response time) and passes to it the incoming request. In addition to the parameters listed below are also accept all the parameters of the :meth:`.find` method and passed it to gather proxies to a pool. :ref:`Example of usage <proxybroker-examples-server>`. :param str host: (optional) Host of local proxy server :param int port: (optional) Port of local proxy server :param int limit: (optional) When will be found a requested number of working proxies, checking of new proxies will be lazily paused. Checking will be resumed if all the found proxies will be discarded in the process of working with them (see :attr:`max_error_rate`, :attr:`max_resp_time`). And will continue until it finds one working proxy and paused again. The default value is 100 :param int max_tries: (optional) The maximum number of attempts to handle an incoming request. If not specified, it will use the value specified during the creation of the :class:`Broker` object. Attempts can be made with different proxies. The default value is 3 :param int min_req_proxy: (optional) The minimum number of processed requests to estimate the quality of proxy (in accordance with :attr:`max_error_rate` and :attr:`max_resp_time`). The default value is 5 :param int max_error_rate: (optional) The maximum percentage of requests that ended with an error. For example: 0.5 = 50%. If proxy.error_rate exceeds this value, proxy will be removed from the pool. The default value is 0.5 :param int max_resp_time: (optional) The maximum response time in seconds. If proxy.avg_resp_time exceeds this value, proxy will be removed from the pool. The default value is 8 :param bool prefer_connect: (optional) Flag that indicates whether to use the CONNECT method if possible. 
For example: If is set to True and a proxy supports HTTP proto (GET or POST requests) and CONNECT method, the server will try to use CONNECT method and only after that send the original request. The default value is False :param list http_allowed_codes: (optional) Acceptable HTTP codes returned by proxy on requests. If a proxy return code, not included in this list, it will be considered as a proxy error, not a wrong/unavailable address. For example, if a proxy will return a ``404 Not Found`` response - this will be considered as an error of a proxy. Checks only for HTTP protocol, HTTPS not supported at the moment. By default the list is empty and the response code is not verified :param int backlog: (optional) The maximum number of queued connections passed to listen. The default value is 100 :raises ValueError: If :attr:`limit` is less than or equal to zero. Because a parsing of providers will be endless .. versionadded:: 0.2.0 """ if limit <= 0: raise ValueError( 'In serve mode value of the limit cannot be less than or ' 'equal to zero. Otherwise, a parsing of providers will be ' 'endless' ) self._server = Server( host=host, port=port, proxies=self._proxies, timeout=self._timeout, max_tries=kwargs.pop('max_tries', self._max_tries), loop=self._loop, **kwargs ) self._server.start() task = asyncio.ensure_future(self.find(limit=limit, **kwargs)) self._all_tasks.append(task) async def _load(self, data, check=True): """Looking for proxies in the passed data. Transform the passed data from [raw string | file-like object | list] to set {(host, port), ...}: {('192.168.0.1', '80'), } """ log.debug('Load proxies from the raw data') if isinstance(data, io.TextIOWrapper): data = data.read() if isinstance(data, str): data = IPPortPatternLine.findall(data) proxies = set(data) for proxy in proxies: await self._handle(proxy, check=check) await self._on_check.join() self._done() async def _grab(self, types=None, check=False): def _get_tasks(by=MAX_CONCURRENT_PROVIDERS): providers = [ pr for pr in self._providers if not types or not pr.proto or bool(pr.proto & types.keys()) ] while providers: tasks = [ asyncio.ensure_future(pr.get_proxies()) for pr in providers[:by] ] del providers[:by] self._all_tasks.extend(tasks) yield tasks log.debug('Start grabbing proxies') while True: for tasks in _get_tasks(): for task in asyncio.as_completed(tasks): proxies = await task for proxy in proxies: await self._handle(proxy, check=check) log.debug('Grab cycle is complete') if self._server: log.debug('fall asleep for %d seconds' % GRAB_PAUSE) await asyncio.sleep(GRAB_PAUSE) log.debug('awaked') else: break await self._on_check.join() self._done() async def _handle(self, proxy, check=False): try: proxy = await Proxy.create( *proxy, timeout=self._timeout, resolver=self._resolver, verify_ssl=self._verify_ssl, loop=self._loop, ) except (ResolveError, ValueError): return if not self._is_unique(proxy) or not self._geo_passed(proxy): return if check: await self._push_to_check(proxy) else: self._push_to_result(proxy) def _is_unique(self, proxy): if (proxy.host, proxy.port) not in self.unique_proxies: self.unique_proxies[(proxy.host, proxy.port)] = proxy return True else: return False def _geo_passed(self, proxy): if self._countries and (proxy.geo.code not in self._countries): proxy.log('Location of proxy is outside the given countries list') return False else: return True async def _push_to_check(self, proxy): def _task_done(proxy, f): self._on_check.task_done() if not self._on_check.empty(): self._on_check.get_nowait() try: if 
f.result(): # proxy is working and its types is equal to the requested self._push_to_result(proxy) except asyncio.CancelledError: pass if self._server and not self._proxies.empty() and self._limit <= 0: log.debug( 'pause. proxies: %s; limit: %s' % (self._proxies.qsize(), self._limit) ) await self._proxies.join() log.debug('unpause. proxies: %s' % self._proxies.qsize()) await self._on_check.put(None) task = asyncio.ensure_future(self._checker.check(proxy)) task.add_done_callback(partial(_task_done, proxy)) self._all_tasks.append(task) def _push_to_result(self, proxy): log.debug('push to result: %r' % proxy) self._proxies.put_nowait(proxy) self._update_limit() def _update_limit(self): self._limit -= 1 if self._limit == 0 and not self._server: self._done() def stop(self): """Stop all tasks, and the local proxy server if it's running.""" self._done() if self._server: self._server.stop() self._server = None log.info('Stop!') def _done(self): log.debug('called done') while self._all_tasks: task = self._all_tasks.pop() if not task.done(): task.cancel() self._push_to_result(None) log.info('Done! Total found proxies: %d' % len(self.unique_proxies))
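A minimal hedged sketch of `show_stats` usage. It reads `broker.unique_proxies`, so it is meant to run after the checking tasks have finished; the types and limit below are arbitrary, and `drain` simply waits for the `None` sentinel that `_done` pushes when the limit is reached.

import asyncio

from proxybroker import Broker


async def drain(proxies):
    # Wait for the None sentinel pushed by Broker._done().
    while (await proxies.get()) is not None:
        pass


proxies = asyncio.Queue()
broker = Broker(proxies)
loop = asyncio.get_event_loop()
loop.run_until_complete(
    asyncio.gather(broker.find(types=['HTTP'], limit=5), drain(proxies))
)
broker.show_stats(verbose=True)  # omit verbose for the summary only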
constverum/ProxyBroker
examples/only_grab.py
save
python
async def save(proxies, filename):
    with open(filename, 'w') as f:
        while True:
            proxy = await proxies.get()
            if proxy is None:
                break
            f.write('%s:%d\n' % (proxy.host, proxy.port))
Save proxies to a file.
train
https://github.com/constverum/ProxyBroker/blob/d21aae8575fc3a95493233ecfd2c7cf47b36b069/examples/only_grab.py#L9-L16
null
"""Gather proxies from the providers without checking and save them to a file.""" import asyncio from proxybroker import Broker def main(): proxies = asyncio.Queue() broker = Broker(proxies) tasks = asyncio.gather( broker.grab(countries=['US', 'GB'], limit=10), save(proxies, filename='proxies.txt'), ) loop = asyncio.get_event_loop() loop.run_until_complete(tasks) if __name__ == '__main__': main()
constverum/ProxyBroker
proxybroker/resolver.py
Resolver.get_ip_info
python
def get_ip_info(ip):
    # from pprint import pprint
    try:
        ipInfo = _mmdb_reader.get(ip) or {}
    except (maxminddb.errors.InvalidDatabaseError, ValueError):
        ipInfo = {}

    code, name = '--', 'Unknown'
    city_name, region_code, region_name = ('Unknown',) * 3
    if 'country' in ipInfo:
        code = ipInfo['country']['iso_code']
        name = ipInfo['country']['names']['en']
    elif 'continent' in ipInfo:
        code = ipInfo['continent']['code']
        name = ipInfo['continent']['names']['en']
    if 'city' in ipInfo:
        city_name = ipInfo['city']['names']['en']
    if 'subdivisions' in ipInfo:
        region_code = ipInfo['subdivisions'][0]['iso_code']
        region_name = ipInfo['subdivisions'][0]['names']['en']

    return GeoData(code, name, region_code, region_name, city_name)
Return geo information about an IP address.

`code` - ISO country code
`name` - Full name of the country
`region_code` - ISO region code
`region_name` - Full name of the region
`city_name` - Full name of the city
train
https://github.com/constverum/ProxyBroker/blob/d21aae8575fc3a95493233ecfd2c7cf47b36b069/proxybroker/resolver.py#L57-L85
null
class Resolver:
    """Async host resolver based on aiodns."""

    _cached_hosts = {}
    _ip_hosts = [
        'https://wtfismyip.com/text',
        'http://api.ipify.org/',
        'http://ipinfo.io/ip',
        'http://ipv4.icanhazip.com/',
        'http://myexternalip.com/raw',
        'http://ipinfo.io/ip',
        'http://ifconfig.io/ip',
    ]

    def __init__(self, timeout=5, loop=None):
        self._timeout = timeout
        self._loop = loop or asyncio.get_event_loop()
        self._resolver = aiodns.DNSResolver(loop=self._loop)

    @staticmethod
    def host_is_ip(host):
        """Check whether a host is an IP address."""
        # TODO: add IPv6 support
        try:
            ipaddress.IPv4Address(host)
        except ipaddress.AddressValueError:
            return False
        else:
            return True

    def _pop_random_ip_host(self):
        host = random.choice(self._ip_hosts)
        self._ip_hosts.remove(host)
        return host

    async def get_real_ext_ip(self):
        """Return the real external IP address."""
        while self._ip_hosts:
            try:
                timeout = aiohttp.ClientTimeout(total=self._timeout)
                async with aiohttp.ClientSession(
                    timeout=timeout, loop=self._loop
                ) as session, session.get(self._pop_random_ip_host()) as resp:
                    ip = await resp.text()
            except asyncio.TimeoutError:
                pass
            else:
                ip = ip.strip()
                if self.host_is_ip(ip):
                    log.debug('Real external IP: %s', ip)
                    break
        else:
            raise RuntimeError('Could not get the external IP')
        return ip

    async def resolve(
        self, host, port=80, family=None, qtype='A', logging=True
    ):
        """Resolve a host name to its IP address(es)."""
        if self.host_is_ip(host):
            return host

        _host = self._cached_hosts.get(host)
        if _host:
            return _host

        resp = await self._resolve(host, qtype)

        if resp:
            hosts = [
                {
                    'hostname': host,
                    'host': r.host,
                    'port': port,
                    'family': family,
                    'proto': socket.IPPROTO_IP,
                    'flags': socket.AI_NUMERICHOST,
                }
                for r in resp
            ]
            if family:
                self._cached_hosts[host] = hosts
            else:
                self._cached_hosts[host] = hosts[0]['host']
            if logging:
                log.debug(
                    '%s: Host resolved: %s' % (host, self._cached_hosts[host])
                )
        else:
            if logging:
                log.warning('%s: Could not resolve host' % host)

        return self._cached_hosts.get(host)

    async def _resolve(self, host, qtype):
        try:
            resp = await asyncio.wait_for(
                self._resolver.query(host, qtype), timeout=self._timeout
            )
        except (aiodns.error.DNSError, asyncio.TimeoutError):
            raise ResolveError
        else:
            return resp
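A minimal hedged sketch of calling `get_ip_info`. `GeoData` appears to be a namedtuple, given the positional construction in the record; the attribute access below rests on that assumption, and the example address and printed output are illustrative.

from proxybroker.resolver import Resolver

# Offline lookup against the bundled MaxMind database (_mmdb_reader);
# unknown addresses fall back to code '--' and names 'Unknown'.
geo = Resolver.get_ip_info('8.8.8.8')
print(geo.code, geo.name)              # e.g. US United States
print(geo.region_name, geo.city_name)  # region/city, or 'Unknown'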
constverum/ProxyBroker
proxybroker/resolver.py
Resolver.get_real_ext_ip
python
async def get_real_ext_ip(self):
    while self._ip_hosts:
        try:
            timeout = aiohttp.ClientTimeout(total=self._timeout)
            async with aiohttp.ClientSession(
                timeout=timeout, loop=self._loop
            ) as session, session.get(self._pop_random_ip_host()) as resp:
                ip = await resp.text()
        except asyncio.TimeoutError:
            pass
        else:
            ip = ip.strip()
            if self.host_is_ip(ip):
                log.debug('Real external IP: %s', ip)
                break
    else:
        raise RuntimeError('Could not get the external IP')
    return ip
Return the real external IP address.
train
https://github.com/constverum/ProxyBroker/blob/d21aae8575fc3a95493233ecfd2c7cf47b36b069/proxybroker/resolver.py#L92-L110
[ "def host_is_ip(host):\n \"\"\"Check a host is IP address.\"\"\"\n # TODO: add IPv6 support\n try:\n ipaddress.IPv4Address(host)\n except ipaddress.AddressValueError:\n return False\n else:\n return True\n", "def _pop_random_ip_host(self):\n host = random.choice(self._ip_hosts)\n self._ip_hosts.remove(host)\n return host\n" ]
class Resolver: """Async host resolver based on aiodns.""" _cached_hosts = {} _ip_hosts = [ 'https://wtfismyip.com/text', 'http://api.ipify.org/', 'http://ipinfo.io/ip', 'http://ipv4.icanhazip.com/', 'http://myexternalip.com/raw', 'http://ipinfo.io/ip', 'http://ifconfig.io/ip', ] def __init__(self, timeout=5, loop=None): self._timeout = timeout self._loop = loop or asyncio.get_event_loop() self._resolver = aiodns.DNSResolver(loop=self._loop) @staticmethod def host_is_ip(host): """Check a host is IP address.""" # TODO: add IPv6 support try: ipaddress.IPv4Address(host) except ipaddress.AddressValueError: return False else: return True @staticmethod def get_ip_info(ip): """Return geo information about IP address. `code` - ISO country code `name` - Full name of country `region_code` - ISO region code `region_name` - Full name of region `city_name` - Full name of city """ # from pprint import pprint try: ipInfo = _mmdb_reader.get(ip) or {} except (maxminddb.errors.InvalidDatabaseError, ValueError): ipInfo = {} code, name = '--', 'Unknown' city_name, region_code, region_name = ('Unknown',) * 3 if 'country' in ipInfo: code = ipInfo['country']['iso_code'] name = ipInfo['country']['names']['en'] elif 'continent' in ipInfo: code = ipInfo['continent']['code'] name = ipInfo['continent']['names']['en'] if 'city' in ipInfo: city_name = ipInfo['city']['names']['en'] if 'subdivisions' in ipInfo: region_code = ipInfo['subdivisions'][0]['iso_code'] region_name = ipInfo['subdivisions'][0]['names']['en'] return GeoData(code, name, region_code, region_name, city_name) def _pop_random_ip_host(self): host = random.choice(self._ip_hosts) self._ip_hosts.remove(host) return host async def resolve( self, host, port=80, family=None, qtype='A', logging=True ): """Return resolving IP address(es) from host name.""" if self.host_is_ip(host): return host _host = self._cached_hosts.get(host) if _host: return _host resp = await self._resolve(host, qtype) if resp: hosts = [ { 'hostname': host, 'host': r.host, 'port': port, 'family': family, 'proto': socket.IPPROTO_IP, 'flags': socket.AI_NUMERICHOST, } for r in resp ] if family: self._cached_hosts[host] = hosts else: self._cached_hosts[host] = hosts[0]['host'] if logging: log.debug( '%s: Host resolved: %s' % (host, self._cached_hosts[host]) ) else: if logging: log.warning('%s: Could not resolve host' % host) return self._cached_hosts.get(host) async def _resolve(self, host, qtype): try: resp = await asyncio.wait_for( self._resolver.query(host, qtype), timeout=self._timeout ) except (aiodns.error.DNSError, asyncio.TimeoutError): raise ResolveError else: return resp
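A hedged sketch of calling get_real_ext_ip; it assumes network access to at least one of the services in `_ip_hosts`. Note a quirk visible in the code: `_ip_hosts` is a class-level list that `_pop_random_ip_host` mutates in place, so each call permanently consumes hosts for the whole process.

import asyncio

from proxybroker.resolver import Resolver

async def main():
    resolver = Resolver(timeout=5)
    # Tries the _ip_hosts services in random order until one returns a
    # valid IPv4 address; raises RuntimeError if every service fails.
    ip = await resolver.get_real_ext_ip()
    print('External IP:', ip)

asyncio.run(main())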
constverum/ProxyBroker
proxybroker/resolver.py
Resolver.resolve
python
async def resolve( self, host, port=80, family=None, qtype='A', logging=True ): if self.host_is_ip(host): return host _host = self._cached_hosts.get(host) if _host: return _host resp = await self._resolve(host, qtype) if resp: hosts = [ { 'hostname': host, 'host': r.host, 'port': port, 'family': family, 'proto': socket.IPPROTO_IP, 'flags': socket.AI_NUMERICHOST, } for r in resp ] if family: self._cached_hosts[host] = hosts else: self._cached_hosts[host] = hosts[0]['host'] if logging: log.debug( '%s: Host resolved: %s' % (host, self._cached_hosts[host]) ) else: if logging: log.warning('%s: Could not resolve host' % host) return self._cached_hosts.get(host)
Resolve a host name and return its IP address(es).
train
https://github.com/constverum/ProxyBroker/blob/d21aae8575fc3a95493233ecfd2c7cf47b36b069/proxybroker/resolver.py#L112-L148
[ "def host_is_ip(host):\n \"\"\"Check a host is IP address.\"\"\"\n # TODO: add IPv6 support\n try:\n ipaddress.IPv4Address(host)\n except ipaddress.AddressValueError:\n return False\n else:\n return True\n" ]
class Resolver: """Async host resolver based on aiodns.""" _cached_hosts = {} _ip_hosts = [ 'https://wtfismyip.com/text', 'http://api.ipify.org/', 'http://ipinfo.io/ip', 'http://ipv4.icanhazip.com/', 'http://myexternalip.com/raw', 'http://ipinfo.io/ip', 'http://ifconfig.io/ip', ] def __init__(self, timeout=5, loop=None): self._timeout = timeout self._loop = loop or asyncio.get_event_loop() self._resolver = aiodns.DNSResolver(loop=self._loop) @staticmethod def host_is_ip(host): """Check a host is IP address.""" # TODO: add IPv6 support try: ipaddress.IPv4Address(host) except ipaddress.AddressValueError: return False else: return True @staticmethod def get_ip_info(ip): """Return geo information about IP address. `code` - ISO country code `name` - Full name of country `region_code` - ISO region code `region_name` - Full name of region `city_name` - Full name of city """ # from pprint import pprint try: ipInfo = _mmdb_reader.get(ip) or {} except (maxminddb.errors.InvalidDatabaseError, ValueError): ipInfo = {} code, name = '--', 'Unknown' city_name, region_code, region_name = ('Unknown',) * 3 if 'country' in ipInfo: code = ipInfo['country']['iso_code'] name = ipInfo['country']['names']['en'] elif 'continent' in ipInfo: code = ipInfo['continent']['code'] name = ipInfo['continent']['names']['en'] if 'city' in ipInfo: city_name = ipInfo['city']['names']['en'] if 'subdivisions' in ipInfo: region_code = ipInfo['subdivisions'][0]['iso_code'] region_name = ipInfo['subdivisions'][0]['names']['en'] return GeoData(code, name, region_code, region_name, city_name) def _pop_random_ip_host(self): host = random.choice(self._ip_hosts) self._ip_hosts.remove(host) return host async def get_real_ext_ip(self): """Return real external IP address.""" while self._ip_hosts: try: timeout = aiohttp.ClientTimeout(total=self._timeout) async with aiohttp.ClientSession( timeout=timeout, loop=self._loop ) as session, session.get(self._pop_random_ip_host()) as resp: ip = await resp.text() except asyncio.TimeoutError: pass else: ip = ip.strip() if self.host_is_ip(ip): log.debug('Real external IP: %s', ip) break else: raise RuntimeError('Could not get the external IP') return ip async def _resolve(self, host, qtype): try: resp = await asyncio.wait_for( self._resolver.query(host, qtype), timeout=self._timeout ) except (aiodns.error.DNSError, asyncio.TimeoutError): raise ResolveError else: return resp
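A sketch showing both return shapes of resolve: a bare string by default, and a list of addrinfo-style dicts when `family` is given (hostnames are illustrative). Because `_cached_hosts` is keyed by hostname alone, whichever shape is resolved first for a host is what later calls return, regardless of their arguments.

import asyncio
import socket

from proxybroker.resolver import Resolver

async def main():
    resolver = Resolver(timeout=5)
    ip = await resolver.resolve('example.com')  # 'A' record as a plain str
    print(ip)
    # With family set, the cached value is a list of dicts carrying
    # host/port/family/proto/flags, suitable for loop.create_connection().
    infos = await resolver.resolve('example.org', port=443,
                                   family=socket.AF_INET)
    print(infos[0]['host'], infos[0]['port'])

asyncio.run(main())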
constverum/ProxyBroker
proxybroker/proxy.py
Proxy.create
python
async def create(cls, host, *args, **kwargs): """Asynchronously create a :class:`Proxy` object. :param str host: A passed host can be a domain or IP address. If the host is a domain, try to resolve it :param str \*args: (optional) Positional arguments that :class:`Proxy` takes :param str \*\*kwargs: (optional) Keyword arguments that :class:`Proxy` takes :return: :class:`Proxy` object :rtype: proxybroker.Proxy :raises ResolveError: If could not resolve the host :raises ValueError: If the port > 65535 """ # noqa: W605 loop = kwargs.pop('loop', None) resolver = kwargs.pop('resolver', Resolver(loop=loop)) try: _host = await resolver.resolve(host) self = cls(_host, *args, **kwargs) except (ResolveError, ValueError) as e: log.error('%s:%s: Error at creating: %s' % (host, args[0], e)) raise return self
Asynchronously create a :class:`Proxy` object. :param str host: The passed host can be a domain name or an IP address; a domain name is resolved first :param str \*args: (optional) Positional arguments that :class:`Proxy` takes :param str \*\*kwargs: (optional) Keyword arguments that :class:`Proxy` takes :return: :class:`Proxy` object :rtype: proxybroker.Proxy :raises ResolveError: If the host could not be resolved :raises ValueError: If the port > 65535
train
https://github.com/constverum/ProxyBroker/blob/d21aae8575fc3a95493233ecfd2c7cf47b36b069/proxybroker/proxy.py#L41-L65
null
class Proxy: """Proxy. :param str host: IP address of the proxy :param int port: Port of the proxy :param tuple types: (optional) List of types (protocols) which may be supported by the proxy and which can be checked to work with the proxy :param int timeout: (optional) Timeout of a connection and receive a response in seconds :param bool verify_ssl: (optional) Flag indicating whether to check the SSL certificates. Set to True to check ssl certifications :raises ValueError: If the host not is IP address, or if the port > 65535 """ def __init__( self, host=None, port=None, types=(), timeout=8, verify_ssl=False ): self.host = host if not Resolver.host_is_ip(self.host): raise ValueError( 'The host of proxy should be the IP address. ' 'Try Proxy.create() if the host is a domain' ) self.port = int(port) if self.port > 65535: raise ValueError('The port of proxy cannot be greater than 65535') self.expected_types = set(types) & { 'HTTP', 'HTTPS', 'CONNECT:80', 'CONNECT:25', 'SOCKS4', 'SOCKS5', } self._timeout = timeout self._ssl_context = ( True if verify_ssl else _ssl._create_unverified_context() ) self._types = {} self._is_working = False self.stat = {'requests': 0, 'errors': Counter()} self._ngtr = None self._geo = Resolver.get_ip_info(self.host) self._log = [] self._runtimes = [] self._schemes = () self._closed = True self._reader = {'conn': None, 'ssl': None} self._writer = {'conn': None, 'ssl': None} def __repr__(self): # <Proxy US 1.12 [HTTP: Anonymous, HTTPS] 10.0.0.1:8080> tpinfo = [] order = lambda tp_lvl: (len(tp_lvl[0]), tp_lvl[0][-1]) # noqa: 731 for tp, lvl in sorted(self.types.items(), key=order): s = '{tp}: {lvl}' if lvl else '{tp}' s = s.format(tp=tp, lvl=lvl) tpinfo.append(s) tpinfo = ', '.join(tpinfo) return '<Proxy {code} {avg:.2f}s [{types}] {host}:{port}>'.format( code=self._geo.code, types=tpinfo, host=self.host, port=self.port, avg=self.avg_resp_time, ) @property def types(self): """Types (protocols) supported by the proxy. | Where key is type, value is level of anonymity (only for HTTP, for other types level always is None). | Available types: HTTP, HTTPS, SOCKS4, SOCKS5, CONNECT:80, CONNECT:25 | Available levels: Transparent, Anonymous, High. :rtype: dict """ return self._types @property def is_working(self): """True if the proxy is working, False otherwise. :rtype: bool """ return self._is_working @is_working.setter def is_working(self, val): self._is_working = val @property def writer(self): return self._writer.get('ssl') or self._writer.get('conn') @property def reader(self): return self._reader.get('ssl') or self._reader.get('conn') @property def priority(self): return (self.error_rate, self.avg_resp_time) @property def error_rate(self): """Error rate: from 0 to 1. For example: 0.7 = 70% requests ends with error. :rtype: float .. versionadded:: 0.2.0 """ if not self.stat['requests']: return 0 return round( sum(self.stat['errors'].values()) / self.stat['requests'], 2 ) @property def schemes(self): """Return supported schemes.""" if not self._schemes: _schemes = [] if self.types.keys() & _HTTP_PROTOS: _schemes.append('HTTP') if self.types.keys() & _HTTPS_PROTOS: _schemes.append('HTTPS') self._schemes = tuple(_schemes) return self._schemes @property def avg_resp_time(self): """The average connection/response time. :rtype: float """ if not self._runtimes: return 0 return round(sum(self._runtimes) / len(self._runtimes), 2) @property def avgRespTime(self): """ .. deprecated:: 2.0 Use :attr:`avg_resp_time` instead. 
""" warnings.warn( '`avgRespTime` property is deprecated, ' 'use `avg_resp_time` instead.', DeprecationWarning, ) return self.avg_resp_time @property def geo(self): """Geo information about IP address of the proxy. :return: Named tuple with fields: * ``code`` - ISO country code * ``name`` - Full name of country * ``region_code`` - ISO region code * ``region_name`` - Full name of region * ``city_name`` - Full name of city :rtype: collections.namedtuple .. versionchanged:: 0.2.0 In previous versions return a dictionary, now named tuple. """ return self._geo @property def ngtr(self): return self._ngtr @ngtr.setter def ngtr(self, proto): self._ngtr = NGTRS[proto](self) def as_json(self): """Return the proxy's properties in JSON format. :rtype: dict """ info = { 'host': self.host, 'port': self.port, 'geo': { 'country': {'code': self._geo.code, 'name': self._geo.name}, 'region': { 'code': self._geo.region_code, 'name': self._geo.region_name, }, 'city': self._geo.city_name, }, 'types': [], 'avg_resp_time': self.avg_resp_time, 'error_rate': self.error_rate, } order = lambda tp_lvl: (len(tp_lvl[0]), tp_lvl[0][-1]) # noqa: 731 for tp, lvl in sorted(self.types.items(), key=order): info['types'].append({'type': tp, 'level': lvl or ''}) return info def log(self, msg, stime=0, err=None): ngtr = self.ngtr.name if self.ngtr else 'INFO' runtime = time.time() - stime if stime else 0 log.debug( '{h}:{p} [{n}]: {msg}; Runtime: {rt:.2f}'.format( h=self.host, p=self.port, n=ngtr, msg=msg, rt=runtime ) ) trunc = '...' if len(msg) > 58 else '' msg = '{msg:.60s}{trunc}'.format(msg=msg, trunc=trunc) self._log.append((ngtr, msg, runtime)) if err: self.stat['errors'][err.errmsg] += 1 if runtime and 'timeout' not in msg: self._runtimes.append(runtime) def get_log(self): """Proxy log. :return: The proxy log in format: (negotaitor, msg, runtime) :rtype: tuple .. 
versionadded:: 0.2.0 """ return self._log async def connect(self, ssl=False): err = None msg = '%s' % 'SSL: ' if ssl else '' stime = time.time() self.log('%sInitial connection' % msg) try: if ssl: _type = 'ssl' sock = self._writer['conn'].get_extra_info('socket') params = { 'ssl': self._ssl_context, 'sock': sock, 'server_hostname': self.host, } else: _type = 'conn' params = {'host': self.host, 'port': self.port} self._reader[_type], self._writer[_type] = await asyncio.wait_for( asyncio.open_connection(**params), timeout=self._timeout ) except asyncio.TimeoutError: msg += 'Connection: timeout' err = ProxyTimeoutError(msg) raise err except (ConnectionRefusedError, OSError, _ssl.SSLError): msg += 'Connection: failed' err = ProxyConnError(msg) raise err # except asyncio.CancelledError: # log.debug('Cancelled in proxy.connect()') # raise ProxyConnError() else: msg += 'Connection: success' self._closed = False finally: self.stat['requests'] += 1 self.log(msg, stime, err=err) def close(self): if self._closed: return self._closed = True if self.writer: # try: self.writer.close() # except RuntimeError: # print('Try proxy.close() when loop is closed:', # asyncio.get_event_loop()._closed) self._reader = {'conn': None, 'ssl': None} self._writer = {'conn': None, 'ssl': None} self.log('Connection: closed') self._ngtr = None async def send(self, req): msg, err = '', None _req = req.encode() if not isinstance(req, bytes) else req try: self.writer.write(_req) await self.writer.drain() except ConnectionResetError: msg = '; Sending: failed' err = ProxySendError(msg) raise err finally: self.log('Request: %s%s' % (req, msg), err=err) async def recv(self, length=0, head_only=False): resp, msg, err = b'', '', None stime = time.time() try: resp = await asyncio.wait_for( self._recv(length, head_only), timeout=self._timeout ) except asyncio.TimeoutError: msg = 'Received: timeout' err = ProxyTimeoutError(msg) raise err except (ConnectionResetError, OSError): msg = 'Received: failed' # (connection is reset by the peer) err = ProxyRecvError(msg) raise err else: msg = 'Received: %s bytes' % len(resp) if not resp: err = ProxyEmptyRecvError(msg) raise err finally: if resp: msg += ': %s' % resp[:12] self.log(msg, stime, err=err) return resp async def _recv(self, length=0, head_only=False): resp = b'' if length: try: resp = await self.reader.readexactly(length) except asyncio.IncompleteReadError as e: resp = e.partial else: body_size, body_recv, chunked = 0, 0, None while not self.reader.at_eof(): line = await self.reader.readline() resp += line if body_size: body_recv += len(line) if body_recv >= body_size: break elif chunked and line == b'0\r\n': break elif not body_size and line == b'\r\n': if head_only: break headers = parse_headers(resp) body_size = int(headers.get('Content-Length', 0)) if not body_size: chunked = headers.get('Transfer-Encoding') == 'chunked' return resp
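A sketch of the factory above, assuming network access; host and port are placeholders. Proxy.create is the domain-friendly entry point, since the plain constructor rejects any host that is not an IPv4 address.

import asyncio

from proxybroker.proxy import Proxy

async def main():
    # Resolves the domain through Resolver.resolve, then builds the Proxy;
    # raises ResolveError if DNS fails, ValueError for a bad port.
    proxy = await Proxy.create('example.com', 8080)
    print(proxy)  # repr along the lines of <Proxy US 0.00s [] 93.184.216.34:8080>

asyncio.run(main())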
constverum/ProxyBroker
proxybroker/proxy.py
Proxy.error_rate
python
def error_rate(self): if not self.stat['requests']: return 0 return round( sum(self.stat['errors'].values()) / self.stat['requests'], 2 )
Error rate: from 0 to 1. For example, 0.7 means 70% of requests ended with an error. :rtype: float .. versionadded:: 0.2.0
train
https://github.com/constverum/ProxyBroker/blob/d21aae8575fc3a95493233ecfd2c7cf47b36b069/proxybroker/proxy.py#L160-L173
null
class Proxy: """Proxy. :param str host: IP address of the proxy :param int port: Port of the proxy :param tuple types: (optional) List of types (protocols) which may be supported by the proxy and which can be checked to work with the proxy :param int timeout: (optional) Timeout of a connection and receive a response in seconds :param bool verify_ssl: (optional) Flag indicating whether to check the SSL certificates. Set to True to check ssl certifications :raises ValueError: If the host not is IP address, or if the port > 65535 """ @classmethod async def create(cls, host, *args, **kwargs): """Asynchronously create a :class:`Proxy` object. :param str host: A passed host can be a domain or IP address. If the host is a domain, try to resolve it :param str \*args: (optional) Positional arguments that :class:`Proxy` takes :param str \*\*kwargs: (optional) Keyword arguments that :class:`Proxy` takes :return: :class:`Proxy` object :rtype: proxybroker.Proxy :raises ResolveError: If could not resolve the host :raises ValueError: If the port > 65535 """ # noqa: W605 loop = kwargs.pop('loop', None) resolver = kwargs.pop('resolver', Resolver(loop=loop)) try: _host = await resolver.resolve(host) self = cls(_host, *args, **kwargs) except (ResolveError, ValueError) as e: log.error('%s:%s: Error at creating: %s' % (host, args[0], e)) raise return self def __init__( self, host=None, port=None, types=(), timeout=8, verify_ssl=False ): self.host = host if not Resolver.host_is_ip(self.host): raise ValueError( 'The host of proxy should be the IP address. ' 'Try Proxy.create() if the host is a domain' ) self.port = int(port) if self.port > 65535: raise ValueError('The port of proxy cannot be greater than 65535') self.expected_types = set(types) & { 'HTTP', 'HTTPS', 'CONNECT:80', 'CONNECT:25', 'SOCKS4', 'SOCKS5', } self._timeout = timeout self._ssl_context = ( True if verify_ssl else _ssl._create_unverified_context() ) self._types = {} self._is_working = False self.stat = {'requests': 0, 'errors': Counter()} self._ngtr = None self._geo = Resolver.get_ip_info(self.host) self._log = [] self._runtimes = [] self._schemes = () self._closed = True self._reader = {'conn': None, 'ssl': None} self._writer = {'conn': None, 'ssl': None} def __repr__(self): # <Proxy US 1.12 [HTTP: Anonymous, HTTPS] 10.0.0.1:8080> tpinfo = [] order = lambda tp_lvl: (len(tp_lvl[0]), tp_lvl[0][-1]) # noqa: 731 for tp, lvl in sorted(self.types.items(), key=order): s = '{tp}: {lvl}' if lvl else '{tp}' s = s.format(tp=tp, lvl=lvl) tpinfo.append(s) tpinfo = ', '.join(tpinfo) return '<Proxy {code} {avg:.2f}s [{types}] {host}:{port}>'.format( code=self._geo.code, types=tpinfo, host=self.host, port=self.port, avg=self.avg_resp_time, ) @property def types(self): """Types (protocols) supported by the proxy. | Where key is type, value is level of anonymity (only for HTTP, for other types level always is None). | Available types: HTTP, HTTPS, SOCKS4, SOCKS5, CONNECT:80, CONNECT:25 | Available levels: Transparent, Anonymous, High. :rtype: dict """ return self._types @property def is_working(self): """True if the proxy is working, False otherwise. 
:rtype: bool """ return self._is_working @is_working.setter def is_working(self, val): self._is_working = val @property def writer(self): return self._writer.get('ssl') or self._writer.get('conn') @property def reader(self): return self._reader.get('ssl') or self._reader.get('conn') @property def priority(self): return (self.error_rate, self.avg_resp_time) @property def schemes(self): """Return supported schemes.""" if not self._schemes: _schemes = [] if self.types.keys() & _HTTP_PROTOS: _schemes.append('HTTP') if self.types.keys() & _HTTPS_PROTOS: _schemes.append('HTTPS') self._schemes = tuple(_schemes) return self._schemes @property def avg_resp_time(self): """The average connection/response time. :rtype: float """ if not self._runtimes: return 0 return round(sum(self._runtimes) / len(self._runtimes), 2) @property def avgRespTime(self): """ .. deprecated:: 2.0 Use :attr:`avg_resp_time` instead. """ warnings.warn( '`avgRespTime` property is deprecated, ' 'use `avg_resp_time` instead.', DeprecationWarning, ) return self.avg_resp_time @property def geo(self): """Geo information about IP address of the proxy. :return: Named tuple with fields: * ``code`` - ISO country code * ``name`` - Full name of country * ``region_code`` - ISO region code * ``region_name`` - Full name of region * ``city_name`` - Full name of city :rtype: collections.namedtuple .. versionchanged:: 0.2.0 In previous versions return a dictionary, now named tuple. """ return self._geo @property def ngtr(self): return self._ngtr @ngtr.setter def ngtr(self, proto): self._ngtr = NGTRS[proto](self) def as_json(self): """Return the proxy's properties in JSON format. :rtype: dict """ info = { 'host': self.host, 'port': self.port, 'geo': { 'country': {'code': self._geo.code, 'name': self._geo.name}, 'region': { 'code': self._geo.region_code, 'name': self._geo.region_name, }, 'city': self._geo.city_name, }, 'types': [], 'avg_resp_time': self.avg_resp_time, 'error_rate': self.error_rate, } order = lambda tp_lvl: (len(tp_lvl[0]), tp_lvl[0][-1]) # noqa: 731 for tp, lvl in sorted(self.types.items(), key=order): info['types'].append({'type': tp, 'level': lvl or ''}) return info def log(self, msg, stime=0, err=None): ngtr = self.ngtr.name if self.ngtr else 'INFO' runtime = time.time() - stime if stime else 0 log.debug( '{h}:{p} [{n}]: {msg}; Runtime: {rt:.2f}'.format( h=self.host, p=self.port, n=ngtr, msg=msg, rt=runtime ) ) trunc = '...' if len(msg) > 58 else '' msg = '{msg:.60s}{trunc}'.format(msg=msg, trunc=trunc) self._log.append((ngtr, msg, runtime)) if err: self.stat['errors'][err.errmsg] += 1 if runtime and 'timeout' not in msg: self._runtimes.append(runtime) def get_log(self): """Proxy log. :return: The proxy log in format: (negotaitor, msg, runtime) :rtype: tuple .. 
versionadded:: 0.2.0 """ return self._log async def connect(self, ssl=False): err = None msg = '%s' % 'SSL: ' if ssl else '' stime = time.time() self.log('%sInitial connection' % msg) try: if ssl: _type = 'ssl' sock = self._writer['conn'].get_extra_info('socket') params = { 'ssl': self._ssl_context, 'sock': sock, 'server_hostname': self.host, } else: _type = 'conn' params = {'host': self.host, 'port': self.port} self._reader[_type], self._writer[_type] = await asyncio.wait_for( asyncio.open_connection(**params), timeout=self._timeout ) except asyncio.TimeoutError: msg += 'Connection: timeout' err = ProxyTimeoutError(msg) raise err except (ConnectionRefusedError, OSError, _ssl.SSLError): msg += 'Connection: failed' err = ProxyConnError(msg) raise err # except asyncio.CancelledError: # log.debug('Cancelled in proxy.connect()') # raise ProxyConnError() else: msg += 'Connection: success' self._closed = False finally: self.stat['requests'] += 1 self.log(msg, stime, err=err) def close(self): if self._closed: return self._closed = True if self.writer: # try: self.writer.close() # except RuntimeError: # print('Try proxy.close() when loop is closed:', # asyncio.get_event_loop()._closed) self._reader = {'conn': None, 'ssl': None} self._writer = {'conn': None, 'ssl': None} self.log('Connection: closed') self._ngtr = None async def send(self, req): msg, err = '', None _req = req.encode() if not isinstance(req, bytes) else req try: self.writer.write(_req) await self.writer.drain() except ConnectionResetError: msg = '; Sending: failed' err = ProxySendError(msg) raise err finally: self.log('Request: %s%s' % (req, msg), err=err) async def recv(self, length=0, head_only=False): resp, msg, err = b'', '', None stime = time.time() try: resp = await asyncio.wait_for( self._recv(length, head_only), timeout=self._timeout ) except asyncio.TimeoutError: msg = 'Received: timeout' err = ProxyTimeoutError(msg) raise err except (ConnectionResetError, OSError): msg = 'Received: failed' # (connection is reset by the peer) err = ProxyRecvError(msg) raise err else: msg = 'Received: %s bytes' % len(resp) if not resp: err = ProxyEmptyRecvError(msg) raise err finally: if resp: msg += ': %s' % resp[:12] self.log(msg, stime, err=err) return resp async def _recv(self, length=0, head_only=False): resp = b'' if length: try: resp = await self.reader.readexactly(length) except asyncio.IncompleteReadError as e: resp = e.partial else: body_size, body_recv, chunked = 0, 0, None while not self.reader.at_eof(): line = await self.reader.readline() resp += line if body_size: body_recv += len(line) if body_recv >= body_size: break elif chunked and line == b'0\r\n': break elif not body_size and line == b'\r\n': if head_only: break headers = parse_headers(resp) body_size = int(headers.get('Content-Length', 0)) if not body_size: chunked = headers.get('Transfer-Encoding') == 'chunked' return resp
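A small sketch of how the stat counters feed error_rate. The error key is made up, and constructing Proxy directly assumes the GeoIP database is importable (its __init__ calls Resolver.get_ip_info).

from proxybroker.proxy import Proxy

proxy = Proxy('127.0.0.1', 3128)       # the plain constructor needs an IP host
proxy.stat['requests'] = 10            # normally incremented by connect()
proxy.stat['errors']['timeout'] += 3   # 'errors' is a collections.Counter
print(proxy.error_rate)                # 0.3 -> 30% of requests errored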
constverum/ProxyBroker
proxybroker/proxy.py
Proxy.schemes
python
def schemes(self): if not self._schemes: _schemes = [] if self.types.keys() & _HTTP_PROTOS: _schemes.append('HTTP') if self.types.keys() & _HTTPS_PROTOS: _schemes.append('HTTPS') self._schemes = tuple(_schemes) return self._schemes
Return supported schemes.
train
https://github.com/constverum/ProxyBroker/blob/d21aae8575fc3a95493233ecfd2c7cf47b36b069/proxybroker/proxy.py#L176-L185
null
class Proxy: """Proxy. :param str host: IP address of the proxy :param int port: Port of the proxy :param tuple types: (optional) List of types (protocols) which may be supported by the proxy and which can be checked to work with the proxy :param int timeout: (optional) Timeout of a connection and receive a response in seconds :param bool verify_ssl: (optional) Flag indicating whether to check the SSL certificates. Set to True to check ssl certifications :raises ValueError: If the host not is IP address, or if the port > 65535 """ @classmethod async def create(cls, host, *args, **kwargs): """Asynchronously create a :class:`Proxy` object. :param str host: A passed host can be a domain or IP address. If the host is a domain, try to resolve it :param str \*args: (optional) Positional arguments that :class:`Proxy` takes :param str \*\*kwargs: (optional) Keyword arguments that :class:`Proxy` takes :return: :class:`Proxy` object :rtype: proxybroker.Proxy :raises ResolveError: If could not resolve the host :raises ValueError: If the port > 65535 """ # noqa: W605 loop = kwargs.pop('loop', None) resolver = kwargs.pop('resolver', Resolver(loop=loop)) try: _host = await resolver.resolve(host) self = cls(_host, *args, **kwargs) except (ResolveError, ValueError) as e: log.error('%s:%s: Error at creating: %s' % (host, args[0], e)) raise return self def __init__( self, host=None, port=None, types=(), timeout=8, verify_ssl=False ): self.host = host if not Resolver.host_is_ip(self.host): raise ValueError( 'The host of proxy should be the IP address. ' 'Try Proxy.create() if the host is a domain' ) self.port = int(port) if self.port > 65535: raise ValueError('The port of proxy cannot be greater than 65535') self.expected_types = set(types) & { 'HTTP', 'HTTPS', 'CONNECT:80', 'CONNECT:25', 'SOCKS4', 'SOCKS5', } self._timeout = timeout self._ssl_context = ( True if verify_ssl else _ssl._create_unverified_context() ) self._types = {} self._is_working = False self.stat = {'requests': 0, 'errors': Counter()} self._ngtr = None self._geo = Resolver.get_ip_info(self.host) self._log = [] self._runtimes = [] self._schemes = () self._closed = True self._reader = {'conn': None, 'ssl': None} self._writer = {'conn': None, 'ssl': None} def __repr__(self): # <Proxy US 1.12 [HTTP: Anonymous, HTTPS] 10.0.0.1:8080> tpinfo = [] order = lambda tp_lvl: (len(tp_lvl[0]), tp_lvl[0][-1]) # noqa: 731 for tp, lvl in sorted(self.types.items(), key=order): s = '{tp}: {lvl}' if lvl else '{tp}' s = s.format(tp=tp, lvl=lvl) tpinfo.append(s) tpinfo = ', '.join(tpinfo) return '<Proxy {code} {avg:.2f}s [{types}] {host}:{port}>'.format( code=self._geo.code, types=tpinfo, host=self.host, port=self.port, avg=self.avg_resp_time, ) @property def types(self): """Types (protocols) supported by the proxy. | Where key is type, value is level of anonymity (only for HTTP, for other types level always is None). | Available types: HTTP, HTTPS, SOCKS4, SOCKS5, CONNECT:80, CONNECT:25 | Available levels: Transparent, Anonymous, High. :rtype: dict """ return self._types @property def is_working(self): """True if the proxy is working, False otherwise. 
:rtype: bool """ return self._is_working @is_working.setter def is_working(self, val): self._is_working = val @property def writer(self): return self._writer.get('ssl') or self._writer.get('conn') @property def reader(self): return self._reader.get('ssl') or self._reader.get('conn') @property def priority(self): return (self.error_rate, self.avg_resp_time) @property def error_rate(self): """Error rate: from 0 to 1. For example: 0.7 = 70% requests ends with error. :rtype: float .. versionadded:: 0.2.0 """ if not self.stat['requests']: return 0 return round( sum(self.stat['errors'].values()) / self.stat['requests'], 2 ) @property def avg_resp_time(self): """The average connection/response time. :rtype: float """ if not self._runtimes: return 0 return round(sum(self._runtimes) / len(self._runtimes), 2) @property def avgRespTime(self): """ .. deprecated:: 2.0 Use :attr:`avg_resp_time` instead. """ warnings.warn( '`avgRespTime` property is deprecated, ' 'use `avg_resp_time` instead.', DeprecationWarning, ) return self.avg_resp_time @property def geo(self): """Geo information about IP address of the proxy. :return: Named tuple with fields: * ``code`` - ISO country code * ``name`` - Full name of country * ``region_code`` - ISO region code * ``region_name`` - Full name of region * ``city_name`` - Full name of city :rtype: collections.namedtuple .. versionchanged:: 0.2.0 In previous versions return a dictionary, now named tuple. """ return self._geo @property def ngtr(self): return self._ngtr @ngtr.setter def ngtr(self, proto): self._ngtr = NGTRS[proto](self) def as_json(self): """Return the proxy's properties in JSON format. :rtype: dict """ info = { 'host': self.host, 'port': self.port, 'geo': { 'country': {'code': self._geo.code, 'name': self._geo.name}, 'region': { 'code': self._geo.region_code, 'name': self._geo.region_name, }, 'city': self._geo.city_name, }, 'types': [], 'avg_resp_time': self.avg_resp_time, 'error_rate': self.error_rate, } order = lambda tp_lvl: (len(tp_lvl[0]), tp_lvl[0][-1]) # noqa: 731 for tp, lvl in sorted(self.types.items(), key=order): info['types'].append({'type': tp, 'level': lvl or ''}) return info def log(self, msg, stime=0, err=None): ngtr = self.ngtr.name if self.ngtr else 'INFO' runtime = time.time() - stime if stime else 0 log.debug( '{h}:{p} [{n}]: {msg}; Runtime: {rt:.2f}'.format( h=self.host, p=self.port, n=ngtr, msg=msg, rt=runtime ) ) trunc = '...' if len(msg) > 58 else '' msg = '{msg:.60s}{trunc}'.format(msg=msg, trunc=trunc) self._log.append((ngtr, msg, runtime)) if err: self.stat['errors'][err.errmsg] += 1 if runtime and 'timeout' not in msg: self._runtimes.append(runtime) def get_log(self): """Proxy log. :return: The proxy log in format: (negotaitor, msg, runtime) :rtype: tuple .. 
versionadded:: 0.2.0 """ return self._log async def connect(self, ssl=False): err = None msg = '%s' % 'SSL: ' if ssl else '' stime = time.time() self.log('%sInitial connection' % msg) try: if ssl: _type = 'ssl' sock = self._writer['conn'].get_extra_info('socket') params = { 'ssl': self._ssl_context, 'sock': sock, 'server_hostname': self.host, } else: _type = 'conn' params = {'host': self.host, 'port': self.port} self._reader[_type], self._writer[_type] = await asyncio.wait_for( asyncio.open_connection(**params), timeout=self._timeout ) except asyncio.TimeoutError: msg += 'Connection: timeout' err = ProxyTimeoutError(msg) raise err except (ConnectionRefusedError, OSError, _ssl.SSLError): msg += 'Connection: failed' err = ProxyConnError(msg) raise err # except asyncio.CancelledError: # log.debug('Cancelled in proxy.connect()') # raise ProxyConnError() else: msg += 'Connection: success' self._closed = False finally: self.stat['requests'] += 1 self.log(msg, stime, err=err) def close(self): if self._closed: return self._closed = True if self.writer: # try: self.writer.close() # except RuntimeError: # print('Try proxy.close() when loop is closed:', # asyncio.get_event_loop()._closed) self._reader = {'conn': None, 'ssl': None} self._writer = {'conn': None, 'ssl': None} self.log('Connection: closed') self._ngtr = None async def send(self, req): msg, err = '', None _req = req.encode() if not isinstance(req, bytes) else req try: self.writer.write(_req) await self.writer.drain() except ConnectionResetError: msg = '; Sending: failed' err = ProxySendError(msg) raise err finally: self.log('Request: %s%s' % (req, msg), err=err) async def recv(self, length=0, head_only=False): resp, msg, err = b'', '', None stime = time.time() try: resp = await asyncio.wait_for( self._recv(length, head_only), timeout=self._timeout ) except asyncio.TimeoutError: msg = 'Received: timeout' err = ProxyTimeoutError(msg) raise err except (ConnectionResetError, OSError): msg = 'Received: failed' # (connection is reset by the peer) err = ProxyRecvError(msg) raise err else: msg = 'Received: %s bytes' % len(resp) if not resp: err = ProxyEmptyRecvError(msg) raise err finally: if resp: msg += ': %s' % resp[:12] self.log(msg, stime, err=err) return resp async def _recv(self, length=0, head_only=False): resp = b'' if length: try: resp = await self.reader.readexactly(length) except asyncio.IncompleteReadError as e: resp = e.partial else: body_size, body_recv, chunked = 0, 0, None while not self.reader.at_eof(): line = await self.reader.readline() resp += line if body_size: body_recv += len(line) if body_recv >= body_size: break elif chunked and line == b'0\r\n': break elif not body_size and line == b'\r\n': if head_only: break headers = parse_headers(resp) body_size = int(headers.get('Content-Length', 0)) if not body_size: chunked = headers.get('Transfer-Encoding') == 'chunked' return resp
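A sketch of the schemes property and its caching: the tuple is memoized in `_schemes` on first access, so types discovered later are not reflected. Which types map to HTTP or HTTPS depends on the module-level _HTTP_PROTOS/_HTTPS_PROTOS sets, which these records do not show.

from proxybroker.proxy import Proxy

proxy = Proxy('127.0.0.1', 8080)
proxy.types['HTTP'] = 'Anonymous'  # normally filled in by the checker
first = proxy.schemes              # computed from types, then cached
proxy.types['HTTPS'] = None
# True, assuming 'HTTP' is in _HTTP_PROTOS so the first tuple is non-empty:
print(proxy.schemes == first)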