repository_name stringclasses 316 values | func_path_in_repository stringlengths 6 223 | func_name stringlengths 1 134 | language stringclasses 1 value | func_code_string stringlengths 57 65.5k | func_documentation_string stringlengths 1 46.3k | split_name stringclasses 1 value | func_code_url stringlengths 91 315 | called_functions listlengths 1 156 ⌀ | enclosing_scope stringlengths 2 1.48M |
|---|---|---|---|---|---|---|---|---|---|
projecthamster/hamster-lib | hamster_lib/helpers/helpers.py | parse_raw_fact | python | def parse_raw_fact(raw_fact):
def at_split(string):
"""
Return everything in front of the (leftmost) '@'-symbol, if it was used.
Args:
string (str): The string to be parsed.
Returns:
tuple: (front, back) representing the substrings before and after the
most left ``@`` symbol. If no such symbol was present at all,
``back=None``. Both substrings have been trimmed of any leading
and trailing whitespace.
Note:
If our string contains multiple ``@`` symbols, all but the most left
one will be treated as part of the regular ``back`` string.
This allows for usage of the symbol in descriptions, categories and tags.
Also note that *no tags are extracted* any tags included will be considered
part of the ``category`` string. We are likely to remove this parsing function
in ``0.14.0`` in favour of a regex based solution so we will not spend
time on tags for now
"""
result = string.split('@', 1)
length = len(result)
if length == 1:
front, back = result[0].strip(), None
else:
front, back = result
front, back = front.strip(), back.strip()
return (front, back)
def comma_split(string):
"""
Split string at the most left comma.
Args:
string (str): String to be processed. At this stage this should
look something like ``<Category> and <tags>, <Description>
Returns
tuple: (category_and_tags, description). Both substrings have their
leading/trailing whitespace removed.
``category_and_tags`` may include >=0 tags indicated by a leading ``#``.
As we have used the most left ``,`` to separate both substrings that
means that categories and tags can not contain any ``,`` but the
description text may contain as many as wished.
"""
result = string.split(',', 1)
length = len(result)
if length == 1:
category, description = result[0].strip(), None
else:
category, description = tuple(result)
category, description = category.strip(), description.strip()
return (category.strip(), description)
time_info, rest = time_helpers.extract_time_info(raw_fact)
activity_name, back = at_split(rest)
if back:
category_name, description = comma_split(back)
else:
category_name, description = None, None
return {
'timeinfo': time_info,
'category': category_name,
'activity': activity_name,
'description': description,
} | Extract semantically meaningful sub-components from a ``raw fact`` text.
Args:
raw_fact (text_type): ``raw fact`` text to be parsed.
Returns:
dict: dict with sub-components as values. | train | https://github.com/projecthamster/hamster-lib/blob/bc34c822c239a6fa0cde3a4f90b0d00506fb5a4f/hamster_lib/helpers/helpers.py#L65-L147 | [
"def extract_time_info(text):\n \"\"\"\n Extract valid time(-range) information from a string according to our specs.\n\n Args:\n text (text_type): Raw string containing encoded time(-span) information.\n Date/Time-combinations are expected in a ``YYYY-MM-DD hh:mm`` format.\n Relative times can be given with ``-minutes``.\n Please note that either *relative* or *absolute* times will be considered.\n It is possible to either just specify a start date (as time, date,\n or datetime) or a timerange (start and end). If a timerange is given\n start and end need to be delimited exactly by ' - '.\n\n Returns:\n tuple: ``(timeframe, rest)`` tuple. Where ``timeframe`` is a tuple that\n provides convinient access to all seperate elements extracted from\n the raw string and ``rest`` is any substring stat has not been\n matched to valid time/date info.\n\n Note:\n * Relative times always return just ``(None, None, None, None, timedelta)``.\n \"\"\"\n\n # [TODO] Add a list of supported formats.\n\n def get_time(time, seconds=None):\n \"\"\"Convert a times string representation to datetime.time instance.\"\"\"\n if time is None:\n return time\n\n if seconds:\n time_format = '%H:%M:%S'\n else:\n time_format = '%H:%M'\n\n return datetime.datetime.strptime(time.strip(), time_format).time()\n\n def get_date(date):\n \"\"\"Convert a dates string representation to datetime.date instance.\"\"\"\n if date:\n date = datetime.datetime.strptime(date.strip(), \"%Y-%m-%d\").date()\n return date\n\n def date_time_from_groupdict(groupdict):\n \"\"\"Return a date/time tuple by introspecting a passed dict.\"\"\"\n if groupdict['datetime']:\n dt = parse_time(groupdict['datetime'])\n time = dt.time()\n date = dt.date()\n else:\n date = get_date(groupdict.get('date'))\n time = get_time(groupdict.get('time'), groupdict.get('seconds'))\n return (date, time)\n\n # Baseline/default values.\n result = {\n 'start_date': None,\n 'start_time': None,\n 'end_date': None,\n 'end_time': None,\n 'offset': None\n 
}\n rest = None\n\n # Individual patterns for time/date substrings.\n relative_pattern = '(?P<relative>-\\d+)'\n time_pattern = '(?P<time>\\d{2}:\\d{2}(?P<seconds>:\\d{2})?)'\n date_pattern = '(?P<date>\\d{4}-\\d{2}-\\d{2})'\n datetime_pattern = '(?P<datetime>\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}(:\\d{2})?)'\n\n start = re.match('^({}|{}|{}|{}) (?P<rest>.+)'.format(relative_pattern, datetime_pattern,\n date_pattern, time_pattern), text)\n if start:\n start_groups = start.groupdict()\n if start_groups['relative']:\n result['offset'] = datetime.timedelta(minutes=abs(int(start_groups['relative'])))\n else:\n date, time = date_time_from_groupdict(start_groups)\n result['start_date'] = date\n result['start_time'] = time\n rest = start_groups['rest']\n\n if rest:\n end = re.match('^- ({}|{}|{}) (?P<rest>.+)'.format(datetime_pattern, date_pattern,\n time_pattern), rest)\n else:\n end = None\n\n if end and not start_groups['relative']:\n end_groups = end.groupdict()\n date, time = date_time_from_groupdict(end_groups)\n result['end_date'] = date\n result['end_time'] = time\n rest = end_groups['rest']\n\n result = TimeFrame(result['start_date'], result['start_time'], result['end_date'],\n result['end_time'], result['offset'])\n\n # Consider the whole string as 'rest' if no time/date info was extracted\n if not rest:\n rest = text\n return (result, rest.strip())\n",
"def at_split(string):\n \"\"\"\n Return everything in front of the (leftmost) '@'-symbol, if it was used.\n\n Args:\n string (str): The string to be parsed.\n\n Returns:\n tuple: (front, back) representing the substrings before and after the\n most left ``@`` symbol. If no such symbol was present at all,\n ``back=None``. Both substrings have been trimmed of any leading\n and trailing whitespace.\n\n Note:\n If our string contains multiple ``@`` symbols, all but the most left\n one will be treated as part of the regular ``back`` string.\n This allows for usage of the symbol in descriptions, categories and tags.\n\n Also note that *no tags are extracted* any tags included will be considered\n part of the ``category`` string. We are likely to remove this parsing function\n in ``0.14.0`` in favour of a regex based solution so we will not spend\n time on tags for now\n \"\"\"\n result = string.split('@', 1)\n length = len(result)\n if length == 1:\n front, back = result[0].strip(), None\n else:\n front, back = result\n front, back = front.strip(), back.strip()\n return (front, back)\n",
"def comma_split(string):\n \"\"\"\n Split string at the most left comma.\n\n Args:\n string (str): String to be processed. At this stage this should\n look something like ``<Category> and <tags>, <Description>\n\n\n Returns\n tuple: (category_and_tags, description). Both substrings have their\n leading/trailing whitespace removed.\n ``category_and_tags`` may include >=0 tags indicated by a leading ``#``.\n As we have used the most left ``,`` to separate both substrings that\n means that categories and tags can not contain any ``,`` but the\n description text may contain as many as wished.\n \"\"\"\n\n result = string.split(',', 1)\n length = len(result)\n if length == 1:\n category, description = result[0].strip(), None\n else:\n category, description = tuple(result)\n category, description = category.strip(), description.strip()\n return (category.strip(), description)\n"
] | # -*- encoding: utf-8 -*-
# Copyright (C) 2015-2016 Eric Goller <eric.goller@ninjaduck.solutions>
# This file is part of 'hamster-lib'.
#
# 'hamster-lib' is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# 'hamster-lib' is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with 'hamster-lib'. If not, see <http://www.gnu.org/licenses/>.
"""
This module provides several convenience and intermediate functions to perform common tasks.
"""
import pickle
from hamster_lib.helpers import time as time_helpers
# Non public helpers
# These should be of very little use for any client module.
def _load_tmp_fact(filepath):
"""
Load an 'ongoing fact' from a given location.
Args:
filepath: Full path to the tmpfile location.
Returns:
hamster_lib.Fact: ``Fact`` representing the 'ongoing fact'. Returns ``False``
if no file was found.
Raises:
TypeError: If for some reason our stored instance is no instance of
``hamster_lib.Fact``.
"""
from hamster_lib import Fact
try:
with open(filepath, 'rb') as fobj:
fact = pickle.load(fobj)
except IOError:
fact = False
else:
if not isinstance(fact, Fact):
raise TypeError(_(
"Something went wrong. It seems our pickled file does not contain"
" valid Fact instance. [Content: '{content}'; Type: {type}".format(
content=fact, type=type(fact))
))
return fact
|
wikimedia/ores | ores/scoring_systems/celery_queue.py | redis_from_url | python | def redis_from_url(url):
# Makes sure that we only try to import redis when we need
# to use it
import redis
url = url or ""
parsed_url = urlparse(url)
if parsed_url.scheme != "redis":
return None
kwargs = {}
match = PASS_HOST_PORT.match(parsed_url.netloc)
if match.group('password') is not None:
kwargs['password'] = match.group('password')
if match.group('host') is not None:
kwargs['host'] = match.group('host')
if match.group('port') is not None:
kwargs['port'] = int(match.group('port'))
if len(parsed_url.path) > 1:
# Removes "/" from the beginning
kwargs['db'] = int(parsed_url.path[1:])
return redis.StrictRedis(**kwargs) | Converts a redis URL used by celery into a `redis.Redis` object. | train | https://github.com/wikimedia/ores/blob/75599b6ba0172c86d94f7f7e1e05a3c282333a18/ores/scoring_systems/celery_queue.py#L263-L289 | null | import logging
import re
from itertools import chain
from urllib.parse import urlparse
import celery
import celery.exceptions
import celery.states
import mwapi.errors
import revscoring.errors
from ores.score_request import ScoreRequest
from .. import errors
from ..task_tracker import NullTaskTracker, RedisTaskTracker
from .scoring_system import ScoringSystem
logger = logging.getLogger(__name__)
_applications = []
DEFAULT_CELERY_QUEUE = "celery"
SENT = "SENT"
REQUESTED = "REQUESTED"
class CeleryQueue(ScoringSystem):
def __init__(self, *args, application, queue_maxsize=None,
task_tracker=None, **kwargs):
super().__init__(*args, **kwargs)
global _applications
self.application = application
self.queue_maxsize = int(queue_maxsize) if queue_maxsize is not None \
else None
self.redis = redis_from_url(self.application.conf.BROKER_URL)
self.task_tracker = task_tracker or NullTaskTracker()
if self.queue_maxsize is not None and self.redis is None:
logger.warning("No redis connection. Can't check queue size")
self._initialize_tasks()
_applications.append(application)
def _initialize_tasks(self):
expected_errors = (revscoring.errors.RevisionNotFound,
revscoring.errors.PageNotFound,
revscoring.errors.UserNotFound,
revscoring.errors.DependencyError,
mwapi.errors.RequestError,
mwapi.errors.TimeoutError,
errors.TimeoutError)
@self.application.task(throws=expected_errors,
queue=DEFAULT_CELERY_QUEUE)
def _process_score_map(request, model_names, rev_id, root_cache):
if not isinstance(request, ScoreRequest):
request = ScoreRequest.from_json(request)
if not isinstance(model_names, frozenset):
model_names = frozenset(model_names)
logger.info("Generating a score map for {0}"
.format(request.format(rev_id, model_names)))
score_map = ScoringSystem._process_score_map(
self, request, rev_id, model_names,
root_cache=root_cache)
logger.info("Completed generating score map for {0}"
.format(request.format(rev_id, model_names)))
return score_map
self._process_score_map = _process_score_map
def _process_missing_scores(self, request, missing_model_set_revs,
root_caches, inprogress_results=None):
logger.debug("Processing missing scores {0}:{1}."
.format(request.context_name, missing_model_set_revs))
context = self[request.context_name]
inprogress_results = inprogress_results or {}
# Generate score results
results = {}
for missing_models, rev_ids in missing_model_set_revs.items():
for rev_id in rev_ids:
injection_cache = request.injection_caches.get(rev_id)
if rev_id not in root_caches:
for model_name in missing_models:
task_id = context.format_id_string(
model_name, rev_id, request,
injection_cache=injection_cache)
self.application.backend.mark_as_failure(
task_id, RuntimeError("Never started"))
continue
root_cache = {str(k): v for k, v in root_caches[rev_id].items()}
result = self._process_score_map.delay(
request.to_json(), list(missing_models), rev_id, root_cache)
self._lock_process(missing_models, rev_id, request,
injection_cache, result.id)
for model_name in missing_models:
if rev_id in results:
results[rev_id][model_name] = result
else:
results[rev_id] = {model_name: result}
# Read results
rev_scores = {}
score_errors = {}
combined_results = chain(inprogress_results.items(), results.items())
for rev_id, model_results in combined_results:
injection_cache = request.injection_caches.get(rev_id)
if rev_id not in rev_scores:
rev_scores[rev_id] = {}
for model_name, score_result in model_results.items():
try:
task_result = score_result.get(timeout=self.timeout)
except celery.exceptions.TimeoutError:
timeout_error = errors.TimeoutError(
"Timed out after {0} seconds.".format(self.timeout))
score_errors[rev_id] = timeout_error
self.application.backend.mark_as_failure(
score_result.id, timeout_error)
except Exception as error:
score_errors[rev_id] = error
else:
if model_name in task_result:
rev_scores[rev_id][model_name] = task_result[model_name]
else:
raise RuntimeError('Model is not in the task but '
'the task locked the model')
key = context.format_id_string(
model_name, rev_id, request,
injection_cache=injection_cache)
self.task_tracker.release(key)
return rev_scores, score_errors
def _lock_process(self, models, rev_id, request, injection_cache,
task_id):
context = self[request.context_name]
for model in models:
key = context.format_id_string(
model, rev_id, request,
injection_cache=injection_cache)
self.task_tracker.lock(key, task_id)
def _lookup_inprogress_results(self, request, response):
context = self[request.context_name]
inprogress_results = {}
for rev_id in request.rev_ids:
injection_cache = request.injection_caches.get(rev_id)
for model_name in request.model_names:
if rev_id in response.scores and \
model_name in response.scores[rev_id]:
continue
key = context.format_id_string(
model_name, rev_id, request,
injection_cache=injection_cache)
task_id = self.task_tracker.get_in_progress_task(key)
if task_id:
score_result = \
self._process_score_map.AsyncResult(task_id)
logger.info("Found in-progress result for {0} -- {1}"
.format(task_id, score_result.state))
if rev_id in inprogress_results:
inprogress_results[rev_id][model_name] = score_result
else:
inprogress_results[rev_id] = {model_name: score_result}
return inprogress_results
def _register_model_set_revs_to_process(self, request, model_set_revs):
context = self[request.context_name]
for model_set, rev_ids in model_set_revs.items():
for rev_id in rev_ids:
for model_name in model_set:
injection_cache = request.injection_caches.get(rev_id)
task_id = context.format_id_string(
model_name, rev_id, request,
injection_cache=injection_cache)
self.application.backend.store_result(
task_id, {}, REQUESTED)
def _score(self, *args, **kwargs):
self._check_queue_full()
return super()._score(*args, **kwargs)
def _check_queue_full(self):
# Check redis to see if the queue of waiting tasks is too big.
# This is a hack to implement backpressure because celery doesn't
# support it natively.
# This will result in a race condition, but it should have OK
# properties.
if self.redis is not None and self.queue_maxsize is not None:
queue_size = self.redis.llen(DEFAULT_CELERY_QUEUE)
if queue_size > self.queue_maxsize:
message = "Queue size is too full {0}".format(queue_size)
logger.warning(message)
raise errors.ScoreProcessorOverloaded(message)
@classmethod
def _build_context_map(cls, config, name, section_key="scoring_systems"):
from .. import ores
from ..scoring_context import ScoringContext, ClientScoringContext
section = config[section_key][name]
if hasattr(ores, "_is_wsgi_client") and ores._is_wsgi_client:
ScoringContextClass = ClientScoringContext
else:
ScoringContextClass = ScoringContext
return {name: ScoringContextClass.from_config(config, name)
for name in section['scoring_contexts']}
@classmethod
def from_config(cls, config, name, section_key="scoring_systems"):
logger.info("Loading CeleryQueue '{0}' from config.".format(name))
section = config[section_key][name]
kwargs = cls._kwargs_from_config(
config, name, section_key=section_key)
queue_maxsize = section.get('queue_maxsize')
if 'task_tracker' in section:
task_tracker = RedisTaskTracker.from_config(
config, section['task_tracker'])
else:
task_tracker = None
application = celery.Celery(__name__)
application.conf.update(**{k: v for k, v in section.items()
if k not in ('class', 'context_map',
'score_cache',
'metrics_collector', 'timeout',
'queue_maxsize')})
return cls(application=application,
queue_maxsize=queue_maxsize,
task_tracker=task_tracker, **kwargs)
PASS_HOST_PORT = re.compile(
r"(:(?P<password>[^@]+)@)?" +
r"(?P<host>[^:]+)?" +
r"(:(?P<port>[0-9]+))?"
)
"""
Matches <password>@<host>:<port>
"""
|
wikimedia/ores | ores/scoring_context.py | ScoringContext.process_model_scores | python | def process_model_scores(self, model_names, root_cache,
include_features=False):
model_scores = {}
for model_name in model_names:
model_scores[model_name] = {}
# Mostly CPU
model_scores[model_name]['score'] = \
self._process_score(model_name, dependency_cache=root_cache)
# Essentially free
if include_features:
base_feature_map = self._solve_base_feature_map(
model_name, dependency_cache=root_cache)
model_scores[model_name]['features'] = base_feature_map
return model_scores | Generates a score map for a set of models based on a `root_cache`.
This method performs no substantial IO, but may incur substantial CPU
usage.
:Parameters:
model_names : `set` ( `str` )
A set of models to score
root_cache : `dict` ( `str` --> `mixed` )
A cache of pre-computed root_dependencies for a specific
revision. See `extract_root_dependency_caches()`
include_features : `bool`
If True, include a map of basic features used in scoring along
with the model score. If False, just generate the scores. | train | https://github.com/wikimedia/ores/blob/75599b6ba0172c86d94f7f7e1e05a3c282333a18/ores/scoring_context.py#L61-L93 | [
"def _solve_base_feature_map(self, model_name, dependency_cache=None):\n \"\"\"\n Solves the leaf :class:`revscoring.Feature` from the dependency for\n `model_name` using `dependency_cache`. This will return a mapping\n between the `str` name of the base features and the solved values.\n \"\"\"\n features = list(trim(self[model_name].features))\n feature_values = self.extractor.solve(features, cache=dependency_cache)\n return {str(f): v\n for f, v in zip(features, feature_values)}\n",
"def _process_score(self, model_name, dependency_cache=None):\n \"\"\"\n Generates a score for a given model using the `dependency_cache`.\n \"\"\"\n version = self[model_name].version\n\n start = time.time()\n feature_values = self._solve_features(model_name, dependency_cache)\n logger.debug(\"Extracted features for {0}:{1}:{2} in {3} secs\"\n .format(self.name, model_name, version,\n round(time.time() - start, 3)))\n\n start = time.time()\n score = self[model_name].score(feature_values)\n logger.debug(\"Scored features for {0}:{1}:{2} in {3} secs\"\n .format(self.name, model_name, version,\n round(time.time() - start, 3)))\n\n return score\n"
] | class ScoringContext(dict):
"""
Represents a context in which scoring can take place. Usually, a wiki is
1:1 with a "ScoringContext".
:Parameters:
name : str
The name of the context in which scoring will happen. This is
usually a wiki's database name.
model_map : dict
A mapping between names and
:class:`revscoring.ScorerModel`
instances
extractor : :class:`revscoring.Extractor`
An extractor to use for gathering feature values
"""
def __init__(self, name, model_map, extractor):
super().__init__()
self.name = str(name)
self.update(model_map)
self.extractor = extractor
def format_model_info(self, model_name, paths=None):
model_info = self._get_model_info_for(model_name)
return model_info.format(paths, formatting="json")
def format_id_string(self, model_name, rev_id, request, injection_cache=None):
version = self.model_version(model_name)
score_id = ":".join(
str(v) for v in [self.name, model_name, version, rev_id])
if request.include_features:
score_id += ":features"
if injection_cache is None:
return score_id
else:
sorted_tuple = tuple(sorted(injection_cache.items()))
cache_hash = sha1(bytes(str(sorted_tuple), 'utf8')).hexdigest()
return score_id + ":" + cache_hash
def _get_model_info_for(self, model_name):
return self[model_name].info
def model_version(self, model_name):
return self[model_name].version
def model_features(self, model_name):
return self[model_name].features
def _solve_features(self, model_name, dependency_cache=None):
"""
Solves the vector (`list`) of features for a given model using
the `dependency_cache` and returns them.
"""
features = self[model_name].features
return list(self.extractor.solve(features, cache=dependency_cache))
def _solve_base_feature_map(self, model_name, dependency_cache=None):
"""
Solves the leaf :class:`revscoring.Feature` from the dependency for
`model_name` using `dependency_cache`. This will return a mapping
between the `str` name of the base features and the solved values.
"""
features = list(trim(self[model_name].features))
feature_values = self.extractor.solve(features, cache=dependency_cache)
return {str(f): v
for f, v in zip(features, feature_values)}
def _process_score(self, model_name, dependency_cache=None):
"""
Generates a score for a given model using the `dependency_cache`.
"""
version = self[model_name].version
start = time.time()
feature_values = self._solve_features(model_name, dependency_cache)
logger.debug("Extracted features for {0}:{1}:{2} in {3} secs"
.format(self.name, model_name, version,
round(time.time() - start, 3)))
start = time.time()
score = self[model_name].score(feature_values)
logger.debug("Scored features for {0}:{1}:{2} in {3} secs"
.format(self.name, model_name, version,
round(time.time() - start, 3)))
return score
def _generate_root_datasources(self, model_names):
for model_name in model_names:
for dependency in dependencies.dig(self.model_features(model_name)):
yield dependency
def extract_root_dependency_caches(
self, model_names, rev_ids, injection_caches=None):
"""
Extracts a mapping of root :class:`revscoring.Datasource`
capable of generating the features needed for a particular set of
models without additional IO. This method implements all of the IO
heavy operations. The roots dependency caches produced by calling
this method can then be passed to `process_model_scores()` for scoring.
:Parameters:
model_names : `list` ( `str` )
The names of a :class:`revscoring.ScorerModel` to
extract the roots dependencies for
"""
# Make a copy of injection_caches
_injection_caches = {}
for rev_id in rev_ids:
injection_cache = injection_caches.get(rev_id, {}) \
if injection_caches is not None else {}
_injection_caches[rev_id] = dict(injection_cache.items())
# Find our root datasources
root_datasources = \
list(set(self._generate_root_datasources(model_names)))
start = time.time()
error_root_vals = self.extractor.extract(
rev_ids, root_datasources, caches=_injection_caches)
# Check each extraction for errors
root_caches = {}
errors = {}
for rev_id, (error, values) in zip(rev_ids, error_root_vals):
if error is not None:
errors[rev_id] = error
if rev_id in root_caches:
del root_caches[rev_id]
else:
root_caches[rev_id] = dict(zip(root_datasources, values))
logger.debug("Extracted root datasources for {0}:{1}:{2} in {3} secs"
.format(self.name, set(model_names), rev_ids,
round(time.time() - start, 3)))
# Note that root_caches should have been modified in place
return root_caches, errors
@classmethod
def map_from_config(cls, config, context_names,
section_key="scoring_contexts"):
"""
Loads a whole set of ScoringContext's from a configuration file
while maintaining a cache of model names. This aids in better memory
management and allows model aliases to be implemented at the
configuration level.
:Returns:
A map of context_names and ScoringContext's where models are loaded
once and reused cross contexts.
"""
model_key_map = {}
context_map = {}
for context_name in context_names:
section = config[section_key][context_name]
model_map = {}
for model_name, key in section['scorer_models'].items():
if key in model_key_map:
scorer_model = model_key_map[key]
else:
scorer_model = Model.from_config(config, key)
model_key_map[key] = scorer_model
model_map[model_name] = scorer_model
extractor = Extractor.from_config(config, section['extractor'])
context_map[context_name] = cls(
context_name, model_map=model_map, extractor=extractor)
return context_map
@classmethod
def from_config(cls, config, name, section_key="scoring_contexts"):
"""
Expects:
scoring_contexts:
enwiki:
scorer_models:
damaging: enwiki_damaging_2014
good-faith: enwiki_good-faith_2014
extractor: enwiki
ptwiki:
scorer_models:
damaging: ptwiki_damaging_2014
good-faith: ptwiki_good-faith_2014
extractor: ptwiki
extractors:
enwiki_api: ...
ptwiki_api: ...
scorer_models:
enwiki_damaging_2014: ...
enwiki_good-faith_2014: ...
"""
logger.info("Loading {0} '{1}' from config.".format(cls.__name__, name))
section = config[section_key][name]
model_map = {}
for model_name, key in section['scorer_models'].items():
scorer_model = Model.from_config(config, key)
model_map[model_name] = scorer_model
extractor = Extractor.from_config(config, section['extractor'])
return cls(name, model_map=model_map, extractor=extractor)
|
wikimedia/ores | ores/scoring_context.py | ScoringContext._solve_features | python | def _solve_features(self, model_name, dependency_cache=None):
features = self[model_name].features
return list(self.extractor.solve(features, cache=dependency_cache)) | Solves the vector (`list`) of features for a given model using
the `dependency_cache` and returns them. | train | https://github.com/wikimedia/ores/blob/75599b6ba0172c86d94f7f7e1e05a3c282333a18/ores/scoring_context.py#L95-L101 | null | class ScoringContext(dict):
"""
Represents a context in which scoring can take place. Usually, a wiki is
1:1 with a "ScoringContext".
:Parameters:
name : str
The name of the context in which scoring will happen. This is
usually a wiki's database name.
model_map : dict
A mapping between names and
:class:`revscoring.ScorerModel`
instances
extractor : :class:`revscoring.Extractor`
An extractor to use for gathering feature values
"""
def __init__(self, name, model_map, extractor):
super().__init__()
self.name = str(name)
self.update(model_map)
self.extractor = extractor
def format_model_info(self, model_name, paths=None):
model_info = self._get_model_info_for(model_name)
return model_info.format(paths, formatting="json")
def format_id_string(self, model_name, rev_id, request, injection_cache=None):
version = self.model_version(model_name)
score_id = ":".join(
str(v) for v in [self.name, model_name, version, rev_id])
if request.include_features:
score_id += ":features"
if injection_cache is None:
return score_id
else:
sorted_tuple = tuple(sorted(injection_cache.items()))
cache_hash = sha1(bytes(str(sorted_tuple), 'utf8')).hexdigest()
return score_id + ":" + cache_hash
def _get_model_info_for(self, model_name):
return self[model_name].info
def model_version(self, model_name):
return self[model_name].version
def model_features(self, model_name):
return self[model_name].features
def process_model_scores(self, model_names, root_cache,
include_features=False):
"""
Generates a score map for a set of models based on a `root_cache`.
This method performs no substantial IO, but may incur substantial CPU
usage.
:Parameters:
model_names : `set` ( `str` )
A set of models to score
root_cache : `dict` ( `str` --> `mixed` )
A cache of pre-computed root_dependencies for a specific
revision. See `extract_root_dependency_caches()`
include_features : `bool`
If True, include a map of basic features used in scoring along
with the model score. If False, just generate the scores.
"""
model_scores = {}
for model_name in model_names:
model_scores[model_name] = {}
# Mostly CPU
model_scores[model_name]['score'] = \
self._process_score(model_name, dependency_cache=root_cache)
# Essentially free
if include_features:
base_feature_map = self._solve_base_feature_map(
model_name, dependency_cache=root_cache)
model_scores[model_name]['features'] = base_feature_map
return model_scores
def _solve_base_feature_map(self, model_name, dependency_cache=None):
"""
Solves the leaf :class:`revscoring.Feature` from the dependency for
`model_name` using `dependency_cache`. This will return a mapping
between the `str` name of the base features and the solved values.
"""
features = list(trim(self[model_name].features))
feature_values = self.extractor.solve(features, cache=dependency_cache)
return {str(f): v
for f, v in zip(features, feature_values)}
def _process_score(self, model_name, dependency_cache=None):
"""
Generates a score for a given model using the `dependency_cache`.
"""
version = self[model_name].version
start = time.time()
feature_values = self._solve_features(model_name, dependency_cache)
logger.debug("Extracted features for {0}:{1}:{2} in {3} secs"
.format(self.name, model_name, version,
round(time.time() - start, 3)))
start = time.time()
score = self[model_name].score(feature_values)
logger.debug("Scored features for {0}:{1}:{2} in {3} secs"
.format(self.name, model_name, version,
round(time.time() - start, 3)))
return score
def _generate_root_datasources(self, model_names):
for model_name in model_names:
for dependency in dependencies.dig(self.model_features(model_name)):
yield dependency
def extract_root_dependency_caches(
self, model_names, rev_ids, injection_caches=None):
"""
Extracts a mapping of root :class:`revscoring.Datasource`
capable of generating the features needed for a particular set of
models without additional IO. This method implements all of the IO
heavy operations. The roots dependency caches produced by calling
this method can then be passed to `process_model_scores()` for scoring.
:Parameters:
model_names : `list` ( `str` )
The names of a :class:`revscoring.ScorerModel` to
extract the roots dependencies for
"""
# Make a copy of injection_caches
_injection_caches = {}
for rev_id in rev_ids:
injection_cache = injection_caches.get(rev_id, {}) \
if injection_caches is not None else {}
_injection_caches[rev_id] = dict(injection_cache.items())
# Find our root datasources
root_datasources = \
list(set(self._generate_root_datasources(model_names)))
start = time.time()
error_root_vals = self.extractor.extract(
rev_ids, root_datasources, caches=_injection_caches)
# Check each extraction for errors
root_caches = {}
errors = {}
for rev_id, (error, values) in zip(rev_ids, error_root_vals):
if error is not None:
errors[rev_id] = error
if rev_id in root_caches:
del root_caches[rev_id]
else:
root_caches[rev_id] = dict(zip(root_datasources, values))
logger.debug("Extracted root datasources for {0}:{1}:{2} in {3} secs"
.format(self.name, set(model_names), rev_ids,
round(time.time() - start, 3)))
# Note that root_caches should have been modified in place
return root_caches, errors
@classmethod
def map_from_config(cls, config, context_names,
section_key="scoring_contexts"):
"""
Loads a whole set of ScoringContext's from a configuration file
while maintaining a cache of model names. This aids in better memory
management and allows model aliases to be implemented at the
configuration level.
:Returns:
A map of context_names and ScoringContext's where models are loaded
once and reused cross contexts.
"""
model_key_map = {}
context_map = {}
for context_name in context_names:
section = config[section_key][context_name]
model_map = {}
for model_name, key in section['scorer_models'].items():
if key in model_key_map:
scorer_model = model_key_map[key]
else:
scorer_model = Model.from_config(config, key)
model_key_map[key] = scorer_model
model_map[model_name] = scorer_model
extractor = Extractor.from_config(config, section['extractor'])
context_map[context_name] = cls(
context_name, model_map=model_map, extractor=extractor)
return context_map
@classmethod
def from_config(cls, config, name, section_key="scoring_contexts"):
"""
Expects:
scoring_contexts:
enwiki:
scorer_models:
damaging: enwiki_damaging_2014
good-faith: enwiki_good-faith_2014
extractor: enwiki
ptwiki:
scorer_models:
damaging: ptwiki_damaging_2014
good-faith: ptwiki_good-faith_2014
extractor: ptwiki
extractors:
enwiki_api: ...
ptwiki_api: ...
scorer_models:
enwiki_damaging_2014: ...
enwiki_good-faith_2014: ...
"""
logger.info("Loading {0} '{1}' from config.".format(cls.__name__, name))
section = config[section_key][name]
model_map = {}
for model_name, key in section['scorer_models'].items():
scorer_model = Model.from_config(config, key)
model_map[model_name] = scorer_model
extractor = Extractor.from_config(config, section['extractor'])
return cls(name, model_map=model_map, extractor=extractor)
|
wikimedia/ores | ores/scoring_context.py | ScoringContext._solve_base_feature_map | python | def _solve_base_feature_map(self, model_name, dependency_cache=None):
features = list(trim(self[model_name].features))
feature_values = self.extractor.solve(features, cache=dependency_cache)
return {str(f): v
for f, v in zip(features, feature_values)} | Solves the leaf :class:`revscoring.Feature` from the dependency for
`model_name` using `dependency_cache`. This will return a mapping
between the `str` name of the base features and the solved values. | train | https://github.com/wikimedia/ores/blob/75599b6ba0172c86d94f7f7e1e05a3c282333a18/ores/scoring_context.py#L103-L112 | null | class ScoringContext(dict):
"""
Represents a context in which scoring can take place. Usually, a wiki is
1:1 with a "ScoringContext".
:Parameters:
name : str
The name of the context in which scoring will happen. This is
usually a wiki's database name.
model_map : dict
A mapping between names and
:class:`revscoring.ScorerModel`
instances
extractor : :class:`revscoring.Extractor`
An extractor to use for gathering feature values
"""
def __init__(self, name, model_map, extractor):
super().__init__()
self.name = str(name)
self.update(model_map)
self.extractor = extractor
def format_model_info(self, model_name, paths=None):
model_info = self._get_model_info_for(model_name)
return model_info.format(paths, formatting="json")
def format_id_string(self, model_name, rev_id, request, injection_cache=None):
version = self.model_version(model_name)
score_id = ":".join(
str(v) for v in [self.name, model_name, version, rev_id])
if request.include_features:
score_id += ":features"
if injection_cache is None:
return score_id
else:
sorted_tuple = tuple(sorted(injection_cache.items()))
cache_hash = sha1(bytes(str(sorted_tuple), 'utf8')).hexdigest()
return score_id + ":" + cache_hash
def _get_model_info_for(self, model_name):
return self[model_name].info
def model_version(self, model_name):
return self[model_name].version
def model_features(self, model_name):
return self[model_name].features
def process_model_scores(self, model_names, root_cache,
include_features=False):
"""
Generates a score map for a set of models based on a `root_cache`.
This method performs no substantial IO, but may incur substantial CPU
usage.
:Parameters:
model_names : `set` ( `str` )
A set of models to score
root_cache : `dict` ( `str` --> `mixed` )
A cache of pre-computed root_dependencies for a specific
revision. See `extract_root_dependency_caches()`
include_features : `bool`
If True, include a map of basic features used in scoring along
with the model score. If False, just generate the scores.
"""
model_scores = {}
for model_name in model_names:
model_scores[model_name] = {}
# Mostly CPU
model_scores[model_name]['score'] = \
self._process_score(model_name, dependency_cache=root_cache)
# Essentially free
if include_features:
base_feature_map = self._solve_base_feature_map(
model_name, dependency_cache=root_cache)
model_scores[model_name]['features'] = base_feature_map
return model_scores
def _solve_features(self, model_name, dependency_cache=None):
"""
Solves the vector (`list`) of features for a given model using
the `dependency_cache` and returns them.
"""
features = self[model_name].features
return list(self.extractor.solve(features, cache=dependency_cache))
def _process_score(self, model_name, dependency_cache=None):
"""
Generates a score for a given model using the `dependency_cache`.
"""
version = self[model_name].version
start = time.time()
feature_values = self._solve_features(model_name, dependency_cache)
logger.debug("Extracted features for {0}:{1}:{2} in {3} secs"
.format(self.name, model_name, version,
round(time.time() - start, 3)))
start = time.time()
score = self[model_name].score(feature_values)
logger.debug("Scored features for {0}:{1}:{2} in {3} secs"
.format(self.name, model_name, version,
round(time.time() - start, 3)))
return score
def _generate_root_datasources(self, model_names):
for model_name in model_names:
for dependency in dependencies.dig(self.model_features(model_name)):
yield dependency
def extract_root_dependency_caches(
self, model_names, rev_ids, injection_caches=None):
"""
Extracts a mapping of root :class:`revscoring.Datasource`
capable of generating the features needed for a particular set of
models without additional IO. This method implements all of the IO
heavy operations. The roots dependency caches produced by calling
this method can then be passed to `process_model_scores()` for scoring.
:Parameters:
model_names : `list` ( `str` )
The names of a :class:`revscoring.ScorerModel` to
extract the roots dependencies for
"""
# Make a copy of injection_caches
_injection_caches = {}
for rev_id in rev_ids:
injection_cache = injection_caches.get(rev_id, {}) \
if injection_caches is not None else {}
_injection_caches[rev_id] = dict(injection_cache.items())
# Find our root datasources
root_datasources = \
list(set(self._generate_root_datasources(model_names)))
start = time.time()
error_root_vals = self.extractor.extract(
rev_ids, root_datasources, caches=_injection_caches)
# Check each extraction for errors
root_caches = {}
errors = {}
for rev_id, (error, values) in zip(rev_ids, error_root_vals):
if error is not None:
errors[rev_id] = error
if rev_id in root_caches:
del root_caches[rev_id]
else:
root_caches[rev_id] = dict(zip(root_datasources, values))
logger.debug("Extracted root datasources for {0}:{1}:{2} in {3} secs"
.format(self.name, set(model_names), rev_ids,
round(time.time() - start, 3)))
# Note that root_caches should have been modified in place
return root_caches, errors
@classmethod
def map_from_config(cls, config, context_names,
section_key="scoring_contexts"):
"""
Loads a whole set of ScoringContext's from a configuration file
while maintaining a cache of model names. This aids in better memory
management and allows model aliases to be implemented at the
configuration level.
:Returns:
A map of context_names and ScoringContext's where models are loaded
once and reused cross contexts.
"""
model_key_map = {}
context_map = {}
for context_name in context_names:
section = config[section_key][context_name]
model_map = {}
for model_name, key in section['scorer_models'].items():
if key in model_key_map:
scorer_model = model_key_map[key]
else:
scorer_model = Model.from_config(config, key)
model_key_map[key] = scorer_model
model_map[model_name] = scorer_model
extractor = Extractor.from_config(config, section['extractor'])
context_map[context_name] = cls(
context_name, model_map=model_map, extractor=extractor)
return context_map
@classmethod
def from_config(cls, config, name, section_key="scoring_contexts"):
"""
Expects:
scoring_contexts:
enwiki:
scorer_models:
damaging: enwiki_damaging_2014
good-faith: enwiki_good-faith_2014
extractor: enwiki
ptwiki:
scorer_models:
damaging: ptwiki_damaging_2014
good-faith: ptwiki_good-faith_2014
extractor: ptwiki
extractors:
enwiki_api: ...
ptwiki_api: ...
scorer_models:
enwiki_damaging_2014: ...
enwiki_good-faith_2014: ...
"""
logger.info("Loading {0} '{1}' from config.".format(cls.__name__, name))
section = config[section_key][name]
model_map = {}
for model_name, key in section['scorer_models'].items():
scorer_model = Model.from_config(config, key)
model_map[model_name] = scorer_model
extractor = Extractor.from_config(config, section['extractor'])
return cls(name, model_map=model_map, extractor=extractor)
|
wikimedia/ores | ores/scoring_context.py | ScoringContext._process_score | python | def _process_score(self, model_name, dependency_cache=None):
version = self[model_name].version
start = time.time()
feature_values = self._solve_features(model_name, dependency_cache)
logger.debug("Extracted features for {0}:{1}:{2} in {3} secs"
.format(self.name, model_name, version,
round(time.time() - start, 3)))
start = time.time()
score = self[model_name].score(feature_values)
logger.debug("Scored features for {0}:{1}:{2} in {3} secs"
.format(self.name, model_name, version,
round(time.time() - start, 3)))
return score | Generates a score for a given model using the `dependency_cache`. | train | https://github.com/wikimedia/ores/blob/75599b6ba0172c86d94f7f7e1e05a3c282333a18/ores/scoring_context.py#L114-L132 | [
"def _solve_features(self, model_name, dependency_cache=None):\n \"\"\"\n Solves the vector (`list`) of features for a given model using\n the `dependency_cache` and returns them.\n \"\"\"\n features = self[model_name].features\n return list(self.extractor.solve(features, cache=dependency_cache))\n"
] | class ScoringContext(dict):
"""
Represents a context in which scoring can take place. Usually, a wiki is
1:1 with a "ScoringContext".
:Parameters:
name : str
The name of the context in which scoring will happen. This is
usually a wiki's database name.
model_map : dict
A mapping between names and
:class:`revscoring.ScorerModel`
instances
extractor : :class:`revscoring.Extractor`
An extractor to use for gathering feature values
"""
def __init__(self, name, model_map, extractor):
super().__init__()
self.name = str(name)
self.update(model_map)
self.extractor = extractor
def format_model_info(self, model_name, paths=None):
model_info = self._get_model_info_for(model_name)
return model_info.format(paths, formatting="json")
def format_id_string(self, model_name, rev_id, request, injection_cache=None):
version = self.model_version(model_name)
score_id = ":".join(
str(v) for v in [self.name, model_name, version, rev_id])
if request.include_features:
score_id += ":features"
if injection_cache is None:
return score_id
else:
sorted_tuple = tuple(sorted(injection_cache.items()))
cache_hash = sha1(bytes(str(sorted_tuple), 'utf8')).hexdigest()
return score_id + ":" + cache_hash
def _get_model_info_for(self, model_name):
return self[model_name].info
def model_version(self, model_name):
return self[model_name].version
def model_features(self, model_name):
return self[model_name].features
def process_model_scores(self, model_names, root_cache,
include_features=False):
"""
Generates a score map for a set of models based on a `root_cache`.
This method performs no substantial IO, but may incur substantial CPU
usage.
:Parameters:
model_names : `set` ( `str` )
A set of models to score
root_cache : `dict` ( `str` --> `mixed` )
A cache of pre-computed root_dependencies for a specific
revision. See `extract_root_dependency_caches()`
include_features : `bool`
If True, include a map of basic features used in scoring along
with the model score. If False, just generate the scores.
"""
model_scores = {}
for model_name in model_names:
model_scores[model_name] = {}
# Mostly CPU
model_scores[model_name]['score'] = \
self._process_score(model_name, dependency_cache=root_cache)
# Essentially free
if include_features:
base_feature_map = self._solve_base_feature_map(
model_name, dependency_cache=root_cache)
model_scores[model_name]['features'] = base_feature_map
return model_scores
def _solve_features(self, model_name, dependency_cache=None):
"""
Solves the vector (`list`) of features for a given model using
the `dependency_cache` and returns them.
"""
features = self[model_name].features
return list(self.extractor.solve(features, cache=dependency_cache))
def _solve_base_feature_map(self, model_name, dependency_cache=None):
"""
Solves the leaf :class:`revscoring.Feature` from the dependency for
`model_name` using `dependency_cache`. This will return a mapping
between the `str` name of the base features and the solved values.
"""
features = list(trim(self[model_name].features))
feature_values = self.extractor.solve(features, cache=dependency_cache)
return {str(f): v
for f, v in zip(features, feature_values)}
def _generate_root_datasources(self, model_names):
for model_name in model_names:
for dependency in dependencies.dig(self.model_features(model_name)):
yield dependency
def extract_root_dependency_caches(
self, model_names, rev_ids, injection_caches=None):
"""
Extracts a mapping of root :class:`revscoring.Datasource`
capable of generating the features needed for a particular set of
models without additional IO. This method implements all of the IO
heavy operations. The roots dependency caches produced by calling
this method can then be passed to `process_model_scores()` for scoring.
:Parameters:
model_names : `list` ( `str` )
The names of a :class:`revscoring.ScorerModel` to
extract the roots dependencies for
"""
# Make a copy of injection_caches
_injection_caches = {}
for rev_id in rev_ids:
injection_cache = injection_caches.get(rev_id, {}) \
if injection_caches is not None else {}
_injection_caches[rev_id] = dict(injection_cache.items())
# Find our root datasources
root_datasources = \
list(set(self._generate_root_datasources(model_names)))
start = time.time()
error_root_vals = self.extractor.extract(
rev_ids, root_datasources, caches=_injection_caches)
# Check each extraction for errors
root_caches = {}
errors = {}
for rev_id, (error, values) in zip(rev_ids, error_root_vals):
if error is not None:
errors[rev_id] = error
if rev_id in root_caches:
del root_caches[rev_id]
else:
root_caches[rev_id] = dict(zip(root_datasources, values))
logger.debug("Extracted root datasources for {0}:{1}:{2} in {3} secs"
.format(self.name, set(model_names), rev_ids,
round(time.time() - start, 3)))
# Note that root_caches should have been modified in place
return root_caches, errors
@classmethod
def map_from_config(cls, config, context_names,
section_key="scoring_contexts"):
"""
Loads a whole set of ScoringContext's from a configuration file
while maintaining a cache of model names. This aids in better memory
management and allows model aliases to be implemented at the
configuration level.
:Returns:
A map of context_names and ScoringContext's where models are loaded
once and reused cross contexts.
"""
model_key_map = {}
context_map = {}
for context_name in context_names:
section = config[section_key][context_name]
model_map = {}
for model_name, key in section['scorer_models'].items():
if key in model_key_map:
scorer_model = model_key_map[key]
else:
scorer_model = Model.from_config(config, key)
model_key_map[key] = scorer_model
model_map[model_name] = scorer_model
extractor = Extractor.from_config(config, section['extractor'])
context_map[context_name] = cls(
context_name, model_map=model_map, extractor=extractor)
return context_map
@classmethod
def from_config(cls, config, name, section_key="scoring_contexts"):
"""
Expects:
scoring_contexts:
enwiki:
scorer_models:
damaging: enwiki_damaging_2014
good-faith: enwiki_good-faith_2014
extractor: enwiki
ptwiki:
scorer_models:
damaging: ptwiki_damaging_2014
good-faith: ptwiki_good-faith_2014
extractor: ptwiki
extractors:
enwiki_api: ...
ptwiki_api: ...
scorer_models:
enwiki_damaging_2014: ...
enwiki_good-faith_2014: ...
"""
logger.info("Loading {0} '{1}' from config.".format(cls.__name__, name))
section = config[section_key][name]
model_map = {}
for model_name, key in section['scorer_models'].items():
scorer_model = Model.from_config(config, key)
model_map[model_name] = scorer_model
extractor = Extractor.from_config(config, section['extractor'])
return cls(name, model_map=model_map, extractor=extractor)
|
wikimedia/ores | ores/scoring_context.py | ScoringContext.extract_root_dependency_caches | python | def extract_root_dependency_caches(
self, model_names, rev_ids, injection_caches=None):
# Make a copy of injection_caches
_injection_caches = {}
for rev_id in rev_ids:
injection_cache = injection_caches.get(rev_id, {}) \
if injection_caches is not None else {}
_injection_caches[rev_id] = dict(injection_cache.items())
# Find our root datasources
root_datasources = \
list(set(self._generate_root_datasources(model_names)))
start = time.time()
error_root_vals = self.extractor.extract(
rev_ids, root_datasources, caches=_injection_caches)
# Check each extraction for errors
root_caches = {}
errors = {}
for rev_id, (error, values) in zip(rev_ids, error_root_vals):
if error is not None:
errors[rev_id] = error
if rev_id in root_caches:
del root_caches[rev_id]
else:
root_caches[rev_id] = dict(zip(root_datasources, values))
logger.debug("Extracted root datasources for {0}:{1}:{2} in {3} secs"
.format(self.name, set(model_names), rev_ids,
round(time.time() - start, 3)))
# Note that root_caches should have been modified in place
return root_caches, errors | Extracts a mapping of root :class:`revscoring.Datasource`
capable of generating the features needed for a particular set of
models without additional IO. This method implements all of the IO
heavy operations. The roots dependency caches produced by calling
this method can then be passed to `process_model_scores()` for scoring.
:Parameters:
model_names : `list` ( `str` )
The names of a :class:`revscoring.ScorerModel` to
extract the roots dependencies for | train | https://github.com/wikimedia/ores/blob/75599b6ba0172c86d94f7f7e1e05a3c282333a18/ores/scoring_context.py#L139-L183 | [
"def _generate_root_datasources(self, model_names):\n for model_name in model_names:\n for dependency in dependencies.dig(self.model_features(model_name)):\n yield dependency\n"
] | class ScoringContext(dict):
"""
Represents a context in which scoring can take place. Usually, a wiki is
1:1 with a "ScoringContext".
:Parameters:
name : str
The name of the context in which scoring will happen. This is
usually a wiki's database name.
model_map : dict
A mapping between names and
:class:`revscoring.ScorerModel`
instances
extractor : :class:`revscoring.Extractor`
An extractor to use for gathering feature values
"""
def __init__(self, name, model_map, extractor):
super().__init__()
self.name = str(name)
self.update(model_map)
self.extractor = extractor
def format_model_info(self, model_name, paths=None):
model_info = self._get_model_info_for(model_name)
return model_info.format(paths, formatting="json")
def format_id_string(self, model_name, rev_id, request, injection_cache=None):
version = self.model_version(model_name)
score_id = ":".join(
str(v) for v in [self.name, model_name, version, rev_id])
if request.include_features:
score_id += ":features"
if injection_cache is None:
return score_id
else:
sorted_tuple = tuple(sorted(injection_cache.items()))
cache_hash = sha1(bytes(str(sorted_tuple), 'utf8')).hexdigest()
return score_id + ":" + cache_hash
def _get_model_info_for(self, model_name):
return self[model_name].info
def model_version(self, model_name):
return self[model_name].version
def model_features(self, model_name):
return self[model_name].features
def process_model_scores(self, model_names, root_cache,
include_features=False):
"""
Generates a score map for a set of models based on a `root_cache`.
This method performs no substantial IO, but may incur substantial CPU
usage.
:Parameters:
model_names : `set` ( `str` )
A set of models to score
root_cache : `dict` ( `str` --> `mixed` )
A cache of pre-computed root_dependencies for a specific
revision. See `extract_root_dependency_caches()`
include_features : `bool`
If True, include a map of basic features used in scoring along
with the model score. If False, just generate the scores.
"""
model_scores = {}
for model_name in model_names:
model_scores[model_name] = {}
# Mostly CPU
model_scores[model_name]['score'] = \
self._process_score(model_name, dependency_cache=root_cache)
# Essentially free
if include_features:
base_feature_map = self._solve_base_feature_map(
model_name, dependency_cache=root_cache)
model_scores[model_name]['features'] = base_feature_map
return model_scores
def _solve_features(self, model_name, dependency_cache=None):
"""
Solves the vector (`list`) of features for a given model using
the `dependency_cache` and returns them.
"""
features = self[model_name].features
return list(self.extractor.solve(features, cache=dependency_cache))
def _solve_base_feature_map(self, model_name, dependency_cache=None):
"""
Solves the leaf :class:`revscoring.Feature` from the dependency for
`model_name` using `dependency_cache`. This will return a mapping
between the `str` name of the base features and the solved values.
"""
features = list(trim(self[model_name].features))
feature_values = self.extractor.solve(features, cache=dependency_cache)
return {str(f): v
for f, v in zip(features, feature_values)}
def _process_score(self, model_name, dependency_cache=None):
"""
Generates a score for a given model using the `dependency_cache`.
"""
version = self[model_name].version
start = time.time()
feature_values = self._solve_features(model_name, dependency_cache)
logger.debug("Extracted features for {0}:{1}:{2} in {3} secs"
.format(self.name, model_name, version,
round(time.time() - start, 3)))
start = time.time()
score = self[model_name].score(feature_values)
logger.debug("Scored features for {0}:{1}:{2} in {3} secs"
.format(self.name, model_name, version,
round(time.time() - start, 3)))
return score
def _generate_root_datasources(self, model_names):
for model_name in model_names:
for dependency in dependencies.dig(self.model_features(model_name)):
yield dependency
@classmethod
def map_from_config(cls, config, context_names,
section_key="scoring_contexts"):
"""
Loads a whole set of ScoringContext's from a configuration file
while maintaining a cache of model names. This aids in better memory
management and allows model aliases to be implemented at the
configuration level.
:Returns:
A map of context_names and ScoringContext's where models are loaded
once and reused cross contexts.
"""
model_key_map = {}
context_map = {}
for context_name in context_names:
section = config[section_key][context_name]
model_map = {}
for model_name, key in section['scorer_models'].items():
if key in model_key_map:
scorer_model = model_key_map[key]
else:
scorer_model = Model.from_config(config, key)
model_key_map[key] = scorer_model
model_map[model_name] = scorer_model
extractor = Extractor.from_config(config, section['extractor'])
context_map[context_name] = cls(
context_name, model_map=model_map, extractor=extractor)
return context_map
@classmethod
def from_config(cls, config, name, section_key="scoring_contexts"):
"""
Expects:
scoring_contexts:
enwiki:
scorer_models:
damaging: enwiki_damaging_2014
good-faith: enwiki_good-faith_2014
extractor: enwiki
ptwiki:
scorer_models:
damaging: ptwiki_damaging_2014
good-faith: ptwiki_good-faith_2014
extractor: ptwiki
extractors:
enwiki_api: ...
ptwiki_api: ...
scorer_models:
enwiki_damaging_2014: ...
enwiki_good-faith_2014: ...
"""
logger.info("Loading {0} '{1}' from config.".format(cls.__name__, name))
section = config[section_key][name]
model_map = {}
for model_name, key in section['scorer_models'].items():
scorer_model = Model.from_config(config, key)
model_map[model_name] = scorer_model
extractor = Extractor.from_config(config, section['extractor'])
return cls(name, model_map=model_map, extractor=extractor)
|
wikimedia/ores | ores/scoring_context.py | ScoringContext.map_from_config | python | def map_from_config(cls, config, context_names,
section_key="scoring_contexts"):
model_key_map = {}
context_map = {}
for context_name in context_names:
section = config[section_key][context_name]
model_map = {}
for model_name, key in section['scorer_models'].items():
if key in model_key_map:
scorer_model = model_key_map[key]
else:
scorer_model = Model.from_config(config, key)
model_key_map[key] = scorer_model
model_map[model_name] = scorer_model
extractor = Extractor.from_config(config, section['extractor'])
context_map[context_name] = cls(
context_name, model_map=model_map, extractor=extractor)
return context_map | Loads a whole set of ScoringContext's from a configuration file
while maintaining a cache of model names. This aids in better memory
management and allows model aliases to be implemented at the
configuration level.
:Returns:
A map of context_names and ScoringContext's where models are loaded
once and reused cross contexts. | train | https://github.com/wikimedia/ores/blob/75599b6ba0172c86d94f7f7e1e05a3c282333a18/ores/scoring_context.py#L186-L216 | null | class ScoringContext(dict):
"""
Represents a context in which scoring can take place. Usually, a wiki is
1:1 with a "ScoringContext".
:Parameters:
name : str
The name of the context in which scoring will happen. This is
usually a wiki's database name.
model_map : dict
A mapping between names and
:class:`revscoring.ScorerModel`
instances
extractor : :class:`revscoring.Extractor`
An extractor to use for gathering feature values
"""
def __init__(self, name, model_map, extractor):
super().__init__()
self.name = str(name)
self.update(model_map)
self.extractor = extractor
def format_model_info(self, model_name, paths=None):
model_info = self._get_model_info_for(model_name)
return model_info.format(paths, formatting="json")
def format_id_string(self, model_name, rev_id, request, injection_cache=None):
version = self.model_version(model_name)
score_id = ":".join(
str(v) for v in [self.name, model_name, version, rev_id])
if request.include_features:
score_id += ":features"
if injection_cache is None:
return score_id
else:
sorted_tuple = tuple(sorted(injection_cache.items()))
cache_hash = sha1(bytes(str(sorted_tuple), 'utf8')).hexdigest()
return score_id + ":" + cache_hash
def _get_model_info_for(self, model_name):
return self[model_name].info
def model_version(self, model_name):
return self[model_name].version
def model_features(self, model_name):
return self[model_name].features
def process_model_scores(self, model_names, root_cache,
include_features=False):
"""
Generates a score map for a set of models based on a `root_cache`.
This method performs no substantial IO, but may incur substantial CPU
usage.
:Parameters:
model_names : `set` ( `str` )
A set of models to score
root_cache : `dict` ( `str` --> `mixed` )
A cache of pre-computed root_dependencies for a specific
revision. See `extract_root_dependency_caches()`
include_features : `bool`
If True, include a map of basic features used in scoring along
with the model score. If False, just generate the scores.
"""
model_scores = {}
for model_name in model_names:
model_scores[model_name] = {}
# Mostly CPU
model_scores[model_name]['score'] = \
self._process_score(model_name, dependency_cache=root_cache)
# Essentially free
if include_features:
base_feature_map = self._solve_base_feature_map(
model_name, dependency_cache=root_cache)
model_scores[model_name]['features'] = base_feature_map
return model_scores
def _solve_features(self, model_name, dependency_cache=None):
"""
Solves the vector (`list`) of features for a given model using
the `dependency_cache` and returns them.
"""
features = self[model_name].features
return list(self.extractor.solve(features, cache=dependency_cache))
def _solve_base_feature_map(self, model_name, dependency_cache=None):
"""
Solves the leaf :class:`revscoring.Feature` from the dependency for
`model_name` using `dependency_cache`. This will return a mapping
between the `str` name of the base features and the solved values.
"""
features = list(trim(self[model_name].features))
feature_values = self.extractor.solve(features, cache=dependency_cache)
return {str(f): v
for f, v in zip(features, feature_values)}
def _process_score(self, model_name, dependency_cache=None):
"""
Generates a score for a given model using the `dependency_cache`.
"""
version = self[model_name].version
start = time.time()
feature_values = self._solve_features(model_name, dependency_cache)
logger.debug("Extracted features for {0}:{1}:{2} in {3} secs"
.format(self.name, model_name, version,
round(time.time() - start, 3)))
start = time.time()
score = self[model_name].score(feature_values)
logger.debug("Scored features for {0}:{1}:{2} in {3} secs"
.format(self.name, model_name, version,
round(time.time() - start, 3)))
return score
def _generate_root_datasources(self, model_names):
for model_name in model_names:
for dependency in dependencies.dig(self.model_features(model_name)):
yield dependency
def extract_root_dependency_caches(
self, model_names, rev_ids, injection_caches=None):
"""
Extracts a mapping of root :class:`revscoring.Datasource`
capable of generating the features needed for a particular set of
models without additional IO. This method implements all of the IO
heavy operations. The roots dependency caches produced by calling
this method can then be passed to `process_model_scores()` for scoring.
:Parameters:
model_names : `list` ( `str` )
The names of a :class:`revscoring.ScorerModel` to
extract the roots dependencies for
"""
# Make a copy of injection_caches
_injection_caches = {}
for rev_id in rev_ids:
injection_cache = injection_caches.get(rev_id, {}) \
if injection_caches is not None else {}
_injection_caches[rev_id] = dict(injection_cache.items())
# Find our root datasources
root_datasources = \
list(set(self._generate_root_datasources(model_names)))
start = time.time()
error_root_vals = self.extractor.extract(
rev_ids, root_datasources, caches=_injection_caches)
# Check each extraction for errors
root_caches = {}
errors = {}
for rev_id, (error, values) in zip(rev_ids, error_root_vals):
if error is not None:
errors[rev_id] = error
if rev_id in root_caches:
del root_caches[rev_id]
else:
root_caches[rev_id] = dict(zip(root_datasources, values))
logger.debug("Extracted root datasources for {0}:{1}:{2} in {3} secs"
.format(self.name, set(model_names), rev_ids,
round(time.time() - start, 3)))
# Note that root_caches should have been modified in place
return root_caches, errors
@classmethod
@classmethod
def from_config(cls, config, name, section_key="scoring_contexts"):
"""
Expects:
scoring_contexts:
enwiki:
scorer_models:
damaging: enwiki_damaging_2014
good-faith: enwiki_good-faith_2014
extractor: enwiki
ptwiki:
scorer_models:
damaging: ptwiki_damaging_2014
good-faith: ptwiki_good-faith_2014
extractor: ptwiki
extractors:
enwiki_api: ...
ptwiki_api: ...
scorer_models:
enwiki_damaging_2014: ...
enwiki_good-faith_2014: ...
"""
logger.info("Loading {0} '{1}' from config.".format(cls.__name__, name))
section = config[section_key][name]
model_map = {}
for model_name, key in section['scorer_models'].items():
scorer_model = Model.from_config(config, key)
model_map[model_name] = scorer_model
extractor = Extractor.from_config(config, section['extractor'])
return cls(name, model_map=model_map, extractor=extractor)
|
wikimedia/ores | ores/scoring_context.py | ScoringContext.from_config | python | def from_config(cls, config, name, section_key="scoring_contexts"):
logger.info("Loading {0} '{1}' from config.".format(cls.__name__, name))
section = config[section_key][name]
model_map = {}
for model_name, key in section['scorer_models'].items():
scorer_model = Model.from_config(config, key)
model_map[model_name] = scorer_model
extractor = Extractor.from_config(config, section['extractor'])
return cls(name, model_map=model_map, extractor=extractor) | Expects:
scoring_contexts:
enwiki:
scorer_models:
damaging: enwiki_damaging_2014
good-faith: enwiki_good-faith_2014
extractor: enwiki
ptwiki:
scorer_models:
damaging: ptwiki_damaging_2014
good-faith: ptwiki_good-faith_2014
extractor: ptwiki
extractors:
enwiki_api: ...
ptwiki_api: ...
scorer_models:
enwiki_damaging_2014: ...
enwiki_good-faith_2014: ... | train | https://github.com/wikimedia/ores/blob/75599b6ba0172c86d94f7f7e1e05a3c282333a18/ores/scoring_context.py#L219-L253 | null | class ScoringContext(dict):
"""
Represents a context in which scoring can take place. Usually, a wiki is
1:1 with a "ScoringContext".
:Parameters:
name : str
The name of the context in which scoring will happen. This is
usually a wiki's database name.
model_map : dict
A mapping between names and
:class:`revscoring.ScorerModel`
instances
extractor : :class:`revscoring.Extractor`
An extractor to use for gathering feature values
"""
def __init__(self, name, model_map, extractor):
super().__init__()
self.name = str(name)
self.update(model_map)
self.extractor = extractor
def format_model_info(self, model_name, paths=None):
model_info = self._get_model_info_for(model_name)
return model_info.format(paths, formatting="json")
def format_id_string(self, model_name, rev_id, request, injection_cache=None):
version = self.model_version(model_name)
score_id = ":".join(
str(v) for v in [self.name, model_name, version, rev_id])
if request.include_features:
score_id += ":features"
if injection_cache is None:
return score_id
else:
sorted_tuple = tuple(sorted(injection_cache.items()))
cache_hash = sha1(bytes(str(sorted_tuple), 'utf8')).hexdigest()
return score_id + ":" + cache_hash
def _get_model_info_for(self, model_name):
return self[model_name].info
def model_version(self, model_name):
return self[model_name].version
def model_features(self, model_name):
return self[model_name].features
def process_model_scores(self, model_names, root_cache,
include_features=False):
"""
Generates a score map for a set of models based on a `root_cache`.
This method performs no substantial IO, but may incur substantial CPU
usage.
:Parameters:
model_names : `set` ( `str` )
A set of models to score
root_cache : `dict` ( `str` --> `mixed` )
A cache of pre-computed root_dependencies for a specific
revision. See `extract_root_dependency_caches()`
include_features : `bool`
If True, include a map of basic features used in scoring along
with the model score. If False, just generate the scores.
"""
model_scores = {}
for model_name in model_names:
model_scores[model_name] = {}
# Mostly CPU
model_scores[model_name]['score'] = \
self._process_score(model_name, dependency_cache=root_cache)
# Essentially free
if include_features:
base_feature_map = self._solve_base_feature_map(
model_name, dependency_cache=root_cache)
model_scores[model_name]['features'] = base_feature_map
return model_scores
def _solve_features(self, model_name, dependency_cache=None):
"""
Solves the vector (`list`) of features for a given model using
the `dependency_cache` and returns them.
"""
features = self[model_name].features
return list(self.extractor.solve(features, cache=dependency_cache))
def _solve_base_feature_map(self, model_name, dependency_cache=None):
"""
Solves the leaf :class:`revscoring.Feature` from the dependency for
`model_name` using `dependency_cache`. This will return a mapping
between the `str` name of the base features and the solved values.
"""
features = list(trim(self[model_name].features))
feature_values = self.extractor.solve(features, cache=dependency_cache)
return {str(f): v
for f, v in zip(features, feature_values)}
def _process_score(self, model_name, dependency_cache=None):
"""
Generates a score for a given model using the `dependency_cache`.
"""
version = self[model_name].version
start = time.time()
feature_values = self._solve_features(model_name, dependency_cache)
logger.debug("Extracted features for {0}:{1}:{2} in {3} secs"
.format(self.name, model_name, version,
round(time.time() - start, 3)))
start = time.time()
score = self[model_name].score(feature_values)
logger.debug("Scored features for {0}:{1}:{2} in {3} secs"
.format(self.name, model_name, version,
round(time.time() - start, 3)))
return score
def _generate_root_datasources(self, model_names):
for model_name in model_names:
for dependency in dependencies.dig(self.model_features(model_name)):
yield dependency
def extract_root_dependency_caches(
self, model_names, rev_ids, injection_caches=None):
"""
Extracts a mapping of root :class:`revscoring.Datasource`
capable of generating the features needed for a particular set of
models without additional IO. This method implements all of the IO
heavy operations. The roots dependency caches produced by calling
this method can then be passed to `process_model_scores()` for scoring.
:Parameters:
model_names : `list` ( `str` )
The names of a :class:`revscoring.ScorerModel` to
extract the roots dependencies for
"""
# Make a copy of injection_caches
_injection_caches = {}
for rev_id in rev_ids:
injection_cache = injection_caches.get(rev_id, {}) \
if injection_caches is not None else {}
_injection_caches[rev_id] = dict(injection_cache.items())
# Find our root datasources
root_datasources = \
list(set(self._generate_root_datasources(model_names)))
start = time.time()
error_root_vals = self.extractor.extract(
rev_ids, root_datasources, caches=_injection_caches)
# Check each extraction for errors
root_caches = {}
errors = {}
for rev_id, (error, values) in zip(rev_ids, error_root_vals):
if error is not None:
errors[rev_id] = error
if rev_id in root_caches:
del root_caches[rev_id]
else:
root_caches[rev_id] = dict(zip(root_datasources, values))
logger.debug("Extracted root datasources for {0}:{1}:{2} in {3} secs"
.format(self.name, set(model_names), rev_ids,
round(time.time() - start, 3)))
# Note that root_caches should have been modified in place
return root_caches, errors
@classmethod
def map_from_config(cls, config, context_names,
section_key="scoring_contexts"):
"""
Loads a whole set of ScoringContext's from a configuration file
while maintaining a cache of model names. This aids in better memory
management and allows model aliases to be implemented at the
configuration level.
:Returns:
A map of context_names and ScoringContext's where models are loaded
once and reused cross contexts.
"""
model_key_map = {}
context_map = {}
for context_name in context_names:
section = config[section_key][context_name]
model_map = {}
for model_name, key in section['scorer_models'].items():
if key in model_key_map:
scorer_model = model_key_map[key]
else:
scorer_model = Model.from_config(config, key)
model_key_map[key] = scorer_model
model_map[model_name] = scorer_model
extractor = Extractor.from_config(config, section['extractor'])
context_map[context_name] = cls(
context_name, model_map=model_map, extractor=extractor)
return context_map
@classmethod
|
wikimedia/ores | ores/score_request.py | ScoreRequest.format | python | def format(self, rev_id=None, model_name=None):
rev_ids = rev_id if rev_id is not None else set(self.rev_ids)
model_names = model_name if model_name is not None else set(self.model_names)
common = [self.context_name, rev_ids, model_names]
optional = []
if self.precache:
optional.append("precache")
if self.include_features:
optional.append("features")
if self.injection_caches:
optional.append("injection_caches={0}".format(self.injection_caches))
if self.model_info:
optional.append("model_info=" + json.dumps(self.model_info))
if self.ip:
optional.append("ip={0}".format(self.ip))
return "{0}({1})".format(":".join(repr(v) for v in common),
", ".join(optional)) | Fomat a request or a sub-part of a request based on a rev_id and/or
model_name. This is useful for logging. | train | https://github.com/wikimedia/ores/blob/75599b6ba0172c86d94f7f7e1e05a3c282333a18/ores/score_request.py#L40-L62 | null | class ScoreRequest:
def __init__(self, context_name, rev_ids, model_names, precache=False,
include_features=False, injection_caches=None,
model_info=None, ip=None):
"""
Construct a ScoreRequest from parameters.
:Parameters:
context_name : str
The name of the content for the query -- usually a wikidb name
rev_ids : `iterable` ( `int` )
A set of revision IDs to score
model_names : `iterable` ( `str` )
A set of model_names to use in scoring
precache : bool
If true, mark the request as a "precache" request
include_features : bool
If true, include feature values in the response
injection_caches : dict
A mapping of injection_cache to `rev_id` to use for injecting
cached data when extracting features/scoring.
model_info : `list` ( `str` )
A list of model information fields to include in the response
"""
self.context_name = context_name
self.rev_ids = set(rev_ids)
self.model_names = set(model_names)
self.precache = precache
self.include_features = include_features
self.injection_caches = injection_caches or {}
self.model_info = model_info
self.ip = ip
def __str__(self):
return self.format()
def __repr__(self):
return "{0}({1})".format(
self.__class__.__name__,
", ".join(repr(v) for v in [
self.context_name,
self.rev_ids,
self.model_names,
"precache={0!r}".format(self.precache),
"include_features={0!r}".format(self.include_features),
"injection_caches={0!r}".format(self.injection_caches),
"ip={0!r}".format(self.ip),
"model_info={0!r}".format(self.model_info)]))
def to_json(self):
return {
'context': self.context_name,
'rev_ids': list(self.rev_ids),
'model_names': list(self.model_names),
'precache': self.precache,
'include_features': self.include_features,
'injection_caches': self.injection_caches,
'ip': self.ip,
'model_info': self.model_info
}
@classmethod
def from_json(cls, data):
return cls(
data['context'],
set(data['rev_ids']),
set(data['model_names']),
precache=data['precache'],
include_features=data['include_features'],
injection_caches=data['injection_caches'],
model_info=data['model_info'],
ip=data['ip'])
|
wikimedia/ores | ores/wsgi/util.py | build_score_request | python | def build_score_request(scoring_system, request, context_name=None, rev_id=None,
model_name=None):
rev_ids = parse_rev_ids(request, rev_id)
model_names = parse_model_names(request, model_name)
precache = 'precache' in request.args
include_features = 'features' in request.args
injection_caches = parse_injection(request, rev_id)
model_info = parse_model_info(request)
if context_name and context_name in scoring_system and not model_names:
model_names = scoring_system[context_name].keys()
# WMF specific solution
if request.headers.get('X-Client-IP') is None:
ip = request.remote_addr.strip()
else:
ip = request.headers['X-Client-IP'].strip()
return ScoreRequest(context_name, rev_ids, model_names,
precache=precache,
include_features=include_features,
injection_caches=injection_caches,
model_info=model_info,
ip=ip) | Build an :class:`ores.ScoreRequest` from information contained in a
request.
:Parameters:
scoring_system : :class:`ores.ScoringSystem`
A scoring system to build request with
request : :class:`flask.Request`
A web request to extract information from
context_name : `str`
The name of the context to perform scoring
rev_id : int
The revision ID to score. Note that multiple IDs can be provided
in `request.args`
model_name = `str`
The name of the model to score. Note that multiple models can be
provided in `request.args` | train | https://github.com/wikimedia/ores/blob/75599b6ba0172c86d94f7f7e1e05a3c282333a18/ores/wsgi/util.py#L68-L109 | [
"def parse_rev_ids(request, rev_id):\n if rev_id is not None:\n return [int(rev_id)]\n else:\n return read_bar_split_param(request, \"revids\", type=int)\n",
"def parse_model_names(request, model_name):\n if model_name is not None:\n return [model_name]\n else:\n return read_bar_split_param(request, \"models\", type=str)\n",
"def parse_injection(request, rev_id):\n \"\"\"Parse values for features / datasources of interest.\"\"\"\n cache = {}\n\n if 'inject' in request.values:\n try:\n cache = json.loads(request.values['inject'])\n except json.JSONDecodeError as e:\n raise CacheParsingError(e)\n\n if rev_id is not None:\n rev_cache = cache\n\n try:\n for k, v in request.values.items():\n if k.startswith((\"feature.\", \"datasource.\")):\n rev_cache[k] = json.loads(v)\n except json.JSONDecodeError as e:\n raise CacheParsingError(e)\n\n if len(rev_cache) > 0:\n cache = {rev_id: rev_cache}\n else:\n cache = {int(rev_id): c for rev_id, c in cache.items()}\n\n return cache or None\n",
"def parse_model_info(request):\n return read_bar_split_param(request, \"model_info\", type=str)\n"
] | import json
import logging
import re
from flask_jsonpify import jsonify as flask_jsonify
from ..score_request import ScoreRequest
logger = logging.getLogger(__name__)
class CacheParsingError(Exception):
pass
class ParamError(Exception):
pass
def jsonify(doc):
return flask_jsonify(normalize_json(doc))
def normalize_json(doc):
if isinstance(doc, dict):
return {_ensure_str_key(k): normalize_json(v) for k, v in doc.items()}
else:
return doc
def _ensure_str_key(key):
if isinstance(key, bool):
return "true" if key else "false"
else:
return str(key)
def read_param(request, param, default=None, type=str):
try:
value = request.args.get(param, request.form.get(param))
if value is None:
return default
else:
return type(value)
except (ValueError, TypeError) as e:
raise ParamError("Could not interpret {0}. {1}".format(param, str(e)))
def read_bar_split_param(request, param, default=None, type=str):
values = read_param(request, param, default=default)
if values is None:
return []
try:
return [type(value) for value in values.split("|")]
except (ValueError, TypeError) as e:
raise ParamError("Could not interpret {0}. {1}"
.format(param, str(e)))
def format_error(error):
error_type = error.__class__.__name__
message = str(error)
return {'error': {'type': error_type, 'message': message}}
def parse_rev_ids(request, rev_id):
if rev_id is not None:
return [int(rev_id)]
else:
return read_bar_split_param(request, "revids", type=int)
def parse_model_names(request, model_name):
if model_name is not None:
return [model_name]
else:
return read_bar_split_param(request, "models", type=str)
def parse_injection(request, rev_id):
"""Parse values for features / datasources of interest."""
cache = {}
if 'inject' in request.values:
try:
cache = json.loads(request.values['inject'])
except json.JSONDecodeError as e:
raise CacheParsingError(e)
if rev_id is not None:
rev_cache = cache
try:
for k, v in request.values.items():
if k.startswith(("feature.", "datasource.")):
rev_cache[k] = json.loads(v)
except json.JSONDecodeError as e:
raise CacheParsingError(e)
if len(rev_cache) > 0:
cache = {rev_id: rev_cache}
else:
cache = {int(rev_id): c for rev_id, c in cache.items()}
return cache or None
def parse_model_info(request):
return read_bar_split_param(request, "model_info", type=str)
def build_score_request_from_event(precache_map, event):
context_name = event['database']
rev_id = event['rev_id']
# Check to see if we have the context available in our precache_map
if context_name not in precache_map:
return None
# Start building the response document
event_set = build_event_set(event)
model_names = {m for e in event_set if e in precache_map[context_name]
for m in precache_map[context_name][e]}
if len(model_names) == 0:
return None
return ScoreRequest(context_name, [rev_id], model_names, precache=True)
# TODO: This strategy for building up events is not sustainable.
def build_event_set(event):
"""
Turn an EventStream event into a set of event types that ORES
uses internally.
"""
event_set = set()
if re.match(r"([^\.]+.)?mediawiki\.revision-create$",
event['meta']['topic']):
event_set.add('edit')
user_groups = event.get('performer', {}).get('user_groups', [])
if 'bot' in user_groups:
event_set.add('bot_edit')
else:
event_set.add('nonbot_edit')
if not event.get('rev_parent_id'):
event_set.add('page_creation')
if 'bot' in user_groups:
event_set.add('bot_page_creation')
else:
event_set.add('nonbot_page_creation')
return event_set
AVAILABLE_EVENTS = {'edit', 'bot_edit', 'nonbot_edit', 'page_creation',
'bot_page_creation', 'nonbot_page_creation'}
def build_precache_map(config):
"""
Build a mapping of contexts and models from the configuration
"""
precache_map = {}
ss_name = config['ores']['scoring_system']
for context in config['scoring_systems'][ss_name]['scoring_contexts']:
precache_map[context] = {}
for model in config['scoring_contexts'][context].get('precache', []):
precached_config = \
config['scoring_contexts'][context]['precache'][model]
events = precached_config['on']
if len(set(events) - AVAILABLE_EVENTS) > 0:
logger.warning("{0} events are not available"
.format(set(events) - AVAILABLE_EVENTS))
for event in precached_config['on']:
if event in precache_map[context]:
precache_map[context][event].add(model)
else:
precache_map[context][event] = {model}
logger.debug("Setting up precaching for {0} in {1} on {2}"
.format(model, context, event))
return precache_map
|
wikimedia/ores | ores/wsgi/util.py | parse_injection | python | def parse_injection(request, rev_id):
cache = {}
if 'inject' in request.values:
try:
cache = json.loads(request.values['inject'])
except json.JSONDecodeError as e:
raise CacheParsingError(e)
if rev_id is not None:
rev_cache = cache
try:
for k, v in request.values.items():
if k.startswith(("feature.", "datasource.")):
rev_cache[k] = json.loads(v)
except json.JSONDecodeError as e:
raise CacheParsingError(e)
if len(rev_cache) > 0:
cache = {rev_id: rev_cache}
else:
cache = {int(rev_id): c for rev_id, c in cache.items()}
return cache or None | Parse values for features / datasources of interest. | train | https://github.com/wikimedia/ores/blob/75599b6ba0172c86d94f7f7e1e05a3c282333a18/ores/wsgi/util.py#L126-L151 | null | import json
import logging
import re
from flask_jsonpify import jsonify as flask_jsonify
from ..score_request import ScoreRequest
logger = logging.getLogger(__name__)
class CacheParsingError(Exception):
pass
class ParamError(Exception):
pass
def jsonify(doc):
return flask_jsonify(normalize_json(doc))
def normalize_json(doc):
if isinstance(doc, dict):
return {_ensure_str_key(k): normalize_json(v) for k, v in doc.items()}
else:
return doc
def _ensure_str_key(key):
if isinstance(key, bool):
return "true" if key else "false"
else:
return str(key)
def read_param(request, param, default=None, type=str):
try:
value = request.args.get(param, request.form.get(param))
if value is None:
return default
else:
return type(value)
except (ValueError, TypeError) as e:
raise ParamError("Could not interpret {0}. {1}".format(param, str(e)))
def read_bar_split_param(request, param, default=None, type=str):
values = read_param(request, param, default=default)
if values is None:
return []
try:
return [type(value) for value in values.split("|")]
except (ValueError, TypeError) as e:
raise ParamError("Could not interpret {0}. {1}"
.format(param, str(e)))
def format_error(error):
error_type = error.__class__.__name__
message = str(error)
return {'error': {'type': error_type, 'message': message}}
def build_score_request(scoring_system, request, context_name=None, rev_id=None,
model_name=None):
"""
Build an :class:`ores.ScoreRequest` from information contained in a
request.
:Parameters:
scoring_system : :class:`ores.ScoringSystem`
A scoring system to build request with
request : :class:`flask.Request`
A web request to extract information from
context_name : `str`
The name of the context to perform scoring
rev_id : int
The revision ID to score. Note that multiple IDs can be provided
in `request.args`
model_name = `str`
The name of the model to score. Note that multiple models can be
provided in `request.args`
"""
rev_ids = parse_rev_ids(request, rev_id)
model_names = parse_model_names(request, model_name)
precache = 'precache' in request.args
include_features = 'features' in request.args
injection_caches = parse_injection(request, rev_id)
model_info = parse_model_info(request)
if context_name and context_name in scoring_system and not model_names:
model_names = scoring_system[context_name].keys()
# WMF specific solution
if request.headers.get('X-Client-IP') is None:
ip = request.remote_addr.strip()
else:
ip = request.headers['X-Client-IP'].strip()
return ScoreRequest(context_name, rev_ids, model_names,
precache=precache,
include_features=include_features,
injection_caches=injection_caches,
model_info=model_info,
ip=ip)
def parse_rev_ids(request, rev_id):
if rev_id is not None:
return [int(rev_id)]
else:
return read_bar_split_param(request, "revids", type=int)
def parse_model_names(request, model_name):
if model_name is not None:
return [model_name]
else:
return read_bar_split_param(request, "models", type=str)
def parse_model_info(request):
return read_bar_split_param(request, "model_info", type=str)
def build_score_request_from_event(precache_map, event):
context_name = event['database']
rev_id = event['rev_id']
# Check to see if we have the context available in our precache_map
if context_name not in precache_map:
return None
# Start building the response document
event_set = build_event_set(event)
model_names = {m for e in event_set if e in precache_map[context_name]
for m in precache_map[context_name][e]}
if len(model_names) == 0:
return None
return ScoreRequest(context_name, [rev_id], model_names, precache=True)
# TODO: This strategy for building up events is not sustainable.
def build_event_set(event):
"""
Turn an EventStream event into a set of event types that ORES
uses internally.
"""
event_set = set()
if re.match(r"([^\.]+.)?mediawiki\.revision-create$",
event['meta']['topic']):
event_set.add('edit')
user_groups = event.get('performer', {}).get('user_groups', [])
if 'bot' in user_groups:
event_set.add('bot_edit')
else:
event_set.add('nonbot_edit')
if not event.get('rev_parent_id'):
event_set.add('page_creation')
if 'bot' in user_groups:
event_set.add('bot_page_creation')
else:
event_set.add('nonbot_page_creation')
return event_set
AVAILABLE_EVENTS = {'edit', 'bot_edit', 'nonbot_edit', 'page_creation',
'bot_page_creation', 'nonbot_page_creation'}
def build_precache_map(config):
"""
Build a mapping of contexts and models from the configuration
"""
precache_map = {}
ss_name = config['ores']['scoring_system']
for context in config['scoring_systems'][ss_name]['scoring_contexts']:
precache_map[context] = {}
for model in config['scoring_contexts'][context].get('precache', []):
precached_config = \
config['scoring_contexts'][context]['precache'][model]
events = precached_config['on']
if len(set(events) - AVAILABLE_EVENTS) > 0:
logger.warning("{0} events are not available"
.format(set(events) - AVAILABLE_EVENTS))
for event in precached_config['on']:
if event in precache_map[context]:
precache_map[context][event].add(model)
else:
precache_map[context][event] = {model}
logger.debug("Setting up precaching for {0} in {1} on {2}"
.format(model, context, event))
return precache_map
|
wikimedia/ores | ores/wsgi/util.py | build_event_set | python | def build_event_set(event):
event_set = set()
if re.match(r"([^\.]+.)?mediawiki\.revision-create$",
event['meta']['topic']):
event_set.add('edit')
user_groups = event.get('performer', {}).get('user_groups', [])
if 'bot' in user_groups:
event_set.add('bot_edit')
else:
event_set.add('nonbot_edit')
if not event.get('rev_parent_id'):
event_set.add('page_creation')
if 'bot' in user_groups:
event_set.add('bot_page_creation')
else:
event_set.add('nonbot_page_creation')
return event_set | Turn an EventStream event into a set of event types that ORES
uses internally. | train | https://github.com/wikimedia/ores/blob/75599b6ba0172c86d94f7f7e1e05a3c282333a18/ores/wsgi/util.py#L179-L202 | null | import json
import logging
import re
from flask_jsonpify import jsonify as flask_jsonify
from ..score_request import ScoreRequest
logger = logging.getLogger(__name__)
class CacheParsingError(Exception):
pass
class ParamError(Exception):
pass
def jsonify(doc):
return flask_jsonify(normalize_json(doc))
def normalize_json(doc):
if isinstance(doc, dict):
return {_ensure_str_key(k): normalize_json(v) for k, v in doc.items()}
else:
return doc
def _ensure_str_key(key):
if isinstance(key, bool):
return "true" if key else "false"
else:
return str(key)
def read_param(request, param, default=None, type=str):
try:
value = request.args.get(param, request.form.get(param))
if value is None:
return default
else:
return type(value)
except (ValueError, TypeError) as e:
raise ParamError("Could not interpret {0}. {1}".format(param, str(e)))
def read_bar_split_param(request, param, default=None, type=str):
values = read_param(request, param, default=default)
if values is None:
return []
try:
return [type(value) for value in values.split("|")]
except (ValueError, TypeError) as e:
raise ParamError("Could not interpret {0}. {1}"
.format(param, str(e)))
def format_error(error):
error_type = error.__class__.__name__
message = str(error)
return {'error': {'type': error_type, 'message': message}}
def build_score_request(scoring_system, request, context_name=None, rev_id=None,
model_name=None):
"""
Build an :class:`ores.ScoreRequest` from information contained in a
request.
:Parameters:
scoring_system : :class:`ores.ScoringSystem`
A scoring system to build request with
request : :class:`flask.Request`
A web request to extract information from
context_name : `str`
The name of the context to perform scoring
rev_id : int
The revision ID to score. Note that multiple IDs can be provided
in `request.args`
model_name = `str`
The name of the model to score. Note that multiple models can be
provided in `request.args`
"""
rev_ids = parse_rev_ids(request, rev_id)
model_names = parse_model_names(request, model_name)
precache = 'precache' in request.args
include_features = 'features' in request.args
injection_caches = parse_injection(request, rev_id)
model_info = parse_model_info(request)
if context_name and context_name in scoring_system and not model_names:
model_names = scoring_system[context_name].keys()
# WMF specific solution
if request.headers.get('X-Client-IP') is None:
ip = request.remote_addr.strip()
else:
ip = request.headers['X-Client-IP'].strip()
return ScoreRequest(context_name, rev_ids, model_names,
precache=precache,
include_features=include_features,
injection_caches=injection_caches,
model_info=model_info,
ip=ip)
def parse_rev_ids(request, rev_id):
if rev_id is not None:
return [int(rev_id)]
else:
return read_bar_split_param(request, "revids", type=int)
def parse_model_names(request, model_name):
if model_name is not None:
return [model_name]
else:
return read_bar_split_param(request, "models", type=str)
def parse_injection(request, rev_id):
"""Parse values for features / datasources of interest."""
cache = {}
if 'inject' in request.values:
try:
cache = json.loads(request.values['inject'])
except json.JSONDecodeError as e:
raise CacheParsingError(e)
if rev_id is not None:
rev_cache = cache
try:
for k, v in request.values.items():
if k.startswith(("feature.", "datasource.")):
rev_cache[k] = json.loads(v)
except json.JSONDecodeError as e:
raise CacheParsingError(e)
if len(rev_cache) > 0:
cache = {rev_id: rev_cache}
else:
cache = {int(rev_id): c for rev_id, c in cache.items()}
return cache or None
def parse_model_info(request):
return read_bar_split_param(request, "model_info", type=str)
def build_score_request_from_event(precache_map, event):
context_name = event['database']
rev_id = event['rev_id']
# Check to see if we have the context available in our precache_map
if context_name not in precache_map:
return None
# Start building the response document
event_set = build_event_set(event)
model_names = {m for e in event_set if e in precache_map[context_name]
for m in precache_map[context_name][e]}
if len(model_names) == 0:
return None
return ScoreRequest(context_name, [rev_id], model_names, precache=True)
# TODO: This strategy for building up events is not sustainable.
AVAILABLE_EVENTS = {'edit', 'bot_edit', 'nonbot_edit', 'page_creation',
'bot_page_creation', 'nonbot_page_creation'}
def build_precache_map(config):
"""
Build a mapping of contexts and models from the configuration
"""
precache_map = {}
ss_name = config['ores']['scoring_system']
for context in config['scoring_systems'][ss_name]['scoring_contexts']:
precache_map[context] = {}
for model in config['scoring_contexts'][context].get('precache', []):
precached_config = \
config['scoring_contexts'][context]['precache'][model]
events = precached_config['on']
if len(set(events) - AVAILABLE_EVENTS) > 0:
logger.warning("{0} events are not available"
.format(set(events) - AVAILABLE_EVENTS))
for event in precached_config['on']:
if event in precache_map[context]:
precache_map[context][event].add(model)
else:
precache_map[context][event] = {model}
logger.debug("Setting up precaching for {0} in {1} on {2}"
.format(model, context, event))
return precache_map
|
wikimedia/ores | ores/wsgi/util.py | build_precache_map | python | def build_precache_map(config):
precache_map = {}
ss_name = config['ores']['scoring_system']
for context in config['scoring_systems'][ss_name]['scoring_contexts']:
precache_map[context] = {}
for model in config['scoring_contexts'][context].get('precache', []):
precached_config = \
config['scoring_contexts'][context]['precache'][model]
events = precached_config['on']
if len(set(events) - AVAILABLE_EVENTS) > 0:
logger.warning("{0} events are not available"
.format(set(events) - AVAILABLE_EVENTS))
for event in precached_config['on']:
if event in precache_map[context]:
precache_map[context][event].add(model)
else:
precache_map[context][event] = {model}
logger.debug("Setting up precaching for {0} in {1} on {2}"
.format(model, context, event))
return precache_map | Build a mapping of contexts and models from the configuration | train | https://github.com/wikimedia/ores/blob/75599b6ba0172c86d94f7f7e1e05a3c282333a18/ores/wsgi/util.py#L209-L233 | null | import json
import logging
import re
from flask_jsonpify import jsonify as flask_jsonify
from ..score_request import ScoreRequest
logger = logging.getLogger(__name__)
class CacheParsingError(Exception):
    """Raised when an injected feature/datasource cache is not valid JSON."""
    pass


class ParamError(Exception):
    """Raised when a request parameter cannot be coerced to the wanted type."""
    pass
def jsonify(doc):
    """Normalize *doc*'s mapping keys, then serialize it with Flask."""
    normalized = normalize_json(doc)
    return flask_jsonify(normalized)
def normalize_json(doc):
if isinstance(doc, dict):
return {_ensure_str_key(k): normalize_json(v) for k, v in doc.items()}
else:
return doc
def _ensure_str_key(key):
if isinstance(key, bool):
return "true" if key else "false"
else:
return str(key)
def read_param(request, param, default=None, type=str):
    """
    Read *param* from the query string, falling back to the form body,
    and coerce it with *type*.

    Returns *default* when the parameter is absent; raises
    :class:`ParamError` when coercion fails.
    """
    try:
        raw = request.args.get(param, request.form.get(param))
        return default if raw is None else type(raw)
    except (ValueError, TypeError) as e:
        raise ParamError("Could not interpret {0}. {1}".format(param, str(e)))


def read_bar_split_param(request, param, default=None, type=str):
    """
    Read a '|'-separated request parameter and coerce each piece with
    *type*.  Returns ``[]`` when the parameter resolves to ``None``;
    raises :class:`ParamError` when any piece fails to coerce.
    """
    joined = read_param(request, param, default=default)
    if joined is None:
        return []
    try:
        return [type(piece) for piece in joined.split("|")]
    except (ValueError, TypeError) as e:
        raise ParamError("Could not interpret {0}. {1}"
                         .format(param, str(e)))
def format_error(error):
    """Render an exception as ORES's standard JSON-able error document."""
    return {'error': {'type': type(error).__name__,
                      'message': str(error)}}
def build_score_request(scoring_system, request, context_name=None, rev_id=None,
                        model_name=None):
    """
    Build an :class:`ores.ScoreRequest` from information contained in a
    request.

    :Parameters:
        scoring_system : :class:`ores.ScoringSystem`
            A scoring system to build request with
        request : :class:`flask.Request`
            A web request to extract information from
        context_name : `str`
            The name of the context to perform scoring
        rev_id : int
            The revision ID to score. Note that multiple IDs can be provided
            in `request.args`
        model_name : `str`
            The name of the model to score. Note that multiple models can be
            provided in `request.args`
    """
    rev_ids = parse_rev_ids(request, rev_id)
    model_names = parse_model_names(request, model_name)
    # Flag-style parameters: mere presence in the query string enables them.
    precache = 'precache' in request.args
    include_features = 'features' in request.args
    injection_caches = parse_injection(request, rev_id)
    model_info = parse_model_info(request)
    # If a known context was given but no model was requested, default to
    # scoring with every model registered for that context.
    if context_name and context_name in scoring_system and not model_names:
        model_names = scoring_system[context_name].keys()
    # WMF specific solution: prefer the X-Client-IP header over the raw
    # socket address when present (presumably set by WMF's proxy layer --
    # TODO confirm).
    if request.headers.get('X-Client-IP') is None:
        ip = request.remote_addr.strip()
    else:
        ip = request.headers['X-Client-IP'].strip()
    return ScoreRequest(context_name, rev_ids, model_names,
                        precache=precache,
                        include_features=include_features,
                        injection_caches=injection_caches,
                        model_info=model_info,
                        ip=ip)
def parse_rev_ids(request, rev_id):
    """Return the revision IDs to score, as a list of ints."""
    if rev_id is None:
        # Multi-revision form: '|'-separated "revids" request parameter.
        return read_bar_split_param(request, "revids", type=int)
    return [int(rev_id)]
def parse_model_names(request, model_name):
    """Return the model names requested, as a list of strs."""
    if model_name is None:
        # Multi-model form: '|'-separated "models" request parameter.
        return read_bar_split_param(request, "models", type=str)
    return [model_name]
def parse_injection(request, rev_id):
    """
    Parse values for features / datasources of interest.

    Returns ``{rev_id (int): {name: value}}`` or ``None`` when nothing
    was injected.  Raises :class:`CacheParsingError` when any injected
    value is not valid JSON.
    """
    cache = {}
    if 'inject' in request.values:
        try:
            cache = json.loads(request.values['inject'])
        except json.JSONDecodeError as e:
            raise CacheParsingError(e)
    if rev_id is not None:
        # Single-revision request: loose "feature.*" / "datasource.*"
        # parameters extend the injected cache for that one revision.
        rev_cache = cache
        try:
            for k, v in request.values.items():
                if k.startswith(("feature.", "datasource.")):
                    rev_cache[k] = json.loads(v)
        except json.JSONDecodeError as e:
            raise CacheParsingError(e)
        if len(rev_cache) > 0:
            # Bug fix: normalize the key to int so it matches the int
            # rev_ids produced by parse_rev_ids().  Previously a str
            # rev_id from the URL produced a key that never matched.
            cache = {int(rev_id): rev_cache}
    else:
        # Multi-revision request: the inject document is keyed by rev_id
        # (JSON keys are strings, so int-normalize them here too).
        cache = {int(rev_id): c for rev_id, c in cache.items()}
    return cache or None
def parse_model_info(request):
    # "model_info" is a '|'-separated list of model-information fields
    # (e.g. "version") to include in the response.
    return read_bar_split_param(request, "model_info", type=str)
def build_score_request_from_event(precache_map, event):
    """
    Convert an EventStream *event* into a precaching
    :class:`ScoreRequest`, or return ``None`` when nothing in
    *precache_map* applies to this wiki/event combination.
    """
    context_name = event['database']
    rev_id = event['rev_id']
    if context_name not in precache_map:
        # No precaching configured for this wiki at all.
        return None
    event_map = precache_map[context_name]
    model_names = set()
    for event_name in build_event_set(event):
        model_names |= event_map.get(event_name, set())
    if not model_names:
        return None
    return ScoreRequest(context_name, [rev_id], model_names, precache=True)
# TODO: This strategy for building up events is not sustainable.
def build_event_set(event):
    """
    Turn an EventStream event into a set of event types that ORES
    uses internally.
    """
    events = set()
    if re.match(r"([^\.]+.)?mediawiki\.revision-create$",
                event['meta']['topic']):
        is_bot = 'bot' in event.get('performer', {}).get('user_groups', [])
        events.add('edit')
        events.add('bot_edit' if is_bot else 'nonbot_edit')
        # A revision with no parent is the first revision of a page.
        if not event.get('rev_parent_id'):
            events.add('page_creation')
            events.add('bot_page_creation' if is_bot
                       else 'nonbot_page_creation')
    return events
AVAILABLE_EVENTS = {'edit', 'bot_edit', 'nonbot_edit', 'page_creation',
'bot_page_creation', 'nonbot_page_creation'}
|
wikimedia/ores | ores/score_caches/redis.py | RedisSentinel.from_config | python | def from_config(cls, config, name, section_key="score_caches"):
sentinel_logger.info("Loading RedisSentinel '{0}' from config.".format(name))
section = config[section_key][name]
kwargs = {k: v for k, v in section.items() if k != "class"}
return cls.from_parameters(**kwargs) | score_caches:
redis_sentinel:
class: ores.score_caches.RedisSentinel
prefix: ores-derp
ttl: 9001
socket_timeout: 0.1
cluster: mymaster
hosts:
- localhost:5000
- localhost:5001
- localhost:5002 | train | https://github.com/wikimedia/ores/blob/75599b6ba0172c86d94f7f7e1e05a3c282333a18/ores/score_caches/redis.py#L148-L167 | null | class RedisSentinel(ScoreCache):
def __init__(self, sentinel, ttl=None, prefix=None, cluster=None, socket_timeout=None):
    # sentinel: presumably a redis.sentinel.Sentinel instance (see
    # from_parameters) used to locate the current master/replica.
    # Falsy arguments fall back to the module-level defaults
    # (TTL, PREFIX, CLUSTER, SOCKET_TIMEOUT).
    self.sentinel = sentinel
    self.ttl = int(ttl or TTL)
    self.prefix = str(prefix or PREFIX)
    self.cluster = str(cluster or CLUSTER)
    self.socket_timeout = float(socket_timeout or SOCKET_TIMEOUT)
def lookup(self, context_name, model_name, rev_id, version=None,
           injection_cache=None):
    """Fetch a cached score from a replica; raise KeyError on a miss."""
    key = self._generate_key(
        context_name, model_name, rev_id, version=version,
        injection_cache=injection_cache)
    # Reads go to a replica (slave_for), keeping load off the master.
    replica = self.sentinel.slave_for(self.cluster, socket_timeout=self.socket_timeout)
    sentinel_logger.debug("Looking up score at {0} in replica".format(key))
    value = replica.get(key)
    if value is None:
        # Redis returns None for a missing key; surface it as KeyError
        # to match dict-style cache semantics.
        raise KeyError(key)
    else:
        return json.loads(str(value, 'utf-8'))
def store(self, score, context_name, model_name, rev_id, version=None,
          injection_cache=None):
    """Write *score* to the master, expiring after self.ttl seconds."""
    key = self._generate_key(
        context_name, model_name, rev_id, version=version,
        injection_cache=injection_cache)
    # Writes must go to the master node of the cluster.
    master = self.sentinel.master_for(self.cluster, socket_timeout=self.socket_timeout)
    sentinel_logger.debug("Storing score at {0} in master".format(key))
    # setex = SET with EXpiry; the score is stored JSON-encoded.
    master.setex(key, self.ttl, bytes(json.dumps(score), 'utf-8'))
def _generate_key(self, wiki, model, rev_id, version=None,
                  injection_cache=None):
    # Key layout: <prefix>:<wiki>:<model>:<rev_id>:<version>[:<cache_hash>]
    # The hash component only appears when feature/datasource injection
    # was used, so injected scores never collide with plain ones.
    if injection_cache is None or len(injection_cache) == 0:
        key_values = [self.prefix, wiki, model, rev_id, version]
    else:
        cache_hash = Redis.hash_cache(injection_cache)
        key_values = [self.prefix, wiki, model, rev_id, version,
                      cache_hash]
    return ":".join(str(v) for v in key_values)
@classmethod
def from_parameters(cls, hosts, ttl=None, prefix=None, cluster=None,
                    socket_timeout=None):
    """Build a RedisSentinel cache from plain parameters.

    *hosts* is an iterable of "host:port" strings.
    """
    # redis-py is an optional dependency; import lazily and fail with a
    # helpful message.
    try:
        from redis.sentinel import Sentinel
    except ImportError:
        raise ImportError("Could not find redis-py. This packages is " +
                          "required when using ores.score_caches.RedisSentinel.")
    hosts = [i.split(':') for i in hosts]
    return cls(Sentinel(hosts, socket_timeout=socket_timeout),
               ttl=ttl, prefix=prefix, cluster=cluster,
               socket_timeout=socket_timeout)
@classmethod
|
wikimedia/ores | ores/scoring/models/rev_id_scorer.py | RevIdScorer.calculate_statistics | python | def calculate_statistics(self):
"Jam some data through to generate statistics"
rev_ids = range(0, 100, 1)
feature_values = zip(rev_ids, [0] * 100)
scores = [self.score(f) for f in feature_values]
labels = [s['prediction'] for s in scores]
statistics = Classification(labels, threshold_ndigits=1, decision_key='probability')
score_labels = list(zip(scores, labels))
statistics.fit(score_labels)
return statistics | Jam some data through to generate statistics | train | https://github.com/wikimedia/ores/blob/75599b6ba0172c86d94f7f7e1e05a3c282333a18/ores/scoring/models/rev_id_scorer.py#L67-L76 | null | class RevIdScorer(Model):
"""
Implements a basic, testing scorer that predicts whether a revision ID's
reversed last two digits are greater than 50.
E.g. 974623 = 32 and 23754929 = 92
"""
def __init__(self, version=None):
    # Two features: the reversed last-two digits of the rev_id, and an
    # artificial delay (seconds) that score() sleeps for.
    super().__init__([reversed_last_two_in_rev_id, delay], version=version)
    self.info = ModelInfo()
    self.info['version'] = version
    self.info['type'] = "RevIDScorer"
    self.info['behavior'] = "Returns the last two digits in a rev_id " + \
                            "as a score."
    # Pre-compute fake fitness statistics (see calculate_statistics).
    self.info['statistics'] = self.calculate_statistics()
def score(self, feature_values):
    """
    Produce a fake score from ``(reversed_last_two_in_rev_id, delay)``.

    Sleeps for *delay* seconds, then predicts True exactly when the
    reversed last-two-digits value exceeds 50.
    """
    reversed_digits, delay = feature_values
    time.sleep(delay)
    p_true = reversed_digits / 100
    return {
        'prediction': p_true > 0.5,
        'probability': {
            True: p_true,
            False: 1 - p_true,
        },
    }
@classmethod
def from_config(cls, config, name, section_key='scorer_models'):
    """Instantiate from a config section, dropping the 'class' key."""
    section = config[section_key][name]
    kwargs = {k: v for k, v in section.items() if k != "class"}
    return cls(**kwargs)
|
wikimedia/ores | ores/wsgi/routes/v1/util.py | format_v1_score_response | python | def format_v1_score_response(response, limit_to_model=None):
response_doc = defaultdict(dict)
for rev_id, rev_scores in response.scores.items():
for model_name, score in rev_scores.items():
response_doc[rev_id][model_name] = score
for rev_id, rev_errors in response.errors.items():
for model_name, error in rev_errors.items():
response_doc[rev_id][model_name] = util.format_error(error)
if limit_to_model is not None:
return util.jsonify({rev_id: model_scores[limit_to_model]
for rev_id, model_scores in response_doc.items()})
else:
return util.jsonify(response_doc) | The response format looks like this::
{
"<rev_id>": {
"<model_name>": <score>
"<model_name>": <score>
},
"<rev_id>": {
"<model_name>": <score>
"<model_name>": <score>
}
} | train | https://github.com/wikimedia/ores/blob/75599b6ba0172c86d94f7f7e1e05a3c282333a18/ores/wsgi/routes/v1/util.py#L6-L34 | [
"def jsonify(doc):\n return flask_jsonify(normalize_json(doc))\n",
"def format_error(error):\n error_type = error.__class__.__name__\n message = str(error)\n\n return {'error': {'type': error_type, 'message': message}}\n"
] | from collections import defaultdict
from ... import util
def format_some_model_info(scoring_system, request, limit_to_model=None):
    """
    Build a JSON response describing the requested models.

    :Parameters:
        scoring_system :
            The ScoringSystem holding the contexts/models
        request :
            A score request naming context_name, model_names and model_info
        limit_to_model : str
            When set, return just that model's info document instead of
            the ``{"models": {...}}`` wrapper.
    """
    scoring_system.check_context_models(request)
    model_infos = {}
    for model_name in request.model_names:
        model_info = \
            scoring_system[request.context_name].format_model_info(
                model_name, request.model_info)
        model_infos[model_name] = model_info
    if limit_to_model is None:
        return util.jsonify({'models': model_infos})
    else:
        # Bug fix: previously this indexed with the leaked loop variable
        # `model_name` (i.e. the *last* model iterated) rather than the
        # explicitly requested `limit_to_model`.
        return util.jsonify(model_infos[limit_to_model])
|
wikimedia/ores | setup.py | requirements | python | def requirements(fname):
with open(fname) as f:
for line in f:
match = re.search('#egg=(.*)$', line)
if match:
yield match.groups()[0]
else:
yield line.strip() | Generator to parse requirements.txt file
Supports bits of extended pip format (git urls) | train | https://github.com/wikimedia/ores/blob/75599b6ba0172c86d94f7f7e1e05a3c282333a18/setup.py#L20-L32 | null | import os
import platform
import re
import sys
from setuptools import find_packages, setup
about_path = os.path.join(os.path.dirname(__file__), "ores/about.py")
exec(compile(open(about_path).read(), about_path, "exec"))
if sys.version_info <= (3, 0):
print("ORES needs Python 3 to run properly. Your version is " + platform.python_version())
sys.exit(1)
def read(fname):
    # Resolve fname relative to this setup.py's directory so an install
    # run from any cwd still finds e.g. README.md.
    return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
python_requires=">=3",
name=__name__, # noqa
version=__version__, # noqa
author=__author__, # noqa
author_email=__author_email__, # noqa
description=__description__, # noqa
url=__url__, # noqa
license=__license__, # noqa
entry_points={
'console_scripts': [
'ores = ores.ores:main',
],
},
packages=find_packages(),
include_package_data=True,
long_description=read('README.md'),
install_requires=list(requirements("requirements.txt")),
extras_require={
# Install ores[redis] if your deployment will use the Redis scoring
# cache and Celery backend.
"redis": [
"pylru",
"redis",
],
},
classifiers=[
"Development Status :: 3 - Alpha",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Environment :: Other Environment",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent"
],
)
|
wikimedia/ores | ores/api.py | Session.score | python | def score(self, context, models, revids):
if isinstance(revids, int):
rev_ids = [revids]
else:
rev_ids = [int(rid) for rid in revids]
return self._score(context, models, rev_ids) | Genetate scores for model applied to a sequence of revisions.
:Parameters:
context : str
The name of the context -- usually the database name of a wiki
models : `iterable`
The names of a models to apply
revids : `iterable`
A sequence of revision IDs to score. | train | https://github.com/wikimedia/ores/blob/75599b6ba0172c86d94f7f7e1e05a3c282333a18/ores/api.py#L68-L85 | [
"def _score(self, context, models, rev_ids):\n logging.debug(\"Starting up thread pool with {0} workers\"\n .format(self.workers))\n with ThreadPoolExecutor(max_workers=self.workers) as executor:\n future_rev_ids = [] # A list of future results and the revids\n\n # This loop loads all rev_id_batch's into the executor for\n # processing\n for rev_id_batch in chunked(rev_ids, self.batch_size):\n rev_id_batch = list(rev_id_batch)\n logging.debug(\"Starting batch of {0} revids\"\n .format(len(rev_id_batch)))\n future_rev_ids.append((\n executor.submit(self._score_request, context, rev_id_batch,\n models),\n rev_id_batch))\n\n # This loop blocks on reading the futures as soon as they are ready\n for (future, rev_id_batch) in future_rev_ids:\n try:\n for score in future.result():\n yield score\n except RuntimeError as e:\n logger.warning(\n \"An ORES scoring job failed with the following error:\")\n logger.warning(traceback.format_exc())\n for rev_id in rev_id_batch:\n yield {m: {\"error\": e.args[0]}\n for m in models}\n"
] | class Session:
"""
Constructs a session with an ORES API and provides facilities for scoring
revisions in batch and parallel.
:Parameters:
host : str
The host of ORES to connect to (preceed with http:// or https://)
user_agent : str
A User-Agent header to send with every request
batch_size : int
The number of scores to batch per request.
parallel_request : int
The maximum number of requests to make in parallel
retries : int
The maximum number of retries for basic HTTP errors before giving
up
"""
DEFAULT_USERAGENT = "ores.api default user-agent"
def __init__(self, host, user_agent=None, session=None,
             retries=5, batch_size=50, parallel_requests=4):
    # host must include the scheme (http:// / https://): it is used both
    # as the URL base and as the mount prefix for the retry adapter.
    self.host = str(host)
    if session is not None:
        # Caller-supplied session is used as-is.  NOTE(review): on this
        # path `retries` is ignored and self.retries is never set.
        self._session = session
    else:
        self.retries = int(retries)
        self._session = requests.Session()
        self._session.mount(self.host,
                            requests.adapters.HTTPAdapter(max_retries=retries))
    self.batch_size = int(batch_size)
    self.workers = int(parallel_requests)
    self.headers = {}
    if user_agent is None:
        # Nag so operators can identify their traffic in server logs.
        logger.warning("Sending requests with default User-Agent. " +
                       "Set 'user_agent' on oresapi.Session to " +
                       "quiet this message.")
        self.headers['User-Agent'] = self.DEFAULT_USERAGENT
    else:
        self.headers['User-Agent'] = user_agent
def _score(self, context, models, rev_ids):
    """
    Generator: submit rev_ids to a thread pool in batches and yield one
    score document per rev_id, in input order.
    """
    logging.debug("Starting up thread pool with {0} workers"
                  .format(self.workers))
    with ThreadPoolExecutor(max_workers=self.workers) as executor:
        future_rev_ids = []  # A list of future results and the revids
        # This loop loads all rev_id_batch's into the executor for
        # processing
        for rev_id_batch in chunked(rev_ids, self.batch_size):
            rev_id_batch = list(rev_id_batch)
            logging.debug("Starting batch of {0} revids"
                          .format(len(rev_id_batch)))
            future_rev_ids.append((
                executor.submit(self._score_request, context, rev_id_batch,
                                models),
                rev_id_batch))
        # This loop blocks on reading the futures as soon as they are ready
        for (future, rev_id_batch) in future_rev_ids:
            try:
                for score in future.result():
                    yield score
            except RuntimeError as e:
                logger.warning(
                    "An ORES scoring job failed with the following error:")
                logger.warning(traceback.format_exc())
                # Degrade gracefully: yield an error document per rev_id
                # so the output stays aligned with the input sequence.
                for rev_id in rev_id_batch:
                    yield {m: {"error": e.args[0]}
                           for m in models}
def _score_request(self, context, rev_ids, models):
    """
    Perform one /v3/scores HTTP request; return a list of score
    documents, one per rev_id, in order.  Raises RuntimeError for
    non-JSON bodies and server-reported errors.
    """
    url = self.host + "/v3/scores/{0}/".format(urllib.parse.quote(context))
    params = {'revids': "|".join(str(rid) for rid in rev_ids),
              'models': "|".join(urllib.parse.quote(model)
                                 for model in models)}
    logging.debug("Sending score request for {0} revisions"
                  .format(len(rev_ids)))
    start = time.time()
    response = self._session.get(url, params=params,
                                 headers=self.headers,
                                 verify=True, stream=True)
    try:
        doc = response.json()
    except ValueError:
        # Truncate the body -- it may be a huge HTML error page.
        raise RuntimeError("Non-json response: " + response.text[:100])
    logging.debug("Score request completed for " +
                  "{0} revisions completed in {1} seconds"
                  .format(len(rev_ids), round(time.time() - start, 3)))
    if 'error' in doc:
        # TODO: custom class
        raise RuntimeError(doc['error'])
    if 'warnings' in doc:
        for warning_doc in doc['warnings']:
            logger.warn(warning_doc)
    # JSON object keys are strings, hence str(rev_id) for the lookup.
    return [doc[context]['scores'][str(rev_id)]
            for rev_id in rev_ids]
|
wikimedia/ores | ores/wsgi/routes/v2/util.py | format_v2_score_response | python | def format_v2_score_response(request, response):
return util.jsonify({"scores": {
response.context.name: {
model_name: format_v2_model(request, response, model_name)
for model_name in response.request.model_names}}}) | {
"scores": {
"<context>": {
"<model_name>": {
"scores": {
"<rev_id>": <score>,
"<rev_id>": <score>
},
"features": {
"<rev_id>": <features>,
"<rev_id>": <features>
},
"info": <model_info>,
"version": "<model_version>"
}
"<model_name>": {
"scores": {
"<rev_id>": <score>,
"<rev_id>": <score>
},
"features": {
"<rev_id>": <features>,
"<rev_id>": <features>
},
"info": <model_info>,
"version": "<model_version>"
}
}
}
} | train | https://github.com/wikimedia/ores/blob/75599b6ba0172c86d94f7f7e1e05a3c282333a18/ores/wsgi/routes/v2/util.py#L7-L43 | [
"def jsonify(doc):\n return flask_jsonify(normalize_json(doc))\n"
] | import traceback
from collections import defaultdict
from ... import responses, util
def format_v2_model(request, response, model_name):
    """
    Build the per-model section of a v2 scores response: version,
    optional model info, a rev_id->score map (errors formatted in place
    of scores) and a rev_id->features map.
    """
    model_doc = defaultdict(dict)
    model_doc['version'] = response.context.model_version(model_name)
    if request.model_info and model_name in response.model_info:
        model_doc['info'] = response.model_info[model_name]
    for rev_id, rev_scores in response.scores.items():
        if model_name in rev_scores:
            model_doc['scores'][rev_id] = rev_scores[model_name]
    # Errors occupy the score slot for their rev_id.
    for rev_id, rev_errors in response.errors.items():
        if model_name in rev_errors:
            model_doc['scores'][rev_id] = \
                util.format_error(rev_errors[model_name])
    for rev_id, rev_features in response.features.items():
        if model_name in rev_features:
            model_doc['features'][rev_id] = rev_features[model_name]
    return model_doc
def build_v2_context_model_map(score_request, scoring_system):
    """
    {
        "scores": {
            "<context>": {
                "<model_name>": {
                    "version": "<model_version>",
                    "info": <model_info>
                }
            },
            "<context>": {
                "<model_name>": {
                    "version": "<model_version>",
                    "info": <model_info>
                }
            }
        }
    }
    """
    try:
        context_models_doc = {}
        for context_name, context in scoring_system.items():
            context_models_doc[context_name] = {}
            for model_name in context:
                model_doc = {'version': context.model_version(model_name)}
                # "info" is only attached when the request asked for it.
                if score_request.model_info:
                    model_doc['info'] = context.format_model_info(
                        model_name, score_request.model_info)
                context_models_doc[context_name][model_name] = model_doc
        return util.jsonify({'scores': context_models_doc})
    except Exception:
        # Boundary handler: any failure becomes an error response rather
        # than propagating to the WSGI layer.
        return responses.unknown_error(traceback.format_exc())
|
wikimedia/ores | ores/wsgi/routes/v2/util.py | build_v2_context_model_map | python | def build_v2_context_model_map(score_request, scoring_system):
try:
context_models_doc = {}
for context_name, context in scoring_system.items():
context_models_doc[context_name] = {}
for model_name in context:
model_doc = {'version': context.model_version(model_name)}
if score_request.model_info:
model_doc['info'] = context.format_model_info(
model_name, score_request.model_info)
context_models_doc[context_name][model_name] = model_doc
return util.jsonify({'scores': context_models_doc})
except Exception:
return responses.unknown_error(traceback.format_exc()) | {
"scores": {
"<context>": {
"<model_name>": {
"version": "<model_version>",
"info": <model_info>
}
},
"<context>": {
"<model_name>": {
"version": "<model_version>",
"info": <model_info>
}
}
} | train | https://github.com/wikimedia/ores/blob/75599b6ba0172c86d94f7f7e1e05a3c282333a18/ores/wsgi/routes/v2/util.py#L70-L100 | [
"def jsonify(doc):\n return flask_jsonify(normalize_json(doc))\n",
"def unknown_error(message):\n logger.error(message)\n return error(500, 'internal server error', message)\n"
] | import traceback
from collections import defaultdict
from ... import responses, util
def format_v2_score_response(request, response):
"""
{
"scores": {
"<context>": {
"<model_name>": {
"scores": {
"<rev_id>": <score>,
"<rev_id>": <score>
},
"features": {
"<rev_id>": <features>,
"<rev_id>": <features>
},
"info": <model_info>,
"version": "<model_version>"
}
"<model_name>": {
"scores": {
"<rev_id>": <score>,
"<rev_id>": <score>
},
"features": {
"<rev_id>": <features>,
"<rev_id>": <features>
},
"info": <model_info>,
"version": "<model_version>"
}
}
}
}
"""
return util.jsonify({"scores": {
response.context.name: {
model_name: format_v2_model(request, response, model_name)
for model_name in response.request.model_names}}})
def format_v2_model(request, response, model_name):
model_doc = defaultdict(dict)
model_doc['version'] = response.context.model_version(model_name)
if request.model_info and model_name in response.model_info:
model_doc['info'] = response.model_info[model_name]
for rev_id, rev_scores in response.scores.items():
if model_name in rev_scores:
model_doc['scores'][rev_id] = rev_scores[model_name]
for rev_id, rev_errors in response.errors.items():
if model_name in rev_errors:
model_doc['scores'][rev_id] = \
util.format_error(rev_errors[model_name])
for rev_id, rev_features in response.features.items():
if model_name in rev_features:
model_doc['features'][rev_id] = rev_features[model_name]
return model_doc
|
wikimedia/ores | ores/wsgi/routes/v3/util.py | format_v3_score_response | python | def format_v3_score_response(response):
context_doc = defaultdict(lambda: defaultdict(dict))
if len(response.scores) > 0 or len(response.errors) > 0:
for rev_id, rev_scores in response.scores.items():
for model_name, score in rev_scores.items():
context_doc['scores'][rev_id][model_name] = \
{'score': score}
for rev_id, rev_errors in response.errors.items():
for model_name, error in rev_errors.items():
context_doc['scores'][rev_id][model_name] = \
util.format_error(error)
for rev_id, rev_features in response.features.items():
for model_name, features in rev_features.items():
context_doc['scores'][rev_id][model_name]['features'] = \
features
if len(response.model_info) > 0:
context_doc['models'] = {
model_name: info_doc
for model_name, info_doc in response.model_info.items()}
return util.jsonify({response.context.name: context_doc}) | {
"<context_name>": {
"scores": {
"<rev_id>": {
"<model_name>": {
"score": <score>,
"features": <features>
},
"<model_name>": {
"score": <score>,
"features": <features>
}
},
"<rev_id>": {
"<model_name>": {
"score": <score>,
"features": <features>
},
"<model_name>": {
"score": <score>,
"features": <features>
}
}
},
"models": {
"<model_name>": <model_info>,
"<model_name>": <model_info>
}
}
} | train | https://github.com/wikimedia/ores/blob/75599b6ba0172c86d94f7f7e1e05a3c282333a18/ores/wsgi/routes/v3/util.py#L10-L65 | [
"def jsonify(doc):\n return flask_jsonify(normalize_json(doc))\n",
"def format_error(error):\n error_type = error.__class__.__name__\n message = str(error)\n\n return {'error': {'type': error_type, 'message': message}}\n"
] | import traceback
from collections import defaultdict
from revscoring.errors import ModelInfoLookupError
from .... import errors
from ... import responses, util
def build_v3_context_model_map(score_request, scoring_system):
    """
    {
        "<context>": {
            "models": {
                "<model_name>": <model_info>,
                "<model_name>": <model_info>
            }
        },
        "<context>": {
            "models": {
                "<model_name>": <model_info>,
                "<model_name>": <model_info>
            }
        }
    }
    """
    try:
        context_models_doc = {}
        for context_name, context in scoring_system.items():
            context_models_doc[context_name] = {'models': {}}
            for model_name in context:
                # v3 always includes at least the version field.
                model_doc = context.format_model_info(
                    model_name, score_request.model_info or ['version'])
                context_models_doc[context_name]['models'][model_name] = model_doc
        return util.jsonify(context_models_doc)
    except Exception:
        # Boundary handler: any failure becomes an error response rather
        # than propagating to the WSGI layer.
        return responses.unknown_error(traceback.format_exc())
def process_score_request(score_request, scoring_system):
    """
    Score a request and translate every known failure mode into the
    matching HTTP error response, recording metrics where applicable.
    """
    # v3 always reports at least the model version.
    score_request.model_info = score_request.model_info or ['version']
    try:
        score_response = scoring_system.score(score_request)
        return format_v3_score_response(score_response)
    except errors.ScoreProcessorOverloaded:
        scoring_system.metrics_collector.response_made(
            responses.SERVER_OVERLOADED, score_request)
        return responses.server_overloaded()
    except errors.MissingContext as e:
        scoring_system.metrics_collector.response_made(
            responses.NOT_FOUND, score_request)
        return responses.not_found("No scorers available for {0}"
                                   .format(e))
    except errors.MissingModels as e:
        scoring_system.metrics_collector.response_made(
            responses.NOT_FOUND, score_request)
        # MissingModels carries (context_name, model_names) in e.args.
        context_name, model_names = e.args
        return responses.not_found(
            "Models {0} not available for {1}"
            .format(tuple(model_names), context_name))
    except ModelInfoLookupError as e:
        return responses.model_info_lookup_error(e)
    except errors.TimeoutError:
        scoring_system.metrics_collector.response_made(
            responses.TIMEOUT, score_request)
        return responses.timeout_error()
    except errors.TooManyRequestsError:
        scoring_system.metrics_collector.response_made(
            responses.TOO_MANY_REQUESTS, score_request)
        return responses.too_many_requests_error()
    except Exception:
        # Last-resort boundary: unknown failures become an error response
        # with the traceback attached.
        return responses.unknown_error(traceback.format_exc())
|
vpelletier/pprofile | zpprofile.py | disassemble | python | def disassemble(co, lasti=-1):
# Taken from dis.disassemble, returns disassembled code instead of printing
# it (the fuck python ?).
# Also, unicodified.
# Also, use % operator instead of string operations.
# Also, one statement per line.
out = StringIO()
code = co.co_code
labels = dis.findlabels(code)
linestarts = dict(dis.findlinestarts(co))
n = len(code)
i = 0
extended_arg = 0
free = None
while i < n:
c = code[i]
op = ord(c)
if i in linestarts:
if i > 0:
print(end=u'\n', file=out)
print(u'%3d' % linestarts[i], end=u' ', file=out)
else:
print(u' ', end=u' ', file=out)
if i == lasti:
print(u'-->', end=u' ', file=out)
else:
print(u' ', end=u' ', file=out)
if i in labels:
print(u'>>', end=u' ', file=out)
else:
print(u' ', end=u' ', file=out)
print(u'%4i' % i, end=u' ', file=out)
print(u'%-20s' % dis.opname[op], end=u' ', file=out)
i = i + 1
if op >= dis.HAVE_ARGUMENT:
oparg = ord(code[i]) + ord(code[i + 1]) * 256 + extended_arg
extended_arg = 0
i = i + 2
if op == dis.EXTENDED_ARG:
extended_arg = oparg * 65536
print(u'%5i' % oparg, end=u' ', file=out)
if op in dis.hasconst:
print(u'(%r)' % co.co_consts[oparg], end=u' ', file=out)
elif op in dis.hasname:
print(u'(%s)' % co.co_names[oparg], end=u' ', file=out)
elif op in dis.hasjrel:
print(u'(to %r)' % (i + oparg), end=u' ', file=out)
elif op in dis.haslocal:
print(u'(%s)' % co.co_varnames[oparg], end=u' ', file=out)
elif op in dis.hascompare:
print(u'(%s)' % dis.cmp_op[oparg], end=u' ', file=out)
elif op in dis.hasfree:
if free is None:
free = co.co_cellvars + co.co_freevars
print(u'(%s)' % free[oparg], end=u' ', file=out)
print(end=u'\n', file=out)
return out.getvalue() | Disassemble a code object. | train | https://github.com/vpelletier/pprofile/blob/51a36896727565faf23e5abccc9204e5f935fe1e/zpprofile.py#L176-L234 | null | # Copyright (C) 2016-2018 Vincent Pelletier <plr.vincent@gmail.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
Zope-friendly layer for pprofile.
In Zope:
- Executed code is not necessarily a valid FS path (ex: Python Scripts)
- Executed code is not available to the machine where profiling results are
analysed.
- Restricted Python cannot manipulate all desired types, and one may want to
trigger profiling from its level.
This layer addresses all these issues, by making interesting pprofile classes
accessible to restricted python and bundling source code wxith profiling
results.
NOTE: This does allow anyone able to get profiler output to get whole source
files from your server. So better keep good track of who can profile and/or
where profiling results end. Alone, this module won't be accessible from
Restricted Python.
Example deterministic usage:
# Get profiler (how you get to zpprofile module depends on your
# application).
profiler = zpprofile.getProfiler()
# Get callable (to not profile how it is retrieved).
func = context.somethingOrOther
# Actually profile stuff
with profiler:
func()
# Build response
response = context.REQUEST.RESPONSE
data, content_type = profiler.asZip()
response.setHeader('content-type', content_type)
response.setHeader(
'content-disposition',
'attachment; filename="' + func.id + '.zip"',
)
# Push response immediately (hopefully, profiled function did not write
# anything on its own).
response.write(data)
# Make transaction fail, so any otherwise persistent change made by
# profiled function is undone - note that many caches will still have
# been warmed up, just as with any other code.
raise Exception('profiling')
Example statistic usage (to profile other running threads):
from time import sleep
# Get profiler (how you get to zpprofile module depends on your
# application).
profiler, thread = zpprofile.getStatisticalProfilerAndThread(single=False)
# Actually profile whatever is going on in the same process, just waiting.
with thread:
sleep(60)
# Build response
response = context.REQUEST.RESPONSE
data, content_type = profiler.asZip()
response.setHeader('content-type', content_type)
response.setHeader(
'content-disposition',
'attachment; filename="statistical_' +
DateTime().strftime('%Y%m%d%H%M%S') +
'.zip"',
)
return data
"""
from __future__ import print_function
import dis
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.application import MIMEApplication
from email.encoders import encode_quopri
import functools
import gc
from io import StringIO, BytesIO
from importlib import import_module
import itertools
import os
from collections import defaultdict
import zipfile
import pprofile
def getFuncCodeOrNone(module, attribute_path):
    """
    Import *module*, walk *attribute_path* on it, and return the reached
    function's code object.  Returns None (printing a diagnostic) when
    the module or any attribute is missing.
    """
    try:
        target = import_module(module)
        for attribute_name in attribute_path:
            target = getattr(target, attribute_name)
        code_object = target.func_code
    except (ImportError, AttributeError):
        print('Could not reach func_code of module %r, attribute path %r' % (module, attribute_path))
        return None
    return code_object
DB_query_func_code = getFuncCodeOrNone('Products.ZMySQLDA.db', ('DB', '_query'))
ZODB_setstate_func_code = getFuncCodeOrNone('ZODB.Connection', ('Connection', '_setstate'))
PythonExpr__call__func_code = getFuncCodeOrNone('zope.tales.pythonexpr', ('PythonExpr', '__call__'))
ZRPythonExpr__call__func_code = getFuncCodeOrNone('Products.PageTemplates.ZRPythonExpr', ('PythonExpr', '__call__'))
DT_UtilEvaleval_func_code = getFuncCodeOrNone('DocumentTemplate.DT_Util', ('Eval', 'eval'))
SharedDCScriptsBindings_bindAndExec_func_code = getFuncCodeOrNone('Shared.DC.Scripts.Bindings', ('Bindings', '_bindAndExec'))
PythonScript_exec_func_code = getFuncCodeOrNone('Products.PythonScripts.PythonScript', ('PythonScript', '_exec'))
# OFS.Traversable.Traversable.unrestrictedTraverse overwites its path argument,
# preventing post-invocation introspection. As it does not mutate the argument,
# it is still possible to inspect using such controlled intermediate function.
def unrestrictedTraverse_spy(self, path, *args, **kw):
return orig_unrestrictedTraverse(self, path, *args, **kw)
unrestrictedTraverse_spy_func_code = unrestrictedTraverse_spy.func_code
try:
import OFS.Traversable
orig_unrestrictedTraverse = OFS.Traversable.Traversable.unrestrictedTraverse
except (ImportError, AttributeError):
pass
else:
functools.update_wrapper(unrestrictedTraverse_spy, orig_unrestrictedTraverse)
OFS.Traversable.Traversable.unrestrictedTraverse = unrestrictedTraverse_spy
_ALLSEP = os.sep + (os.altsep or '')
PYTHON_EXPR_FUNC_CODE_SET = (ZRPythonExpr__call__func_code, PythonExpr__call__func_code)
class ZopeFileTiming(pprofile.FileTiming):
def call(self, code, line, callee_file_timing, callee, duration, frame):
f_code = frame.f_code
if f_code is DB_query_func_code:
self.profiler.sql_dict[frame.f_locals['query']].append(duration)
elif f_code is ZODB_setstate_func_code:
f_locals = frame.f_locals
obj = f_locals['obj']
try:
oid = obj._p_oid
except AttributeError:
pass
else:
self.profiler.zodb_dict[
f_locals['self'].db().database_name
][oid].append(duration)
elif f_code is unrestrictedTraverse_spy_func_code:
f_locals = frame.f_locals
self.profiler.traverse_dict[
(repr(f_locals['self']), repr(f_locals['path']))
].append(duration)
super(ZopeFileTiming, self).call(
code, line, callee_file_timing, callee, duration, frame,
)
def tabulate(title_list, row_list):
# de-lazify
row_list = list(row_list)
column_count = len(title_list)
max_width_list = [len(x) for x in title_list]
for row in row_list:
assert len(row) == column_count, repr(row)
for index, value in enumerate(row):
max_width_list[index] = max(max_width_list[index], len(unicode(value)))
format_string = u''.join(u'| %%-%is ' % x for x in max_width_list) + u'|\n'
out = StringIO()
write = out.write
write(format_string % tuple(title_list))
write(u''.join(u'+' + (u'-' * (x + 2)) for x in max_width_list) + u'+\n')
for row in row_list:
write(format_string % tuple(row))
return out.getvalue()
class ZopeMixIn(object):
virtual__slots__ = (
'sql_dict',
'zodb_dict',
'fake_source_dict',
'traverse_dict',
'keep_alive', # until they see the cake
)
__allow_access_to_unprotected_subobjects__ = 1
FileTiming = ZopeFileTiming
def __init__(self):
super(ZopeMixIn, self).__init__()
self.sql_dict = defaultdict(list)
self.zodb_dict = defaultdict(lambda: defaultdict(list))
self.fake_source_dict = {}
self.traverse_dict = defaultdict(list)
self.keep_alive = []
def _enable(self):
gc.disable()
super(ZopeMixIn, self)._enable()
def _disable(self):
super(ZopeMixIn, self)._disable()
gc.enable()
def _getline(self, filename, lineno, global_dict):
line_list = self.fake_source_dict.get(filename)
if line_list is None:
return super(ZopeMixIn, self)._getline(
filename,
lineno,
global_dict,
)
assert lineno > 0
try:
return line_list[lineno - 1]
except IndexError:
return ''
def _rememberFile(self, source, suggested_name, extension):
filename = suggested_name
setdefault = self.fake_source_dict.setdefault
suffix = itertools.count()
source = source.splitlines(True)
while setdefault(filename + extension, source) != source:
filename = suggested_name + '_%i' % next(suffix)
return filename + extension
def _getFileTiming(self, frame):
try:
return self.global_dict[id(frame.f_globals)]
except KeyError:
frame_globals = frame.f_globals
evaluator_frame = frame.f_back
while evaluator_frame is not None:
evaluator_code = evaluator_frame.f_code
if (
evaluator_code is PythonScript_exec_func_code and
evaluator_frame.f_locals.get('g') is frame_globals
):
evaluated_module_unique = evaluator_frame.f_locals['fcode']
break
elif (
evaluator_code is DT_UtilEvaleval_func_code and
evaluator_frame.f_locals.get('d') is frame_globals
):
evaluated_module_unique = evaluator_frame.f_locals['code']
break
elif (
evaluator_code in PYTHON_EXPR_FUNC_CODE_SET and
evaluator_frame.f_locals.get('vars') is frame_globals
):
evaluated_module_unique = evaluator_frame.f_locals[
'self'
]._code
break
evaluator_frame = evaluator_frame.f_back
else:
# No evaluator found
evaluator_frame = frame
evaluated_module_unique = frame_globals
try:
file_timing = self.global_dict[id(evaluated_module_unique)]
except KeyError:
# Unknown module, guess its name.
if evaluator_frame is frame:
# No evaluator found.
# The answer was not in the stack.
# Maybe its name is actually fine ?
name = self._getFilename(frame)
if not super(ZopeMixIn, self)._getline(
name,
1,
frame.f_globals,
):
# Shared.DC.Scripts preamble is directly called by
# _bindAndExec.
if getattr(
frame.f_back,
'f_code',
None,
) is SharedDCScriptsBindings_bindAndExec_func_code:
name = self._rememberFile(
u'# This is an auto-generated preamble executed '
u'by Shared.DC.Scripts.Bindings before "actual" '
u'code.\n' +
disassemble(frame.f_code),
'preamble',
'.py.bytecode',
)
else:
# Could not find source, provide disassembled
# bytecode as last resort.
name = self._rememberFile(
u'# Unidentified source for ' +
name + '\n' +
disassemble(
frame.f_code,
),
'%s.%s' % (name, frame.f_code.co_name),
'.py.bytecode',
)
else:
# Evaluator found.
if evaluator_code is PythonScript_exec_func_code:
python_script = evaluator_frame.f_locals['self']
name = self._rememberFile(
python_script.body().decode('utf-8'),
python_script.id,
'.py',
)
elif evaluator_code is DT_UtilEvaleval_func_code:
name = self._rememberFile(
evaluator_frame.f_locals['self'].expr.decode(
'utf-8',
),
'DT_Util_Eval',
'.py',
)
elif evaluator_code in PYTHON_EXPR_FUNC_CODE_SET:
source = evaluator_frame.f_locals['self'].text
if not isinstance(source, unicode):
source = source.decode('utf-8')
name = self._rememberFile(
source,
'PythonExpr',
'.py',
)
else:
raise ValueError(evaluator_code)
self.keep_alive.append(evaluated_module_unique)
# Create FileTiming and store as module...
self.global_dict[
id(evaluated_module_unique)
] = file_timing = self.FileTiming(
name,
frame_globals,
self,
)
# ...and for later deduplication (in case of multithreading).
# file_dict modifications must be thread-safe to not lose
# measures. setdefault is atomic, append is atomic.
self.file_dict.setdefault(name, []).append(file_timing)
# Alias module FileTiming to current globals, for faster future
# lookup.
self.global_dict[id(frame_globals)] = file_timing
self.keep_alive.append(frame_globals)
return file_timing
def _iterOutFiles(self):
"""
Yields path, data, mimetype for each file involved on or produced by
profiling.
"""
out = StringIO()
self.callgrind(out, relative_path=True)
yield (
'cachegrind.out.pprofile',
out.getvalue(),
'application/x-kcachegrind',
)
for name, lines in self.iterSource():
lines = ''.join(lines)
if lines:
if isinstance(lines, unicode):
lines = lines.encode('utf-8')
yield (
os.path.normpath(
os.path.splitdrive(name)[1]
).lstrip(_ALLSEP),
lines,
'text/x-python',
)
sql_name_template = 'query_%%0%ii-%%i_hits_%%6fs.sql' % len(
str(len(self.sql_dict)),
)
for index, (query, time_list) in enumerate(
sorted(
self.sql_dict.iteritems(),
key=lambda x: (sum(x[1]), len(x[1])),
reverse=True,
),
):
yield (
sql_name_template % (
index,
len(time_list),
sum(time_list),
),
b'\n'.join(b'-- %10.6fs' % x for x in time_list) + b'\n' + query,
'application/sql',
)
if self.zodb_dict:
yield (
'ZODB_setstate.txt',
'\n\n'.join(
(
'%s (%fs)\n' % (
db_name,
sum(sum(x) for x in oid_dict.itervalues()),
)
) + '\n'.join(
'%s (%i): %s' % (
oid.encode('hex'),
len(time_list),
', '.join('%fs' % x for x in time_list),
)
for oid, time_list in oid_dict.iteritems()
)
for db_name, oid_dict in self.zodb_dict.iteritems()
),
'text/plain',
)
if self.traverse_dict:
yield (
'unrestrictedTraverse_pathlist.txt',
tabulate(
('self', 'path', 'hit', 'total duration'),
sorted(
(
(context, path, len(duration_list), sum(duration_list))
for (context, path), duration_list in self.traverse_dict.iteritems()
),
key=lambda x: x[3],
reverse=True,
),
),
'text/plain',
)
def asMIMEString(self):
"""
Return a mime-multipart representation of:
- callgrind profiling statistics (cachegrind.out.pprofile)
- any SQL query issued via ZMySQLDA (query_*.sql)
- any persistent object load via ZODB.Connection (ZODB_setstate.txt)
- any path argument given to unrestrictedTraverse
(unrestrictedTraverse_pathlist.txt)
- all involved python code, including Python Scripts without hierarchy
(the rest)
To unpack resulting file, see "unpack a MIME message" in
http://docs.python.org/2/library/email-examples.html
Or get demultipart from
https://pypi.python.org/pypi/demultipart
"""
result = MIMEMultipart()
base_type_dict = {
'application': MIMEApplication,
'text': MIMEText,
}
encoder_dict = {
'application/x-kcachegrind': encode_quopri,
'text/x-python': 'utf-8',
'text/plain': 'utf-8',
}
for path, data, mimetype in self._iterOutFiles():
base_type, sub_type = mimetype.split('/')
chunk = base_type_dict[base_type](
data,
sub_type,
encoder_dict.get(mimetype),
)
chunk.add_header(
'Content-Disposition',
'attachment',
filename=path,
)
result.attach(chunk)
return result.as_string(), result['content-type']
def asZip(self):
"""
Return a serialised zip archive containing:
- callgrind profiling statistics (cachegrind.out.pprofile)
- any SQL query issued via ZMySQLDA (query_*.sql)
- any persistent object load via ZODB.Connection (ZODB_setstate.txt)
- any path argument given to unrestrictedTraverse
(unrestrictedTraverse_pathlist.txt)
- all involved python code, including Python Scripts without hierarchy
(the rest)
"""
out = BytesIO()
with zipfile.ZipFile(
out,
mode='w',
compression=zipfile.ZIP_DEFLATED,
) as outfile:
for path, data, _ in self._iterOutFiles():
outfile.writestr(path, data)
return out.getvalue(), 'application/zip'
class ZopeProfiler(ZopeMixIn, pprofile.Profile):
__slots__ = ZopeMixIn.virtual__slots__
class ZopeStatisticalProfile(ZopeMixIn, pprofile.StatisticalProfile):
__slots__ = ZopeMixIn.virtual__slots__
class ZopeStatisticalThread(pprofile.StatisticalThread):
__allow_access_to_unprotected_subobjects__ = 1
# Intercept "verbose" parameter to prevent writing to stdout.
def getProfiler(verbose=False, **kw):
"""
Get a Zope-friendly pprofile.Profile instance.
"""
return ZopeProfiler(**kw)
def getStatisticalProfilerAndThread(**kw):
"""
Get Zope-friendly pprofile.StatisticalProfile and
pprofile.StatisticalThread instances.
Arguments are forwarded to StatisticalThread.__init__ .
"""
profiler = ZopeStatisticalProfile()
return profiler, ZopeStatisticalThread(
profiler=profiler,
**kw
)
|
vpelletier/pprofile | zpprofile.py | ZopeMixIn._iterOutFiles | python | def _iterOutFiles(self):
out = StringIO()
self.callgrind(out, relative_path=True)
yield (
'cachegrind.out.pprofile',
out.getvalue(),
'application/x-kcachegrind',
)
for name, lines in self.iterSource():
lines = ''.join(lines)
if lines:
if isinstance(lines, unicode):
lines = lines.encode('utf-8')
yield (
os.path.normpath(
os.path.splitdrive(name)[1]
).lstrip(_ALLSEP),
lines,
'text/x-python',
)
sql_name_template = 'query_%%0%ii-%%i_hits_%%6fs.sql' % len(
str(len(self.sql_dict)),
)
for index, (query, time_list) in enumerate(
sorted(
self.sql_dict.iteritems(),
key=lambda x: (sum(x[1]), len(x[1])),
reverse=True,
),
):
yield (
sql_name_template % (
index,
len(time_list),
sum(time_list),
),
b'\n'.join(b'-- %10.6fs' % x for x in time_list) + b'\n' + query,
'application/sql',
)
if self.zodb_dict:
yield (
'ZODB_setstate.txt',
'\n\n'.join(
(
'%s (%fs)\n' % (
db_name,
sum(sum(x) for x in oid_dict.itervalues()),
)
) + '\n'.join(
'%s (%i): %s' % (
oid.encode('hex'),
len(time_list),
', '.join('%fs' % x for x in time_list),
)
for oid, time_list in oid_dict.iteritems()
)
for db_name, oid_dict in self.zodb_dict.iteritems()
),
'text/plain',
)
if self.traverse_dict:
yield (
'unrestrictedTraverse_pathlist.txt',
tabulate(
('self', 'path', 'hit', 'total duration'),
sorted(
(
(context, path, len(duration_list), sum(duration_list))
for (context, path), duration_list in self.traverse_dict.iteritems()
),
key=lambda x: x[3],
reverse=True,
),
),
'text/plain',
) | Yields path, data, mimetype for each file involved on or produced by
profiling. | train | https://github.com/vpelletier/pprofile/blob/51a36896727565faf23e5abccc9204e5f935fe1e/zpprofile.py#L407-L486 | [
"def tabulate(title_list, row_list):\n # de-lazify\n row_list = list(row_list)\n column_count = len(title_list)\n max_width_list = [len(x) for x in title_list]\n for row in row_list:\n assert len(row) == column_count, repr(row)\n for index, value in enumerate(row):\n max_width_list[index] = max(max_width_list[index], len(unicode(value)))\n format_string = u''.join(u'| %%-%is ' % x for x in max_width_list) + u'|\\n'\n out = StringIO()\n write = out.write\n write(format_string % tuple(title_list))\n write(u''.join(u'+' + (u'-' * (x + 2)) for x in max_width_list) + u'+\\n')\n for row in row_list:\n write(format_string % tuple(row))\n return out.getvalue()\n"
] | class ZopeMixIn(object):
virtual__slots__ = (
'sql_dict',
'zodb_dict',
'fake_source_dict',
'traverse_dict',
'keep_alive', # until they see the cake
)
__allow_access_to_unprotected_subobjects__ = 1
FileTiming = ZopeFileTiming
def __init__(self):
super(ZopeMixIn, self).__init__()
self.sql_dict = defaultdict(list)
self.zodb_dict = defaultdict(lambda: defaultdict(list))
self.fake_source_dict = {}
self.traverse_dict = defaultdict(list)
self.keep_alive = []
def _enable(self):
gc.disable()
super(ZopeMixIn, self)._enable()
def _disable(self):
super(ZopeMixIn, self)._disable()
gc.enable()
def _getline(self, filename, lineno, global_dict):
line_list = self.fake_source_dict.get(filename)
if line_list is None:
return super(ZopeMixIn, self)._getline(
filename,
lineno,
global_dict,
)
assert lineno > 0
try:
return line_list[lineno - 1]
except IndexError:
return ''
def _rememberFile(self, source, suggested_name, extension):
filename = suggested_name
setdefault = self.fake_source_dict.setdefault
suffix = itertools.count()
source = source.splitlines(True)
while setdefault(filename + extension, source) != source:
filename = suggested_name + '_%i' % next(suffix)
return filename + extension
def _getFileTiming(self, frame):
try:
return self.global_dict[id(frame.f_globals)]
except KeyError:
frame_globals = frame.f_globals
evaluator_frame = frame.f_back
while evaluator_frame is not None:
evaluator_code = evaluator_frame.f_code
if (
evaluator_code is PythonScript_exec_func_code and
evaluator_frame.f_locals.get('g') is frame_globals
):
evaluated_module_unique = evaluator_frame.f_locals['fcode']
break
elif (
evaluator_code is DT_UtilEvaleval_func_code and
evaluator_frame.f_locals.get('d') is frame_globals
):
evaluated_module_unique = evaluator_frame.f_locals['code']
break
elif (
evaluator_code in PYTHON_EXPR_FUNC_CODE_SET and
evaluator_frame.f_locals.get('vars') is frame_globals
):
evaluated_module_unique = evaluator_frame.f_locals[
'self'
]._code
break
evaluator_frame = evaluator_frame.f_back
else:
# No evaluator found
evaluator_frame = frame
evaluated_module_unique = frame_globals
try:
file_timing = self.global_dict[id(evaluated_module_unique)]
except KeyError:
# Unknown module, guess its name.
if evaluator_frame is frame:
# No evaluator found.
# The answer was not in the stack.
# Maybe its name is actually fine ?
name = self._getFilename(frame)
if not super(ZopeMixIn, self)._getline(
name,
1,
frame.f_globals,
):
# Shared.DC.Scripts preamble is directly called by
# _bindAndExec.
if getattr(
frame.f_back,
'f_code',
None,
) is SharedDCScriptsBindings_bindAndExec_func_code:
name = self._rememberFile(
u'# This is an auto-generated preamble executed '
u'by Shared.DC.Scripts.Bindings before "actual" '
u'code.\n' +
disassemble(frame.f_code),
'preamble',
'.py.bytecode',
)
else:
# Could not find source, provide disassembled
# bytecode as last resort.
name = self._rememberFile(
u'# Unidentified source for ' +
name + '\n' +
disassemble(
frame.f_code,
),
'%s.%s' % (name, frame.f_code.co_name),
'.py.bytecode',
)
else:
# Evaluator found.
if evaluator_code is PythonScript_exec_func_code:
python_script = evaluator_frame.f_locals['self']
name = self._rememberFile(
python_script.body().decode('utf-8'),
python_script.id,
'.py',
)
elif evaluator_code is DT_UtilEvaleval_func_code:
name = self._rememberFile(
evaluator_frame.f_locals['self'].expr.decode(
'utf-8',
),
'DT_Util_Eval',
'.py',
)
elif evaluator_code in PYTHON_EXPR_FUNC_CODE_SET:
source = evaluator_frame.f_locals['self'].text
if not isinstance(source, unicode):
source = source.decode('utf-8')
name = self._rememberFile(
source,
'PythonExpr',
'.py',
)
else:
raise ValueError(evaluator_code)
self.keep_alive.append(evaluated_module_unique)
# Create FileTiming and store as module...
self.global_dict[
id(evaluated_module_unique)
] = file_timing = self.FileTiming(
name,
frame_globals,
self,
)
# ...and for later deduplication (in case of multithreading).
# file_dict modifications must be thread-safe to not lose
# measures. setdefault is atomic, append is atomic.
self.file_dict.setdefault(name, []).append(file_timing)
# Alias module FileTiming to current globals, for faster future
# lookup.
self.global_dict[id(frame_globals)] = file_timing
self.keep_alive.append(frame_globals)
return file_timing
def asMIMEString(self):
"""
Return a mime-multipart representation of:
- callgrind profiling statistics (cachegrind.out.pprofile)
- any SQL query issued via ZMySQLDA (query_*.sql)
- any persistent object load via ZODB.Connection (ZODB_setstate.txt)
- any path argument given to unrestrictedTraverse
(unrestrictedTraverse_pathlist.txt)
- all involved python code, including Python Scripts without hierarchy
(the rest)
To unpack resulting file, see "unpack a MIME message" in
http://docs.python.org/2/library/email-examples.html
Or get demultipart from
https://pypi.python.org/pypi/demultipart
"""
result = MIMEMultipart()
base_type_dict = {
'application': MIMEApplication,
'text': MIMEText,
}
encoder_dict = {
'application/x-kcachegrind': encode_quopri,
'text/x-python': 'utf-8',
'text/plain': 'utf-8',
}
for path, data, mimetype in self._iterOutFiles():
base_type, sub_type = mimetype.split('/')
chunk = base_type_dict[base_type](
data,
sub_type,
encoder_dict.get(mimetype),
)
chunk.add_header(
'Content-Disposition',
'attachment',
filename=path,
)
result.attach(chunk)
return result.as_string(), result['content-type']
def asZip(self):
"""
Return a serialised zip archive containing:
- callgrind profiling statistics (cachegrind.out.pprofile)
- any SQL query issued via ZMySQLDA (query_*.sql)
- any persistent object load via ZODB.Connection (ZODB_setstate.txt)
- any path argument given to unrestrictedTraverse
(unrestrictedTraverse_pathlist.txt)
- all involved python code, including Python Scripts without hierarchy
(the rest)
"""
out = BytesIO()
with zipfile.ZipFile(
out,
mode='w',
compression=zipfile.ZIP_DEFLATED,
) as outfile:
for path, data, _ in self._iterOutFiles():
outfile.writestr(path, data)
return out.getvalue(), 'application/zip'
|
vpelletier/pprofile | zpprofile.py | ZopeMixIn.asMIMEString | python | def asMIMEString(self):
result = MIMEMultipart()
base_type_dict = {
'application': MIMEApplication,
'text': MIMEText,
}
encoder_dict = {
'application/x-kcachegrind': encode_quopri,
'text/x-python': 'utf-8',
'text/plain': 'utf-8',
}
for path, data, mimetype in self._iterOutFiles():
base_type, sub_type = mimetype.split('/')
chunk = base_type_dict[base_type](
data,
sub_type,
encoder_dict.get(mimetype),
)
chunk.add_header(
'Content-Disposition',
'attachment',
filename=path,
)
result.attach(chunk)
return result.as_string(), result['content-type'] | Return a mime-multipart representation of:
- callgrind profiling statistics (cachegrind.out.pprofile)
- any SQL query issued via ZMySQLDA (query_*.sql)
- any persistent object load via ZODB.Connection (ZODB_setstate.txt)
- any path argument given to unrestrictedTraverse
(unrestrictedTraverse_pathlist.txt)
- all involved python code, including Python Scripts without hierarchy
(the rest)
To unpack resulting file, see "unpack a MIME message" in
http://docs.python.org/2/library/email-examples.html
Or get demultipart from
https://pypi.python.org/pypi/demultipart | train | https://github.com/vpelletier/pprofile/blob/51a36896727565faf23e5abccc9204e5f935fe1e/zpprofile.py#L488-L526 | [
"def _iterOutFiles(self):\n \"\"\"\n Yields path, data, mimetype for each file involved on or produced by\n profiling.\n \"\"\"\n out = StringIO()\n self.callgrind(out, relative_path=True)\n yield (\n 'cachegrind.out.pprofile',\n out.getvalue(),\n 'application/x-kcachegrind',\n )\n for name, lines in self.iterSource():\n lines = ''.join(lines)\n if lines:\n if isinstance(lines, unicode):\n lines = lines.encode('utf-8')\n yield (\n os.path.normpath(\n os.path.splitdrive(name)[1]\n ).lstrip(_ALLSEP),\n lines,\n 'text/x-python',\n )\n sql_name_template = 'query_%%0%ii-%%i_hits_%%6fs.sql' % len(\n str(len(self.sql_dict)),\n )\n for index, (query, time_list) in enumerate(\n sorted(\n self.sql_dict.iteritems(),\n key=lambda x: (sum(x[1]), len(x[1])),\n reverse=True,\n ),\n ):\n yield (\n sql_name_template % (\n index,\n len(time_list),\n sum(time_list),\n ),\n b'\\n'.join(b'-- %10.6fs' % x for x in time_list) + b'\\n' + query,\n 'application/sql',\n )\n if self.zodb_dict:\n yield (\n 'ZODB_setstate.txt',\n '\\n\\n'.join(\n (\n '%s (%fs)\\n' % (\n db_name,\n sum(sum(x) for x in oid_dict.itervalues()),\n )\n ) + '\\n'.join(\n '%s (%i): %s' % (\n oid.encode('hex'),\n len(time_list),\n ', '.join('%fs' % x for x in time_list),\n )\n for oid, time_list in oid_dict.iteritems()\n )\n for db_name, oid_dict in self.zodb_dict.iteritems()\n ),\n 'text/plain',\n )\n if self.traverse_dict:\n yield (\n 'unrestrictedTraverse_pathlist.txt',\n tabulate(\n ('self', 'path', 'hit', 'total duration'),\n sorted(\n (\n (context, path, len(duration_list), sum(duration_list))\n for (context, path), duration_list in self.traverse_dict.iteritems()\n ),\n key=lambda x: x[3],\n reverse=True,\n ),\n ),\n 'text/plain',\n )\n"
] | class ZopeMixIn(object):
virtual__slots__ = (
'sql_dict',
'zodb_dict',
'fake_source_dict',
'traverse_dict',
'keep_alive', # until they see the cake
)
__allow_access_to_unprotected_subobjects__ = 1
FileTiming = ZopeFileTiming
def __init__(self):
super(ZopeMixIn, self).__init__()
self.sql_dict = defaultdict(list)
self.zodb_dict = defaultdict(lambda: defaultdict(list))
self.fake_source_dict = {}
self.traverse_dict = defaultdict(list)
self.keep_alive = []
def _enable(self):
gc.disable()
super(ZopeMixIn, self)._enable()
def _disable(self):
super(ZopeMixIn, self)._disable()
gc.enable()
def _getline(self, filename, lineno, global_dict):
line_list = self.fake_source_dict.get(filename)
if line_list is None:
return super(ZopeMixIn, self)._getline(
filename,
lineno,
global_dict,
)
assert lineno > 0
try:
return line_list[lineno - 1]
except IndexError:
return ''
def _rememberFile(self, source, suggested_name, extension):
filename = suggested_name
setdefault = self.fake_source_dict.setdefault
suffix = itertools.count()
source = source.splitlines(True)
while setdefault(filename + extension, source) != source:
filename = suggested_name + '_%i' % next(suffix)
return filename + extension
def _getFileTiming(self, frame):
try:
return self.global_dict[id(frame.f_globals)]
except KeyError:
frame_globals = frame.f_globals
evaluator_frame = frame.f_back
while evaluator_frame is not None:
evaluator_code = evaluator_frame.f_code
if (
evaluator_code is PythonScript_exec_func_code and
evaluator_frame.f_locals.get('g') is frame_globals
):
evaluated_module_unique = evaluator_frame.f_locals['fcode']
break
elif (
evaluator_code is DT_UtilEvaleval_func_code and
evaluator_frame.f_locals.get('d') is frame_globals
):
evaluated_module_unique = evaluator_frame.f_locals['code']
break
elif (
evaluator_code in PYTHON_EXPR_FUNC_CODE_SET and
evaluator_frame.f_locals.get('vars') is frame_globals
):
evaluated_module_unique = evaluator_frame.f_locals[
'self'
]._code
break
evaluator_frame = evaluator_frame.f_back
else:
# No evaluator found
evaluator_frame = frame
evaluated_module_unique = frame_globals
try:
file_timing = self.global_dict[id(evaluated_module_unique)]
except KeyError:
# Unknown module, guess its name.
if evaluator_frame is frame:
# No evaluator found.
# The answer was not in the stack.
# Maybe its name is actually fine ?
name = self._getFilename(frame)
if not super(ZopeMixIn, self)._getline(
name,
1,
frame.f_globals,
):
# Shared.DC.Scripts preamble is directly called by
# _bindAndExec.
if getattr(
frame.f_back,
'f_code',
None,
) is SharedDCScriptsBindings_bindAndExec_func_code:
name = self._rememberFile(
u'# This is an auto-generated preamble executed '
u'by Shared.DC.Scripts.Bindings before "actual" '
u'code.\n' +
disassemble(frame.f_code),
'preamble',
'.py.bytecode',
)
else:
# Could not find source, provide disassembled
# bytecode as last resort.
name = self._rememberFile(
u'# Unidentified source for ' +
name + '\n' +
disassemble(
frame.f_code,
),
'%s.%s' % (name, frame.f_code.co_name),
'.py.bytecode',
)
else:
# Evaluator found.
if evaluator_code is PythonScript_exec_func_code:
python_script = evaluator_frame.f_locals['self']
name = self._rememberFile(
python_script.body().decode('utf-8'),
python_script.id,
'.py',
)
elif evaluator_code is DT_UtilEvaleval_func_code:
name = self._rememberFile(
evaluator_frame.f_locals['self'].expr.decode(
'utf-8',
),
'DT_Util_Eval',
'.py',
)
elif evaluator_code in PYTHON_EXPR_FUNC_CODE_SET:
source = evaluator_frame.f_locals['self'].text
if not isinstance(source, unicode):
source = source.decode('utf-8')
name = self._rememberFile(
source,
'PythonExpr',
'.py',
)
else:
raise ValueError(evaluator_code)
self.keep_alive.append(evaluated_module_unique)
# Create FileTiming and store as module...
self.global_dict[
id(evaluated_module_unique)
] = file_timing = self.FileTiming(
name,
frame_globals,
self,
)
# ...and for later deduplication (in case of multithreading).
# file_dict modifications must be thread-safe to not lose
# measures. setdefault is atomic, append is atomic.
self.file_dict.setdefault(name, []).append(file_timing)
# Alias module FileTiming to current globals, for faster future
# lookup.
self.global_dict[id(frame_globals)] = file_timing
self.keep_alive.append(frame_globals)
return file_timing
def _iterOutFiles(self):
"""
Yields path, data, mimetype for each file involved on or produced by
profiling.
"""
out = StringIO()
self.callgrind(out, relative_path=True)
yield (
'cachegrind.out.pprofile',
out.getvalue(),
'application/x-kcachegrind',
)
for name, lines in self.iterSource():
lines = ''.join(lines)
if lines:
if isinstance(lines, unicode):
lines = lines.encode('utf-8')
yield (
os.path.normpath(
os.path.splitdrive(name)[1]
).lstrip(_ALLSEP),
lines,
'text/x-python',
)
sql_name_template = 'query_%%0%ii-%%i_hits_%%6fs.sql' % len(
str(len(self.sql_dict)),
)
for index, (query, time_list) in enumerate(
sorted(
self.sql_dict.iteritems(),
key=lambda x: (sum(x[1]), len(x[1])),
reverse=True,
),
):
yield (
sql_name_template % (
index,
len(time_list),
sum(time_list),
),
b'\n'.join(b'-- %10.6fs' % x for x in time_list) + b'\n' + query,
'application/sql',
)
if self.zodb_dict:
yield (
'ZODB_setstate.txt',
'\n\n'.join(
(
'%s (%fs)\n' % (
db_name,
sum(sum(x) for x in oid_dict.itervalues()),
)
) + '\n'.join(
'%s (%i): %s' % (
oid.encode('hex'),
len(time_list),
', '.join('%fs' % x for x in time_list),
)
for oid, time_list in oid_dict.iteritems()
)
for db_name, oid_dict in self.zodb_dict.iteritems()
),
'text/plain',
)
if self.traverse_dict:
yield (
'unrestrictedTraverse_pathlist.txt',
tabulate(
('self', 'path', 'hit', 'total duration'),
sorted(
(
(context, path, len(duration_list), sum(duration_list))
for (context, path), duration_list in self.traverse_dict.iteritems()
),
key=lambda x: x[3],
reverse=True,
),
),
'text/plain',
)
def asZip(self):
"""
Return a serialised zip archive containing:
- callgrind profiling statistics (cachegrind.out.pprofile)
- any SQL query issued via ZMySQLDA (query_*.sql)
- any persistent object load via ZODB.Connection (ZODB_setstate.txt)
- any path argument given to unrestrictedTraverse
(unrestrictedTraverse_pathlist.txt)
- all involved python code, including Python Scripts without hierarchy
(the rest)
"""
out = BytesIO()
with zipfile.ZipFile(
out,
mode='w',
compression=zipfile.ZIP_DEFLATED,
) as outfile:
for path, data, _ in self._iterOutFiles():
outfile.writestr(path, data)
return out.getvalue(), 'application/zip'
|
vpelletier/pprofile | zpprofile.py | ZopeMixIn.asZip | python | def asZip(self):
out = BytesIO()
with zipfile.ZipFile(
out,
mode='w',
compression=zipfile.ZIP_DEFLATED,
) as outfile:
for path, data, _ in self._iterOutFiles():
outfile.writestr(path, data)
return out.getvalue(), 'application/zip' | Return a serialised zip archive containing:
- callgrind profiling statistics (cachegrind.out.pprofile)
- any SQL query issued via ZMySQLDA (query_*.sql)
- any persistent object load via ZODB.Connection (ZODB_setstate.txt)
- any path argument given to unrestrictedTraverse
(unrestrictedTraverse_pathlist.txt)
- all involved python code, including Python Scripts without hierarchy
(the rest) | train | https://github.com/vpelletier/pprofile/blob/51a36896727565faf23e5abccc9204e5f935fe1e/zpprofile.py#L528-L547 | [
"def _iterOutFiles(self):\n \"\"\"\n Yields path, data, mimetype for each file involved on or produced by\n profiling.\n \"\"\"\n out = StringIO()\n self.callgrind(out, relative_path=True)\n yield (\n 'cachegrind.out.pprofile',\n out.getvalue(),\n 'application/x-kcachegrind',\n )\n for name, lines in self.iterSource():\n lines = ''.join(lines)\n if lines:\n if isinstance(lines, unicode):\n lines = lines.encode('utf-8')\n yield (\n os.path.normpath(\n os.path.splitdrive(name)[1]\n ).lstrip(_ALLSEP),\n lines,\n 'text/x-python',\n )\n sql_name_template = 'query_%%0%ii-%%i_hits_%%6fs.sql' % len(\n str(len(self.sql_dict)),\n )\n for index, (query, time_list) in enumerate(\n sorted(\n self.sql_dict.iteritems(),\n key=lambda x: (sum(x[1]), len(x[1])),\n reverse=True,\n ),\n ):\n yield (\n sql_name_template % (\n index,\n len(time_list),\n sum(time_list),\n ),\n b'\\n'.join(b'-- %10.6fs' % x for x in time_list) + b'\\n' + query,\n 'application/sql',\n )\n if self.zodb_dict:\n yield (\n 'ZODB_setstate.txt',\n '\\n\\n'.join(\n (\n '%s (%fs)\\n' % (\n db_name,\n sum(sum(x) for x in oid_dict.itervalues()),\n )\n ) + '\\n'.join(\n '%s (%i): %s' % (\n oid.encode('hex'),\n len(time_list),\n ', '.join('%fs' % x for x in time_list),\n )\n for oid, time_list in oid_dict.iteritems()\n )\n for db_name, oid_dict in self.zodb_dict.iteritems()\n ),\n 'text/plain',\n )\n if self.traverse_dict:\n yield (\n 'unrestrictedTraverse_pathlist.txt',\n tabulate(\n ('self', 'path', 'hit', 'total duration'),\n sorted(\n (\n (context, path, len(duration_list), sum(duration_list))\n for (context, path), duration_list in self.traverse_dict.iteritems()\n ),\n key=lambda x: x[3],\n reverse=True,\n ),\n ),\n 'text/plain',\n )\n"
] | class ZopeMixIn(object):
virtual__slots__ = (
'sql_dict',
'zodb_dict',
'fake_source_dict',
'traverse_dict',
'keep_alive', # until they see the cake
)
__allow_access_to_unprotected_subobjects__ = 1
FileTiming = ZopeFileTiming
def __init__(self):
super(ZopeMixIn, self).__init__()
self.sql_dict = defaultdict(list)
self.zodb_dict = defaultdict(lambda: defaultdict(list))
self.fake_source_dict = {}
self.traverse_dict = defaultdict(list)
self.keep_alive = []
def _enable(self):
gc.disable()
super(ZopeMixIn, self)._enable()
def _disable(self):
super(ZopeMixIn, self)._disable()
gc.enable()
def _getline(self, filename, lineno, global_dict):
line_list = self.fake_source_dict.get(filename)
if line_list is None:
return super(ZopeMixIn, self)._getline(
filename,
lineno,
global_dict,
)
assert lineno > 0
try:
return line_list[lineno - 1]
except IndexError:
return ''
def _rememberFile(self, source, suggested_name, extension):
filename = suggested_name
setdefault = self.fake_source_dict.setdefault
suffix = itertools.count()
source = source.splitlines(True)
while setdefault(filename + extension, source) != source:
filename = suggested_name + '_%i' % next(suffix)
return filename + extension
    def _getFileTiming(self, frame):
        """
        Return (creating if needed) the FileTiming for given frame.

        Zope executes Python Scripts, DT_Util evaluations and Python
        expressions with synthetic global dicts, so the base lookup by
        frame filename is not enough: walk the caller stack to identify the
        evaluator which produced this frame, and key/name the FileTiming
        after the evaluated code instead.
        """
        try:
            # Fast path: globals already mapped to a FileTiming.
            return self.global_dict[id(frame.f_globals)]
        except KeyError:
            frame_globals = frame.f_globals
            evaluator_frame = frame.f_back
            while evaluator_frame is not None:
                evaluator_code = evaluator_frame.f_code
                # Each branch matches one known evaluator by its code object
                # identity AND checks that it is the evaluator of *this*
                # frame (its local namespace is our globals).
                if (
                    evaluator_code is PythonScript_exec_func_code and
                    evaluator_frame.f_locals.get('g') is frame_globals
                ):
                    evaluated_module_unique = evaluator_frame.f_locals['fcode']
                    break
                elif (
                    evaluator_code is DT_UtilEvaleval_func_code and
                    evaluator_frame.f_locals.get('d') is frame_globals
                ):
                    evaluated_module_unique = evaluator_frame.f_locals['code']
                    break
                elif (
                    evaluator_code in PYTHON_EXPR_FUNC_CODE_SET and
                    evaluator_frame.f_locals.get('vars') is frame_globals
                ):
                    evaluated_module_unique = evaluator_frame.f_locals[
                        'self'
                    ]._code
                    break
                evaluator_frame = evaluator_frame.f_back
            else:
                # No evaluator found
                evaluator_frame = frame
                evaluated_module_unique = frame_globals
            try:
                # The evaluated code may already have a FileTiming (same
                # script executed with a fresh globals dict).
                file_timing = self.global_dict[id(evaluated_module_unique)]
            except KeyError:
                # Unknown module, guess its name.
                if evaluator_frame is frame:
                    # No evaluator found.
                    # The answer was not in the stack.
                    # Maybe its name is actually fine ?
                    name = self._getFilename(frame)
                    if not super(ZopeMixIn, self)._getline(
                        name,
                        1,
                        frame.f_globals,
                    ):
                        # Shared.DC.Scripts preamble is directly called by
                        # _bindAndExec.
                        if getattr(
                            frame.f_back,
                            'f_code',
                            None,
                        ) is SharedDCScriptsBindings_bindAndExec_func_code:
                            name = self._rememberFile(
                                u'# This is an auto-generated preamble executed '
                                u'by Shared.DC.Scripts.Bindings before "actual" '
                                u'code.\n' +
                                disassemble(frame.f_code),
                                'preamble',
                                '.py.bytecode',
                            )
                        else:
                            # Could not find source, provide disassembled
                            # bytecode as last resort.
                            name = self._rememberFile(
                                u'# Unidentified source for ' +
                                name + '\n' +
                                disassemble(
                                    frame.f_code,
                                ),
                                '%s.%s' % (name, frame.f_code.co_name),
                                '.py.bytecode',
                            )
                else:
                    # Evaluator found.
                    # Register the evaluated source under a name derived from
                    # the evaluator type.
                    if evaluator_code is PythonScript_exec_func_code:
                        python_script = evaluator_frame.f_locals['self']
                        name = self._rememberFile(
                            python_script.body().decode('utf-8'),
                            python_script.id,
                            '.py',
                        )
                    elif evaluator_code is DT_UtilEvaleval_func_code:
                        name = self._rememberFile(
                            evaluator_frame.f_locals['self'].expr.decode(
                                'utf-8',
                            ),
                            'DT_Util_Eval',
                            '.py',
                        )
                    elif evaluator_code in PYTHON_EXPR_FUNC_CODE_SET:
                        source = evaluator_frame.f_locals['self'].text
                        if not isinstance(source, unicode):
                            source = source.decode('utf-8')
                        name = self._rememberFile(
                            source,
                            'PythonExpr',
                            '.py',
                        )
                    else:
                        # Cannot happen: the while loop above only breaks on
                        # one of the three cases handled here.
                        raise ValueError(evaluator_code)
                self.keep_alive.append(evaluated_module_unique)
                # Create FileTiming and store as module...
                self.global_dict[
                    id(evaluated_module_unique)
                ] = file_timing = self.FileTiming(
                    name,
                    frame_globals,
                    self,
                )
                # ...and for later deduplication (in case of multithreading).
                # file_dict modifications must be thread-safe to not lose
                # measures. setdefault is atomic, append is atomic.
                self.file_dict.setdefault(name, []).append(file_timing)
            # Alias module FileTiming to current globals, for faster future
            # lookup.
            self.global_dict[id(frame_globals)] = file_timing
            self.keep_alive.append(frame_globals)
            return file_timing
    def _iterOutFiles(self):
        """
        Yields path, data, mimetype for each file involved on or produced by
        profiling.
        """
        # Callgrind-format statistics first.
        out = StringIO()
        self.callgrind(out, relative_path=True)
        yield (
            'cachegrind.out.pprofile',
            out.getvalue(),
            'application/x-kcachegrind',
        )
        # All profiled source files, re-encoded to UTF-8 bytes.
        for name, lines in self.iterSource():
            lines = ''.join(lines)
            if lines:
                if isinstance(lines, unicode):
                    lines = lines.encode('utf-8')
                yield (
                    # Strip drive and leading separators so the path is
                    # relative inside the produced archive/multipart.
                    os.path.normpath(
                        os.path.splitdrive(name)[1]
                    ).lstrip(_ALLSEP),
                    lines,
                    'text/x-python',
                )
        # One file per distinct SQL query, zero-padding the index to the
        # width of the largest index so names sort naturally.
        sql_name_template = 'query_%%0%ii-%%i_hits_%%6fs.sql' % len(
            str(len(self.sql_dict)),
        )
        # Most expensive queries (by total then hit count) come first.
        for index, (query, time_list) in enumerate(
            sorted(
                self.sql_dict.iteritems(),
                key=lambda x: (sum(x[1]), len(x[1])),
                reverse=True,
            ),
        ):
            yield (
                sql_name_template % (
                    index,
                    len(time_list),
                    sum(time_list),
                ),
                b'\n'.join(b'-- %10.6fs' % x for x in time_list) + b'\n' + query,
                'application/sql',
            )
        # ZODB object load timings, grouped per database then per oid.
        if self.zodb_dict:
            yield (
                'ZODB_setstate.txt',
                '\n\n'.join(
                    (
                        '%s (%fs)\n' % (
                            db_name,
                            sum(sum(x) for x in oid_dict.itervalues()),
                        )
                    ) + '\n'.join(
                        '%s (%i): %s' % (
                            oid.encode('hex'),
                            len(time_list),
                            ', '.join('%fs' % x for x in time_list),
                        )
                        for oid, time_list in oid_dict.iteritems()
                    )
                    for db_name, oid_dict in self.zodb_dict.iteritems()
                ),
                'text/plain',
            )
        # unrestrictedTraverse timings, slowest paths first.
        if self.traverse_dict:
            yield (
                'unrestrictedTraverse_pathlist.txt',
                tabulate(
                    ('self', 'path', 'hit', 'total duration'),
                    sorted(
                        (
                            (context, path, len(duration_list), sum(duration_list))
                            for (context, path), duration_list in self.traverse_dict.iteritems()
                        ),
                        key=lambda x: x[3],
                        reverse=True,
                    ),
                ),
                'text/plain',
            )
    def asMIMEString(self):
        """
        Return a mime-multipart representation of:
        - callgrind profiling statistics (cachegrind.out.pprofile)
        - any SQL query issued via ZMySQLDA (query_*.sql)
        - any persistent object load via ZODB.Connection (ZODB_setstate.txt)
        - any path argument given to unrestrictedTraverse
          (unrestrictedTraverse_pathlist.txt)
        - all involved python code, including Python Scripts without hierarchy
          (the rest)
        To unpack resulting file, see "unpack a MIME message" in
          http://docs.python.org/2/library/email-examples.html
        Or get demultipart from
          https://pypi.python.org/pypi/demultipart

        Returns (serialised multipart message, its content-type header value).
        """
        result = MIMEMultipart()
        # Map the major mime type to the MIME part class handling it.
        base_type_dict = {
            'application': MIMEApplication,
            'text': MIMEText,
        }
        # Third constructor argument per full mimetype: an encoder callable
        # for MIMEApplication, a charset string for MIMEText.
        encoder_dict = {
            'application/x-kcachegrind': encode_quopri,
            'text/x-python': 'utf-8',
            'text/plain': 'utf-8',
        }
        for path, data, mimetype in self._iterOutFiles():
            base_type, sub_type = mimetype.split('/')
            chunk = base_type_dict[base_type](
                data,
                sub_type,
                encoder_dict.get(mimetype),
            )
            chunk.add_header(
                'Content-Disposition',
                'attachment',
                filename=path,
            )
            result.attach(chunk)
        return result.as_string(), result['content-type']
|
vpelletier/pprofile | pprofile.py | run | python | def run(cmd, filename=None, threads=True, verbose=False):
_run(threads, verbose, 'run', filename, cmd) | Similar to profile.run . | train | https://github.com/vpelletier/pprofile/blob/51a36896727565faf23e5abccc9204e5f935fe1e/pprofile.py#L1182-L1184 | [
"def _run(threads, verbose, func_name, filename, *args, **kw):\n if threads:\n klass = ThreadProfile\n else:\n klass = Profile\n prof = klass(verbose=verbose)\n try:\n try:\n getattr(prof, func_name)(*args, **kw)\n except SystemExit:\n pass\n finally:\n if filename is None:\n prof.print_stats()\n else:\n prof.dump_stats(filename)\n"
] | #!/usr/bin/env python
# Copyright (C) 2013-2018 Vincent Pelletier <plr.vincent@gmail.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
pprofile - Line-granularity, thread-aware deterministic and statistic
pure-python profiler
Usage as a command line:
$ pprofile --exclude-syspath some_python_executable arg1 ...
$ pprofile --exclude-syspath -m some_python_module -- arg1 ...
$ python -m pprofile --exclude-syspath some_python_executable arg1 ...
$ python -m pprofile -m some_python_module -- arg1 ...
See --help for all options.
Usage as a python module:
Deterministic profiling:
>>> prof = pprofile.Profile()
>>> with prof():
>>> # Code to profile
>>> prof.print_stats()
Statistic profiling:
>>> prof = StatisticalProfile()
>>> with prof():
>>> # Code to profile
>>> prof.print_stats()
"""
from __future__ import print_function, division
from collections import defaultdict, deque
from functools import partial, wraps
# Note: use time, not clock.
# Clock, at least on linux, ignores time not spent executing code
# (ex: time.sleep()). The goal of pprofile is not to profile python code
# execution as such (ie, to improve python interpreter), but to profile a
# possibly complex application, with its (IO) waits, sleeps, (...) so a
# developper can understand what is slow rather than what keeps the cpu busy.
# So using the wall-clock as a way to measure time spent is more meaningful.
# XXX: This said, if time() lacks precision, a better but likely
# platform-dependent wall-clock time source must be identified and used.
from time import time
from warnings import warn
import argparse
import io
import inspect
from itertools import count
import linecache
import os
# not caught by 2to3, likely because pipes.quote is not documented in python 2
try:
from pipes import quote as shlex_quote # Python 2
except ImportError:
from shlex import quote as shlex_quote # Python 3
import platform
import re
import runpy
import shlex
from subprocess import list2cmdline as windows_list2cmdline
import sys
import threading
import zipfile
try:
from IPython.core.magic import register_line_cell_magic
except ImportError:
register_line_cell_magic = lambda x: x
__all__ = (
'ProfileBase', 'ProfileRunnerBase', 'Profile', 'ThreadProfile',
'StatisticProfile', 'StatisticThread', 'run', 'runctx', 'runfile',
'runpath',
)
class BaseLineIterator(object):
def __init__(self, getline, filename, global_dict):
self._getline = getline
self._filename = filename
self._global_dict = global_dict
self._lineno = 1
def __iter__(self):
return self
def next(self):
lineno = self._lineno
self._lineno += 1
return lineno, self._getline(self._filename, lineno, self._global_dict)
# Python 2 source files are byte streams: detect their encoding (PEP 263)
# so annotation output decodes lines correctly. On Python 3, linecache
# already returns unicode, so the base iterator is enough.
if sys.version_info < (3, ):
    import codecs
    # Find coding specification (see PEP-0263)
    _matchCoding = re.compile(
        r'^[ \t\f]*#.*?coding[:=][ \t]*([-_.a-zA-Z0-9]+)',
    ).match
    class LineIterator(BaseLineIterator):
        # Encoding used to decode byte lines; None when lines are already
        # unicode.
        _encoding = None
        def __init__(self, *args, **kw):
            super(LineIterator, self).__init__(*args, **kw)
            # Identify encoding.
            first_line = self._getline(self._filename, 1, self._global_dict)
            if isinstance(first_line, bytes):
                # BOM - python2 only detects the (discouraged) UTF-8 BOM
                if first_line.startswith(codecs.BOM_UTF8):
                    self._encoding = 'utf-8'
                else:
                    # PEP-0263: "the first or second line must match [_matchCoding]"
                    match = _matchCoding(first_line)
                    if match is None:
                        match = _matchCoding(
                            self._getline(self._filename, 2, self._global_dict),
                        )
                    if match is None:
                        self._encoding = 'ascii'
                    else:
                        self._encoding = match.group(1)
            # else, first line is unicode.
        def next(self):
            lineno, line = super(LineIterator, self).next()
            if self._encoding:
                line = line.decode(self._encoding, errors='replace')
            return lineno, line
else:
    # getline returns unicode objects, nothing to do
    LineIterator = BaseLineIterator
# Platform-appropriate quoting of a commandline (list of arguments) for
# display, e.g. in callgrind "cmd:" headers.
if platform.system() == 'Windows':
    # Windows quoting rules differ from POSIX: reuse subprocess' helper.
    quoteCommandline = windows_list2cmdline
else:
    def quoteCommandline(commandline):
        """Return *commandline* as a single POSIX-shell-safe string."""
        return ' '.join(map(shlex_quote, commandline))
class EncodeOrReplaceWriter(object):
    """
    Write-only file-ish object which replaces unsupported chars when
    underlying file rejects them.
    """
    def __init__(self, out):
        # Fall back to ascii when the target has no (or a None) encoding.
        self._encoding = getattr(out, 'encoding', None) or 'ascii'
        self._write = out.write
    def write(self, data):
        """
        Write data to the wrapped file, substituting replacement chars for
        anything its encoding cannot represent.
        """
        try:
            self._write(data)
        except UnicodeEncodeError:
            # Round-trip through the target encoding with errors='replace',
            # then retry with the now-encodable text.
            sanitized = data.encode(self._encoding, errors='replace')
            self._write(sanitized.decode(self._encoding))
def _isCallgrindName(filepath):
return os.path.basename(filepath).startswith('cachegrind.out.')
class _FileTiming(object):
    """
    Accumulation of profiling statistics (line and call durations) for a given
    source "file" (unique global dict).
    Subclasses should be aware that:
    - this classes uses __slots__, mainly for cpu efficiency (property lookup
      is in a list instead of a dict)
    - it can access the BaseProfile instance which created any instace using
      the "profiler" property, should they share some state across source
      files.
    - methods on this class are profiling choke-point - keep customisations
      as cheap in CPU as you can !
    """
    __slots__ = ('line_dict', 'call_dict', 'filename', 'global_dict',
        'profiler')
    def __init__(self, filename, global_dict, profiler):
        self.filename = filename
        self.global_dict = global_dict
        # line number -> code object -> [hit count, total duration]
        self.line_dict = defaultdict(lambda: defaultdict(lambda: [0, 0]))
        # (caller code, line, callee code) -> [callee FileTiming, hits, duration]
        self.call_dict = {}
        # Note: not used in this implementation, may be used by subclasses.
        self.profiler = profiler
    def hit(self, code, line, duration):
        """
        A line has finished executing.
        code (code)
          container function's code object
        line (int)
          line number of just executed line
        duration (float)
          duration of the line, in seconds
        """
        entry = self.line_dict[line][code]
        entry[0] += 1
        entry[1] += duration
    def call(self, code, line, callee_file_timing, callee, duration, frame):
        """
        A call originating from this file returned.
        code (code)
          caller's code object
        line (int)
          caller's line number
        callee_file_timing (FileTiming)
          callee's FileTiming
        callee (code)
          callee's code object
        duration (float)
          duration of the call, in seconds
        frame (frame)
          calle's entire frame as of its return
        """
        try:
            entry = self.call_dict[(code, line, callee)]
        except KeyError:
            self.call_dict[(code, line, callee)] = [callee_file_timing, 1, duration]
        else:
            entry[1] += 1
            entry[2] += duration
    def getHitStatsFor(self, line):
        """Return (total hits, total duration) over all codes on *line*."""
        total_hits = total_duration = 0
        for hits, duration in self.line_dict.get(line, {}).itervalues():
            total_hits += hits
            total_duration += duration
        return total_hits, total_duration
    def getLastLine(self):
        """Return the highest line number with recorded hits or calls."""
        return max(
            max(self.line_dict) if self.line_dict else 0,
            max(x for _, x, _ in self.call_dict) if self.call_dict else 0,
        )
    def iterHits(self):
        """Yield (line, code, hits, duration) for every recorded line hit."""
        for line, code_dict in self.line_dict.iteritems():
            for code, (hits, duration) in code_dict.iteritems():
                yield line, code, hits, duration
    def iterCalls(self):
        """
        Yield (line, caller code, hits, duration, callee filename, callee
        code) for every recorded call site.
        """
        for (code, line, callee), (callee_file_timing, hit, duration) in \
                self.call_dict.iteritems():
            yield (
                line,
                code,
                hit, duration,
                callee_file_timing.filename, callee,
            )
    def getCallListByLine(self):
        """Regroup iterCalls output into a line -> list of calls dict."""
        result = defaultdict(list)
        for line, code, hit, duration, callee_filename, callee in self.iterCalls():
            result[line].append((
                code,
                hit, duration,
                callee_filename, callee,
            ))
        return result
    def getTotalTime(self):
        """Return the total time (seconds) spent in this file's lines."""
        return sum(
            y[1]
            for x in self.line_dict.itervalues()
            for y in x.itervalues()
        )
    def getTotalHitCount(self):
        """Return the total number of line hits in this file."""
        return sum(
            y[0]
            for x in self.line_dict.itervalues()
            for y in x.itervalues()
        )
    def getSortKey(self):
        # total duration first, then total hit count for statistical profiling
        result = [0, 0]
        for entry in self.line_dict.itervalues():
            for hit, duration in entry.itervalues():
                result[0] += duration
                result[1] += hit
        return result
# Public alias; ProfileBase subclasses may override their FileTiming
# attribute with another class.
FileTiming = _FileTiming
class LocalDescriptor(threading.local):
    """
    Implementation of descriptor API for thread-local properties.

    The descriptor instance is itself a threading.local, and stores one
    value per (owner instance, current thread), keyed on the owner's id.
    """
    def __init__(self, func=None):
        """
        func (callable)
          If provided, called when a missing property is accessed
          (ex: accessing thread never initialised that property).
          If None, AttributeError is raised.
        """
        super(LocalDescriptor, self).__init__()
        if func is not None:
            self.func = func
    def __get__(self, instance, owner):
        key = str(id(instance))
        try:
            return getattr(self, key)
        except AttributeError:
            # "self.func" itself raises AttributeError when no default
            # factory was provided, propagating the expected error.
            default = self.func()
            setattr(self, key, default)
            return default
    def __set__(self, instance, value):
        setattr(self, str(id(instance)), value)
    def __delete__(self, instance):
        try:
            delattr(self, str(id(instance)))
        except AttributeError:
            # Nothing stored for this instance in this thread: no-op.
            pass
# Column header for ProfileBase.annotate output.
_ANNOTATE_HEADER = \
    u'%6s|%10s|' \
    u'%13s|%13s|%7s|' \
    u'Source code' % (
        u'Line #', u'Hits',
        u'Time', u'Time per hit', u'%',
    )
# Ruler drawn under the header, with '+' at each column separator.
_ANNOTATE_HORIZONTAL_LINE = u''.join(x == u'|' and u'+' or u'-'
    for x in _ANNOTATE_HEADER)
# Row format for one source line.
_ANNOTATE_FORMAT = \
    u'%(lineno)6i|%(hits)10i|' \
    u'%(time)13g|%(time_per_hit)13g|%(percent)6.2f%%|' \
    u'%(line)s'
# Extra row emitted under a source line for each call it made.
_ANNOTATE_CALL_FORMAT = \
    u'(call)|%(hits)10i|' \
    u'%(time)13g|%(time_per_hit)13g|%(percent)6.2f%%|' \
    u'# %(callee_file)s:%(callee_line)s %(callee_name)s'
def _initStack():
# frame_time: when current frame execution started/resumed last
# frame_discount: time discounted from current frame, because it appeared
# lower in the call stack from the same callsite
# lineno: latest line which execution started
# line_time: time at which latest line started being executed
# line_duration: total time spent in current line up to last resume
now = time()
return (deque([[now, 0, None, now, 0]]), defaultdict(deque))
def _verboseProfileDecorator(self):
def decorator(func):
@wraps(func)
def wrapper(frame, event, arg):
self._traceEvent(frame, event)
return func(frame, event, arg)
return wrapper
return decorator
class ProfileBase(object):
    """
    Methods common to deterministic and statistic profiling.
    Subclasses can override the "FileTiming" property to use a different class.
    """
    __slots__ = (
        'file_dict',
        'global_dict',
        'total_time',
        '__dict__',
        '__weakref__',
        'merged_file_dict',
    )
    FileTiming = _FileTiming
    def __init__(self):
        # filename -> list of FileTiming (one per distinct global dict).
        self.file_dict = {}
        # Cache for _mergeFileTiming: deduplicated name -> FileTiming.
        self.merged_file_dict = {}
        # id(module globals) -> FileTiming, for O(1) per-frame lookup.
        self.global_dict = {}
        # Cumulated seconds during which profiling was enabled.
        self.total_time = 0
    def _getFileTiming(self, frame):
        """
        Return (creating and registering it if needed) the FileTiming
        tracking the module (global dict) given frame belongs to.
        """
        try:
            return self.global_dict[id(frame.f_globals)]
        except KeyError:
            f_globals = frame.f_globals
            name = self._getFilename(frame)
            self.global_dict[id(f_globals)] = file_timing = self.FileTiming(
                name,
                f_globals,
                self,
            )
            # file_dict modifications must be thread-safe to not lose measures.
            # setdefault is atomic, append is atomic.
            self.file_dict.setdefault(name, []).append(file_timing)
            return file_timing
    @staticmethod
    def _getFilename(frame):
        """
        Overload in subclasses to customise filename generation.
        """
        return frame.f_code.co_filename
    @staticmethod
    def _getline(filename, lineno, global_dict):
        """
        Overload in subclasses to customise source retrieval.
        """
        return linecache.getline(filename, lineno, global_dict)
    def _mergeFileTiming(self, rebuild=False):
        """
        Merge all FileTiming instances of a same module (e.g. created by
        several threads) into a single FileTiming per module, resolving
        filename collisions between distinct modules. Result is cached;
        pass rebuild=True to force recomputation.
        """
        merged_file_dict = self.merged_file_dict
        if merged_file_dict and not rebuild:
            return merged_file_dict
        merged_file_dict.clear()
        # Regroup by module, to find all duplicates from other threads.
        by_global_dict = defaultdict(list)
        for file_timing_list in self.file_dict.itervalues():
            for file_timing in file_timing_list:
                by_global_dict[
                    id(file_timing.global_dict)
                ].append(
                    file_timing,
                )
        # Resolve name conflicts.
        global_to_named_dict = {}
        for global_dict_id, file_timing_list in by_global_dict.iteritems():
            file_timing = file_timing_list[0]
            name = file_timing.filename
            if name in merged_file_dict:
                counter = count()
                base_name = name
                while name in merged_file_dict:
                    name = base_name + '_%i' % next(counter)
            global_to_named_dict[global_dict_id] = merged_file_dict[name] = FileTiming(
                name,
                file_timing.global_dict,
                file_timing.profiler, # Note: should be self
            )
        # Add all file timings from one module together under its
        # deduplicated name. This needs to happen after all names
        # are generated and all empty file timings are created so
        # call events cross-references can be remapped.
        for merged_file_timing in merged_file_dict.itervalues():
            line_dict = merged_file_timing.line_dict
            for file_timing in by_global_dict[id(merged_file_timing.global_dict)]:
                for line, other_code_dict in file_timing.line_dict.iteritems():
                    code_dict = line_dict[line]
                    for code, (
                        other_hits,
                        other_duration,
                    ) in other_code_dict.iteritems():
                        entry = code_dict[code]
                        entry[0] += other_hits
                        entry[1] += other_duration
                call_dict = merged_file_timing.call_dict
                for key, (
                    other_callee_file_timing,
                    other_hits,
                    other_duration,
                ) in file_timing.call_dict.iteritems():
                    try:
                        entry = call_dict[key]
                    except KeyError:
                        # Remap callee reference to the merged FileTiming.
                        entry = call_dict[key] = [
                            global_to_named_dict[
                                id(other_callee_file_timing.global_dict)
                            ],
                            other_hits,
                            other_duration,
                        ]
                    else:
                        entry[1] += other_hits
                        entry[2] += other_duration
        return merged_file_dict
    def getFilenameSet(self):
        """
        Returns a set of profiled file names.
        Note: "file name" is used loosely here. See python documentation for
        co_filename, linecache module and PEP302. It may not be a valid
        filesystem path.
        """
        result = set(self._mergeFileTiming())
        # Ignore profiling code. __file__ does not always provide consistent
        # results with f_code.co_filename (ex: easy_install with zipped egg),
        # so inspect current frame instead.
        # Get current file from one of pprofile methods. Compatible with
        # implementations that do not have the inspect.currentframe() method
        # (e.g. IronPython).
        # XXX: Assumes that all of pprofile code is in a single file.
        # XXX: Assumes that _initStack exists in pprofile module.
        result.discard(inspect.getsourcefile(_initStack))
        return result
    def _getFileNameList(self, filename, may_sort=True):
        """
        Normalise the "filename" argument of output methods into a list:
        None means all profiled files, a string means that one file, any
        other (unordered) collection gets sorted by decreasing file score
        when may_sort is true.
        """
        if filename is None:
            filename = self.getFilenameSet()
        elif isinstance(filename, basestring):
            return [filename]
        if may_sort:
            try:
                # Detect if filename is an ordered data type.
                filename[:0]
            except TypeError:
                # Not ordered, sort.
                file_dict = self._mergeFileTiming()
                filename = sorted(filename, reverse=True,
                    key=lambda x: file_dict[x].getSortKey()
                )
        return filename
    def callgrind(self, out, filename=None, commandline=None, relative_path=False):
        """
        Dump statistics in callgrind format.
        Contains:
        - per-line hit count, time and time-per-hit
        - call associations (call tree)
          Note: hit count is not inclusive, in that it is not the sum of all
          hits inside that call.
        Time unit: microsecond (1e-6 second).
        out (file-ish opened for writing)
            Destination of callgrind profiling data.
        filename (str, collection of str)
            If provided, dump stats for given source file(s) only.
            By default, list for all known files.
        commandline (anything with __str__)
            If provided, will be output as the command line used to generate
            this profiling data.
        relative_path (bool)
            When True, absolute elements are stripped from path. Useful when
            maintaining several copies of source trees with their own
            profiling result, so kcachegrind does not look in system-wide
            files which may not match with profiled code.
        """
        print(u'# callgrind format', file=out)
        print(u'version: 1', file=out)
        print(u'creator: pprofile', file=out)
        print(u'event: usphit :microseconds/hit', file=out)
        print(u'events: hits microseconds usphit', file=out)
        if commandline is not None:
            print(u'cmd:', commandline, file=out)
        file_dict = self._mergeFileTiming()
        if relative_path:
            convertPath = _relpath
        else:
            convertPath = lambda x: x
        if os.path.sep != "/":
            # qCacheGrind (windows build) needs at least one UNIX separator
            # in path to find the file. Adapt here even if this is probably
            # more of a qCacheGrind issue...
            convertPath = lambda x, cascade=convertPath: cascade(
                '/'.join(x.split(os.path.sep))
            )
        code_to_name_dict = {}
        homonym_counter = {}
        def getCodeName(filename, code):
            # Tracks code objects globally, because callee information needs
            # to be consistent accross files.
            # Inside a file, grants unique names to each code object.
            try:
                return code_to_name_dict[code]
            except KeyError:
                name = code.co_name + ':%i' % code.co_firstlineno
                key = (filename, name)
                homonym_count = homonym_counter.get(key, 0)
                if homonym_count:
                    name += '_%i' % homonym_count
                homonym_counter[key] = homonym_count + 1
                code_to_name_dict[code] = name
                return name
        for current_file in self._getFileNameList(filename, may_sort=False):
            file_timing = file_dict[current_file]
            print(u'fl=%s' % convertPath(current_file), file=out)
            # When a local callable is created an immediately executed, this
            # loop would start a new "fn=" section but would not end it before
            # emitting "cfn=" lines, making the callee appear as not being
            # called by interrupted "fn=" section.
            # So dispatch all functions in a first pass, and build
            # uninterrupted sections in a second pass.
            # Note: cost line is a list just to be mutable. A single item is
            # expected.
            func_dict = defaultdict(lambda: defaultdict(lambda: ([], [])))
            for lineno, code, hits, duration in file_timing.iterHits():
                func_dict[getCodeName(current_file, code)][lineno][0].append(
                    (hits, int(duration * 1000000)),
                )
            for (
                lineno,
                caller,
                call_hits, call_duration,
                callee_file, callee,
            ) in file_timing.iterCalls():
                call_ticks = int(call_duration * 1000000)
                func_call_list = func_dict[
                    getCodeName(current_file, caller)
                ][lineno][1]
                append = func_call_list.append
                append(u'cfl=' + convertPath(callee_file))
                append(u'cfn=' + getCodeName(callee_file, callee))
                append(u'calls=%i %i' % (call_hits, callee.co_firstlineno))
                append(u'%i %i %i %i' % (lineno, call_hits, call_ticks, call_ticks // call_hits))
            for func_name, line_dict in func_dict.iteritems():
                print(u'fn=%s' % func_name, file=out)
                for lineno, (func_hit_list, func_call_list) in sorted(line_dict.iteritems()):
                    if func_hit_list:
                        # Multiple function objects may "reside" on the same
                        # line of the same file (same global dict).
                        # Sum these up and produce a single cachegrind event.
                        hits = sum(x for x, _ in func_hit_list)
                        ticks = sum(x for _, x in func_hit_list)
                        print(
                            u'%i %i %i %i' % (
                                lineno,
                                hits,
                                ticks,
                                ticks // hits,
                            ),
                            file=out,
                        )
                    for line in func_call_list:
                        print(line, file=out)
    def annotate(self, out, filename=None, commandline=None, relative_path=False):
        """
        Dump annotated source code with current profiling statistics to "out"
        file.
        Time unit: second.
        out (file-ish opened for writing)
            Destination of annotated sources.
        filename (str, collection of str)
            If provided, dump stats for given source file(s) only.
            If unordered collection, it will get sorted by decreasing total
            file score (total time if available, then total hit count).
            By default, list for all known files.
        commandline (anything with __str__)
            If provided, will be output as the command line used to generate
            this annotation.
        relative_path (bool)
            For compatibility with callgrind. Ignored.
        """
        file_dict = self._mergeFileTiming()
        total_time = self.total_time
        if commandline is not None:
            print(u'Command line:', commandline, file=out)
        print(u'Total duration: %gs' % total_time, file=out)
        if not total_time:
            return
        def percent(value, scale):
            if scale == 0:
                return 0
            return value * 100 / scale
        for name in self._getFileNameList(filename):
            file_timing = file_dict[name]
            file_total_time = file_timing.getTotalTime()
            call_list_by_line = file_timing.getCallListByLine()
            print(u'File: %s' % name, file=out)
            print(u'File duration: %gs (%.2f%%)' % (file_total_time,
                percent(file_total_time, total_time)), file=out)
            print(_ANNOTATE_HEADER, file=out)
            print(_ANNOTATE_HORIZONTAL_LINE, file=out)
            last_line = file_timing.getLastLine()
            for lineno, line in LineIterator(
                    self._getline,
                    file_timing.filename,
                    file_timing.global_dict,
                ):
                if not line and lineno > last_line:
                    break
                hits, duration = file_timing.getHitStatsFor(lineno)
                print(_ANNOTATE_FORMAT % {
                    u'lineno': lineno,
                    u'hits': hits,
                    u'time': duration,
                    u'time_per_hit': duration / hits if hits else 0,
                    u'percent': percent(duration, total_time),
                    u'line': (line or u'').rstrip(),
                }, file=out)
                for (
                    _,
                    call_hits, call_duration,
                    callee_file, callee,
                ) in call_list_by_line.get(lineno, ()):
                    print(_ANNOTATE_CALL_FORMAT % {
                        u'hits': call_hits,
                        u'time': call_duration,
                        u'time_per_hit': call_duration / call_hits,
                        u'percent': percent(call_duration, total_time),
                        u'callee_file': callee_file,
                        u'callee_line': callee.co_firstlineno,
                        u'callee_name': callee.co_name,
                    }, file=out)
    def _iterRawFile(self, name):
        """Yield raw (non-annotated) source lines of profiled file *name*."""
        file_timing = self._mergeFileTiming()[name]
        for lineno in count(1):
            line = self._getline(file_timing.filename, lineno,
                file_timing.global_dict)
            if not line:
                break
            yield line
    def iterSource(self):
        """
        Iterator over all involved files.
        Yields 2-tuple composed of file path and an iterator over
        (non-annotated) source lines.
        Can be used to generate a file tree for use with kcachegrind, for
        example.
        """
        for name in self.getFilenameSet():
            yield name, self._iterRawFile(name)
    # profile/cProfile-like API
    def dump_stats(self, filename):
        """
        Similar to profile.Profile.dump_stats - but different output format !
        """
        # Output format is picked from the destination file name: callgrind
        # when it follows the "cachegrind.out.*" convention, annotated
        # source otherwise.
        if _isCallgrindName(filename):
            with open(filename, 'w') as out:
                self.callgrind(out)
        else:
            with io.open(filename, 'w', errors='replace') as out:
                self.annotate(out)
    def print_stats(self):
        """
        Similar to profile.Profile.print_stats .
        Returns None.
        """
        self.annotate(EncodeOrReplaceWriter(sys.stdout))
class ProfileRunnerBase(object):
    """
    Mixin providing profile/cProfile-like entry points (run*, runcall, ...)
    on top of a context-manager profiler. Subclasses must implement
    __enter__ and __exit__ (enabling/disabling profiling).
    """
    def __call__(self):
        # Allows "with prof():" in addition to "with prof:".
        return self
    def __enter__(self):
        raise NotImplementedError
    def __exit__(self, exc_type, exc_val, exc_tb):
        raise NotImplementedError
    # profile/cProfile-like API
    def runctx(self, cmd, globals, locals):
        """Similar to profile.Profile.runctx ."""
        with self():
            exec(cmd, globals, locals)
        return self
    def runcall(self, func, *args, **kw):
        """Similar to profile.Profile.runcall ."""
        with self():
            return func(*args, **kw)
    def runfile(self, fd, argv, fd_name='<unknown>', compile_flags=0,
            dont_inherit=1, globals=None):
        """
        Profile code read from an opened file.
        fd (opened file-ish)
            Source to compile and profile. Closed once read.
        argv (list of str)
            Temporarily installed as sys.argv during execution.
        fd_name (str)
            Used as compiled code filename and executed __file__.
        compile_flags, dont_inherit
            Passed to compile().
        globals (dict or None)
            Base global namespace; copied, never mutated. None means empty.
        """
        # Fix: use a None sentinel rather than a mutable "{}" default, so a
        # single dict object is not shared across calls/instances. Behavior
        # is unchanged: the dict was only ever copied.
        if globals is None:
            globals = {}
        with fd:
            code = compile(fd.read(), fd_name, 'exec', flags=compile_flags,
                dont_inherit=dont_inherit)
        original_sys_argv = list(sys.argv)
        ctx_globals = globals.copy()
        ctx_globals['__file__'] = fd_name
        ctx_globals['__name__'] = '__main__'
        ctx_globals['__package__'] = None
        try:
            sys.argv[:] = argv
            return self.runctx(code, ctx_globals, None)
        finally:
            # Restore sys.argv even if execution raised.
            sys.argv[:] = original_sys_argv
    def runpath(self, path, argv):
        """
        Profile the file at *path*, with its directory temporarily prepended
        to sys.path (mimicking direct script execution).
        """
        original_sys_path = list(sys.path)
        try:
            sys.path.insert(0, os.path.dirname(path))
            return self.runfile(open(path, 'rb'), argv, fd_name=path)
        finally:
            sys.path[:] = original_sys_path
    def runmodule(self, module, argv):
        """
        Profile *module* executed as __main__ (like "python -m module"),
        with sys.argv temporarily replaced by *argv* and sys.path[0] set to
        the current directory.
        """
        original_sys_argv = list(sys.argv)
        original_sys_path0 = sys.path[0]
        try:
            sys.path[0] = os.getcwd()
            sys.argv[:] = argv
            with self():
                runpy.run_module(module, run_name='__main__', alter_sys=True)
        finally:
            sys.argv[:] = original_sys_argv
            sys.path[0] = original_sys_path0
        return self
class Profile(ProfileBase, ProfileRunnerBase):
"""
Deterministic, recursive, line-granularity, profiling class.
Does not require any source code change to work.
If the performance hit is too large, it can benefit from some
integration (calling enable/disable around selected code chunks).
The sum of time spent in all profiled lines is less than the total
profiled time reported. This is (part of) profiling overhead.
This also mans that sum of time-spent-on-line percentage is less than 100%.
All times are "internal time", ie they do not count time spent inside
called (profilable, so python) functions.
"""
__slots__ = (
'_global_trace',
'_local_trace',
'stack',
'enabled_start',
)
def __init__(self, verbose=False):
super(Profile, self).__init__()
if verbose:
self._global_trace = _verboseProfileDecorator(self)(
self._real_global_trace)
self._local_trace = _verboseProfileDecorator(self)(
self._real_local_trace)
else:
self._global_trace = self._real_global_trace
self._local_trace = self._real_local_trace
self.stack = None
self.enabled_start = None
def _enable(self):
"""
Overload this method when subclassing. Called before actually
enabling trace.
"""
self.stack = _initStack()
self.enabled_start = time()
def enable(self):
"""
Enable profiling.
"""
if self.enabled_start:
warn('Duplicate "enable" call')
else:
self._enable()
sys.settrace(self._global_trace)
def _disable(self):
"""
Overload this method when subclassing. Called after actually disabling
trace.
"""
self.total_time += time() - self.enabled_start
self.enabled_start = None
del self.stack
def disable(self):
"""
Disable profiling.
"""
if self.enabled_start:
sys.settrace(None)
self._disable()
else:
warn('Duplicate "disable" call')
def __enter__(self):
"""
__enter__() -> self
"""
self.enable()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""
__exit__(*excinfo) -> None. Disables profiling.
"""
self.disable()
def _traceEvent(self, frame, event):
f_code = frame.f_code
lineno = frame.f_lineno
print('%10.6f%s%s %s:%s %s+%s' % (
time() - self.enabled_start,
' ' * len(self.stack[0]),
event,
f_code.co_filename,
lineno,
f_code.co_name,
lineno - f_code.co_firstlineno,
), file=sys.stderr)
def _real_global_trace(self, frame, event, arg):
local_trace = self._local_trace
if local_trace is not None:
event_time = time()
callee_entry = [event_time, 0, frame.f_lineno, event_time, 0]
stack, callee_dict = self.stack
try:
caller_entry = stack[-1]
except IndexError:
pass
else:
# Suspend caller frame
frame_time, frame_discount, lineno, line_time, line_duration = caller_entry
caller_entry[4] = event_time - line_time + line_duration
callee_dict[(frame.f_back.f_code, frame.f_code)].append(callee_entry)
stack.append(callee_entry)
return local_trace
    def _real_local_trace(self, frame, event, arg):
        """
        Per-frame trace callback: accounts time spent on each line and, on
        'return', folds the frame's total duration into its caller's stats.
        """
        if event == 'line' or event == 'return':
            event_time = time()
            stack, callee_dict = self.stack
            try:
                stack_entry = stack[-1]
            except IndexError:
                warn('Profiling stack underflow, disabling.')
                self.disable()
                return None
            frame_time, frame_discount, lineno, line_time, line_duration = stack_entry
            file_timing = self._getFileTiming(frame)
            # Account the line which just finished; includes time accumulated
            # before this frame was suspended by a nested call.
            file_timing.hit(frame.f_code, lineno,
                event_time - line_time + line_duration)
            if event == 'line':
                # Start a new line
                stack_entry[2] = frame.f_lineno
                stack_entry[3] = event_time
                stack_entry[4] = 0
            else:
                # 'return' event, <frame> is still callee
                # Resume caller frame
                stack.pop()
                stack[-1][3] = event_time
                caller_frame = frame.f_back
                caller_code = caller_frame.f_code
                callee_code = frame.f_code
                callee_entry_list = callee_dict[(caller_code, callee_code)]
                callee_entry_list.pop()
                call_duration = event_time - frame_time
                if callee_entry_list:
                    # Callee is also somewhere up the stack, so discount this
                    # call duration from it.
                    callee_entry_list[-1][1] += call_duration
                self._getFileTiming(caller_frame).call(
                    caller_code, caller_frame.f_lineno,
                    file_timing,
                    callee_code, call_duration - frame_discount,
                    frame,
                )
        return self._local_trace
# profile/cProfile-like API
def run(self, cmd):
"""Similar to profile.Profile.run ."""
import __main__
dikt = __main__.__dict__
return self.runctx(cmd, dikt, dikt)
class ThreadProfile(Profile):
    """
    threading.Thread-aware version of Profile class.
    Threads started after enable() call will be profiled.
    After disable() call, threads will need to be switched into and trigger a
    trace event (typically a "line" event) before they can notice the
    disabling.
    """
    __slots__ = ('_local_trace_backup', )
    # Per-thread profiling state: each thread gets its own call stack
    # bookkeeping and module dict cache (see LocalDescriptor).
    stack = LocalDescriptor(_initStack)
    global_dict = LocalDescriptor(dict)
    def __init__(self, **kw):
        super(ThreadProfile, self).__init__(**kw)
        # Saved so _enable can restore tracing after _disable nulled it.
        self._local_trace_backup = self._local_trace
    def _enable(self):
        self._local_trace = self._local_trace_backup
        # Install trace callback for threads spawned from now on.
        threading.settrace(self._global_trace)
        super(ThreadProfile, self)._enable()
    def _disable(self):
        super(ThreadProfile, self)._disable()
        threading.settrace(None)
        # Makes already-traced threads stop profiling on their next event.
        self._local_trace = None
class StatisticProfile(ProfileBase, ProfileRunnerBase):
    """
    Statistic (sampling) profiling class.

    Does not gather samples by itself: call stacks (as returned by
    sys._getframe() or sys._current_frames()) must be fed to it, typically
    by a StatisticThread.
    """
    def __init__(self):
        super(StatisticProfile, self).__init__()
        self.total_time = 1
    def sample(self, frame):
        """
        Register one sample: a zero-duration hit on <frame>'s current line,
        plus one zero-duration call event per frame up the caller chain.
        """
        getFileTiming = self._getFileTiming
        callee_timing = getFileTiming(frame)
        callee_code = frame.f_code
        callee_timing.hit(callee_code, frame.f_lineno, 0)
        caller = frame.f_back
        while caller is not None:
            caller_timing = getFileTiming(caller)
            caller_code = caller.f_code
            caller_timing.call(
                caller_code, caller.f_lineno,
                callee_timing, callee_code, 0, frame,
            )
            # Walk one level up: current caller becomes the callee.
            callee_timing = caller_timing
            callee_code = caller_code
            frame = caller
            caller = frame.f_back
    def __call__(self, period=.001, single=True, group=None, name=None):
        """
        Instanciate StatisticThread.
        >>> s_profile = StatisticProfile()
        >>> with s_profile(single=False):
        >>>     # Code to profile
        Is equivalent to:
        >>> s_profile = StatisticProfile()
        >>> s_thread = StatisticThread(profiler=s_profile, single=False)
        >>> with s_thread:
        >>>     # Code to profile
        """
        return StatisticThread(
            profiler=self, period=period, single=single, group=group,
            name=name,
        )
# BBB
# Historical (misspelled-for-consistency) name, kept as a
# backward-compatibility alias.
StatisticalProfile = StatisticProfile
class StatisticThread(threading.Thread, ProfileRunnerBase):
    """
    Daemon thread which periodically samples thread stacks into a
    StatisticProfile instance.

    Usage in a nutshell:
      with StatisticThread() as profiler_thread:
        # do stuff
      profiler_thread.profiler.print_stats()
    """
    # NOTE(review): __slots__ is only declarative here - threading.Thread
    # instances carry a __dict__, and __init__/start() set attributes not
    # listed below.
    __slots__ = (
        '_test',
        '_start_time',
        'clean_exit',
    )
    def __init__(self, profiler=None, period=.001, single=True, group=None, name=None):
        """
        profiler (None or StatisticProfile instance)
          Available on instances as the "profiler" read-only property.
          If None, a new profiler instance will be created.
        period (float)
          How many seconds to wait between consecutive samples.
          The smaller, the more profiling overhead, but the faster results
          become meaningful.
          The larger, the less profiling overhead, but requires long profiling
          session to get meaningful results.
        single (bool)
          Profile only the thread which created this instance.
        group, name
          See Python's threading.Thread API.
        """
        if profiler is None:
            profiler = StatisticProfile()
        if single:
            # Bind the creating thread's ident now; only it gets sampled.
            self._test = lambda x, ident=threading.current_thread().ident: ident == x
        else:
            self._test = None
        super(StatisticThread, self).__init__(
            group=group,
            name=name,
        )
        self._stop_event = threading.Event()
        self._period = period
        self._profiler = profiler
        profiler.total_time = 0
        self.daemon = True
        self.clean_exit = False
    @property
    def profiler(self):
        # Read-only access to the wrapped StatisticProfile instance.
        return self._profiler
    def start(self):
        self.clean_exit = False
        self._can_run = True
        # Wall-clock accounting of the whole profiled period.
        self._start_time = time()
        super(StatisticThread, self).start()
    def stop(self):
        """
        Request thread to stop.
        Does not wait for actual termination (use join() method).
        """
        if self.is_alive():
            self._can_run = False
            self._stop_event.set()
            self._profiler.total_time += time() - self._start_time
            self._start_time = None
    def __enter__(self):
        """
        __enter__() -> self
        """
        self.start()
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        """
        __exit__(*excinfo) -> None. Stops and joins profiling thread.
        """
        self.stop()
        self.join()
    def run(self):
        # Sampling loop; executes in the spawned thread.
        current_frames = sys._current_frames
        test = self._test
        if test is None:
            # Sample every thread except this sampling thread itself.
            test = lambda x, ident=threading.current_thread().ident: ident != x
        sample = self._profiler.sample
        stop_event = self._stop_event
        wait = partial(stop_event.wait, self._period)
        while self._can_run:
            for ident, frame in current_frames().iteritems():
                if test(ident):
                    sample(frame)
            # Drop the last sampled frame reference before sleeping.
            frame = None
            wait()
        stop_event.clear()
        self.clean_exit = True
    # Deprecated pass-throughs to the wrapped profiler.
    def callgrind(self, *args, **kw):
        warn('deprecated', DeprecationWarning)
        return self._profiler.callgrind(*args, **kw)
    def annotate(self, *args, **kw):
        warn('deprecated', DeprecationWarning)
        return self._profiler.annotate(*args, **kw)
    def dump_stats(self, *args, **kw):
        warn('deprecated', DeprecationWarning)
        return self._profiler.dump_stats(*args, **kw)
    def print_stats(self, *args, **kw):
        warn('deprecated', DeprecationWarning)
        return self._profiler.print_stats(*args, **kw)
    def iterSource(self, *args, **kw):
        warn('deprecated', DeprecationWarning)
        return self._profiler.iterSource(*args, **kw)
# BBB
# Historical name, kept as a backward-compatibility alias.
StatisticalThread = StatisticThread
# profile/cProfile-like API (no sort parameter !)
def _run(threads, verbose, func_name, filename, *args, **kw):
    """
    Shared backend for the module-level runctx/runfile/runpath helpers.

    Instanciates a (Thread)Profile, invokes <func_name> on it with given
    arguments, then prints annotated stats (filename is None) or dumps
    them to <filename>.
    """
    profiler_class = ThreadProfile if threads else Profile
    prof = profiler_class(verbose=verbose)
    try:
        getattr(prof, func_name)(*args, **kw)
    except SystemExit:
        # Profiled code exiting is not an error; stats still get reported.
        pass
    finally:
        if filename is None:
            prof.print_stats()
        else:
            prof.dump_stats(filename)
def runctx(cmd, globals, locals, filename=None, threads=True, verbose=False):
    """
    Similar to profile.runctx .

    cmd: python code to profile, run with given globals and locals.
    filename: if None, print annotated stats to stdout; otherwise dump
        stats to that file.
    threads: whether to also trace threads spawned by <cmd>.
    verbose: enable profiler internal tracing output.
    """
    _run(threads, verbose, 'runctx', filename, cmd, globals, locals)
def runfile(fd, argv, fd_name='<unknown>', compile_flags=0, dont_inherit=1,
        filename=None, threads=True, verbose=False):
    """
    Run code from given file descriptor with profiling enabled.
    Closes fd before executing contained code.

    filename: if None, print annotated stats to stdout; otherwise dump
        stats to that file.
    threads: whether to also trace spawned threads.
    verbose: enable profiler internal tracing output.
    """
    _run(threads, verbose, 'runfile', filename, fd, argv, fd_name,
        compile_flags, dont_inherit)
def runpath(path, argv, filename=None, threads=True, verbose=False):
    """
    Run code from open-accessible file path with profiling enabled.

    filename: if None, print annotated stats to stdout; otherwise dump
        stats to that file.
    threads: whether to also trace spawned threads.
    verbose: enable profiler internal tracing output.
    """
    _run(threads, verbose, 'runpath', filename, path, argv)
# All characters this platform treats as path separators.
_allsep = os.sep + (os.altsep or '')
def _relpath(name):
    """
    Strip absolute components (drive and leading separators) from path.
    Inspired from zipfile.write().
    """
    _, path = os.path.splitdrive(name)
    return os.path.normpath(path).lstrip(_allsep)
def _main(argv, stdin=None):
    """
    Command-line implementation: parse <argv>, profile the requested
    script/module, then emit annotated-text or callgrind output.

    argv: full command line, argv[0] being the program name.
    stdin: file-ish or None; when given together with "-m -", profiled
        code is read from it (used internally by the %%pprofile magic).
    """
    # Maps output format name to the profiler method producing it.
    format_dict = {
        'text': 'annotate',
        'callgrind': 'callgrind',
    }
    parser = argparse.ArgumentParser(argv[0])
    parser.add_argument('script', help='Python script to execute (optionaly '
        'followed by its arguments)', nargs='?')
    parser.add_argument('argv', nargs=argparse.REMAINDER)
    parser.add_argument('-o', '--out', default='-',
        help='Write annotated sources to this file. Defaults to stdout.')
    parser.add_argument('-z', '--zipfile',
        help='Name of a zip file to generate from all involved source files. '
        'Useful with callgrind output.')
    parser.add_argument('-t', '--threads', default=1, type=int, help='If '
        'non-zero, trace threads spawned by program. Default: %(default)s')
    parser.add_argument('-f', '--format', choices=format_dict,
        help='Format in which output is generated. If not set, auto-detected '
        'from filename if provided, falling back to "text".')
    parser.add_argument('-v', '--verbose', action='store_true',
        help='Enable profiler internal tracing output. Cryptic and verbose.')
    parser.add_argument('-s', '--statistic', default=0, type=float,
        help='Use this period for statistic profiling, or use deterministic '
        'profiling when 0.')
    parser.add_argument('-m', dest='module',
        help='Searches sys.path for the named module and runs the '
        'corresponding .py file as a script. When given, positional arguments '
        'become sys.argv[1:]')
    group = parser.add_argument_group(
        title='Filtering',
        description='Allows excluding (and re-including) code from '
        '"file names" matching regular expressions. '
        '"file name" follows the semantics of python\'s "co_filename": '
        'it may be a valid path, of an existing or non-existing file, '
        'but it may be some arbitrary string too.'
    )
    group.add_argument('--exclude-syspath', action='store_true',
        help='Exclude all from default "sys.path". Beware: this will also '
        'exclude properly-installed non-standard modules, which may not be '
        'what you want.')
    group.add_argument('--exclude', action='append', default=[],
        help='Exclude files whose name starts with any pattern.')
    group.add_argument('--include', action='append', default=[],
        help='Include files whose name would have otherwise excluded. '
        'If no exclusion was specified, all paths are excluded first.')
    options = parser.parse_args(argv[1:])
    if options.exclude_syspath:
        options.exclude.extend('^' + re.escape(x) for x in sys.path)
    if options.include and not options.exclude:
        options.exclude.append('') # All-matching regex
    if options.verbose:
        if options.exclude:
            print('Excluding:', file=sys.stderr)
            for regex in options.exclude:
                print('\t' + regex, file=sys.stderr)
            if options.include:
                print('But including:', file=sys.stderr)
                for regex in options.include:
                    print('\t' + regex, file=sys.stderr)
    # Choose the runner method and its arguments: plain script path,
    # stdin-fed code ("-m -"), or named module.
    if options.module is None:
        if options.script is None:
            parser.error('too few arguments')
        args = [options.script] + options.argv
        runner_method_kw = {
            'path': args[0],
            'argv': args,
        }
        runner_method_id = 'runpath'
    elif stdin is not None and options.module == '-':
        # Undocumented way of using -m, used internally by %%pprofile
        args = ['<stdin>']
        if options.script is not None:
            args.append(options.script)
        args.extend(options.argv)
        import __main__
        runner_method_kw = {
            'fd': stdin,
            'argv': args,
            'fd_name': '<stdin>',
            'globals': __main__.__dict__,
        }
        runner_method_id = 'runfile'
    else:
        args = [options.module]
        if options.script is not None:
            args.append(options.script)
        args.extend(options.argv)
        runner_method_kw = {
            'module': options.module,
            'argv': args,
        }
        runner_method_id = 'runmodule'
    if options.format is None:
        # Auto-detect output format from the output file name.
        if _isCallgrindName(options.out):
            options.format = 'callgrind'
        else:
            options.format = 'text'
    # Strip absolute path components only for callgrind output paired with
    # a source zip, so emitted paths match the zip's internal layout.
    relative_path = options.format == 'callgrind' and options.zipfile
    if options.statistic:
        prof = StatisticalProfile()
        runner = StatisticalThread(
            profiler=prof,
            period=options.statistic,
            single=not options.threads,
        )
    else:
        if options.threads:
            klass = ThreadProfile
        else:
            klass = Profile
        prof = runner = klass(verbose=options.verbose)
    try:
        getattr(runner, runner_method_id)(**runner_method_kw)
    finally:
        # Emit profiling results even if the profiled code raised.
        if options.out == '-':
            out = EncodeOrReplaceWriter(sys.stdout)
            close = lambda: None
        else:
            out = io.open(options.out, 'w', errors='replace')
            close = out.close
        if options.exclude:
            exclusion_search_list = [
                re.compile(x).search for x in options.exclude
            ]
            include_search_list = [
                re.compile(x).search for x in options.include
            ]
            # Keep a file when no exclusion matches, or an inclusion does.
            filename_set = {
                x for x in prof.getFilenameSet()
                if not (
                    any(y(x) for y in exclusion_search_list) and
                    not any(y(x) for y in include_search_list)
                )
            }
        else:
            filename_set = None
        commandline = quoteCommandline(args)
        getattr(prof, format_dict[options.format])(
            out,
            filename=filename_set,
            # python2 repr returns bytes, python3 repr returns unicode
            commandline=getattr(
                commandline,
                'decode',
                lambda _: commandline,
            )('ascii'),
            relative_path=relative_path,
        )
        close()
        zip_path = options.zipfile
        if zip_path:
            if relative_path:
                convertPath = _relpath
            else:
                convertPath = lambda x: x
            with zipfile.ZipFile(
                zip_path,
                mode='w',
                compression=zipfile.ZIP_DEFLATED,
            ) as zip_file:
                for name, lines in prof.iterSource():
                    zip_file.writestr(
                        convertPath(name),
                        ''.join(lines)
                    )
    if options.statistic and not runner.clean_exit:
        # Mostly useful for regression testing, as exceptions raised in threads
        # do not change exit status.
        sys.exit(1)
def pprofile(line, cell=None):
    """
    Profile line execution.

    IPython line/cell magic. As a line magic, profiles <line> via the
    module-level run(). As a cell magic, <line> holds pprofile command-line
    arguments and the cell body is the code to profile.
    """
    if cell is None:
        # TODO: detect and use arguments (statistical profiling, ...) ?
        return run(line)
    return _main(
        ['%%pprofile', '-m', '-'] + shlex.split(line),
        io.StringIO(cell),
    )
# Register the magic when running under IPython; register_line_cell_magic
# is a no-op stub when IPython could not be imported.
try:
    register_line_cell_magic(pprofile)
except Exception:
    # ipython can be imported, but may not be currently running.
    pass
# Only registration needs the name; do not expose it at module level.
del pprofile
def main():
    """Console-script entry point: run _main with process arguments."""
    _main(sys.argv)
if __name__ == '__main__':
    main()
|
vpelletier/pprofile | pprofile.py | runctx | python | def runctx(cmd, globals, locals, filename=None, threads=True, verbose=False):
_run(threads, verbose, 'runctx', filename, cmd, globals, locals) | Similar to profile.runctx . | train | https://github.com/vpelletier/pprofile/blob/51a36896727565faf23e5abccc9204e5f935fe1e/pprofile.py#L1186-L1188 | [
"def _run(threads, verbose, func_name, filename, *args, **kw):\n if threads:\n klass = ThreadProfile\n else:\n klass = Profile\n prof = klass(verbose=verbose)\n try:\n try:\n getattr(prof, func_name)(*args, **kw)\n except SystemExit:\n pass\n finally:\n if filename is None:\n prof.print_stats()\n else:\n prof.dump_stats(filename)\n"
] | #!/usr/bin/env python
# Copyright (C) 2013-2018 Vincent Pelletier <plr.vincent@gmail.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
pprofile - Line-granularity, thread-aware deterministic and statistic
pure-python profiler
Usage as a command line:
$ pprofile --exclude-syspath some_python_executable arg1 ...
$ pprofile --exclude-syspath -m some_python_module -- arg1 ...
$ python -m pprofile --exclude-syspath some_python_executable arg1 ...
$ python -m pprofile -m some_python_module -- arg1 ...
See --help for all options.
Usage as a python module:
Deterministic profiling:
>>> prof = pprofile.Profile()
>>> with prof():
>>> # Code to profile
>>> prof.print_stats()
Statistic profiling:
>>> prof = StatisticalProfile()
>>> with prof():
>>> # Code to profile
>>> prof.print_stats()
"""
from __future__ import print_function, division
from collections import defaultdict, deque
from functools import partial, wraps
# Note: use time, not clock.
# Clock, at least on linux, ignores time not spent executing code
# (ex: time.sleep()). The goal of pprofile is not to profile python code
# execution as such (ie, to improve python interpreter), but to profile a
# possibly complex application, with its (IO) waits, sleeps, (...) so a
# developper can understand what is slow rather than what keeps the cpu busy.
# So using the wall-clock as a way to measure time spent is more meaningful.
# XXX: This said, if time() lacks precision, a better but likely
# platform-dependent wall-clock time source must be identified and used.
from time import time
from warnings import warn
import argparse
import io
import inspect
from itertools import count
import linecache
import os
# not caught by 2to3, likely because pipes.quote is not documented in python 2
try:
from pipes import quote as shlex_quote # Python 2
except ImportError:
from shlex import quote as shlex_quote # Python 3
import platform
import re
import runpy
import shlex
from subprocess import list2cmdline as windows_list2cmdline
import sys
import threading
import zipfile
try:
from IPython.core.magic import register_line_cell_magic
except ImportError:
register_line_cell_magic = lambda x: x
__all__ = (
'ProfileBase', 'ProfileRunnerBase', 'Profile', 'ThreadProfile',
'StatisticProfile', 'StatisticThread', 'run', 'runctx', 'runfile',
'runpath',
)
class BaseLineIterator(object):
def __init__(self, getline, filename, global_dict):
self._getline = getline
self._filename = filename
self._global_dict = global_dict
self._lineno = 1
def __iter__(self):
return self
def next(self):
lineno = self._lineno
self._lineno += 1
return lineno, self._getline(self._filename, lineno, self._global_dict)
if sys.version_info < (3, ):
import codecs
# Find coding specification (see PEP-0263)
_matchCoding = re.compile(
r'^[ \t\f]*#.*?coding[:=][ \t]*([-_.a-zA-Z0-9]+)',
).match
class LineIterator(BaseLineIterator):
_encoding = None
def __init__(self, *args, **kw):
super(LineIterator, self).__init__(*args, **kw)
# Identify encoding.
first_line = self._getline(self._filename, 1, self._global_dict)
if isinstance(first_line, bytes):
# BOM - python2 only detects the (discouraged) UTF-8 BOM
if first_line.startswith(codecs.BOM_UTF8):
self._encoding = 'utf-8'
else:
# PEP-0263: "the first or second line must match [_matchCoding]"
match = _matchCoding(first_line)
if match is None:
match = _matchCoding(
self._getline(self._filename, 2, self._global_dict),
)
if match is None:
self._encoding = 'ascii'
else:
self._encoding = match.group(1)
# else, first line is unicode.
def next(self):
lineno, line = super(LineIterator, self).next()
if self._encoding:
line = line.decode(self._encoding, errors='replace')
return lineno, line
else:
# getline returns unicode objects, nothing to do
LineIterator = BaseLineIterator
if platform.system() == 'Windows':
quoteCommandline = windows_list2cmdline
else:
def quoteCommandline(commandline):
return ' '.join(shlex_quote(x) for x in commandline)
class EncodeOrReplaceWriter(object):
"""
Write-only file-ish object which replaces unsupported chars when
underlying file rejects them.
"""
def __init__(self, out):
self._encoding = getattr(out, 'encoding', None) or 'ascii'
self._write = out.write
def write(self, data):
try:
self._write(data)
except UnicodeEncodeError:
self._write(
data.encode(
self._encoding,
errors='replace',
).decode(self._encoding),
)
def _isCallgrindName(filepath):
return os.path.basename(filepath).startswith('cachegrind.out.')
class _FileTiming(object):
"""
Accumulation of profiling statistics (line and call durations) for a given
source "file" (unique global dict).
Subclasses should be aware that:
- this classes uses __slots__, mainly for cpu efficiency (property lookup
is in a list instead of a dict)
- it can access the BaseProfile instance which created any instace using
the "profiler" property, should they share some state across source
files.
- methods on this class are profiling choke-point - keep customisations
as cheap in CPU as you can !
"""
__slots__ = ('line_dict', 'call_dict', 'filename', 'global_dict',
'profiler')
def __init__(self, filename, global_dict, profiler):
self.filename = filename
self.global_dict = global_dict
self.line_dict = defaultdict(lambda: defaultdict(lambda: [0, 0]))
self.call_dict = {}
# Note: not used in this implementation, may be used by subclasses.
self.profiler = profiler
def hit(self, code, line, duration):
"""
A line has finished executing.
code (code)
container function's code object
line (int)
line number of just executed line
duration (float)
duration of the line, in seconds
"""
entry = self.line_dict[line][code]
entry[0] += 1
entry[1] += duration
def call(self, code, line, callee_file_timing, callee, duration, frame):
"""
A call originating from this file returned.
code (code)
caller's code object
line (int)
caller's line number
callee_file_timing (FileTiming)
callee's FileTiming
callee (code)
callee's code object
duration (float)
duration of the call, in seconds
frame (frame)
calle's entire frame as of its return
"""
try:
entry = self.call_dict[(code, line, callee)]
except KeyError:
self.call_dict[(code, line, callee)] = [callee_file_timing, 1, duration]
else:
entry[1] += 1
entry[2] += duration
def getHitStatsFor(self, line):
total_hits = total_duration = 0
for hits, duration in self.line_dict.get(line, {}).itervalues():
total_hits += hits
total_duration += duration
return total_hits, total_duration
def getLastLine(self):
return max(
max(self.line_dict) if self.line_dict else 0,
max(x for _, x, _ in self.call_dict) if self.call_dict else 0,
)
def iterHits(self):
for line, code_dict in self.line_dict.iteritems():
for code, (hits, duration) in code_dict.iteritems():
yield line, code, hits, duration
def iterCalls(self):
for (code, line, callee), (callee_file_timing, hit, duration) in \
self.call_dict.iteritems():
yield (
line,
code,
hit, duration,
callee_file_timing.filename, callee,
)
def getCallListByLine(self):
result = defaultdict(list)
for line, code, hit, duration, callee_filename, callee in self.iterCalls():
result[line].append((
code,
hit, duration,
callee_filename, callee,
))
return result
def getTotalTime(self):
return sum(
y[1]
for x in self.line_dict.itervalues()
for y in x.itervalues()
)
def getTotalHitCount(self):
return sum(
y[0]
for x in self.line_dict.itervalues()
for y in x.itervalues()
)
def getSortKey(self):
# total duration first, then total hit count for statistical profiling
result = [0, 0]
for entry in self.line_dict.itervalues():
for hit, duration in entry.itervalues():
result[0] += duration
result[1] += hit
return result
FileTiming = _FileTiming
class LocalDescriptor(threading.local):
"""
Implementation of descriptor API for thread-local properties.
"""
def __init__(self, func=None):
"""
func (callable)
If provided, called when a missing property is accessed
(ex: accessing thread never initialised that property).
If None, AttributeError is raised.
"""
super(LocalDescriptor, self).__init__()
if func is not None:
self.func = func
def __get__(self, instance, owner):
try:
return getattr(self, str(id(instance)))
except AttributeError:
# Raises AttributeError if func was not provided.
value = self.func()
setattr(self, str(id(instance)), value)
return value
def __set__(self, instance, value):
setattr(self, str(id(instance)), value)
def __delete__(self, instance):
try:
delattr(self, str(id(instance)))
except AttributeError:
pass
_ANNOTATE_HEADER = \
u'%6s|%10s|' \
u'%13s|%13s|%7s|' \
u'Source code' % (
u'Line #', u'Hits',
u'Time', u'Time per hit', u'%',
)
_ANNOTATE_HORIZONTAL_LINE = u''.join(x == u'|' and u'+' or u'-'
for x in _ANNOTATE_HEADER)
_ANNOTATE_FORMAT = \
u'%(lineno)6i|%(hits)10i|' \
u'%(time)13g|%(time_per_hit)13g|%(percent)6.2f%%|' \
u'%(line)s'
_ANNOTATE_CALL_FORMAT = \
u'(call)|%(hits)10i|' \
u'%(time)13g|%(time_per_hit)13g|%(percent)6.2f%%|' \
u'# %(callee_file)s:%(callee_line)s %(callee_name)s'
def _initStack():
# frame_time: when current frame execution started/resumed last
# frame_discount: time discounted from current frame, because it appeared
# lower in the call stack from the same callsite
# lineno: latest line which execution started
# line_time: time at which latest line started being executed
# line_duration: total time spent in current line up to last resume
now = time()
return (deque([[now, 0, None, now, 0]]), defaultdict(deque))
def _verboseProfileDecorator(self):
def decorator(func):
@wraps(func)
def wrapper(frame, event, arg):
self._traceEvent(frame, event)
return func(frame, event, arg)
return wrapper
return decorator
class ProfileBase(object):
"""
Methods common to deterministic and statistic profiling.
Subclasses can override the "FileTiming" property to use a different class.
"""
__slots__ = (
'file_dict',
'global_dict',
'total_time',
'__dict__',
'__weakref__',
'merged_file_dict',
)
FileTiming = _FileTiming
def __init__(self):
self.file_dict = {}
self.merged_file_dict = {}
self.global_dict = {}
self.total_time = 0
def _getFileTiming(self, frame):
try:
return self.global_dict[id(frame.f_globals)]
except KeyError:
f_globals = frame.f_globals
name = self._getFilename(frame)
self.global_dict[id(f_globals)] = file_timing = self.FileTiming(
name,
f_globals,
self,
)
# file_dict modifications must be thread-safe to not lose measures.
# setdefault is atomic, append is atomic.
self.file_dict.setdefault(name, []).append(file_timing)
return file_timing
@staticmethod
def _getFilename(frame):
"""
Overload in subclasses to customise filename generation.
"""
return frame.f_code.co_filename
@staticmethod
def _getline(filename, lineno, global_dict):
"""
Overload in subclasses to customise source retrieval.
"""
return linecache.getline(filename, lineno, global_dict)
def _mergeFileTiming(self, rebuild=False):
merged_file_dict = self.merged_file_dict
if merged_file_dict and not rebuild:
return merged_file_dict
merged_file_dict.clear()
# Regroup by module, to find all duplicates from other threads.
by_global_dict = defaultdict(list)
for file_timing_list in self.file_dict.itervalues():
for file_timing in file_timing_list:
by_global_dict[
id(file_timing.global_dict)
].append(
file_timing,
)
# Resolve name conflicts.
global_to_named_dict = {}
for global_dict_id, file_timing_list in by_global_dict.iteritems():
file_timing = file_timing_list[0]
name = file_timing.filename
if name in merged_file_dict:
counter = count()
base_name = name
while name in merged_file_dict:
name = base_name + '_%i' % next(counter)
global_to_named_dict[global_dict_id] = merged_file_dict[name] = FileTiming(
name,
file_timing.global_dict,
file_timing.profiler, # Note: should be self
)
# Add all file timings from one module together under its
# deduplicated name. This needs to happen after all names
# are generated and all empty file timings are created so
# call events cross-references can be remapped.
for merged_file_timing in merged_file_dict.itervalues():
line_dict = merged_file_timing.line_dict
for file_timing in by_global_dict[id(merged_file_timing.global_dict)]:
for line, other_code_dict in file_timing.line_dict.iteritems():
code_dict = line_dict[line]
for code, (
other_hits,
other_duration,
) in other_code_dict.iteritems():
entry = code_dict[code]
entry[0] += other_hits
entry[1] += other_duration
call_dict = merged_file_timing.call_dict
for key, (
other_callee_file_timing,
other_hits,
other_duration,
) in file_timing.call_dict.iteritems():
try:
entry = call_dict[key]
except KeyError:
entry = call_dict[key] = [
global_to_named_dict[
id(other_callee_file_timing.global_dict)
],
other_hits,
other_duration,
]
else:
entry[1] += other_hits
entry[2] += other_duration
return merged_file_dict
def getFilenameSet(self):
"""
Returns a set of profiled file names.
Note: "file name" is used loosely here. See python documentation for
co_filename, linecache module and PEP302. It may not be a valid
filesystem path.
"""
result = set(self._mergeFileTiming())
# Ignore profiling code. __file__ does not always provide consistent
# results with f_code.co_filename (ex: easy_install with zipped egg),
# so inspect current frame instead.
# Get current file from one of pprofile methods. Compatible with
# implementations that do not have the inspect.currentframe() method
# (e.g. IronPython).
# XXX: Assumes that all of pprofile code is in a single file.
# XXX: Assumes that _initStack exists in pprofile module.
result.discard(inspect.getsourcefile(_initStack))
return result
def _getFileNameList(self, filename, may_sort=True):
if filename is None:
filename = self.getFilenameSet()
elif isinstance(filename, basestring):
return [filename]
if may_sort:
try:
# Detect if filename is an ordered data type.
filename[:0]
except TypeError:
# Not ordered, sort.
file_dict = self._mergeFileTiming()
filename = sorted(filename, reverse=True,
key=lambda x: file_dict[x].getSortKey()
)
return filename
def callgrind(self, out, filename=None, commandline=None, relative_path=False):
"""
Dump statistics in callgrind format.
Contains:
- per-line hit count, time and time-per-hit
- call associations (call tree)
Note: hit count is not inclusive, in that it is not the sum of all
hits inside that call.
Time unit: microsecond (1e-6 second).
out (file-ish opened for writing)
Destination of callgrind profiling data.
filename (str, collection of str)
If provided, dump stats for given source file(s) only.
By default, list for all known files.
commandline (anything with __str__)
If provided, will be output as the command line used to generate
this profiling data.
relative_path (bool)
When True, absolute elements are stripped from path. Useful when
maintaining several copies of source trees with their own
profiling result, so kcachegrind does not look in system-wide
files which may not match with profiled code.
"""
print(u'# callgrind format', file=out)
print(u'version: 1', file=out)
print(u'creator: pprofile', file=out)
print(u'event: usphit :microseconds/hit', file=out)
print(u'events: hits microseconds usphit', file=out)
if commandline is not None:
print(u'cmd:', commandline, file=out)
file_dict = self._mergeFileTiming()
if relative_path:
convertPath = _relpath
else:
convertPath = lambda x: x
if os.path.sep != "/":
# qCacheGrind (windows build) needs at least one UNIX separator
# in path to find the file. Adapt here even if this is probably
# more of a qCacheGrind issue...
convertPath = lambda x, cascade=convertPath: cascade(
'/'.join(x.split(os.path.sep))
)
code_to_name_dict = {}
homonym_counter = {}
def getCodeName(filename, code):
# Tracks code objects globally, because callee information needs
# to be consistent accross files.
# Inside a file, grants unique names to each code object.
try:
return code_to_name_dict[code]
except KeyError:
name = code.co_name + ':%i' % code.co_firstlineno
key = (filename, name)
homonym_count = homonym_counter.get(key, 0)
if homonym_count:
name += '_%i' % homonym_count
homonym_counter[key] = homonym_count + 1
code_to_name_dict[code] = name
return name
for current_file in self._getFileNameList(filename, may_sort=False):
file_timing = file_dict[current_file]
print(u'fl=%s' % convertPath(current_file), file=out)
# When a local callable is created an immediately executed, this
# loop would start a new "fn=" section but would not end it before
# emitting "cfn=" lines, making the callee appear as not being
# called by interrupted "fn=" section.
# So dispatch all functions in a first pass, and build
# uninterrupted sections in a second pass.
# Note: cost line is a list just to be mutable. A single item is
# expected.
func_dict = defaultdict(lambda: defaultdict(lambda: ([], [])))
for lineno, code, hits, duration in file_timing.iterHits():
func_dict[getCodeName(current_file, code)][lineno][0].append(
(hits, int(duration * 1000000)),
)
for (
lineno,
caller,
call_hits, call_duration,
callee_file, callee,
) in file_timing.iterCalls():
call_ticks = int(call_duration * 1000000)
func_call_list = func_dict[
getCodeName(current_file, caller)
][lineno][1]
append = func_call_list.append
append(u'cfl=' + convertPath(callee_file))
append(u'cfn=' + getCodeName(callee_file, callee))
append(u'calls=%i %i' % (call_hits, callee.co_firstlineno))
append(u'%i %i %i %i' % (lineno, call_hits, call_ticks, call_ticks // call_hits))
for func_name, line_dict in func_dict.iteritems():
print(u'fn=%s' % func_name, file=out)
for lineno, (func_hit_list, func_call_list) in sorted(line_dict.iteritems()):
if func_hit_list:
# Multiple function objects may "reside" on the same
# line of the same file (same global dict).
# Sum these up and produce a single cachegrind event.
hits = sum(x for x, _ in func_hit_list)
ticks = sum(x for _, x in func_hit_list)
print(
u'%i %i %i %i' % (
lineno,
hits,
ticks,
ticks // hits,
),
file=out,
)
for line in func_call_list:
print(line, file=out)
    def annotate(self, out, filename=None, commandline=None, relative_path=False):
        """
        Dump annotated source code with current profiling statistics to "out"
        file.
        Time unit: second.
        out (file-ish opened for writing)
            Destination of annotated sources.
        filename (str, collection of str)
            If provided, dump stats for given source file(s) only.
            If unordered collection, it will get sorted by decreasing total
            file score (total time if available, then total hit count).
            By default, list for all known files.
        commandline (anything with __str__)
            If provided, will be output as the command line used to generate
            this annotation.
        relative_path (bool)
            For compatibility with callgrind. Ignored.
        """
        file_dict = self._mergeFileTiming()
        total_time = self.total_time
        if commandline is not None:
            print(u'Command line:', commandline, file=out)
        print(u'Total duration: %gs' % total_time, file=out)
        # Nothing was profiled: no per-file section to emit (and percentages
        # would divide by zero).
        if not total_time:
            return
        def percent(value, scale):
            # Guard against zero scale (e.g. a file with no recorded time).
            if scale == 0:
                return 0
            return value * 100 / scale
        for name in self._getFileNameList(filename):
            file_timing = file_dict[name]
            file_total_time = file_timing.getTotalTime()
            call_list_by_line = file_timing.getCallListByLine()
            print(u'File: %s' % name, file=out)
            print(u'File duration: %gs (%.2f%%)' % (file_total_time,
                percent(file_total_time, total_time)), file=out)
            print(_ANNOTATE_HEADER, file=out)
            print(_ANNOTATE_HORIZONTAL_LINE, file=out)
            # Keep iterating past EOF-looking blank lines until the last line
            # which has recorded hits or calls has been printed.
            last_line = file_timing.getLastLine()
            for lineno, line in LineIterator(
                self._getline,
                file_timing.filename,
                file_timing.global_dict,
            ):
                if not line and lineno > last_line:
                    break
                hits, duration = file_timing.getHitStatsFor(lineno)
                print(_ANNOTATE_FORMAT % {
                    u'lineno': lineno,
                    u'hits': hits,
                    u'time': duration,
                    u'time_per_hit': duration / hits if hits else 0,
                    u'percent': percent(duration, total_time),
                    u'line': (line or u'').rstrip(),
                }, file=out)
                # One extra "(call)" row per call site originating from this
                # source line.
                for (
                    _,
                    call_hits, call_duration,
                    callee_file, callee,
                ) in call_list_by_line.get(lineno, ()):
                    print(_ANNOTATE_CALL_FORMAT % {
                        u'hits': call_hits,
                        u'time': call_duration,
                        u'time_per_hit': call_duration / call_hits,
                        u'percent': percent(call_duration, total_time),
                        u'callee_file': callee_file,
                        u'callee_line': callee.co_firstlineno,
                        u'callee_name': callee.co_name,
                    }, file=out)
def _iterRawFile(self, name):
file_timing = self._mergeFileTiming()[name]
for lineno in count(1):
line = self._getline(file_timing.filename, lineno,
file_timing.global_dict)
if not line:
break
yield line
def iterSource(self):
"""
Iterator over all involved files.
Yields 2-tuple composed of file path and an iterator over
(non-annotated) source lines.
Can be used to generate a file tree for use with kcachegrind, for
example.
"""
for name in self.getFilenameSet():
yield name, self._iterRawFile(name)
# profile/cProfile-like API
def dump_stats(self, filename):
"""
Similar to profile.Profile.dump_stats - but different output format !
"""
if _isCallgrindName(filename):
with open(filename, 'w') as out:
self.callgrind(out)
else:
with io.open(filename, 'w', errors='replace') as out:
self.annotate(out)
def print_stats(self):
"""
Similar to profile.Profile.print_stats .
Returns None.
"""
self.annotate(EncodeOrReplaceWriter(sys.stdout))
class ProfileRunnerBase(object):
    """
    Mixin providing profile/cProfile-like entry points (runctx, runcall,
    runfile, runpath, runmodule) on top of a context-manager profiler.
    Subclasses must implement __enter__/__exit__ to start and stop
    profiling around executed code.
    """
    def __call__(self):
        # Allows using one instance both as "with profiler:" and
        # "with profiler():".
        return self
    def __enter__(self):
        raise NotImplementedError
    def __exit__(self, exc_type, exc_val, exc_tb):
        raise NotImplementedError
    # profile/cProfile-like API
    def runctx(self, cmd, globals, locals):
        """Similar to profile.Profile.runctx ."""
        with self():
            exec(cmd, globals, locals)
        return self
    def runcall(self, func, *args, **kw):
        """Similar to profile.Profile.runcall ."""
        with self():
            return func(*args, **kw)
    def runfile(self, fd, argv, fd_name='<unknown>', compile_flags=0,
            dont_inherit=1, globals=None):
        """
        Profile code read from an open file object.
        fd is closed once its content has been read, before execution
        starts. argv temporarily replaces sys.argv during execution.
        globals, if provided, is copied and never mutated.
        """
        with fd:
            code = compile(fd.read(), fd_name, 'exec', flags=compile_flags,
                dont_inherit=dont_inherit)
        original_sys_argv = list(sys.argv)
        # Bugfix: the default used to be a mutable "globals={}" argument;
        # use a None sentinel instead so no dict is shared across calls.
        ctx_globals = {} if globals is None else globals.copy()
        ctx_globals['__file__'] = fd_name
        ctx_globals['__name__'] = '__main__'
        ctx_globals['__package__'] = None
        try:
            sys.argv[:] = argv
            return self.runctx(code, ctx_globals, None)
        finally:
            sys.argv[:] = original_sys_argv
    def runpath(self, path, argv):
        """
        Profile code from a file path, with its directory temporarily
        prepended to sys.path (mimicking direct script execution).
        """
        original_sys_path = list(sys.path)
        try:
            sys.path.insert(0, os.path.dirname(path))
            return self.runfile(open(path, 'rb'), argv, fd_name=path)
        finally:
            sys.path[:] = original_sys_path
    def runmodule(self, module, argv):
        """
        Profile a module executed as a script ("python -m" style), with
        sys.argv and sys.path[0] temporarily adjusted.
        """
        original_sys_argv = list(sys.argv)
        original_sys_path0 = sys.path[0]
        try:
            sys.path[0] = os.getcwd()
            sys.argv[:] = argv
            with self():
                runpy.run_module(module, run_name='__main__', alter_sys=True)
        finally:
            sys.argv[:] = original_sys_argv
            sys.path[0] = original_sys_path0
        return self
class Profile(ProfileBase, ProfileRunnerBase):
    """
    Deterministic, recursive, line-granularity, profiling class.
    Does not require any source code change to work.
    If the performance hit is too large, it can benefit from some
    integration (calling enable/disable around selected code chunks).
    The sum of time spent in all profiled lines is less than the total
    profiled time reported. This is (part of) profiling overhead.
    This also means that sum of time-spent-on-line percentage is less than 100%.
    All times are "internal time", ie they do not count time spent inside
    called (profilable, so python) functions.
    """
    # __slots__ keeps attribute lookup cheap inside the trace hooks,
    # which run for every profiled event.
    __slots__ = (
        '_global_trace',
        '_local_trace',
        'stack',
        'enabled_start',
    )
    def __init__(self, verbose=False):
        super(Profile, self).__init__()
        # In verbose mode, wrap both trace hooks so every event is logged
        # to stderr (see _traceEvent) before being processed.
        if verbose:
            self._global_trace = _verboseProfileDecorator(self)(
                self._real_global_trace)
            self._local_trace = _verboseProfileDecorator(self)(
                self._real_local_trace)
        else:
            self._global_trace = self._real_global_trace
            self._local_trace = self._real_local_trace
        self.stack = None
        self.enabled_start = None
    def _enable(self):
        """
        Overload this method when subclassing. Called before actually
        enabling trace.
        """
        self.stack = _initStack()
        self.enabled_start = time()
    def enable(self):
        """
        Enable profiling.
        """
        if self.enabled_start:
            warn('Duplicate "enable" call')
        else:
            self._enable()
            sys.settrace(self._global_trace)
    def _disable(self):
        """
        Overload this method when subclassing. Called after actually disabling
        trace.
        """
        self.total_time += time() - self.enabled_start
        self.enabled_start = None
        del self.stack
    def disable(self):
        """
        Disable profiling.
        """
        if self.enabled_start:
            sys.settrace(None)
            self._disable()
        else:
            warn('Duplicate "disable" call')
    def __enter__(self):
        """
        __enter__() -> self
        """
        self.enable()
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        """
        __exit__(*excinfo) -> None. Disables profiling.
        """
        self.disable()
    def _traceEvent(self, frame, event):
        """
        Verbose-mode helper: log one trace event to stderr, indented
        proportionally to the current profiling stack depth.
        """
        f_code = frame.f_code
        lineno = frame.f_lineno
        print('%10.6f%s%s %s:%s %s+%s' % (
            time() - self.enabled_start,
            ' ' * len(self.stack[0]),
            event,
            f_code.co_filename,
            lineno,
            f_code.co_name,
            lineno - f_code.co_firstlineno,
        ), file=sys.stderr)
    def _real_global_trace(self, frame, event, arg):
        """
        'call' trace hook: push a new stack entry for the callee and
        suspend the caller's current-line accounting.
        """
        local_trace = self._local_trace
        if local_trace is not None:
            event_time = time()
            # New entry: [frame_time, frame_discount, lineno, line_time,
            # line_duration] (see _initStack for field meanings).
            callee_entry = [event_time, 0, frame.f_lineno, event_time, 0]
            stack, callee_dict = self.stack
            try:
                caller_entry = stack[-1]
            except IndexError:
                pass
            else:
                # Suspend caller frame
                frame_time, frame_discount, lineno, line_time, line_duration = caller_entry
                caller_entry[4] = event_time - line_time + line_duration
                callee_dict[(frame.f_back.f_code, frame.f_code)].append(callee_entry)
            stack.append(callee_entry)
        return local_trace
    def _real_local_trace(self, frame, event, arg):
        """
        'line'/'return' trace hook: account the duration of the line which
        just finished, then either start timing the next line or pop the
        stack entry and credit the caller with the call's duration.
        """
        if event == 'line' or event == 'return':
            event_time = time()
            stack, callee_dict = self.stack
            try:
                stack_entry = stack[-1]
            except IndexError:
                warn('Profiling stack underflow, disabling.')
                self.disable()
                return None
            frame_time, frame_discount, lineno, line_time, line_duration = stack_entry
            file_timing = self._getFileTiming(frame)
            file_timing.hit(frame.f_code, lineno,
                event_time - line_time + line_duration)
            if event == 'line':
                # Start a new line
                stack_entry[2] = frame.f_lineno
                stack_entry[3] = event_time
                stack_entry[4] = 0
            else:
                # 'return' event, <frame> is still callee
                # Resume caller frame
                stack.pop()
                stack[-1][3] = event_time
                caller_frame = frame.f_back
                caller_code = caller_frame.f_code
                callee_code = frame.f_code
                callee_entry_list = callee_dict[(caller_code, callee_code)]
                callee_entry_list.pop()
                call_duration = event_time - frame_time
                if callee_entry_list:
                    # Callee is also somewhere up the stack, so discount this
                    # call duration from it.
                    callee_entry_list[-1][1] += call_duration
                self._getFileTiming(caller_frame).call(
                    caller_code, caller_frame.f_lineno,
                    file_timing,
                    callee_code, call_duration - frame_discount,
                    frame,
                )
        return self._local_trace
    # profile/cProfile-like API
    def run(self, cmd):
        """Similar to profile.Profile.run ."""
        import __main__
        dikt = __main__.__dict__
        return self.runctx(cmd, dikt, dikt)
class ThreadProfile(Profile):
    """
    threading.Thread-aware version of Profile class.
    Threads started after enable() call will be profiled.
    After disable() call, threads will need to be switched into and trigger a
    trace event (typically a "line" event) before they can notice the
    disabling.
    """
    __slots__ = ('_local_trace_backup', )
    # Per-thread profiling state: each thread gets its own stack and
    # global_dict through thread-local descriptors.
    stack = LocalDescriptor(_initStack)
    global_dict = LocalDescriptor(dict)
    def __init__(self, **kw):
        super(ThreadProfile, self).__init__(**kw)
        # Keep a reference to the real local trace so it can be restored
        # after _disable() replaced it with None.
        self._local_trace_backup = self._local_trace
    def _enable(self):
        # Restore the local trace before installing the threading-level
        # hook, so threads started from now on get profiled.
        self._local_trace = self._local_trace_backup
        threading.settrace(self._global_trace)
        super(ThreadProfile, self)._enable()
    def _disable(self):
        super(ThreadProfile, self)._disable()
        threading.settrace(None)
        # Already-running threads see this on their next trace event and
        # stop profiling (see class docstring).
        self._local_trace = None
class StatisticProfile(ProfileBase, ProfileRunnerBase):
    """
    Statistic profiling class.
    This class does not gather its own samples by itself.
    Instead, it must be provided with call stacks (as returned by
    sys._getframe() or sys._current_frames()).
    """
    def __init__(self):
        super(StatisticProfile, self).__init__()
        # Durations are meaningless in statistic mode; keep a constant
        # non-zero total so percentages can still be computed.
        self.total_time = 1
    def sample(self, frame):
        """
        Record one stack sample: a zero-duration hit on the innermost
        sampled line, plus a zero-duration call edge for each
        caller/callee pair up the stack.
        """
        getFileTiming = self._getFileTiming
        callee_timing = getFileTiming(frame)
        callee_code = frame.f_code
        callee_timing.hit(callee_code, frame.f_lineno, 0)
        caller = frame.f_back
        while caller is not None:
            caller_timing = getFileTiming(caller)
            caller_code = caller.f_code
            caller_timing.call(caller_code, caller.f_lineno,
                callee_timing, callee_code, 0, frame)
            callee_timing = caller_timing
            callee_code = caller_code
            frame = caller
            caller = frame.f_back
    def __call__(self, period=.001, single=True, group=None, name=None):
        """
        Instanciate StatisticThread.
        >>> s_profile = StatisticProfile()
        >>> with s_profile(single=False):
        >>>    # Code to profile
        Is equivalent to:
        >>> s_profile = StatisticProfile()
        >>> s_thread = StatisticThread(profiler=s_profile, single=False)
        >>> with s_thread:
        >>>    # Code to profile
        """
        return StatisticThread(
            profiler=self, period=period, single=single, group=group,
            name=name,
        )
# BBB
StatisticalProfile = StatisticProfile
class StatisticThread(threading.Thread, ProfileRunnerBase):
    """
    Usage in a nutshell:
      with StatisticThread() as profiler_thread:
        # do stuff
      profiler_thread.profiler.print_stats()
    """
    __slots__ = (
        '_test',
        '_start_time',
        'clean_exit',
    )
    def __init__(self, profiler=None, period=.001, single=True, group=None, name=None):
        """
        profiler (None or StatisticProfile instance)
            Available on instances as the "profiler" read-only property.
            If None, a new profiler instance will be created.
        period (float)
            How many seconds to wait between consecutive samples.
            The smaller, the more profiling overhead, but the faster results
            become meaningful.
            The larger, the less profiling overhead, but requires long profiling
            session to get meaningful results.
        single (bool)
            Profile only the thread which created this instance.
        group, name
            See Python's threading.Thread API.
        """
        if profiler is None:
            profiler = StatisticProfile()
        # _test decides, given a thread ident, whether that thread should
        # be sampled. "None" means "sample everything but the profiler
        # thread" (see run()).
        if single:
            self._test = lambda x, ident=threading.current_thread().ident: ident == x
        else:
            self._test = None
        super(StatisticThread, self).__init__(
            group=group,
            name=name,
        )
        self._stop_event = threading.Event()
        self._period = period
        self._profiler = profiler
        profiler.total_time = 0
        self.daemon = True
        self.clean_exit = False
    @property
    def profiler(self):
        # Read-only access to the profiler gathering the samples.
        return self._profiler
    def start(self):
        self.clean_exit = False
        self._can_run = True
        # Wall-clock accounting of the profiled period (closed in stop()).
        self._start_time = time()
        super(StatisticThread, self).start()
    def stop(self):
        """
        Request thread to stop.
        Does not wait for actual termination (use join() method).
        """
        if self.is_alive():
            self._can_run = False
            self._stop_event.set()
            self._profiler.total_time += time() - self._start_time
            self._start_time = None
    def __enter__(self):
        """
        __enter__() -> self
        """
        self.start()
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        """
        __exit__(*excinfo) -> None. Stops and joins profiling thread.
        """
        self.stop()
        self.join()
    def run(self):
        # Sampling loop, executed in the profiler thread itself.
        current_frames = sys._current_frames
        test = self._test
        if test is None:
            # Not in "single" mode: sample every thread except this one.
            test = lambda x, ident=threading.current_thread().ident: ident != x
        sample = self._profiler.sample
        stop_event = self._stop_event
        wait = partial(stop_event.wait, self._period)
        while self._can_run:
            for ident, frame in current_frames().iteritems():
                if test(ident):
                    sample(frame)
            # Drop the last frame reference before sleeping.
            frame = None
            wait()
        stop_event.clear()
        self.clean_exit = True
    # Deprecated pass-throughs: use self.profiler.<method> instead.
    def callgrind(self, *args, **kw):
        warn('deprecated', DeprecationWarning)
        return self._profiler.callgrind(*args, **kw)
    def annotate(self, *args, **kw):
        warn('deprecated', DeprecationWarning)
        return self._profiler.annotate(*args, **kw)
    def dump_stats(self, *args, **kw):
        warn('deprecated', DeprecationWarning)
        return self._profiler.dump_stats(*args, **kw)
    def print_stats(self, *args, **kw):
        warn('deprecated', DeprecationWarning)
        return self._profiler.print_stats(*args, **kw)
    def iterSource(self, *args, **kw):
        warn('deprecated', DeprecationWarning)
        return self._profiler.iterSource(*args, **kw)
# BBB
StatisticalThread = StatisticThread
# profile/cProfile-like API (no sort parameter !)
def _run(threads, verbose, func_name, filename, *args, **kw):
    """
    Shared driver for the module-level run/runfile/runpath helpers:
    profile by invoking <func_name> on a fresh profiler, then emit stats
    to stdout (filename=None) or to the named file.
    """
    profiler_class = ThreadProfile if threads else Profile
    prof = profiler_class(verbose=verbose)
    try:
        try:
            getattr(prof, func_name)(*args, **kw)
        except SystemExit:
            # sys.exit() in profiled code must not prevent stats output.
            pass
    finally:
        if filename is None:
            prof.print_stats()
        else:
            prof.dump_stats(filename)
def run(cmd, filename=None, threads=True, verbose=False):
    """Similar to profile.run ."""
    # cmd is executed in __main__'s namespace (see Profile.run).
    _run(threads, verbose, 'run', filename, cmd)
def runfile(fd, argv, fd_name='<unknown>', compile_flags=0, dont_inherit=1,
        filename=None, threads=True, verbose=False):
    """
    Run code from given file descriptor with profiling enabled.
    Closes fd before executing contained code.
    """
    # filename/threads/verbose control stats output and profiler choice;
    # the remaining arguments are forwarded to ProfileRunnerBase.runfile.
    _run(threads, verbose, 'runfile', filename, fd, argv, fd_name,
        compile_flags, dont_inherit)
def runpath(path, argv, filename=None, threads=True, verbose=False):
    """
    Run code from open-accessible file path with profiling enabled.
    """
    # filename/threads/verbose control stats output and profiler choice;
    # path and argv are forwarded to ProfileRunnerBase.runpath.
    _run(threads, verbose, 'runpath', filename, path, argv)
_allsep = os.sep + (os.altsep or '')
def _relpath(name):
"""
Strip absolute components from path.
Inspired from zipfile.write().
"""
return os.path.normpath(os.path.splitdrive(name)[1]).lstrip(_allsep)
def _main(argv, stdin=None):
    """
    Command-line entry point implementation.
    argv (list of str)
        Full command line, argv[0] being the program name.
    stdin (None or file-ish)
        When provided, enables the internal "-m -" mode used by the
        IPython %%pprofile magic: code is read from this object.
    """
    # Maps user-facing format names to profiler output method names.
    format_dict = {
        'text': 'annotate',
        'callgrind': 'callgrind',
    }
    # --- command-line parsing ---
    parser = argparse.ArgumentParser(argv[0])
    parser.add_argument('script', help='Python script to execute (optionaly '
        'followed by its arguments)', nargs='?')
    parser.add_argument('argv', nargs=argparse.REMAINDER)
    parser.add_argument('-o', '--out', default='-',
        help='Write annotated sources to this file. Defaults to stdout.')
    parser.add_argument('-z', '--zipfile',
        help='Name of a zip file to generate from all involved source files. '
        'Useful with callgrind output.')
    parser.add_argument('-t', '--threads', default=1, type=int, help='If '
        'non-zero, trace threads spawned by program. Default: %(default)s')
    parser.add_argument('-f', '--format', choices=format_dict,
        help='Format in which output is generated. If not set, auto-detected '
        'from filename if provided, falling back to "text".')
    parser.add_argument('-v', '--verbose', action='store_true',
        help='Enable profiler internal tracing output. Cryptic and verbose.')
    parser.add_argument('-s', '--statistic', default=0, type=float,
        help='Use this period for statistic profiling, or use deterministic '
        'profiling when 0.')
    parser.add_argument('-m', dest='module',
        help='Searches sys.path for the named module and runs the '
        'corresponding .py file as a script. When given, positional arguments '
        'become sys.argv[1:]')
    group = parser.add_argument_group(
        title='Filtering',
        description='Allows excluding (and re-including) code from '
        '"file names" matching regular expressions. '
        '"file name" follows the semantics of python\'s "co_filename": '
        'it may be a valid path, of an existing or non-existing file, '
        'but it may be some arbitrary string too.'
    )
    group.add_argument('--exclude-syspath', action='store_true',
        help='Exclude all from default "sys.path". Beware: this will also '
        'exclude properly-installed non-standard modules, which may not be '
        'what you want.')
    group.add_argument('--exclude', action='append', default=[],
        help='Exclude files whose name starts with any pattern.')
    group.add_argument('--include', action='append', default=[],
        help='Include files whose name would have otherwise excluded. '
        'If no exclusion was specified, all paths are excluded first.')
    options = parser.parse_args(argv[1:])
    # --- normalise filtering options ---
    if options.exclude_syspath:
        options.exclude.extend('^' + re.escape(x) for x in sys.path)
    if options.include and not options.exclude:
        options.exclude.append('') # All-matching regex
    if options.verbose:
        if options.exclude:
            print('Excluding:', file=sys.stderr)
            for regex in options.exclude:
                print('\t' + regex, file=sys.stderr)
        if options.include:
            print('But including:', file=sys.stderr)
            for regex in options.include:
                print('\t' + regex, file=sys.stderr)
    # --- pick how the profiled code will be run (path, stdin, or module) ---
    if options.module is None:
        if options.script is None:
            parser.error('too few arguments')
        args = [options.script] + options.argv
        runner_method_kw = {
            'path': args[0],
            'argv': args,
        }
        runner_method_id = 'runpath'
    elif stdin is not None and options.module == '-':
        # Undocumented way of using -m, used internaly by %%pprofile
        args = ['<stdin>']
        if options.script is not None:
            args.append(options.script)
        args.extend(options.argv)
        import __main__
        runner_method_kw = {
            'fd': stdin,
            'argv': args,
            'fd_name': '<stdin>',
            'globals': __main__.__dict__,
        }
        runner_method_id = 'runfile'
    else:
        args = [options.module]
        if options.script is not None:
            args.append(options.script)
        args.extend(options.argv)
        runner_method_kw = {
            'module': options.module,
            'argv': args,
        }
        runner_method_id = 'runmodule'
    # --- output format auto-detection ---
    if options.format is None:
        if _isCallgrindName(options.out):
            options.format = 'callgrind'
        else:
            options.format = 'text'
    relative_path = options.format == 'callgrind' and options.zipfile
    # --- profiler selection (statistic vs deterministic) ---
    if options.statistic:
        prof = StatisticalProfile()
        runner = StatisticalThread(
            profiler=prof,
            period=options.statistic,
            single=not options.threads,
        )
    else:
        if options.threads:
            klass = ThreadProfile
        else:
            klass = Profile
        prof = runner = klass(verbose=options.verbose)
    try:
        getattr(runner, runner_method_id)(**runner_method_kw)
    finally:
        # Stats are emitted even if the profiled code raised.
        if options.out == '-':
            out = EncodeOrReplaceWriter(sys.stdout)
            close = lambda: None
        else:
            out = io.open(options.out, 'w', errors='replace')
            close = out.close
        if options.exclude:
            # A file is kept if it matches no exclusion, or matches at
            # least one inclusion.
            exclusion_search_list = [
                re.compile(x).search for x in options.exclude
            ]
            include_search_list = [
                re.compile(x).search for x in options.include
            ]
            filename_set = {
                x for x in prof.getFilenameSet()
                if not (
                    any(y(x) for y in exclusion_search_list) and
                    not any(y(x) for y in include_search_list)
                )
            }
        else:
            filename_set = None
        commandline = quoteCommandline(args)
        getattr(prof, format_dict[options.format])(
            out,
            filename=filename_set,
            # python2 repr returns bytes, python3 repr returns unicode
            commandline=getattr(
                commandline,
                'decode',
                lambda _: commandline,
            )('ascii'),
            relative_path=relative_path,
        )
        close()
        # --- optional source-tree zip, for use next to callgrind output ---
        zip_path = options.zipfile
        if zip_path:
            if relative_path:
                convertPath = _relpath
            else:
                convertPath = lambda x: x
            with zipfile.ZipFile(
                zip_path,
                mode='w',
                compression=zipfile.ZIP_DEFLATED,
            ) as zip_file:
                for name, lines in prof.iterSource():
                    zip_file.writestr(
                        convertPath(name),
                        ''.join(lines)
                    )
    if options.statistic and not runner.clean_exit:
        # Mostly useful for regresion testing, as exceptions raised in threads
        # do not change exit status.
        sys.exit(1)
# IPython integration: "%pprofile stmt" profiles a single statement,
# "%%pprofile [options]" profiles the whole cell through _main().
def pprofile(line, cell=None):
    """
    Profile line execution.
    """
    if cell is None:
        # TODO: detect and use arguments (statistical profiling, ...) ?
        return run(line)
    return _main(
        ['%%pprofile', '-m', '-'] + shlex.split(line),
        io.StringIO(cell),
    )
try:
    register_line_cell_magic(pprofile)
except Exception:
    # ipython can be imported, but may not be currently running.
    pass
# Only the magic registration needs a reference; drop the module-level name.
del pprofile
def main():
    # Console-script entry point: forward the real command line.
    _main(sys.argv)
if __name__ == '__main__':
    main()
|
vpelletier/pprofile | pprofile.py | runfile | python | def runfile(fd, argv, fd_name='<unknown>', compile_flags=0, dont_inherit=1,
filename=None, threads=True, verbose=False):
_run(threads, verbose, 'runfile', filename, fd, argv, fd_name,
compile_flags, dont_inherit) | Run code from given file descriptor with profiling enabled.
Closes fd before executing contained code. | train | https://github.com/vpelletier/pprofile/blob/51a36896727565faf23e5abccc9204e5f935fe1e/pprofile.py#L1190-L1197 | [
"def _run(threads, verbose, func_name, filename, *args, **kw):\n if threads:\n klass = ThreadProfile\n else:\n klass = Profile\n prof = klass(verbose=verbose)\n try:\n try:\n getattr(prof, func_name)(*args, **kw)\n except SystemExit:\n pass\n finally:\n if filename is None:\n prof.print_stats()\n else:\n prof.dump_stats(filename)\n"
] | #!/usr/bin/env python
# Copyright (C) 2013-2018 Vincent Pelletier <plr.vincent@gmail.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
pprofile - Line-granularity, thread-aware deterministic and statistic
pure-python profiler
Usage as a command line:
$ pprofile --exclude-syspath some_python_executable arg1 ...
$ pprofile --exclude-syspath -m some_python_module -- arg1 ...
$ python -m pprofile --exclude-syspath some_python_executable arg1 ...
$ python -m pprofile -m some_python_module -- arg1 ...
See --help for all options.
Usage as a python module:
Deterministic profiling:
>>> prof = pprofile.Profile()
>>> with prof():
>>> # Code to profile
>>> prof.print_stats()
Statistic profiling:
>>> prof = StatisticalProfile()
>>> with prof():
>>> # Code to profile
>>> prof.print_stats()
"""
from __future__ import print_function, division
from collections import defaultdict, deque
from functools import partial, wraps
# Note: use time, not clock.
# Clock, at least on linux, ignores time not spent executing code
# (ex: time.sleep()). The goal of pprofile is not to profile python code
# execution as such (ie, to improve python interpreter), but to profile a
# possibly complex application, with its (IO) waits, sleeps, (...) so a
# developper can understand what is slow rather than what keeps the cpu busy.
# So using the wall-clock as a way to measure time spent is more meaningful.
# XXX: This said, if time() lacks precision, a better but likely
# platform-dependent wall-clock time source must be identified and used.
from time import time
from warnings import warn
import argparse
import io
import inspect
from itertools import count
import linecache
import os
# not caught by 2to3, likely because pipes.quote is not documented in python 2
try:
from pipes import quote as shlex_quote # Python 2
except ImportError:
from shlex import quote as shlex_quote # Python 3
import platform
import re
import runpy
import shlex
from subprocess import list2cmdline as windows_list2cmdline
import sys
import threading
import zipfile
try:
from IPython.core.magic import register_line_cell_magic
except ImportError:
register_line_cell_magic = lambda x: x
__all__ = (
'ProfileBase', 'ProfileRunnerBase', 'Profile', 'ThreadProfile',
'StatisticProfile', 'StatisticThread', 'run', 'runctx', 'runfile',
'runpath',
)
class BaseLineIterator(object):
def __init__(self, getline, filename, global_dict):
self._getline = getline
self._filename = filename
self._global_dict = global_dict
self._lineno = 1
def __iter__(self):
return self
def next(self):
lineno = self._lineno
self._lineno += 1
return lineno, self._getline(self._filename, lineno, self._global_dict)
if sys.version_info < (3, ):
    # Python 2: linecache may return bytes, so the source encoding must be
    # detected and lines decoded lazily.
    import codecs
    # Find coding specification (see PEP-0263)
    _matchCoding = re.compile(
        r'^[ \t\f]*#.*?coding[:=][ \t]*([-_.a-zA-Z0-9]+)',
    ).match
    class LineIterator(BaseLineIterator):
        """
        BaseLineIterator variant decoding byte lines using the source
        file's declared (or detected) encoding.
        """
        # None means "lines are already unicode, do not decode".
        _encoding = None
        def __init__(self, *args, **kw):
            super(LineIterator, self).__init__(*args, **kw)
            # Identify encoding.
            first_line = self._getline(self._filename, 1, self._global_dict)
            if isinstance(first_line, bytes):
                # BOM - python2 only detects the (discouraged) UTF-8 BOM
                if first_line.startswith(codecs.BOM_UTF8):
                    self._encoding = 'utf-8'
                else:
                    # PEP-0263: "the first or second line must match [_matchCoding]"
                    match = _matchCoding(first_line)
                    if match is None:
                        match = _matchCoding(
                            self._getline(self._filename, 2, self._global_dict),
                        )
                    if match is None:
                        self._encoding = 'ascii'
                    else:
                        self._encoding = match.group(1)
            # else, first line is unicode.
        def next(self):
            lineno, line = super(LineIterator, self).next()
            if self._encoding:
                line = line.decode(self._encoding, errors='replace')
            return lineno, line
else:
    # getline returns unicode objects, nothing to do
    LineIterator = BaseLineIterator
if platform.system() == 'Windows':
    # Windows: reuse subprocess's list2cmdline quoting rules.
    quoteCommandline = windows_list2cmdline
else:
    def quoteCommandline(commandline):
        # POSIX: shell-quote each argument individually.
        return ' '.join(shlex_quote(x) for x in commandline)
class EncodeOrReplaceWriter(object):
    """
    Write-only file-ish wrapper: when the underlying file rejects some
    characters, they are rewritten as replacement characters instead of
    letting UnicodeEncodeError propagate.
    """
    def __init__(self, out):
        # Fall back to ascii when the stream advertises no encoding.
        self._encoding = getattr(out, 'encoding', None) or 'ascii'
        self._write = out.write
    def write(self, data):
        try:
            self._write(data)
        except UnicodeEncodeError:
            sanitized = data.encode(self._encoding, errors='replace')
            self._write(sanitized.decode(self._encoding))
def _isCallgrindName(filepath):
return os.path.basename(filepath).startswith('cachegrind.out.')
class _FileTiming(object):
    """
    Accumulation of profiling statistics (line and call durations) for a given
    source "file" (unique global dict).
    Subclasses should be aware that:
    - this classes uses __slots__, mainly for cpu efficiency (property lookup
      is in a list instead of a dict)
    - it can access the BaseProfile instance which created any instace using
      the "profiler" property, should they share some state across source
      files.
    - methods on this class are profiling choke-point - keep customisations
      as cheap in CPU as you can !
    """
    __slots__ = ('line_dict', 'call_dict', 'filename', 'global_dict',
        'profiler')
    def __init__(self, filename, global_dict, profiler):
        self.filename = filename
        self.global_dict = global_dict
        # line -> code object -> [hit count, cumulated duration].
        self.line_dict = defaultdict(lambda: defaultdict(lambda: [0, 0]))
        # (caller code, caller line, callee code) ->
        #   [callee FileTiming, hit count, cumulated duration].
        self.call_dict = {}
        # Note: not used in this implementation, may be used by subclasses.
        self.profiler = profiler
    def hit(self, code, line, duration):
        """
        A line has finished executing.
        code (code)
            container function's code object
        line (int)
            line number of just executed line
        duration (float)
            duration of the line, in seconds
        """
        entry = self.line_dict[line][code]
        entry[0] += 1
        entry[1] += duration
    def call(self, code, line, callee_file_timing, callee, duration, frame):
        """
        A call originating from this file returned.
        code (code)
            caller's code object
        line (int)
            caller's line number
        callee_file_timing (FileTiming)
            callee's FileTiming
        callee (code)
            callee's code object
        duration (float)
            duration of the call, in seconds
        frame (frame)
            calle's entire frame as of its return
        """
        try:
            entry = self.call_dict[(code, line, callee)]
        except KeyError:
            self.call_dict[(code, line, callee)] = [callee_file_timing, 1, duration]
        else:
            entry[1] += 1
            entry[2] += duration
    def getHitStatsFor(self, line):
        """Return (total hits, total duration) for a line, summed over all code objects."""
        total_hits = total_duration = 0
        for hits, duration in self.line_dict.get(line, {}).itervalues():
            total_hits += hits
            total_duration += duration
        return total_hits, total_duration
    def getLastLine(self):
        """Return the highest line number with recorded hits or outgoing calls."""
        return max(
            max(self.line_dict) if self.line_dict else 0,
            max(x for _, x, _ in self.call_dict) if self.call_dict else 0,
        )
    def iterHits(self):
        """Yield (line, code, hit count, duration) for each recorded line hit."""
        for line, code_dict in self.line_dict.iteritems():
            for code, (hits, duration) in code_dict.iteritems():
                yield line, code, hits, duration
    def iterCalls(self):
        """Yield (line, caller code, hits, duration, callee filename, callee code) per call site."""
        for (code, line, callee), (callee_file_timing, hit, duration) in \
                self.call_dict.iteritems():
            yield (
                line,
                code,
                hit, duration,
                callee_file_timing.filename, callee,
            )
    def getCallListByLine(self):
        """Return a dict mapping caller line to the list of its call records."""
        result = defaultdict(list)
        for line, code, hit, duration, callee_filename, callee in self.iterCalls():
            result[line].append((
                code,
                hit, duration,
                callee_filename, callee,
            ))
        return result
    def getTotalTime(self):
        """Return the total recorded duration for this file, in seconds."""
        return sum(
            y[1]
            for x in self.line_dict.itervalues()
            for y in x.itervalues()
        )
    def getTotalHitCount(self):
        """Return the total number of recorded line hits for this file."""
        return sum(
            y[0]
            for x in self.line_dict.itervalues()
            for y in x.itervalues()
        )
    def getSortKey(self):
        # total duration first, then total hit count for statistical profiling
        result = [0, 0]
        for entry in self.line_dict.itervalues():
            for hit, duration in entry.itervalues():
                result[0] += duration
                result[1] += hit
        return result
FileTiming = _FileTiming
class LocalDescriptor(threading.local):
    """
    Data descriptor whose per-instance values are also thread-local.
    Values are stored on this threading.local object, keyed by the id of
    the owning instance.
    """
    def __init__(self, func=None):
        """
        func (callable)
            If provided, called when a missing property is accessed
            (ex: accessing thread never initialised that property).
            If None, AttributeError is raised.
        """
        super(LocalDescriptor, self).__init__()
        if func is not None:
            self.func = func
    def __get__(self, instance, owner):
        key = str(id(instance))
        try:
            return getattr(self, key)
        except AttributeError:
            # The "self.func" lookup itself raises AttributeError when no
            # factory was provided, which is the documented behaviour.
            value = self.func()
            setattr(self, key, value)
            return value
    def __set__(self, instance, value):
        setattr(self, str(id(instance)), value)
    def __delete__(self, instance):
        # Deleting a never-set value is a no-op.
        try:
            delattr(self, str(id(instance)))
        except AttributeError:
            pass
_ANNOTATE_HEADER = \
u'%6s|%10s|' \
u'%13s|%13s|%7s|' \
u'Source code' % (
u'Line #', u'Hits',
u'Time', u'Time per hit', u'%',
)
_ANNOTATE_HORIZONTAL_LINE = u''.join(x == u'|' and u'+' or u'-'
for x in _ANNOTATE_HEADER)
_ANNOTATE_FORMAT = \
u'%(lineno)6i|%(hits)10i|' \
u'%(time)13g|%(time_per_hit)13g|%(percent)6.2f%%|' \
u'%(line)s'
_ANNOTATE_CALL_FORMAT = \
u'(call)|%(hits)10i|' \
u'%(time)13g|%(time_per_hit)13g|%(percent)6.2f%%|' \
u'# %(callee_file)s:%(callee_line)s %(callee_name)s'
def _initStack():
# frame_time: when current frame execution started/resumed last
# frame_discount: time discounted from current frame, because it appeared
# lower in the call stack from the same callsite
# lineno: latest line which execution started
# line_time: time at which latest line started being executed
# line_duration: total time spent in current line up to last resume
now = time()
return (deque([[now, 0, None, now, 0]]), defaultdict(deque))
def _verboseProfileDecorator(self):
def decorator(func):
@wraps(func)
def wrapper(frame, event, arg):
self._traceEvent(frame, event)
return func(frame, event, arg)
return wrapper
return decorator
class ProfileBase(object):
"""
Methods common to deterministic and statistic profiling.
Subclasses can override the "FileTiming" property to use a different class.
"""
__slots__ = (
'file_dict',
'global_dict',
'total_time',
'__dict__',
'__weakref__',
'merged_file_dict',
)
FileTiming = _FileTiming
    def __init__(self):
        # name -> list of FileTiming (one per thread which touched the file).
        self.file_dict = {}
        # Deduplicated, merged view of file_dict, built lazily by
        # _mergeFileTiming.
        self.merged_file_dict = {}
        # id(frame.f_globals) -> FileTiming, fast per-module lookup cache.
        self.global_dict = {}
        # Total profiled wall-clock time, in seconds.
        self.total_time = 0
def _getFileTiming(self, frame):
try:
return self.global_dict[id(frame.f_globals)]
except KeyError:
f_globals = frame.f_globals
name = self._getFilename(frame)
self.global_dict[id(f_globals)] = file_timing = self.FileTiming(
name,
f_globals,
self,
)
# file_dict modifications must be thread-safe to not lose measures.
# setdefault is atomic, append is atomic.
self.file_dict.setdefault(name, []).append(file_timing)
return file_timing
@staticmethod
def _getFilename(frame):
"""
Overload in subclasses to customise filename generation.
"""
return frame.f_code.co_filename
@staticmethod
def _getline(filename, lineno, global_dict):
"""
Overload in subclasses to customise source retrieval.
"""
return linecache.getline(filename, lineno, global_dict)
def _mergeFileTiming(self, rebuild=False):
merged_file_dict = self.merged_file_dict
if merged_file_dict and not rebuild:
return merged_file_dict
merged_file_dict.clear()
# Regroup by module, to find all duplicates from other threads.
by_global_dict = defaultdict(list)
for file_timing_list in self.file_dict.itervalues():
for file_timing in file_timing_list:
by_global_dict[
id(file_timing.global_dict)
].append(
file_timing,
)
# Resolve name conflicts.
global_to_named_dict = {}
for global_dict_id, file_timing_list in by_global_dict.iteritems():
file_timing = file_timing_list[0]
name = file_timing.filename
if name in merged_file_dict:
counter = count()
base_name = name
while name in merged_file_dict:
name = base_name + '_%i' % next(counter)
global_to_named_dict[global_dict_id] = merged_file_dict[name] = FileTiming(
name,
file_timing.global_dict,
file_timing.profiler, # Note: should be self
)
# Add all file timings from one module together under its
# deduplicated name. This needs to happen after all names
# are generated and all empty file timings are created so
# call events cross-references can be remapped.
for merged_file_timing in merged_file_dict.itervalues():
line_dict = merged_file_timing.line_dict
for file_timing in by_global_dict[id(merged_file_timing.global_dict)]:
for line, other_code_dict in file_timing.line_dict.iteritems():
code_dict = line_dict[line]
for code, (
other_hits,
other_duration,
) in other_code_dict.iteritems():
entry = code_dict[code]
entry[0] += other_hits
entry[1] += other_duration
call_dict = merged_file_timing.call_dict
for key, (
other_callee_file_timing,
other_hits,
other_duration,
) in file_timing.call_dict.iteritems():
try:
entry = call_dict[key]
except KeyError:
entry = call_dict[key] = [
global_to_named_dict[
id(other_callee_file_timing.global_dict)
],
other_hits,
other_duration,
]
else:
entry[1] += other_hits
entry[2] += other_duration
return merged_file_dict
def getFilenameSet(self):
"""
Returns a set of profiled file names.
Note: "file name" is used loosely here. See python documentation for
co_filename, linecache module and PEP302. It may not be a valid
filesystem path.
"""
result = set(self._mergeFileTiming())
# Ignore profiling code. __file__ does not always provide consistent
# results with f_code.co_filename (ex: easy_install with zipped egg),
# so inspect current frame instead.
# Get current file from one of pprofile methods. Compatible with
# implementations that do not have the inspect.currentframe() method
# (e.g. IronPython).
# XXX: Assumes that all of pprofile code is in a single file.
# XXX: Assumes that _initStack exists in pprofile module.
result.discard(inspect.getsourcefile(_initStack))
return result
def _getFileNameList(self, filename, may_sort=True):
if filename is None:
filename = self.getFilenameSet()
elif isinstance(filename, basestring):
return [filename]
if may_sort:
try:
# Detect if filename is an ordered data type.
filename[:0]
except TypeError:
# Not ordered, sort.
file_dict = self._mergeFileTiming()
filename = sorted(filename, reverse=True,
key=lambda x: file_dict[x].getSortKey()
)
return filename
def callgrind(self, out, filename=None, commandline=None, relative_path=False):
"""
Dump statistics in callgrind format.
Contains:
- per-line hit count, time and time-per-hit
- call associations (call tree)
Note: hit count is not inclusive, in that it is not the sum of all
hits inside that call.
Time unit: microsecond (1e-6 second).
out (file-ish opened for writing)
Destination of callgrind profiling data.
filename (str, collection of str)
If provided, dump stats for given source file(s) only.
By default, list for all known files.
commandline (anything with __str__)
If provided, will be output as the command line used to generate
this profiling data.
relative_path (bool)
When True, absolute elements are stripped from path. Useful when
maintaining several copies of source trees with their own
profiling result, so kcachegrind does not look in system-wide
files which may not match with profiled code.
"""
print(u'# callgrind format', file=out)
print(u'version: 1', file=out)
print(u'creator: pprofile', file=out)
print(u'event: usphit :microseconds/hit', file=out)
print(u'events: hits microseconds usphit', file=out)
if commandline is not None:
print(u'cmd:', commandline, file=out)
file_dict = self._mergeFileTiming()
if relative_path:
convertPath = _relpath
else:
convertPath = lambda x: x
if os.path.sep != "/":
# qCacheGrind (windows build) needs at least one UNIX separator
# in path to find the file. Adapt here even if this is probably
# more of a qCacheGrind issue...
convertPath = lambda x, cascade=convertPath: cascade(
'/'.join(x.split(os.path.sep))
)
code_to_name_dict = {}
homonym_counter = {}
def getCodeName(filename, code):
# Tracks code objects globally, because callee information needs
# to be consistent accross files.
# Inside a file, grants unique names to each code object.
try:
return code_to_name_dict[code]
except KeyError:
name = code.co_name + ':%i' % code.co_firstlineno
key = (filename, name)
homonym_count = homonym_counter.get(key, 0)
if homonym_count:
name += '_%i' % homonym_count
homonym_counter[key] = homonym_count + 1
code_to_name_dict[code] = name
return name
for current_file in self._getFileNameList(filename, may_sort=False):
file_timing = file_dict[current_file]
print(u'fl=%s' % convertPath(current_file), file=out)
# When a local callable is created an immediately executed, this
# loop would start a new "fn=" section but would not end it before
# emitting "cfn=" lines, making the callee appear as not being
# called by interrupted "fn=" section.
# So dispatch all functions in a first pass, and build
# uninterrupted sections in a second pass.
# Note: cost line is a list just to be mutable. A single item is
# expected.
func_dict = defaultdict(lambda: defaultdict(lambda: ([], [])))
for lineno, code, hits, duration in file_timing.iterHits():
func_dict[getCodeName(current_file, code)][lineno][0].append(
(hits, int(duration * 1000000)),
)
for (
lineno,
caller,
call_hits, call_duration,
callee_file, callee,
) in file_timing.iterCalls():
call_ticks = int(call_duration * 1000000)
func_call_list = func_dict[
getCodeName(current_file, caller)
][lineno][1]
append = func_call_list.append
append(u'cfl=' + convertPath(callee_file))
append(u'cfn=' + getCodeName(callee_file, callee))
append(u'calls=%i %i' % (call_hits, callee.co_firstlineno))
append(u'%i %i %i %i' % (lineno, call_hits, call_ticks, call_ticks // call_hits))
for func_name, line_dict in func_dict.iteritems():
print(u'fn=%s' % func_name, file=out)
for lineno, (func_hit_list, func_call_list) in sorted(line_dict.iteritems()):
if func_hit_list:
# Multiple function objects may "reside" on the same
# line of the same file (same global dict).
# Sum these up and produce a single cachegrind event.
hits = sum(x for x, _ in func_hit_list)
ticks = sum(x for _, x in func_hit_list)
print(
u'%i %i %i %i' % (
lineno,
hits,
ticks,
ticks // hits,
),
file=out,
)
for line in func_call_list:
print(line, file=out)
def annotate(self, out, filename=None, commandline=None, relative_path=False):
"""
Dump annotated source code with current profiling statistics to "out"
file.
Time unit: second.
out (file-ish opened for writing)
Destination of annotated sources.
filename (str, collection of str)
If provided, dump stats for given source file(s) only.
If unordered collection, it will get sorted by decreasing total
file score (total time if available, then total hit count).
By default, list for all known files.
commandline (anything with __str__)
If provided, will be output as the command line used to generate
this annotation.
relative_path (bool)
For compatibility with callgrind. Ignored.
"""
file_dict = self._mergeFileTiming()
total_time = self.total_time
if commandline is not None:
print(u'Command line:', commandline, file=out)
print(u'Total duration: %gs' % total_time, file=out)
if not total_time:
return
def percent(value, scale):
if scale == 0:
return 0
return value * 100 / scale
for name in self._getFileNameList(filename):
file_timing = file_dict[name]
file_total_time = file_timing.getTotalTime()
call_list_by_line = file_timing.getCallListByLine()
print(u'File: %s' % name, file=out)
print(u'File duration: %gs (%.2f%%)' % (file_total_time,
percent(file_total_time, total_time)), file=out)
print(_ANNOTATE_HEADER, file=out)
print(_ANNOTATE_HORIZONTAL_LINE, file=out)
last_line = file_timing.getLastLine()
for lineno, line in LineIterator(
self._getline,
file_timing.filename,
file_timing.global_dict,
):
if not line and lineno > last_line:
break
hits, duration = file_timing.getHitStatsFor(lineno)
print(_ANNOTATE_FORMAT % {
u'lineno': lineno,
u'hits': hits,
u'time': duration,
u'time_per_hit': duration / hits if hits else 0,
u'percent': percent(duration, total_time),
u'line': (line or u'').rstrip(),
}, file=out)
for (
_,
call_hits, call_duration,
callee_file, callee,
) in call_list_by_line.get(lineno, ()):
print(_ANNOTATE_CALL_FORMAT % {
u'hits': call_hits,
u'time': call_duration,
u'time_per_hit': call_duration / call_hits,
u'percent': percent(call_duration, total_time),
u'callee_file': callee_file,
u'callee_line': callee.co_firstlineno,
u'callee_name': callee.co_name,
}, file=out)
def _iterRawFile(self, name):
file_timing = self._mergeFileTiming()[name]
for lineno in count(1):
line = self._getline(file_timing.filename, lineno,
file_timing.global_dict)
if not line:
break
yield line
def iterSource(self):
"""
Iterator over all involved files.
Yields 2-tuple composed of file path and an iterator over
(non-annotated) source lines.
Can be used to generate a file tree for use with kcachegrind, for
example.
"""
for name in self.getFilenameSet():
yield name, self._iterRawFile(name)
# profile/cProfile-like API
def dump_stats(self, filename):
"""
Similar to profile.Profile.dump_stats - but different output format !
"""
if _isCallgrindName(filename):
with open(filename, 'w') as out:
self.callgrind(out)
else:
with io.open(filename, 'w', errors='replace') as out:
self.annotate(out)
def print_stats(self):
"""
Similar to profile.Profile.print_stats .
Returns None.
"""
self.annotate(EncodeOrReplaceWriter(sys.stdout))
class ProfileRunnerBase(object):
    """
    Mixin providing the profile/cProfile-like "run" entry points on top of a
    context-manager profiler. Subclasses implement __enter__/__exit__ to
    enable/disable actual profiling.
    """
    def __call__(self):
        # Allows "with profiler():" in addition to "with profiler:".
        return self

    def __enter__(self):
        raise NotImplementedError

    def __exit__(self, exc_type, exc_val, exc_tb):
        raise NotImplementedError

    # profile/cProfile-like API
    def runctx(self, cmd, globals, locals):
        """Similar to profile.Profile.runctx ."""
        with self():
            exec(cmd, globals, locals)
        return self

    def runcall(self, func, *args, **kw):
        """Similar to profile.Profile.runcall ."""
        with self():
            return func(*args, **kw)

    def runfile(self, fd, argv, fd_name='<unknown>', compile_flags=0,
            dont_inherit=1, globals=None):
        """
        Compile the content of file-ish *fd* and profile its execution.
        argv: exposed as sys.argv while the code runs (restored afterwards).
        fd_name: exposed as __file__ and used in tracebacks.
        globals: base global namespace; it is copied, never mutated.
            None (the default) means a fresh empty namespace.
        """
        # Fix: the default used to be a shared mutable dict ({}), the
        # classic mutable-default-argument trap. None sentinel is
        # behaviorally equivalent for all callers.
        if globals is None:
            globals = {}
        with fd:
            code = compile(fd.read(), fd_name, 'exec', flags=compile_flags,
                dont_inherit=dont_inherit)
        original_sys_argv = list(sys.argv)
        ctx_globals = globals.copy()
        ctx_globals['__file__'] = fd_name
        ctx_globals['__name__'] = '__main__'
        ctx_globals['__package__'] = None
        try:
            sys.argv[:] = argv
            return self.runctx(code, ctx_globals, None)
        finally:
            sys.argv[:] = original_sys_argv

    def runpath(self, path, argv):
        """
        Profile the script at *path*, with its directory temporarily
        prepended to sys.path (mimicking direct script execution).
        """
        original_sys_path = list(sys.path)
        try:
            sys.path.insert(0, os.path.dirname(path))
            return self.runfile(open(path, 'rb'), argv, fd_name=path)
        finally:
            sys.path[:] = original_sys_path

    def runmodule(self, module, argv):
        """
        Profile the named module run as __main__ (like "python -m"),
        restoring sys.argv and sys.path[0] afterwards.
        """
        original_sys_argv = list(sys.argv)
        original_sys_path0 = sys.path[0]
        try:
            sys.path[0] = os.getcwd()
            sys.argv[:] = argv
            with self():
                runpy.run_module(module, run_name='__main__', alter_sys=True)
        finally:
            sys.argv[:] = original_sys_argv
            sys.path[0] = original_sys_path0
        return self
class Profile(ProfileBase, ProfileRunnerBase):
    """
    Deterministic, recursive, line-granularity, profiling class.
    Does not require any source code change to work.
    If the performance hit is too large, it can benefit from some
    integration (calling enable/disable around selected code chunks).
    The sum of time spent in all profiled lines is less than the total
    profiled time reported. This is (part of) profiling overhead.
    This also mans that sum of time-spent-on-line percentage is less than 100%.
    All times are "internal time", ie they do not count time spent inside
    called (profilable, so python) functions.
    """
    __slots__ = (
        '_global_trace',
        '_local_trace',
        'stack',
        'enabled_start',
    )

    def __init__(self, verbose=False):
        super(Profile, self).__init__()
        if verbose:
            # Wrap both trace callbacks so every event is logged to stderr.
            self._global_trace = _verboseProfileDecorator(self)(
                self._real_global_trace)
            self._local_trace = _verboseProfileDecorator(self)(
                self._real_local_trace)
        else:
            self._global_trace = self._real_global_trace
            self._local_trace = self._real_local_trace
        self.stack = None
        self.enabled_start = None

    def _enable(self):
        """
        Overload this method when subclassing. Called before actually
        enabling trace.
        """
        self.stack = _initStack()
        self.enabled_start = time()

    def enable(self):
        """
        Enable profiling.
        """
        if self.enabled_start:
            warn('Duplicate "enable" call')
        else:
            self._enable()
            sys.settrace(self._global_trace)

    def _disable(self):
        """
        Overload this method when subclassing. Called after actually disabling
        trace.
        """
        self.total_time += time() - self.enabled_start
        self.enabled_start = None
        del self.stack

    def disable(self):
        """
        Disable profiling.
        """
        if self.enabled_start:
            sys.settrace(None)
            self._disable()
        else:
            warn('Duplicate "disable" call')

    def __enter__(self):
        """
        __enter__() -> self
        """
        self.enable()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """
        __exit__(*excinfo) -> None. Disables profiling.
        """
        self.disable()

    def _traceEvent(self, frame, event):
        # Verbose-mode helper: log one trace event to stderr, indented by
        # current stack depth.
        f_code = frame.f_code
        lineno = frame.f_lineno
        print('%10.6f%s%s %s:%s %s+%s' % (
            time() - self.enabled_start,
            ' ' * len(self.stack[0]),
            event,
            f_code.co_filename,
            lineno,
            f_code.co_name,
            lineno - f_code.co_firstlineno,
        ), file=sys.stderr)

    def _real_global_trace(self, frame, event, arg):
        # "call" event handler: suspend line timing of the caller frame and
        # push a new timing entry for the callee frame.
        local_trace = self._local_trace
        if local_trace is not None:
            event_time = time()
            callee_entry = [event_time, 0, frame.f_lineno, event_time, 0]
            stack, callee_dict = self.stack
            try:
                caller_entry = stack[-1]
            except IndexError:
                pass
            else:
                # Suspend caller frame
                frame_time, frame_discount, lineno, line_time, line_duration = caller_entry
                caller_entry[4] = event_time - line_time + line_duration
                callee_dict[(frame.f_back.f_code, frame.f_code)].append(callee_entry)
            stack.append(callee_entry)
        return local_trace

    def _real_local_trace(self, frame, event, arg):
        # "line"/"return" event handler: account elapsed time to the line
        # which just finished executing.
        if event == 'line' or event == 'return':
            event_time = time()
            stack, callee_dict = self.stack
            try:
                stack_entry = stack[-1]
            except IndexError:
                warn('Profiling stack underflow, disabling.')
                self.disable()
                return None
            frame_time, frame_discount, lineno, line_time, line_duration = stack_entry
            file_timing = self._getFileTiming(frame)
            file_timing.hit(frame.f_code, lineno,
                event_time - line_time + line_duration)
            if event == 'line':
                # Start a new line
                stack_entry[2] = frame.f_lineno
                stack_entry[3] = event_time
                stack_entry[4] = 0
            else:
                # 'return' event, <frame> is still callee
                # Resume caller frame
                stack.pop()
                stack[-1][3] = event_time
                caller_frame = frame.f_back
                caller_code = caller_frame.f_code
                callee_code = frame.f_code
                callee_entry_list = callee_dict[(caller_code, callee_code)]
                callee_entry_list.pop()
                call_duration = event_time - frame_time
                if callee_entry_list:
                    # Callee is also somewhere up the stack, so discount this
                    # call duration from it.
                    callee_entry_list[-1][1] += call_duration
                self._getFileTiming(caller_frame).call(
                    caller_code, caller_frame.f_lineno,
                    file_timing,
                    callee_code, call_duration - frame_discount,
                    frame,
                )
        return self._local_trace

    # profile/cProfile-like API
    def run(self, cmd):
        """Similar to profile.Profile.run ."""
        import __main__
        dikt = __main__.__dict__
        return self.runctx(cmd, dikt, dikt)
class ThreadProfile(Profile):
    """
    threading.Thread-aware version of Profile class.
    Threads started after enable() call will be profiled.
    After disable() call, threads will need to be switched into and trigger a
    trace event (typically a "line" event) before they can notice the
    disabling.
    """
    __slots__ = ('_local_trace_backup', )
    # Per-thread profiling stack and per-thread module cache.
    stack = LocalDescriptor(_initStack)
    global_dict = LocalDescriptor(dict)

    def __init__(self, **kw):
        super(ThreadProfile, self).__init__(**kw)
        # Kept so _enable can restore _local_trace after _disable set it
        # to None.
        self._local_trace_backup = self._local_trace

    def _enable(self):
        self._local_trace = self._local_trace_backup
        # Install the trace function in threads spawned from now on.
        threading.settrace(self._global_trace)
        super(ThreadProfile, self)._enable()

    def _disable(self):
        super(ThreadProfile, self)._disable()
        threading.settrace(None)
        # Signals already-running traced threads to stop measuring.
        self._local_trace = None
class StatisticProfile(ProfileBase, ProfileRunnerBase):
    """
    Statistic profiling class.
    This class does not gather its own samples by itself.
    Instead, it must be provided with call stacks (as returned by
    sys._getframe() or sys._current_frames()).
    """
    def __init__(self):
        super(StatisticProfile, self).__init__()
        # Durations are not measured when sampling; a non-zero total keeps
        # percentage computations well-defined in reports.
        self.total_time = 1

    def sample(self, frame):
        """
        Fold one captured call stack into recorded statistics: a hit on the
        innermost frame, plus one call edge per caller up the stack.
        """
        getFileTiming = self._getFileTiming
        callee_timing = getFileTiming(frame)
        callee_code = frame.f_code
        callee_timing.hit(callee_code, frame.f_lineno, 0)
        caller = frame.f_back
        while caller is not None:
            caller_timing = getFileTiming(caller)
            caller_code = caller.f_code
            caller_timing.call(
                caller_code, caller.f_lineno,
                callee_timing, callee_code, 0, frame,
            )
            # Walk one level up: current caller becomes the callee.
            callee_timing = caller_timing
            callee_code = caller_code
            frame = caller
            caller = frame.f_back

    def __call__(self, period=.001, single=True, group=None, name=None):
        """
        Instanciate StatisticThread.
        >>> s_profile = StatisticProfile()
        >>> with s_profile(single=False):
        >>>     # Code to profile
        Is equivalent to:
        >>> s_profile = StatisticProfile()
        >>> s_thread = StatisticThread(profiler=s_profile, single=False)
        >>> with s_thread:
        >>>     # Code to profile
        """
        return StatisticThread(
            profiler=self,
            period=period,
            single=single,
            group=group,
            name=name,
        )
# BBB: backward-compatibility alias for the pre-rename class name.
StatisticalProfile = StatisticProfile
class StatisticThread(threading.Thread, ProfileRunnerBase):
    """
    Usage in a nutshell:
        with StatisticThread() as profiler_thread:
            # do stuff
        profiler_thread.profiler.print_stats()
    """
    __slots__ = (
        '_test',
        '_start_time',
        'clean_exit',
    )

    def __init__(self, profiler=None, period=.001, single=True, group=None, name=None):
        """
        profiler (None or StatisticProfile instance)
            Available on instances as the "profiler" read-only property.
            If None, a new profiler instance will be created.
        period (float)
            How many seconds to wait between consecutive samples.
            The smaller, the more profiling overhead, but the faster results
            become meaningful.
            The larger, the less profiling overhead, but requires long profiling
            session to get meaningful results.
        single (bool)
            Profile only the thread which created this instance.
        group, name
            See Python's threading.Thread API.
        """
        if profiler is None:
            profiler = StatisticProfile()
        if single:
            # Sample only the thread which created this instance.
            self._test = lambda x, ident=threading.current_thread().ident: ident == x
        else:
            self._test = None
        super(StatisticThread, self).__init__(
            group=group,
            name=name,
        )
        self._stop_event = threading.Event()
        self._period = period
        self._profiler = profiler
        profiler.total_time = 0
        self.daemon = True
        self.clean_exit = False

    @property
    def profiler(self):
        # Read-only access to the wrapped profiler instance.
        return self._profiler

    def start(self):
        self.clean_exit = False
        self._can_run = True
        self._start_time = time()
        super(StatisticThread, self).start()

    def stop(self):
        """
        Request thread to stop.
        Does not wait for actual termination (use join() method).
        """
        if self.is_alive():
            self._can_run = False
            self._stop_event.set()
            self._profiler.total_time += time() - self._start_time
            self._start_time = None

    def __enter__(self):
        """
        __enter__() -> self
        """
        self.start()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """
        __exit__(*excinfo) -> None. Stops and joins profiling thread.
        """
        self.stop()
        self.join()

    def run(self):
        # Sampling loop: periodically snapshot live thread frames and feed
        # the matching ones to the profiler.
        current_frames = sys._current_frames
        test = self._test
        if test is None:
            # Sample every thread except this sampling thread itself.
            test = lambda x, ident=threading.current_thread().ident: ident != x
        sample = self._profiler.sample
        stop_event = self._stop_event
        wait = partial(stop_event.wait, self._period)
        while self._can_run:
            for ident, frame in current_frames().iteritems():
                if test(ident):
                    sample(frame)
            # Drop the frame reference promptly to avoid delaying frame
            # cleanup in sampled threads.
            frame = None
            wait()
        stop_event.clear()
        self.clean_exit = True

    def callgrind(self, *args, **kw):
        warn('deprecated', DeprecationWarning)
        return self._profiler.callgrind(*args, **kw)

    def annotate(self, *args, **kw):
        warn('deprecated', DeprecationWarning)
        return self._profiler.annotate(*args, **kw)

    def dump_stats(self, *args, **kw):
        warn('deprecated', DeprecationWarning)
        return self._profiler.dump_stats(*args, **kw)

    def print_stats(self, *args, **kw):
        warn('deprecated', DeprecationWarning)
        return self._profiler.print_stats(*args, **kw)

    def iterSource(self, *args, **kw):
        warn('deprecated', DeprecationWarning)
        return self._profiler.iterSource(*args, **kw)
# BBB: backward-compatibility alias for the pre-rename class name.
StatisticalThread = StatisticThread
# profile/cProfile-like API (no sort parameter !)
def _run(threads, verbose, func_name, filename, *args, **kw):
    """
    Shared driver for the module-level helpers: build a profiler, invoke
    one of its runner methods, then report even on failure.
    """
    prof = (ThreadProfile if threads else Profile)(verbose=verbose)
    try:
        try:
            getattr(prof, func_name)(*args, **kw)
        except SystemExit:
            # A profiled program exiting must not prevent reporting.
            pass
    finally:
        if filename is None:
            prof.print_stats()
        else:
            prof.dump_stats(filename)
def run(cmd, filename=None, threads=True, verbose=False):
    """Similar to profile.run ."""
    # cmd is executed in the __main__ namespace (see Profile.run).
    _run(threads, verbose, 'run', filename, cmd)
def runctx(cmd, globals, locals, filename=None, threads=True, verbose=False):
    """Similar to profile.runctx ."""
    # cmd is executed with the provided globals/locals namespaces.
    _run(threads, verbose, 'runctx', filename, cmd, globals, locals)
def runpath(path, argv, filename=None, threads=True, verbose=False):
    """
    Run code from open-accessible file path with profiling enabled.
    """
    # argv becomes sys.argv for the profiled script.
    _run(threads, verbose, 'runpath', filename, path, argv)
# All path separator characters known on this platform.
_allsep = os.sep + (os.altsep or '')
def _relpath(name):
    """
    Strip absolute components from path.
    Inspired from zipfile.write().
    """
    drive_free = os.path.splitdrive(name)[1]
    return os.path.normpath(drive_free).lstrip(_allsep)
def _main(argv, stdin=None):
    """
    Command-line driver: parse arguments, run the target (script, module,
    or stdin) under the selected profiler, and emit results.
    """
    # Maps output format name to the ProfileBase method producing it.
    format_dict = {
        'text': 'annotate',
        'callgrind': 'callgrind',
    }
    parser = argparse.ArgumentParser(argv[0])
    parser.add_argument('script', help='Python script to execute (optionaly '
        'followed by its arguments)', nargs='?')
    parser.add_argument('argv', nargs=argparse.REMAINDER)
    parser.add_argument('-o', '--out', default='-',
        help='Write annotated sources to this file. Defaults to stdout.')
    parser.add_argument('-z', '--zipfile',
        help='Name of a zip file to generate from all involved source files. '
        'Useful with callgrind output.')
    parser.add_argument('-t', '--threads', default=1, type=int, help='If '
        'non-zero, trace threads spawned by program. Default: %(default)s')
    parser.add_argument('-f', '--format', choices=format_dict,
        help='Format in which output is generated. If not set, auto-detected '
        'from filename if provided, falling back to "text".')
    parser.add_argument('-v', '--verbose', action='store_true',
        help='Enable profiler internal tracing output. Cryptic and verbose.')
    parser.add_argument('-s', '--statistic', default=0, type=float,
        help='Use this period for statistic profiling, or use deterministic '
        'profiling when 0.')
    parser.add_argument('-m', dest='module',
        help='Searches sys.path for the named module and runs the '
        'corresponding .py file as a script. When given, positional arguments '
        'become sys.argv[1:]')
    group = parser.add_argument_group(
        title='Filtering',
        description='Allows excluding (and re-including) code from '
        '"file names" matching regular expressions. '
        '"file name" follows the semantics of python\'s "co_filename": '
        'it may be a valid path, of an existing or non-existing file, '
        'but it may be some arbitrary string too.'
    )
    group.add_argument('--exclude-syspath', action='store_true',
        help='Exclude all from default "sys.path". Beware: this will also '
        'exclude properly-installed non-standard modules, which may not be '
        'what you want.')
    group.add_argument('--exclude', action='append', default=[],
        help='Exclude files whose name starts with any pattern.')
    group.add_argument('--include', action='append', default=[],
        help='Include files whose name would have otherwise excluded. '
        'If no exclusion was specified, all paths are excluded first.')
    options = parser.parse_args(argv[1:])
    if options.exclude_syspath:
        options.exclude.extend('^' + re.escape(x) for x in sys.path)
    if options.include and not options.exclude:
        options.exclude.append('') # All-matching regex
    if options.verbose:
        if options.exclude:
            print('Excluding:', file=sys.stderr)
            for regex in options.exclude:
                print('\t' + regex, file=sys.stderr)
            if options.include:
                print('But including:', file=sys.stderr)
                for regex in options.include:
                    print('\t' + regex, file=sys.stderr)
    # Decide what to run and which runner method to call for it.
    if options.module is None:
        if options.script is None:
            parser.error('too few arguments')
        args = [options.script] + options.argv
        runner_method_kw = {
            'path': args[0],
            'argv': args,
        }
        runner_method_id = 'runpath'
    elif stdin is not None and options.module == '-':
        # Undocumented way of using -m, used internaly by %%pprofile
        args = ['<stdin>']
        if options.script is not None:
            args.append(options.script)
        args.extend(options.argv)
        import __main__
        runner_method_kw = {
            'fd': stdin,
            'argv': args,
            'fd_name': '<stdin>',
            'globals': __main__.__dict__,
        }
        runner_method_id = 'runfile'
    else:
        args = [options.module]
        if options.script is not None:
            args.append(options.script)
        args.extend(options.argv)
        runner_method_kw = {
            'module': options.module,
            'argv': args,
        }
        runner_method_id = 'runmodule'
    if options.format is None:
        # Auto-detect output format from destination file name.
        if _isCallgrindName(options.out):
            options.format = 'callgrind'
        else:
            options.format = 'text'
    relative_path = options.format == 'callgrind' and options.zipfile
    if options.statistic:
        prof = StatisticalProfile()
        runner = StatisticalThread(
            profiler=prof,
            period=options.statistic,
            single=not options.threads,
        )
    else:
        if options.threads:
            klass = ThreadProfile
        else:
            klass = Profile
        prof = runner = klass(verbose=options.verbose)
    try:
        getattr(runner, runner_method_id)(**runner_method_kw)
    finally:
        # Always emit the report, even when the profiled program failed.
        if options.out == '-':
            out = EncodeOrReplaceWriter(sys.stdout)
            close = lambda: None
        else:
            out = io.open(options.out, 'w', errors='replace')
            close = out.close
        if options.exclude:
            exclusion_search_list = [
                re.compile(x).search for x in options.exclude
            ]
            include_search_list = [
                re.compile(x).search for x in options.include
            ]
            # Keep files which either match no exclusion, or match an
            # inclusion.
            filename_set = {
                x for x in prof.getFilenameSet()
                if not (
                    any(y(x) for y in exclusion_search_list) and
                    not any(y(x) for y in include_search_list)
                )
            }
        else:
            filename_set = None
        commandline = quoteCommandline(args)
        getattr(prof, format_dict[options.format])(
            out,
            filename=filename_set,
            # python2 repr returns bytes, python3 repr returns unicode
            commandline=getattr(
                commandline,
                'decode',
                lambda _: commandline,
            )('ascii'),
            relative_path=relative_path,
        )
        close()
        zip_path = options.zipfile
        if zip_path:
            # Archive all involved source files, for offline result browsing.
            if relative_path:
                convertPath = _relpath
            else:
                convertPath = lambda x: x
            with zipfile.ZipFile(
                    zip_path,
                    mode='w',
                    compression=zipfile.ZIP_DEFLATED,
                    ) as zip_file:
                for name, lines in prof.iterSource():
                    zip_file.writestr(
                        convertPath(name),
                        ''.join(lines)
                    )
    if options.statistic and not runner.clean_exit:
        # Mostly useful for regresion testing, as exceptions raised in threads
        # do not change exit status.
        sys.exit(1)
def pprofile(line, cell=None):
    """
    Profile line execution.
    """
    if cell is not None:
        # Cell magic: feed the cell body to the command-line driver through
        # the undocumented "-m -" stdin mode.
        return _main(
            ['%%pprofile', '-m', '-'] + shlex.split(line),
            io.StringIO(cell),
        )
    # TODO: detect and use arguments (statistical profiling, ...) ?
    return run(line)
# Expose pprofile as an IPython line/cell magic when IPython is running;
# the module-level name is then dropped to keep the public API clean.
try:
    register_line_cell_magic(pprofile)
except Exception:
    # ipython can be imported, but may not be currently running.
    pass
del pprofile
def main():
    # Console-script entry point.
    _main(sys.argv)
# Allow running as "python pprofile.py ...".
if __name__ == '__main__':
    main()
|
vpelletier/pprofile | pprofile.py | runpath | python | def runpath(path, argv, filename=None, threads=True, verbose=False):
_run(threads, verbose, 'runpath', filename, path, argv) | Run code from open-accessible file path with profiling enabled. | train | https://github.com/vpelletier/pprofile/blob/51a36896727565faf23e5abccc9204e5f935fe1e/pprofile.py#L1199-L1203 | [
"def _run(threads, verbose, func_name, filename, *args, **kw):\n if threads:\n klass = ThreadProfile\n else:\n klass = Profile\n prof = klass(verbose=verbose)\n try:\n try:\n getattr(prof, func_name)(*args, **kw)\n except SystemExit:\n pass\n finally:\n if filename is None:\n prof.print_stats()\n else:\n prof.dump_stats(filename)\n"
] | #!/usr/bin/env python
# Copyright (C) 2013-2018 Vincent Pelletier <plr.vincent@gmail.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
pprofile - Line-granularity, thread-aware deterministic and statistic
pure-python profiler
Usage as a command line:
$ pprofile --exclude-syspath some_python_executable arg1 ...
$ pprofile --exclude-syspath -m some_python_module -- arg1 ...
$ python -m pprofile --exclude-syspath some_python_executable arg1 ...
$ python -m pprofile -m some_python_module -- arg1 ...
See --help for all options.
Usage as a python module:
Deterministic profiling:
>>> prof = pprofile.Profile()
>>> with prof():
>>> # Code to profile
>>> prof.print_stats()
Statistic profiling:
>>> prof = StatisticalProfile()
>>> with prof():
>>> # Code to profile
>>> prof.print_stats()
"""
from __future__ import print_function, division
from collections import defaultdict, deque
from functools import partial, wraps
# Note: use time, not clock.
# Clock, at least on linux, ignores time not spent executing code
# (ex: time.sleep()). The goal of pprofile is not to profile python code
# execution as such (ie, to improve python interpreter), but to profile a
# possibly complex application, with its (IO) waits, sleeps, (...) so a
# developper can understand what is slow rather than what keeps the cpu busy.
# So using the wall-clock as a way to measure time spent is more meaningful.
# XXX: This said, if time() lacks precision, a better but likely
# platform-dependent wall-clock time source must be identified and used.
from time import time
from warnings import warn
import argparse
import io
import inspect
from itertools import count
import linecache
import os
# not caught by 2to3, likely because pipes.quote is not documented in python 2
try:
from pipes import quote as shlex_quote # Python 2
except ImportError:
from shlex import quote as shlex_quote # Python 3
import platform
import re
import runpy
import shlex
from subprocess import list2cmdline as windows_list2cmdline
import sys
import threading
import zipfile
try:
from IPython.core.magic import register_line_cell_magic
except ImportError:
register_line_cell_magic = lambda x: x
__all__ = (
'ProfileBase', 'ProfileRunnerBase', 'Profile', 'ThreadProfile',
'StatisticProfile', 'StatisticThread', 'run', 'runctx', 'runfile',
'runpath',
)
class BaseLineIterator(object):
def __init__(self, getline, filename, global_dict):
self._getline = getline
self._filename = filename
self._global_dict = global_dict
self._lineno = 1
def __iter__(self):
return self
def next(self):
lineno = self._lineno
self._lineno += 1
return lineno, self._getline(self._filename, lineno, self._global_dict)
if sys.version_info < (3, ):
    import codecs
    # Find coding specification (see PEP-0263)
    _matchCoding = re.compile(
        r'^[ \t\f]*#.*?coding[:=][ \t]*([-_.a-zA-Z0-9]+)',
    ).match
    class LineIterator(BaseLineIterator):
        """
        Python 2 variant: source lines come back as bytes, so decode them
        using the encoding declared in the file (PEP-0263 magic comment or
        UTF-8 BOM), defaulting to ascii.
        """
        # Encoding used to decode byte lines; None when lines are unicode.
        _encoding = None
        def __init__(self, *args, **kw):
            super(LineIterator, self).__init__(*args, **kw)
            # Identify encoding.
            first_line = self._getline(self._filename, 1, self._global_dict)
            if isinstance(first_line, bytes):
                # BOM - python2 only detects the (discouraged) UTF-8 BOM
                if first_line.startswith(codecs.BOM_UTF8):
                    self._encoding = 'utf-8'
                else:
                    # PEP-0263: "the first or second line must match [_matchCoding]"
                    match = _matchCoding(first_line)
                    if match is None:
                        match = _matchCoding(
                            self._getline(self._filename, 2, self._global_dict),
                        )
                    if match is None:
                        self._encoding = 'ascii'
                    else:
                        self._encoding = match.group(1)
            # else, first line is unicode.
        def next(self):
            lineno, line = super(LineIterator, self).next()
            if self._encoding:
                # Degrade undecodable bytes rather than raising.
                line = line.decode(self._encoding, errors='replace')
            return lineno, line
else:
    # getline returns unicode objects, nothing to do
    LineIterator = BaseLineIterator
if platform.system() == 'Windows':
    # Windows: rely on subprocess' cmd.exe-compatible quoting rules.
    quoteCommandline = windows_list2cmdline
else:
    def quoteCommandline(commandline):
        # POSIX: quote each argument for sh-compatible shells.
        return ' '.join(shlex_quote(x) for x in commandline)
class EncodeOrReplaceWriter(object):
    """
    Write-only file-ish object which replaces unsupported chars when
    underlying file rejects them.
    """
    def __init__(self, out):
        """
        out (file-ish opened for writing)
            Underlying stream. Its "encoding" attribute, when present and
            non-empty, drives the replacement round-trip; "ascii" otherwise.
        """
        self._encoding = getattr(out, 'encoding', None) or 'ascii'
        self._write = out.write
    def write(self, data):
        """
        Write data, degrading unsupported characters into replacement
        marks instead of letting UnicodeEncodeError propagate.
        """
        try:
            self._write(data)
        except UnicodeEncodeError:
            # Round-trip through the stream's encoding, substituting
            # whatever it cannot represent.
            sanitised = data.encode(self._encoding, errors='replace')
            self._write(sanitised.decode(self._encoding))
def _isCallgrindName(filepath):
return os.path.basename(filepath).startswith('cachegrind.out.')
class _FileTiming(object):
"""
Accumulation of profiling statistics (line and call durations) for a given
source "file" (unique global dict).
Subclasses should be aware that:
- this classes uses __slots__, mainly for cpu efficiency (property lookup
is in a list instead of a dict)
- it can access the BaseProfile instance which created any instace using
the "profiler" property, should they share some state across source
files.
- methods on this class are profiling choke-point - keep customisations
as cheap in CPU as you can !
"""
__slots__ = ('line_dict', 'call_dict', 'filename', 'global_dict',
'profiler')
def __init__(self, filename, global_dict, profiler):
self.filename = filename
self.global_dict = global_dict
self.line_dict = defaultdict(lambda: defaultdict(lambda: [0, 0]))
self.call_dict = {}
# Note: not used in this implementation, may be used by subclasses.
self.profiler = profiler
def hit(self, code, line, duration):
"""
A line has finished executing.
code (code)
container function's code object
line (int)
line number of just executed line
duration (float)
duration of the line, in seconds
"""
entry = self.line_dict[line][code]
entry[0] += 1
entry[1] += duration
def call(self, code, line, callee_file_timing, callee, duration, frame):
"""
A call originating from this file returned.
code (code)
caller's code object
line (int)
caller's line number
callee_file_timing (FileTiming)
callee's FileTiming
callee (code)
callee's code object
duration (float)
duration of the call, in seconds
frame (frame)
calle's entire frame as of its return
"""
try:
entry = self.call_dict[(code, line, callee)]
except KeyError:
self.call_dict[(code, line, callee)] = [callee_file_timing, 1, duration]
else:
entry[1] += 1
entry[2] += duration
def getHitStatsFor(self, line):
total_hits = total_duration = 0
for hits, duration in self.line_dict.get(line, {}).itervalues():
total_hits += hits
total_duration += duration
return total_hits, total_duration
def getLastLine(self):
return max(
max(self.line_dict) if self.line_dict else 0,
max(x for _, x, _ in self.call_dict) if self.call_dict else 0,
)
def iterHits(self):
for line, code_dict in self.line_dict.iteritems():
for code, (hits, duration) in code_dict.iteritems():
yield line, code, hits, duration
def iterCalls(self):
for (code, line, callee), (callee_file_timing, hit, duration) in \
self.call_dict.iteritems():
yield (
line,
code,
hit, duration,
callee_file_timing.filename, callee,
)
def getCallListByLine(self):
result = defaultdict(list)
for line, code, hit, duration, callee_filename, callee in self.iterCalls():
result[line].append((
code,
hit, duration,
callee_filename, callee,
))
return result
def getTotalTime(self):
return sum(
y[1]
for x in self.line_dict.itervalues()
for y in x.itervalues()
)
def getTotalHitCount(self):
return sum(
y[0]
for x in self.line_dict.itervalues()
for y in x.itervalues()
)
def getSortKey(self):
# total duration first, then total hit count for statistical profiling
result = [0, 0]
for entry in self.line_dict.itervalues():
for hit, duration in entry.itervalues():
result[0] += duration
result[1] += hit
return result
FileTiming = _FileTiming
class LocalDescriptor(threading.local):
    """
    Implementation of descriptor API for thread-local properties.
    Values are stored on the threading.local instance itself, keyed by
    owner-instance id, so each thread sees its own value.
    """
    def __init__(self, func=None):
        """
        func (callable)
            If provided, called when a missing property is accessed
            (ex: accessing thread never initialised that property).
            If None, AttributeError is raised.
        """
        super(LocalDescriptor, self).__init__()
        if func is not None:
            self.func = func
    def __get__(self, instance, owner):
        key = str(id(instance))
        try:
            return getattr(self, key)
        except AttributeError:
            # Re-raises AttributeError when no default factory was given.
            default = self.func()
            setattr(self, key, default)
            return default
    def __set__(self, instance, value):
        setattr(self, str(id(instance)), value)
    def __delete__(self, instance):
        try:
            delattr(self, str(id(instance)))
        except AttributeError:
            # Never set for this instance in this thread: nothing to do.
            pass
# Header line of the annotated-source output: fixed-width columns separated
# by '|', kept in sync with _ANNOTATE_FORMAT below.
_ANNOTATE_HEADER = \
    u'%6s|%10s|' \
    u'%13s|%13s|%7s|' \
    u'Source code' % (
        u'Line #', u'Hits',
        u'Time', u'Time per hit', u'%',
    )
# Separator line: '+' under each column separator, '-' elsewhere.
_ANNOTATE_HORIZONTAL_LINE = u''.join(x == u'|' and u'+' or u'-'
    for x in _ANNOTATE_HEADER)
# One annotated source line (columns match _ANNOTATE_HEADER).
_ANNOTATE_FORMAT = \
    u'%(lineno)6i|%(hits)10i|' \
    u'%(time)13g|%(time_per_hit)13g|%(percent)6.2f%%|' \
    u'%(line)s'
# Extra line emitted under a source line for each call it made.
_ANNOTATE_CALL_FORMAT = \
    u'(call)|%(hits)10i|' \
    u'%(time)13g|%(time_per_hit)13g|%(percent)6.2f%%|' \
    u'# %(callee_file)s:%(callee_line)s %(callee_name)s'
def _initStack():
# frame_time: when current frame execution started/resumed last
# frame_discount: time discounted from current frame, because it appeared
# lower in the call stack from the same callsite
# lineno: latest line which execution started
# line_time: time at which latest line started being executed
# line_duration: total time spent in current line up to last resume
now = time()
return (deque([[now, 0, None, now, 0]]), defaultdict(deque))
def _verboseProfileDecorator(self):
def decorator(func):
@wraps(func)
def wrapper(frame, event, arg):
self._traceEvent(frame, event)
return func(frame, event, arg)
return wrapper
return decorator
class ProfileBase(object):
    """
    Methods common to deterministic and statistic profiling.
    Subclasses can override the "FileTiming" property to use a different class.
    """
    __slots__ = (
        'file_dict',
        'global_dict',
        'total_time',
        '__dict__',
        '__weakref__',
        'merged_file_dict',
    )
    FileTiming = _FileTiming
    def __init__(self):
        # filename -> list of FileTiming (one per global dict using the name)
        self.file_dict = {}
        # deduplicated filename -> merged FileTiming (lazy cache, see
        # _mergeFileTiming)
        self.merged_file_dict = {}
        # id(frame.f_globals) -> FileTiming
        self.global_dict = {}
        self.total_time = 0
    def _getFileTiming(self, frame):
        """
        Return the FileTiming for given frame's module, creating and
        registering it on first access.
        """
        try:
            return self.global_dict[id(frame.f_globals)]
        except KeyError:
            f_globals = frame.f_globals
            name = self._getFilename(frame)
            self.global_dict[id(f_globals)] = file_timing = self.FileTiming(
                name,
                f_globals,
                self,
            )
            # file_dict modifications must be thread-safe to not lose measures.
            # setdefault is atomic, append is atomic.
            self.file_dict.setdefault(name, []).append(file_timing)
            return file_timing
    @staticmethod
    def _getFilename(frame):
        """
        Overload in subclasses to customise filename generation.
        """
        return frame.f_code.co_filename
    @staticmethod
    def _getline(filename, lineno, global_dict):
        """
        Overload in subclasses to customise source retrieval.
        """
        return linecache.getline(filename, lineno, global_dict)
    def _mergeFileTiming(self, rebuild=False):
        """
        Merge all FileTiming instances (one per thread and per homonym
        module) into a single FileTiming per deduplicated file name.
        Result is cached; pass rebuild=True to force recomputation.
        """
        merged_file_dict = self.merged_file_dict
        if merged_file_dict and not rebuild:
            return merged_file_dict
        merged_file_dict.clear()
        # Regroup by module, to find all duplicates from other threads.
        by_global_dict = defaultdict(list)
        for file_timing_list in self.file_dict.itervalues():
            for file_timing in file_timing_list:
                by_global_dict[
                    id(file_timing.global_dict)
                ].append(
                    file_timing,
                )
        # Resolve name conflicts.
        global_to_named_dict = {}
        for global_dict_id, file_timing_list in by_global_dict.iteritems():
            file_timing = file_timing_list[0]
            name = file_timing.filename
            if name in merged_file_dict:
                # Homonym modules: suffix "_<n>" until the name is unique.
                counter = count()
                base_name = name
                while name in merged_file_dict:
                    name = base_name + '_%i' % next(counter)
            global_to_named_dict[global_dict_id] = merged_file_dict[name] = FileTiming(
                name,
                file_timing.global_dict,
                file_timing.profiler, # Note: should be self
            )
        # Add all file timings from one module together under its
        # deduplicated name. This needs to happen after all names
        # are generated and all empty file timings are created so
        # call events cross-references can be remapped.
        for merged_file_timing in merged_file_dict.itervalues():
            line_dict = merged_file_timing.line_dict
            for file_timing in by_global_dict[id(merged_file_timing.global_dict)]:
                for line, other_code_dict in file_timing.line_dict.iteritems():
                    code_dict = line_dict[line]
                    for code, (
                        other_hits,
                        other_duration,
                    ) in other_code_dict.iteritems():
                        entry = code_dict[code]
                        entry[0] += other_hits
                        entry[1] += other_duration
                call_dict = merged_file_timing.call_dict
                for key, (
                    other_callee_file_timing,
                    other_hits,
                    other_duration,
                ) in file_timing.call_dict.iteritems():
                    try:
                        entry = call_dict[key]
                    except KeyError:
                        # Remap the callee reference onto the merged
                        # FileTiming created above.
                        entry = call_dict[key] = [
                            global_to_named_dict[
                                id(other_callee_file_timing.global_dict)
                            ],
                            other_hits,
                            other_duration,
                        ]
                    else:
                        entry[1] += other_hits
                        entry[2] += other_duration
        return merged_file_dict
    def getFilenameSet(self):
        """
        Returns a set of profiled file names.
        Note: "file name" is used loosely here. See python documentation for
        co_filename, linecache module and PEP302. It may not be a valid
        filesystem path.
        """
        result = set(self._mergeFileTiming())
        # Ignore profiling code. __file__ does not always provide consistent
        # results with f_code.co_filename (ex: easy_install with zipped egg),
        # so inspect current frame instead.
        # Get current file from one of pprofile methods. Compatible with
        # implementations that do not have the inspect.currentframe() method
        # (e.g. IronPython).
        # XXX: Assumes that all of pprofile code is in a single file.
        # XXX: Assumes that _initStack exists in pprofile module.
        result.discard(inspect.getsourcefile(_initStack))
        return result
    def _getFileNameList(self, filename, may_sort=True):
        """
        Normalise the "filename" argument of reporting methods into a list
        of file names, optionally sorted by decreasing total file score.
        """
        if filename is None:
            filename = self.getFilenameSet()
        elif isinstance(filename, basestring):
            return [filename]
        if may_sort:
            try:
                # Detect if filename is an ordered data type.
                filename[:0]
            except TypeError:
                # Not ordered, sort.
                file_dict = self._mergeFileTiming()
                filename = sorted(filename, reverse=True,
                    key=lambda x: file_dict[x].getSortKey()
                )
        return filename
    def callgrind(self, out, filename=None, commandline=None, relative_path=False):
        """
        Dump statistics in callgrind format.
        Contains:
        - per-line hit count, time and time-per-hit
        - call associations (call tree)
          Note: hit count is not inclusive, in that it is not the sum of all
          hits inside that call.
        Time unit: microsecond (1e-6 second).
        out (file-ish opened for writing)
            Destination of callgrind profiling data.
        filename (str, collection of str)
            If provided, dump stats for given source file(s) only.
            By default, list for all known files.
        commandline (anything with __str__)
            If provided, will be output as the command line used to generate
            this profiling data.
        relative_path (bool)
            When True, absolute elements are stripped from path. Useful when
            maintaining several copies of source trees with their own
            profiling result, so kcachegrind does not look in system-wide
            files which may not match with profiled code.
        """
        print(u'# callgrind format', file=out)
        print(u'version: 1', file=out)
        print(u'creator: pprofile', file=out)
        print(u'event: usphit :microseconds/hit', file=out)
        print(u'events: hits microseconds usphit', file=out)
        if commandline is not None:
            print(u'cmd:', commandline, file=out)
        file_dict = self._mergeFileTiming()
        if relative_path:
            convertPath = _relpath
        else:
            convertPath = lambda x: x
        if os.path.sep != "/":
            # qCacheGrind (windows build) needs at least one UNIX separator
            # in path to find the file. Adapt here even if this is probably
            # more of a qCacheGrind issue...
            convertPath = lambda x, cascade=convertPath: cascade(
                '/'.join(x.split(os.path.sep))
            )
        code_to_name_dict = {}
        homonym_counter = {}
        def getCodeName(filename, code):
            # Tracks code objects globally, because callee information needs
            # to be consistent accross files.
            # Inside a file, grants unique names to each code object.
            try:
                return code_to_name_dict[code]
            except KeyError:
                name = code.co_name + ':%i' % code.co_firstlineno
                key = (filename, name)
                homonym_count = homonym_counter.get(key, 0)
                if homonym_count:
                    name += '_%i' % homonym_count
                homonym_counter[key] = homonym_count + 1
                code_to_name_dict[code] = name
                return name
        for current_file in self._getFileNameList(filename, may_sort=False):
            file_timing = file_dict[current_file]
            print(u'fl=%s' % convertPath(current_file), file=out)
            # When a local callable is created an immediately executed, this
            # loop would start a new "fn=" section but would not end it before
            # emitting "cfn=" lines, making the callee appear as not being
            # called by interrupted "fn=" section.
            # So dispatch all functions in a first pass, and build
            # uninterrupted sections in a second pass.
            # Note: cost line is a list just to be mutable. A single item is
            # expected.
            func_dict = defaultdict(lambda: defaultdict(lambda: ([], [])))
            for lineno, code, hits, duration in file_timing.iterHits():
                func_dict[getCodeName(current_file, code)][lineno][0].append(
                    (hits, int(duration * 1000000)),
                )
            for (
                lineno,
                caller,
                call_hits, call_duration,
                callee_file, callee,
            ) in file_timing.iterCalls():
                call_ticks = int(call_duration * 1000000)
                func_call_list = func_dict[
                    getCodeName(current_file, caller)
                ][lineno][1]
                append = func_call_list.append
                append(u'cfl=' + convertPath(callee_file))
                append(u'cfn=' + getCodeName(callee_file, callee))
                append(u'calls=%i %i' % (call_hits, callee.co_firstlineno))
                append(u'%i %i %i %i' % (lineno, call_hits, call_ticks, call_ticks // call_hits))
            for func_name, line_dict in func_dict.iteritems():
                print(u'fn=%s' % func_name, file=out)
                for lineno, (func_hit_list, func_call_list) in sorted(line_dict.iteritems()):
                    if func_hit_list:
                        # Multiple function objects may "reside" on the same
                        # line of the same file (same global dict).
                        # Sum these up and produce a single cachegrind event.
                        hits = sum(x for x, _ in func_hit_list)
                        ticks = sum(x for _, x in func_hit_list)
                        print(
                            u'%i %i %i %i' % (
                                lineno,
                                hits,
                                ticks,
                                ticks // hits,
                            ),
                            file=out,
                        )
                    for line in func_call_list:
                        print(line, file=out)
    def annotate(self, out, filename=None, commandline=None, relative_path=False):
        """
        Dump annotated source code with current profiling statistics to "out"
        file.
        Time unit: second.
        out (file-ish opened for writing)
            Destination of annotated sources.
        filename (str, collection of str)
            If provided, dump stats for given source file(s) only.
            If unordered collection, it will get sorted by decreasing total
            file score (total time if available, then total hit count).
            By default, list for all known files.
        commandline (anything with __str__)
            If provided, will be output as the command line used to generate
            this annotation.
        relative_path (bool)
            For compatibility with callgrind. Ignored.
        """
        file_dict = self._mergeFileTiming()
        total_time = self.total_time
        if commandline is not None:
            print(u'Command line:', commandline, file=out)
        print(u'Total duration: %gs' % total_time, file=out)
        if not total_time:
            return
        def percent(value, scale):
            if scale == 0:
                return 0
            return value * 100 / scale
        for name in self._getFileNameList(filename):
            file_timing = file_dict[name]
            file_total_time = file_timing.getTotalTime()
            call_list_by_line = file_timing.getCallListByLine()
            print(u'File: %s' % name, file=out)
            print(u'File duration: %gs (%.2f%%)' % (file_total_time,
                percent(file_total_time, total_time)), file=out)
            print(_ANNOTATE_HEADER, file=out)
            print(_ANNOTATE_HORIZONTAL_LINE, file=out)
            last_line = file_timing.getLastLine()
            for lineno, line in LineIterator(
                self._getline,
                file_timing.filename,
                file_timing.global_dict,
            ):
                # Stop once past both end-of-source and last profiled line.
                if not line and lineno > last_line:
                    break
                hits, duration = file_timing.getHitStatsFor(lineno)
                print(_ANNOTATE_FORMAT % {
                    u'lineno': lineno,
                    u'hits': hits,
                    u'time': duration,
                    u'time_per_hit': duration / hits if hits else 0,
                    u'percent': percent(duration, total_time),
                    u'line': (line or u'').rstrip(),
                }, file=out)
                for (
                    _,
                    call_hits, call_duration,
                    callee_file, callee,
                ) in call_list_by_line.get(lineno, ()):
                    print(_ANNOTATE_CALL_FORMAT % {
                        u'hits': call_hits,
                        u'time': call_duration,
                        u'time_per_hit': call_duration / call_hits,
                        u'percent': percent(call_duration, total_time),
                        u'callee_file': callee_file,
                        u'callee_line': callee.co_firstlineno,
                        u'callee_name': callee.co_name,
                    }, file=out)
    def _iterRawFile(self, name):
        # Yield raw (non-annotated) source lines of given profiled file.
        file_timing = self._mergeFileTiming()[name]
        for lineno in count(1):
            line = self._getline(file_timing.filename, lineno,
                file_timing.global_dict)
            if not line:
                break
            yield line
    def iterSource(self):
        """
        Iterator over all involved files.
        Yields 2-tuple composed of file path and an iterator over
        (non-annotated) source lines.
        Can be used to generate a file tree for use with kcachegrind, for
        example.
        """
        for name in self.getFilenameSet():
            yield name, self._iterRawFile(name)
    # profile/cProfile-like API
    def dump_stats(self, filename):
        """
        Similar to profile.Profile.dump_stats - but different output format !
        Output format is picked from the file name: callgrind when it looks
        like a cachegrind.out.* file, annotated source otherwise.
        """
        if _isCallgrindName(filename):
            with open(filename, 'w') as out:
                self.callgrind(out)
        else:
            with io.open(filename, 'w', errors='replace') as out:
                self.annotate(out)
    def print_stats(self):
        """
        Similar to profile.Profile.print_stats .
        Returns None.
        """
        self.annotate(EncodeOrReplaceWriter(sys.stdout))
class ProfileRunnerBase(object):
    """
    Mixin providing the profile/cProfile-like "run something under this
    profiler" API on top of the context manager protocol which subclasses
    must implement (__enter__/__exit__).
    """
    def __call__(self):
        # Lets an instance be used where a context manager factory is
        # expected (see StatisticProfile.__call__ for an override).
        return self
    def __enter__(self):
        raise NotImplementedError
    def __exit__(self, exc_type, exc_val, exc_tb):
        raise NotImplementedError
    # profile/cProfile-like API
    def runctx(self, cmd, globals, locals):
        """Similar to profile.Profile.runctx ."""
        with self():
            exec(cmd, globals, locals)
        return self
    def runcall(self, func, *args, **kw):
        """Similar to profile.Profile.runcall ."""
        with self():
            return func(*args, **kw)
    def runfile(self, fd, argv, fd_name='<unknown>', compile_flags=0,
            dont_inherit=1, globals={}):
        """
        Profile code read from an opened file-ish object.
        fd is closed before the code gets executed.
        Note: the "globals" default is never mutated (it is copied).
        """
        with fd:
            code = compile(fd.read(), fd_name, 'exec', flags=compile_flags,
                dont_inherit=dont_inherit)
        saved_argv = list(sys.argv)
        run_globals = globals.copy()
        # Mimic a "python <script>" top-level environment.
        run_globals['__file__'] = fd_name
        run_globals['__name__'] = '__main__'
        run_globals['__package__'] = None
        try:
            sys.argv[:] = argv
            return self.runctx(code, run_globals, None)
        finally:
            sys.argv[:] = saved_argv
    def runpath(self, path, argv):
        """Profile a python script located at given filesystem path."""
        saved_path = list(sys.path)
        try:
            # Mimic "python <path>": script's directory first on sys.path.
            sys.path.insert(0, os.path.dirname(path))
            return self.runfile(open(path, 'rb'), argv, fd_name=path)
        finally:
            sys.path[:] = saved_path
    def runmodule(self, module, argv):
        """Profile a module, similar to "python -m <module>"."""
        saved_argv = list(sys.argv)
        saved_path0 = sys.path[0]
        try:
            # Mimic "python -m": current directory first on sys.path.
            sys.path[0] = os.getcwd()
            sys.argv[:] = argv
            with self():
                runpy.run_module(module, run_name='__main__', alter_sys=True)
        finally:
            sys.argv[:] = saved_argv
            sys.path[0] = saved_path0
        return self
class Profile(ProfileBase, ProfileRunnerBase):
    """
    Deterministic, recursive, line-granularity, profiling class.
    Does not require any source code change to work.
    If the performance hit is too large, it can benefit from some
    integration (calling enable/disable around selected code chunks).
    The sum of time spent in all profiled lines is less than the total
    profiled time reported. This is (part of) profiling overhead.
    This also means that sum of time-spent-on-line percentage is less than
    100%.
    All times are "internal time", ie they do not count time spent inside
    called (profilable, so python) functions.
    """
    __slots__ = (
        '_global_trace',
        '_local_trace',
        'stack',
        'enabled_start',
    )
    def __init__(self, verbose=False):
        """
        verbose (bool)
            When True, every trace event is also described on stderr
            (see _traceEvent). Cryptic and very verbose.
        """
        super(Profile, self).__init__()
        if verbose:
            self._global_trace = _verboseProfileDecorator(self)(
                self._real_global_trace)
            self._local_trace = _verboseProfileDecorator(self)(
                self._real_local_trace)
        else:
            self._global_trace = self._real_global_trace
            self._local_trace = self._real_local_trace
        self.stack = None
        self.enabled_start = None
    def _enable(self):
        """
        Overload this method when subclassing. Called before actually
        enabling trace.
        """
        self.stack = _initStack()
        self.enabled_start = time()
    def enable(self):
        """
        Enable profiling.
        """
        if self.enabled_start:
            warn('Duplicate "enable" call')
        else:
            self._enable()
            sys.settrace(self._global_trace)
    def _disable(self):
        """
        Overload this method when subclassing. Called after actually disabling
        trace.
        """
        self.total_time += time() - self.enabled_start
        self.enabled_start = None
        del self.stack
    def disable(self):
        """
        Disable profiling.
        """
        if self.enabled_start:
            sys.settrace(None)
            self._disable()
        else:
            warn('Duplicate "disable" call')
    def __enter__(self):
        """
        __enter__() -> self
        """
        self.enable()
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        """
        __exit__(*excinfo) -> None. Disables profiling.
        """
        self.disable()
    def _traceEvent(self, frame, event):
        """
        Describe given trace event on stderr (verbose mode only):
        elapsed time, stack-depth indentation, event, location.
        """
        f_code = frame.f_code
        lineno = frame.f_lineno
        print('%10.6f%s%s %s:%s %s+%s' % (
            time() - self.enabled_start,
            ' ' * len(self.stack[0]),
            event,
            f_code.co_filename,
            lineno,
            f_code.co_name,
            lineno - f_code.co_firstlineno,
        ), file=sys.stderr)
    def _real_global_trace(self, frame, event, arg):
        """
        Trace callback for "call" events: push a new stack entry for the
        callee and suspend timing of the caller's current line.
        """
        local_trace = self._local_trace
        if local_trace is not None:
            event_time = time()
            callee_entry = [event_time, 0, frame.f_lineno, event_time, 0]
            stack, callee_dict = self.stack
            try:
                caller_entry = stack[-1]
            except IndexError:
                pass
            else:
                # Suspend caller frame
                frame_time, frame_discount, lineno, line_time, line_duration = caller_entry
                caller_entry[4] = event_time - line_time + line_duration
            # Track recursion on a per-(caller code, callee code) basis, so
            # recursive call durations can be discounted on return.
            callee_dict[(frame.f_back.f_code, frame.f_code)].append(callee_entry)
            stack.append(callee_entry)
        return local_trace
    def _real_local_trace(self, frame, event, arg):
        """
        Trace callback for "line" and "return" events: account time spent
        on the line which just finished, and on return also account the
        whole call on the caller side.
        """
        if event == 'line' or event == 'return':
            event_time = time()
            stack, callee_dict = self.stack
            try:
                stack_entry = stack[-1]
            except IndexError:
                warn('Profiling stack underflow, disabling.')
                self.disable()
                return None
            frame_time, frame_discount, lineno, line_time, line_duration = stack_entry
            file_timing = self._getFileTiming(frame)
            file_timing.hit(frame.f_code, lineno,
                event_time - line_time + line_duration)
            if event == 'line':
                # Start a new line
                stack_entry[2] = frame.f_lineno
                stack_entry[3] = event_time
                stack_entry[4] = 0
            else:
                # 'return' event, <frame> is still callee
                # Resume caller frame
                stack.pop()
                stack[-1][3] = event_time
                caller_frame = frame.f_back
                caller_code = caller_frame.f_code
                callee_code = frame.f_code
                callee_entry_list = callee_dict[(caller_code, callee_code)]
                callee_entry_list.pop()
                call_duration = event_time - frame_time
                if callee_entry_list:
                    # Callee is also somewhere up the stack, so discount this
                    # call duration from it.
                    callee_entry_list[-1][1] += call_duration
                self._getFileTiming(caller_frame).call(
                    caller_code, caller_frame.f_lineno,
                    file_timing,
                    callee_code, call_duration - frame_discount,
                    frame,
                )
        return self._local_trace
    # profile/cProfile-like API
    def run(self, cmd):
        """Similar to profile.Profile.run ."""
        import __main__
        dikt = __main__.__dict__
        return self.runctx(cmd, dikt, dikt)
class ThreadProfile(Profile):
    """
    threading.Thread-aware version of Profile class.
    Threads started after enable() call will be profiled.
    After disable() call, threads will need to be switched into and trigger a
    trace event (typically a "line" event) before they can notice the
    disabling.
    """
    __slots__ = ('_local_trace_backup', )
    # One independent profiling stack and one FileTiming registry per
    # thread; results get merged at reporting time (_mergeFileTiming).
    stack = LocalDescriptor(_initStack)
    global_dict = LocalDescriptor(dict)
    def __init__(self, **kw):
        super(ThreadProfile, self).__init__(**kw)
        self._local_trace_backup = self._local_trace
    def _enable(self):
        # Restore the real local trace (cleared on disable) and hook thread
        # creation so threads spawned from now on are traced too.
        self._local_trace = self._local_trace_backup
        threading.settrace(self._global_trace)
        super(ThreadProfile, self)._enable()
    def _disable(self):
        super(ThreadProfile, self)._disable()
        threading.settrace(None)
        # Already-running threads see this on their next trace event and
        # stop collecting (see _real_global_trace's None check).
        self._local_trace = None
class StatisticProfile(ProfileBase, ProfileRunnerBase):
    """
    Statistic profiling class.
    This class does not gather its own samples by itself.
    Instead, it must be provided with call stacks (as returned by
    sys._getframe() or sys._current_frames()).
    """
    def __init__(self):
        super(StatisticProfile, self).__init__()
        # Durations are meaningless for statistic profiling; a non-zero
        # total keeps reporting code from short-circuiting.
        self.total_time = 1
    def sample(self, frame):
        """
        Account one sample: a hit on the innermost frame's current line,
        plus one zero-duration call edge per caller/callee pair walking up
        the given stack.
        """
        getFileTiming = self._getFileTiming
        callee_timing = getFileTiming(frame)
        callee_code = frame.f_code
        callee_timing.hit(callee_code, frame.f_lineno, 0)
        caller = frame.f_back
        while caller is not None:
            caller_timing = getFileTiming(caller)
            caller_code = caller.f_code
            caller_timing.call(
                caller_code, caller.f_lineno,
                callee_timing, callee_code, 0, frame,
            )
            # Shift one level up the stack.
            callee_timing = caller_timing
            frame = caller
            callee_code = caller_code
            caller = frame.f_back
    def __call__(self, period=.001, single=True, group=None, name=None):
        """
        Instanciate StatisticThread.
        >>> s_profile = StatisticProfile()
        >>> with s_profile(single=False):
        >>>    # Code to profile
        Is equivalent to:
        >>> s_profile = StatisticProfile()
        >>> s_thread = StatisticThread(profiler=s_profile, single=False)
        >>> with s_thread:
        >>>    # Code to profile
        """
        return StatisticThread(
            profiler=self, period=period, single=single, group=group,
            name=name,
        )
# BBB
StatisticalProfile = StatisticProfile
class StatisticThread(threading.Thread, ProfileRunnerBase):
    """
    Usage in a nutshell:
      with StatisticThread() as profiler_thread:
        # do stuff
      profiler_thread.profiler.print_stats()
    """
    __slots__ = (
        '_test',
        '_start_time',
        'clean_exit',
    )
    def __init__(self, profiler=None, period=.001, single=True, group=None, name=None):
        """
        profiler (None or StatisticProfile instance)
          Available on instances as the "profiler" read-only property.
          If None, a new profiler instance will be created.
        period (float)
          How many seconds to wait between consecutive samples.
          The smaller, the more profiling overhead, but the faster results
          become meaningful.
          The larger, the less profiling overhead, but requires long profiling
          session to get meaningful results.
        single (bool)
          Profile only the thread which created this instance.
        group, name
          See Python's threading.Thread API.
        """
        if profiler is None:
            profiler = StatisticProfile()
        if single:
            # Bind the creating thread's ident now, so only that thread's
            # frames are sampled by run().
            self._test = lambda x, ident=threading.current_thread().ident: ident == x
        else:
            self._test = None
        super(StatisticThread, self).__init__(
            group=group,
            name=name,
        )
        self._stop_event = threading.Event()
        self._period = period
        self._profiler = profiler
        profiler.total_time = 0
        self.daemon = True
        self.clean_exit = False
    @property
    def profiler(self):
        # Read-only access to the wrapped StatisticProfile instance.
        return self._profiler
    def start(self):
        """
        Start the sampling thread and the wall-clock measurement.
        """
        self.clean_exit = False
        self._can_run = True
        self._start_time = time()
        super(StatisticThread, self).start()
    def stop(self):
        """
        Request thread to stop.
        Does not wait for actual termination (use join() method).
        """
        if self.is_alive():
            self._can_run = False
            self._stop_event.set()
            self._profiler.total_time += time() - self._start_time
            self._start_time = None
    def __enter__(self):
        """
        __enter__() -> self
        """
        self.start()
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        """
        __exit__(*excinfo) -> None. Stops and joins profiling thread.
        """
        self.stop()
        self.join()
    def run(self):
        """
        Sampling loop: every period, feed the matching threads' current
        call stacks to the profiler.
        """
        current_frames = sys._current_frames
        test = self._test
        if test is None:
            # Sample every thread except this sampling thread itself.
            test = lambda x, ident=threading.current_thread().ident: ident != x
        sample = self._profiler.sample
        stop_event = self._stop_event
        wait = partial(stop_event.wait, self._period)
        while self._can_run:
            for ident, frame in current_frames().iteritems():
                if test(ident):
                    sample(frame)
            # Drop the last frame reference promptly, to not delay garbage
            # collection of the sampled thread's objects.
            frame = None
            wait()
        stop_event.clear()
        self.clean_exit = True
    def callgrind(self, *args, **kw):
        # Deprecated: use self.profiler.callgrind instead.
        warn('deprecated', DeprecationWarning)
        return self._profiler.callgrind(*args, **kw)
    def annotate(self, *args, **kw):
        # Deprecated: use self.profiler.annotate instead.
        warn('deprecated', DeprecationWarning)
        return self._profiler.annotate(*args, **kw)
    def dump_stats(self, *args, **kw):
        # Deprecated: use self.profiler.dump_stats instead.
        warn('deprecated', DeprecationWarning)
        return self._profiler.dump_stats(*args, **kw)
    def print_stats(self, *args, **kw):
        # Deprecated: use self.profiler.print_stats instead.
        warn('deprecated', DeprecationWarning)
        return self._profiler.print_stats(*args, **kw)
    def iterSource(self, *args, **kw):
        # Deprecated: use self.profiler.iterSource instead.
        warn('deprecated', DeprecationWarning)
        return self._profiler.iterSource(*args, **kw)
# BBB
StatisticalThread = StatisticThread
# profile/cProfile-like API (no sort parameter !)
def _run(threads, verbose, func_name, filename, *args, **kw):
    """
    Common implementation of the module-level run helpers: build a profiler,
    invoke one of its run methods, then always emit statistics.
    threads (bool-ish)
        Whether spawned threads should be profiled too.
    verbose (bool)
        Forwarded to the profiler constructor.
    func_name (str)
        Name of the profiler method to invoke with *args and **kw.
    filename (str or None)
        Where to dump stats; stdout when None.
    """
    prof = (ThreadProfile if threads else Profile)(verbose=verbose)
    try:
        try:
            getattr(prof, func_name)(*args, **kw)
        except SystemExit:
            # Profiled code is allowed to exit; still report statistics.
            pass
    finally:
        if filename is None:
            prof.print_stats()
        else:
            prof.dump_stats(filename)
def run(cmd, filename=None, threads=True, verbose=False):
    """Similar to profile.run ."""
    # Delegates profiler creation and stats output to _run.
    _run(threads, verbose, 'run', filename, cmd)
def runctx(cmd, globals, locals, filename=None, threads=True, verbose=False):
    """Similar to profile.runctx ."""
    # Delegates profiler creation and stats output to _run.
    _run(threads, verbose, 'runctx', filename, cmd, globals, locals)
def runfile(fd, argv, fd_name='<unknown>', compile_flags=0, dont_inherit=1,
        filename=None, threads=True, verbose=False):
    """
    Run code from given file descriptor with profiling enabled.
    Closes fd before executing contained code.
    """
    # Delegates to ProfileRunnerBase.runfile via _run.
    _run(threads, verbose, 'runfile', filename, fd, argv, fd_name,
        compile_flags, dont_inherit)
_allsep = os.sep + (os.altsep or '')
def _relpath(name):
"""
Strip absolute components from path.
Inspired from zipfile.write().
"""
return os.path.normpath(os.path.splitdrive(name)[1]).lstrip(_allsep)
def _main(argv, stdin=None):
    """
    Command-line entry point implementation.

    argv (list of str)
        Full command line, program name at index 0.
    stdin (file-ish or None)
        When given together with "-m -", profiled source is read from it.
        Used internally by the %%pprofile IPython magic.
    """
    # Maps the user-facing format name to the profiler method producing it.
    format_dict = {
        'text': 'annotate',
        'callgrind': 'callgrind',
    }
    parser = argparse.ArgumentParser(argv[0])
    parser.add_argument('script', help='Python script to execute (optionaly '
        'followed by its arguments)', nargs='?')
    parser.add_argument('argv', nargs=argparse.REMAINDER)
    parser.add_argument('-o', '--out', default='-',
        help='Write annotated sources to this file. Defaults to stdout.')
    parser.add_argument('-z', '--zipfile',
        help='Name of a zip file to generate from all involved source files. '
        'Useful with callgrind output.')
    parser.add_argument('-t', '--threads', default=1, type=int, help='If '
        'non-zero, trace threads spawned by program. Default: %(default)s')
    parser.add_argument('-f', '--format', choices=format_dict,
        help='Format in which output is generated. If not set, auto-detected '
        'from filename if provided, falling back to "text".')
    parser.add_argument('-v', '--verbose', action='store_true',
        help='Enable profiler internal tracing output. Cryptic and verbose.')
    parser.add_argument('-s', '--statistic', default=0, type=float,
        help='Use this period for statistic profiling, or use deterministic '
        'profiling when 0.')
    parser.add_argument('-m', dest='module',
        help='Searches sys.path for the named module and runs the '
        'corresponding .py file as a script. When given, positional arguments '
        'become sys.argv[1:]')
    group = parser.add_argument_group(
        title='Filtering',
        description='Allows excluding (and re-including) code from '
        '"file names" matching regular expressions. '
        '"file name" follows the semantics of python\'s "co_filename": '
        'it may be a valid path, of an existing or non-existing file, '
        'but it may be some arbitrary string too.'
    )
    group.add_argument('--exclude-syspath', action='store_true',
        help='Exclude all from default "sys.path". Beware: this will also '
        'exclude properly-installed non-standard modules, which may not be '
        'what you want.')
    group.add_argument('--exclude', action='append', default=[],
        help='Exclude files whose name starts with any pattern.')
    group.add_argument('--include', action='append', default=[],
        help='Include files whose name would have otherwise excluded. '
        'If no exclusion was specified, all paths are excluded first.')
    options = parser.parse_args(argv[1:])
    if options.exclude_syspath:
        options.exclude.extend('^' + re.escape(x) for x in sys.path)
    if options.include and not options.exclude:
        options.exclude.append('') # All-matching regex
    if options.verbose:
        if options.exclude:
            print('Excluding:', file=sys.stderr)
            for regex in options.exclude:
                print('\t' + regex, file=sys.stderr)
            if options.include:
                print('But including:', file=sys.stderr)
                for regex in options.include:
                    print('\t' + regex, file=sys.stderr)
    # Select how the profiled code will be started: a script path, code read
    # from stdin, or a module looked up on sys.path.
    if options.module is None:
        if options.script is None:
            parser.error('too few arguments')
        args = [options.script] + options.argv
        runner_method_kw = {
            'path': args[0],
            'argv': args,
        }
        runner_method_id = 'runpath'
    elif stdin is not None and options.module == '-':
        # Undocumented way of using -m, used internally by %%pprofile
        args = ['<stdin>']
        if options.script is not None:
            args.append(options.script)
        args.extend(options.argv)
        import __main__
        runner_method_kw = {
            'fd': stdin,
            'argv': args,
            'fd_name': '<stdin>',
            'globals': __main__.__dict__,
        }
        runner_method_id = 'runfile'
    else:
        args = [options.module]
        if options.script is not None:
            args.append(options.script)
        args.extend(options.argv)
        runner_method_kw = {
            'module': options.module,
            'argv': args,
        }
        runner_method_id = 'runmodule'
    if options.format is None:
        # Auto-detect output format from the output file name.
        if _isCallgrindName(options.out):
            options.format = 'callgrind'
        else:
            options.format = 'text'
    relative_path = options.format == 'callgrind' and options.zipfile
    if options.statistic:
        prof = StatisticalProfile()
        runner = StatisticalThread(
            profiler=prof,
            period=options.statistic,
            single=not options.threads,
        )
    else:
        if options.threads:
            klass = ThreadProfile
        else:
            klass = Profile
        prof = runner = klass(verbose=options.verbose)
    try:
        getattr(runner, runner_method_id)(**runner_method_kw)
    finally:
        # Emit the report even if profiled code raised.
        if options.out == '-':
            out = EncodeOrReplaceWriter(sys.stdout)
            close = lambda: None
        else:
            out = io.open(options.out, 'w', errors='replace')
            close = out.close
        if options.exclude:
            exclusion_search_list = [
                re.compile(x).search for x in options.exclude
            ]
            include_search_list = [
                re.compile(x).search for x in options.include
            ]
            # Keep a file if it matches no exclusion, or matches an inclusion.
            filename_set = {
                x for x in prof.getFilenameSet()
                if not (
                    any(y(x) for y in exclusion_search_list) and
                    not any(y(x) for y in include_search_list)
                )
            }
        else:
            filename_set = None
        commandline = quoteCommandline(args)
        getattr(prof, format_dict[options.format])(
            out,
            filename=filename_set,
            # python2 repr returns bytes, python3 repr returns unicode
            commandline=getattr(
                commandline,
                'decode',
                lambda _: commandline,
            )('ascii'),
            relative_path=relative_path,
        )
        close()
        zip_path = options.zipfile
        if zip_path:
            if relative_path:
                convertPath = _relpath
            else:
                convertPath = lambda x: x
            with zipfile.ZipFile(
                zip_path,
                mode='w',
                compression=zipfile.ZIP_DEFLATED,
            ) as zip_file:
                for name, lines in prof.iterSource():
                    zip_file.writestr(
                        convertPath(name),
                        ''.join(lines)
                    )
    if options.statistic and not runner.clean_exit:
        # Mostly useful for regression testing, as exceptions raised in
        # threads do not change exit status.
        sys.exit(1)
def pprofile(line, cell=None):
    """
    Profile line execution.
    """
    if cell is not None:
        # Cell magic: feed the cell body to the command-line entry point
        # through the undocumented "-m -" stdin mode.
        return _main(
            ['%%pprofile', '-m', '-'] + shlex.split(line),
            io.StringIO(cell),
        )
    # Line magic.
    # TODO: detect and use arguments (statistical profiling, ...) ?
    return run(line)
# Register pprofile as an IPython line/cell magic when possible, then drop
# the module-level name so only the magic remains.
try:
    register_line_cell_magic(pprofile)
except Exception:
    # ipython can be imported, but may not be currently running.
    pass
del pprofile
def main():
    """Console-script entry point: run _main with the process arguments."""
    _main(sys.argv)
if __name__ == '__main__':
    main()
|
vpelletier/pprofile | pprofile.py | _relpath | python | def _relpath(name):
return os.path.normpath(os.path.splitdrive(name)[1]).lstrip(_allsep) | Strip absolute components from path.
Inspired from zipfile.write(). | train | https://github.com/vpelletier/pprofile/blob/51a36896727565faf23e5abccc9204e5f935fe1e/pprofile.py#L1207-L1212 | null | #!/usr/bin/env python
# Copyright (C) 2013-2018 Vincent Pelletier <plr.vincent@gmail.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
pprofile - Line-granularity, thread-aware deterministic and statistic
pure-python profiler
Usage as a command line:
$ pprofile --exclude-syspath some_python_executable arg1 ...
$ pprofile --exclude-syspath -m some_python_module -- arg1 ...
$ python -m pprofile --exclude-syspath some_python_executable arg1 ...
$ python -m pprofile -m some_python_module -- arg1 ...
See --help for all options.
Usage as a python module:
Deterministic profiling:
>>> prof = pprofile.Profile()
>>> with prof():
>>> # Code to profile
>>> prof.print_stats()
Statistic profiling:
>>> prof = StatisticalProfile()
>>> with prof():
>>> # Code to profile
>>> prof.print_stats()
"""
from __future__ import print_function, division
from collections import defaultdict, deque
from functools import partial, wraps
# Note: use time, not clock.
# Clock, at least on linux, ignores time not spent executing code
# (ex: time.sleep()). The goal of pprofile is not to profile python code
# execution as such (ie, to improve python interpreter), but to profile a
# possibly complex application, with its (IO) waits, sleeps, (...) so a
# developper can understand what is slow rather than what keeps the cpu busy.
# So using the wall-clock as a way to measure time spent is more meaningful.
# XXX: This said, if time() lacks precision, a better but likely
# platform-dependent wall-clock time source must be identified and used.
from time import time
from warnings import warn
import argparse
import io
import inspect
from itertools import count
import linecache
import os
# not caught by 2to3, likely because pipes.quote is not documented in python 2
try:
from pipes import quote as shlex_quote # Python 2
except ImportError:
from shlex import quote as shlex_quote # Python 3
import platform
import re
import runpy
import shlex
from subprocess import list2cmdline as windows_list2cmdline
import sys
import threading
import zipfile
try:
from IPython.core.magic import register_line_cell_magic
except ImportError:
register_line_cell_magic = lambda x: x
# Public API of this module.
__all__ = (
    'ProfileBase', 'ProfileRunnerBase', 'Profile', 'ThreadProfile',
    'StatisticProfile', 'StatisticThread', 'run', 'runctx', 'runfile',
    'runpath',
)
class BaseLineIterator(object):
def __init__(self, getline, filename, global_dict):
self._getline = getline
self._filename = filename
self._global_dict = global_dict
self._lineno = 1
def __iter__(self):
return self
def next(self):
lineno = self._lineno
self._lineno += 1
return lineno, self._getline(self._filename, lineno, self._global_dict)
if sys.version_info < (3, ):
    import codecs
    # Find coding specification (see PEP-0263)
    _matchCoding = re.compile(
        r'^[ \t\f]*#.*?coding[:=][ \t]*([-_.a-zA-Z0-9]+)',
    ).match
    class LineIterator(BaseLineIterator):
        """
        Python 2 variant: decodes byte source lines to unicode using the
        encoding declared in the file (PEP-0263) or its UTF-8 BOM.
        """
        # Encoding to decode byte lines with; None means lines are already
        # unicode and need no decoding.
        _encoding = None
        def __init__(self, *args, **kw):
            super(LineIterator, self).__init__(*args, **kw)
            # Identify encoding.
            first_line = self._getline(self._filename, 1, self._global_dict)
            if isinstance(first_line, bytes):
                # BOM - python2 only detects the (discouraged) UTF-8 BOM
                if first_line.startswith(codecs.BOM_UTF8):
                    self._encoding = 'utf-8'
                else:
                    # PEP-0263: "the first or second line must match [_matchCoding]"
                    match = _matchCoding(first_line)
                    if match is None:
                        match = _matchCoding(
                            self._getline(self._filename, 2, self._global_dict),
                        )
                    if match is None:
                        self._encoding = 'ascii'
                    else:
                        self._encoding = match.group(1)
            # else, first line is unicode.
        def next(self):
            lineno, line = super(LineIterator, self).next()
            if self._encoding:
                line = line.decode(self._encoding, errors='replace')
            return lineno, line
else:
    # getline returns unicode objects, nothing to do
    LineIterator = BaseLineIterator
if platform.system() == 'Windows':
    # Windows needs cmd.exe-style quoting rules.
    quoteCommandline = windows_list2cmdline
else:
    def quoteCommandline(commandline):
        """Quote an argument list into a single POSIX shell command line."""
        return ' '.join(shlex_quote(x) for x in commandline)
class EncodeOrReplaceWriter(object):
    """
    Write-only file-ish object which replaces unsupported chars when
    underlying file rejects them.
    """
    def __init__(self, out):
        # Fall back to ascii when the target declares no encoding.
        self._encoding = getattr(out, 'encoding', None) or 'ascii'
        self._write = out.write
    def write(self, data):
        try:
            self._write(data)
        except UnicodeEncodeError:
            # Re-encode with replacement characters and retry.
            sanitised = data.encode(self._encoding, errors='replace')
            self._write(sanitised.decode(self._encoding))
def _isCallgrindName(filepath):
return os.path.basename(filepath).startswith('cachegrind.out.')
class _FileTiming(object):
    """
    Accumulation of profiling statistics (line and call durations) for a given
    source "file" (unique global dict).
    Subclasses should be aware that:
    - this class uses __slots__, mainly for cpu efficiency (property lookup
      is in a list instead of a dict)
    - it can access the BaseProfile instance which created any instance using
      the "profiler" property, should they share some state across source
      files.
    - methods on this class are profiling choke-point - keep customisations
      as cheap in CPU as you can !
    """
    __slots__ = ('line_dict', 'call_dict', 'filename', 'global_dict',
        'profiler')
    def __init__(self, filename, global_dict, profiler):
        self.filename = filename
        self.global_dict = global_dict
        # line_dict[lineno][code] -> [hit count, total duration]
        self.line_dict = defaultdict(lambda: defaultdict(lambda: [0, 0]))
        # call_dict[(caller code, lineno, callee code)]
        #   -> [callee FileTiming, hit count, total duration]
        self.call_dict = {}
        # Note: not used in this implementation, may be used by subclasses.
        self.profiler = profiler
    def hit(self, code, line, duration):
        """
        A line has finished executing.
        code (code)
          container function's code object
        line (int)
          line number of just executed line
        duration (float)
          duration of the line, in seconds
        """
        entry = self.line_dict[line][code]
        entry[0] += 1
        entry[1] += duration
    def call(self, code, line, callee_file_timing, callee, duration, frame):
        """
        A call originating from this file returned.
        code (code)
          caller's code object
        line (int)
          caller's line number
        callee_file_timing (FileTiming)
          callee's FileTiming
        callee (code)
          callee's code object
        duration (float)
          duration of the call, in seconds
        frame (frame)
          callee's entire frame as of its return
        """
        try:
            entry = self.call_dict[(code, line, callee)]
        except KeyError:
            self.call_dict[(code, line, callee)] = [callee_file_timing, 1, duration]
        else:
            entry[1] += 1
            entry[2] += duration
    def getHitStatsFor(self, line):
        """Return (hits, duration) summed over all code objects for a line."""
        total_hits = total_duration = 0
        for hits, duration in self.line_dict.get(line, {}).itervalues():
            total_hits += hits
            total_duration += duration
        return total_hits, total_duration
    def getLastLine(self):
        """Return the largest line number with any recorded hit or call."""
        return max(
            max(self.line_dict) if self.line_dict else 0,
            max(x for _, x, _ in self.call_dict) if self.call_dict else 0,
        )
    def iterHits(self):
        """Yield (line, code, hits, duration) for every recorded line hit."""
        for line, code_dict in self.line_dict.iteritems():
            for code, (hits, duration) in code_dict.iteritems():
                yield line, code, hits, duration
    def iterCalls(self):
        """Yield (line, code, hits, duration, callee filename, callee code)."""
        for (code, line, callee), (callee_file_timing, hit, duration) in \
                self.call_dict.iteritems():
            yield (
                line,
                code,
                hit, duration,
                callee_file_timing.filename, callee,
            )
    def getCallListByLine(self):
        """Return {line: [(code, hits, duration, callee filename, callee)]}."""
        result = defaultdict(list)
        for line, code, hit, duration, callee_filename, callee in self.iterCalls():
            result[line].append((
                code,
                hit, duration,
                callee_filename, callee,
            ))
        return result
    def getTotalTime(self):
        """Total time spent in this file, in seconds."""
        return sum(
            y[1]
            for x in self.line_dict.itervalues()
            for y in x.itervalues()
        )
    def getTotalHitCount(self):
        """Total number of line hits recorded for this file."""
        return sum(
            y[0]
            for x in self.line_dict.itervalues()
            for y in x.itervalues()
        )
    def getSortKey(self):
        """Return [total duration, total hits] for file ordering."""
        # total duration first, then total hit count for statistical profiling
        result = [0, 0]
        for entry in self.line_dict.itervalues():
            for hit, duration in entry.itervalues():
                result[0] += duration
                result[1] += hit
        return result
# Default FileTiming implementation; ProfileBase subclasses may override it.
FileTiming = _FileTiming
class LocalDescriptor(threading.local):
    """
    Implementation of descriptor API for thread-local properties.

    Values are stored on this threading.local instance, keyed by the id of
    the owner instance, so each thread sees its own value per instance.
    """
    def __init__(self, func=None):
        """
        func (callable)
          If provided, called when a missing property is accessed
          (ex: accessing thread never initialised that property).
          If None, AttributeError is raised.
        """
        super(LocalDescriptor, self).__init__()
        if func is not None:
            self.func = func
    def __get__(self, instance, owner):
        """Return the current thread's value, lazily built via func."""
        try:
            return getattr(self, str(id(instance)))
        except AttributeError:
            # Raises AttributeError if func was not provided.
            value = self.func()
            setattr(self, str(id(instance)), value)
            return value
    def __set__(self, instance, value):
        """Set the current thread's value for this instance."""
        setattr(self, str(id(instance)), value)
    def __delete__(self, instance):
        """Drop the current thread's value, ignoring when unset."""
        try:
            delattr(self, str(id(instance)))
        except AttributeError:
            pass
# Column header of the annotated-source output table.
_ANNOTATE_HEADER = \
    u'%6s|%10s|' \
    u'%13s|%13s|%7s|' \
    u'Source code' % (
        u'Line #', u'Hits',
        u'Time', u'Time per hit', u'%',
    )
# Separator row: '+' under column separators, '-' elsewhere.
_ANNOTATE_HORIZONTAL_LINE = u''.join(x == u'|' and u'+' or u'-'
    for x in _ANNOTATE_HEADER)
# Format of a regular annotated source line.
_ANNOTATE_FORMAT = \
    u'%(lineno)6i|%(hits)10i|' \
    u'%(time)13g|%(time_per_hit)13g|%(percent)6.2f%%|' \
    u'%(line)s'
# Format of a "(call)" detail row emitted after a calling line.
_ANNOTATE_CALL_FORMAT = \
    u'(call)|%(hits)10i|' \
    u'%(time)13g|%(time_per_hit)13g|%(percent)6.2f%%|' \
    u'# %(callee_file)s:%(callee_line)s %(callee_name)s'
def _initStack():
# frame_time: when current frame execution started/resumed last
# frame_discount: time discounted from current frame, because it appeared
# lower in the call stack from the same callsite
# lineno: latest line which execution started
# line_time: time at which latest line started being executed
# line_duration: total time spent in current line up to last resume
now = time()
return (deque([[now, 0, None, now, 0]]), defaultdict(deque))
def _verboseProfileDecorator(self):
    """
    Decorator factory: wrap a trace function so each trace event is first
    reported through self._traceEvent before normal handling.
    """
    def decorator(func):
        @wraps(func)
        def wrapper(frame, event, arg):
            self._traceEvent(frame, event)
            return func(frame, event, arg)
        return wrapper
    return decorator
class ProfileBase(object):
    """
    Methods common to deterministic and statistic profiling.
    Subclasses can override the "FileTiming" property to use a different class.
    """
    __slots__ = (
        'file_dict',
        'global_dict',
        'total_time',
        '__dict__',
        '__weakref__',
        'merged_file_dict',
    )
    FileTiming = _FileTiming
    def __init__(self):
        # filename -> list of FileTiming (one per distinct global dict)
        self.file_dict = {}
        # Deduplicated filename -> merged FileTiming; lazily built cache.
        self.merged_file_dict = {}
        # id(frame.f_globals) -> FileTiming
        self.global_dict = {}
        # Cumulated wall-clock profiling duration, in seconds.
        self.total_time = 0
    def _getFileTiming(self, frame):
        """Return (creating it if needed) the FileTiming for frame's module."""
        try:
            return self.global_dict[id(frame.f_globals)]
        except KeyError:
            f_globals = frame.f_globals
            name = self._getFilename(frame)
            self.global_dict[id(f_globals)] = file_timing = self.FileTiming(
                name,
                f_globals,
                self,
            )
            # file_dict modifications must be thread-safe to not lose measures.
            # setdefault is atomic, append is atomic.
            self.file_dict.setdefault(name, []).append(file_timing)
            return file_timing
    @staticmethod
    def _getFilename(frame):
        """
        Overload in subclasses to customise filename generation.
        """
        return frame.f_code.co_filename
    @staticmethod
    def _getline(filename, lineno, global_dict):
        """
        Overload in subclasses to customise source retrieval.
        """
        return linecache.getline(filename, lineno, global_dict)
    def _mergeFileTiming(self, rebuild=False):
        """
        Merge per-thread FileTiming instances into one per module,
        deduplicating conflicting file names. Result is cached unless
        rebuild is true.
        """
        merged_file_dict = self.merged_file_dict
        if merged_file_dict and not rebuild:
            return merged_file_dict
        merged_file_dict.clear()
        # Regroup by module, to find all duplicates from other threads.
        by_global_dict = defaultdict(list)
        for file_timing_list in self.file_dict.itervalues():
            for file_timing in file_timing_list:
                by_global_dict[
                    id(file_timing.global_dict)
                ].append(
                    file_timing,
                )
        # Resolve name conflicts.
        global_to_named_dict = {}
        for global_dict_id, file_timing_list in by_global_dict.iteritems():
            file_timing = file_timing_list[0]
            name = file_timing.filename
            if name in merged_file_dict:
                counter = count()
                base_name = name
                while name in merged_file_dict:
                    name = base_name + '_%i' % next(counter)
            global_to_named_dict[global_dict_id] = merged_file_dict[name] = FileTiming(
                name,
                file_timing.global_dict,
                file_timing.profiler, # Note: should be self
            )
        # Add all file timings from one module together under its
        # deduplicated name. This needs to happen after all names
        # are generated and all empty file timings are created so
        # call events cross-references can be remapped.
        for merged_file_timing in merged_file_dict.itervalues():
            line_dict = merged_file_timing.line_dict
            for file_timing in by_global_dict[id(merged_file_timing.global_dict)]:
                for line, other_code_dict in file_timing.line_dict.iteritems():
                    code_dict = line_dict[line]
                    for code, (
                        other_hits,
                        other_duration,
                    ) in other_code_dict.iteritems():
                        entry = code_dict[code]
                        entry[0] += other_hits
                        entry[1] += other_duration
                call_dict = merged_file_timing.call_dict
                for key, (
                    other_callee_file_timing,
                    other_hits,
                    other_duration,
                ) in file_timing.call_dict.iteritems():
                    try:
                        entry = call_dict[key]
                    except KeyError:
                        entry = call_dict[key] = [
                            global_to_named_dict[
                                id(other_callee_file_timing.global_dict)
                            ],
                            other_hits,
                            other_duration,
                        ]
                    else:
                        entry[1] += other_hits
                        entry[2] += other_duration
        return merged_file_dict
    def getFilenameSet(self):
        """
        Returns a set of profiled file names.
        Note: "file name" is used loosely here. See python documentation for
        co_filename, linecache module and PEP302. It may not be a valid
        filesystem path.
        """
        result = set(self._mergeFileTiming())
        # Ignore profiling code. __file__ does not always provide consistent
        # results with f_code.co_filename (ex: easy_install with zipped egg),
        # so inspect current frame instead.
        # Get current file from one of pprofile methods. Compatible with
        # implementations that do not have the inspect.currentframe() method
        # (e.g. IronPython).
        # XXX: Assumes that all of pprofile code is in a single file.
        # XXX: Assumes that _initStack exists in pprofile module.
        result.discard(inspect.getsourcefile(_initStack))
        return result
    def _getFileNameList(self, filename, may_sort=True):
        """
        Normalise a filename argument (None / str / collection) to a list,
        sorting unordered collections by decreasing file score.
        """
        if filename is None:
            filename = self.getFilenameSet()
        elif isinstance(filename, basestring):
            return [filename]
        if may_sort:
            try:
                # Detect if filename is an ordered data type.
                filename[:0]
            except TypeError:
                # Not ordered, sort.
                file_dict = self._mergeFileTiming()
                filename = sorted(filename, reverse=True,
                    key=lambda x: file_dict[x].getSortKey()
                )
        return filename
    def callgrind(self, out, filename=None, commandline=None, relative_path=False):
        """
        Dump statistics in callgrind format.
        Contains:
        - per-line hit count, time and time-per-hit
        - call associations (call tree)
          Note: hit count is not inclusive, in that it is not the sum of all
          hits inside that call.
        Time unit: microsecond (1e-6 second).
        out (file-ish opened for writing)
            Destination of callgrind profiling data.
        filename (str, collection of str)
            If provided, dump stats for given source file(s) only.
            By default, list for all known files.
        commandline (anything with __str__)
            If provided, will be output as the command line used to generate
            this profiling data.
        relative_path (bool)
            When True, absolute elements are stripped from path. Useful when
            maintaining several copies of source trees with their own
            profiling result, so kcachegrind does not look in system-wide
            files which may not match with profiled code.
        """
        print(u'# callgrind format', file=out)
        print(u'version: 1', file=out)
        print(u'creator: pprofile', file=out)
        print(u'event: usphit :microseconds/hit', file=out)
        print(u'events: hits microseconds usphit', file=out)
        if commandline is not None:
            print(u'cmd:', commandline, file=out)
        file_dict = self._mergeFileTiming()
        if relative_path:
            convertPath = _relpath
        else:
            convertPath = lambda x: x
        if os.path.sep != "/":
            # qCacheGrind (windows build) needs at least one UNIX separator
            # in path to find the file. Adapt here even if this is probably
            # more of a qCacheGrind issue...
            convertPath = lambda x, cascade=convertPath: cascade(
                '/'.join(x.split(os.path.sep))
            )
        code_to_name_dict = {}
        homonym_counter = {}
        def getCodeName(filename, code):
            # Tracks code objects globally, because callee information needs
            # to be consistent across files.
            # Inside a file, grants unique names to each code object.
            try:
                return code_to_name_dict[code]
            except KeyError:
                name = code.co_name + ':%i' % code.co_firstlineno
                key = (filename, name)
                homonym_count = homonym_counter.get(key, 0)
                if homonym_count:
                    name += '_%i' % homonym_count
                homonym_counter[key] = homonym_count + 1
                code_to_name_dict[code] = name
                return name
        for current_file in self._getFileNameList(filename, may_sort=False):
            file_timing = file_dict[current_file]
            print(u'fl=%s' % convertPath(current_file), file=out)
            # When a local callable is created and immediately executed, this
            # loop would start a new "fn=" section but would not end it before
            # emitting "cfn=" lines, making the callee appear as not being
            # called by interrupted "fn=" section.
            # So dispatch all functions in a first pass, and build
            # uninterrupted sections in a second pass.
            # Note: cost line is a list just to be mutable. A single item is
            # expected.
            func_dict = defaultdict(lambda: defaultdict(lambda: ([], [])))
            for lineno, code, hits, duration in file_timing.iterHits():
                func_dict[getCodeName(current_file, code)][lineno][0].append(
                    (hits, int(duration * 1000000)),
                )
            for (
                lineno,
                caller,
                call_hits, call_duration,
                callee_file, callee,
            ) in file_timing.iterCalls():
                call_ticks = int(call_duration * 1000000)
                func_call_list = func_dict[
                    getCodeName(current_file, caller)
                ][lineno][1]
                append = func_call_list.append
                append(u'cfl=' + convertPath(callee_file))
                append(u'cfn=' + getCodeName(callee_file, callee))
                append(u'calls=%i %i' % (call_hits, callee.co_firstlineno))
                append(u'%i %i %i %i' % (lineno, call_hits, call_ticks, call_ticks // call_hits))
            for func_name, line_dict in func_dict.iteritems():
                print(u'fn=%s' % func_name, file=out)
                for lineno, (func_hit_list, func_call_list) in sorted(line_dict.iteritems()):
                    if func_hit_list:
                        # Multiple function objects may "reside" on the same
                        # line of the same file (same global dict).
                        # Sum these up and produce a single cachegrind event.
                        hits = sum(x for x, _ in func_hit_list)
                        ticks = sum(x for _, x in func_hit_list)
                        print(
                            u'%i %i %i %i' % (
                                lineno,
                                hits,
                                ticks,
                                ticks // hits,
                            ),
                            file=out,
                        )
                    for line in func_call_list:
                        print(line, file=out)
    def annotate(self, out, filename=None, commandline=None, relative_path=False):
        """
        Dump annotated source code with current profiling statistics to "out"
        file.
        Time unit: second.
        out (file-ish opened for writing)
            Destination of annotated sources.
        filename (str, collection of str)
            If provided, dump stats for given source file(s) only.
            If unordered collection, it will get sorted by decreasing total
            file score (total time if available, then total hit count).
            By default, list for all known files.
        commandline (anything with __str__)
            If provided, will be output as the command line used to generate
            this annotation.
        relative_path (bool)
            For compatibility with callgrind. Ignored.
        """
        file_dict = self._mergeFileTiming()
        total_time = self.total_time
        if commandline is not None:
            print(u'Command line:', commandline, file=out)
        print(u'Total duration: %gs' % total_time, file=out)
        if not total_time:
            return
        def percent(value, scale):
            if scale == 0:
                return 0
            return value * 100 / scale
        for name in self._getFileNameList(filename):
            file_timing = file_dict[name]
            file_total_time = file_timing.getTotalTime()
            call_list_by_line = file_timing.getCallListByLine()
            print(u'File: %s' % name, file=out)
            print(u'File duration: %gs (%.2f%%)' % (file_total_time,
                percent(file_total_time, total_time)), file=out)
            print(_ANNOTATE_HEADER, file=out)
            print(_ANNOTATE_HORIZONTAL_LINE, file=out)
            last_line = file_timing.getLastLine()
            for lineno, line in LineIterator(
                    self._getline,
                    file_timing.filename,
                    file_timing.global_dict,
                ):
                if not line and lineno > last_line:
                    break
                hits, duration = file_timing.getHitStatsFor(lineno)
                print(_ANNOTATE_FORMAT % {
                    u'lineno': lineno,
                    u'hits': hits,
                    u'time': duration,
                    u'time_per_hit': duration / hits if hits else 0,
                    u'percent': percent(duration, total_time),
                    u'line': (line or u'').rstrip(),
                }, file=out)
                for (
                    _,
                    call_hits, call_duration,
                    callee_file, callee,
                ) in call_list_by_line.get(lineno, ()):
                    print(_ANNOTATE_CALL_FORMAT % {
                        u'hits': call_hits,
                        u'time': call_duration,
                        u'time_per_hit': call_duration / call_hits,
                        u'percent': percent(call_duration, total_time),
                        u'callee_file': callee_file,
                        u'callee_line': callee.co_firstlineno,
                        u'callee_name': callee.co_name,
                    }, file=out)
    def _iterRawFile(self, name):
        """Yield raw source lines of "name" until the first missing line."""
        file_timing = self._mergeFileTiming()[name]
        for lineno in count(1):
            line = self._getline(file_timing.filename, lineno,
                file_timing.global_dict)
            if not line:
                break
            yield line
    def iterSource(self):
        """
        Iterator over all involved files.
        Yields 2-tuple composed of file path and an iterator over
        (non-annotated) source lines.
        Can be used to generate a file tree for use with kcachegrind, for
        example.
        """
        for name in self.getFilenameSet():
            yield name, self._iterRawFile(name)
    # profile/cProfile-like API
    def dump_stats(self, filename):
        """
        Similar to profile.Profile.dump_stats - but different output format !
        Output format is selected from the file name (callgrind vs text).
        """
        if _isCallgrindName(filename):
            with open(filename, 'w') as out:
                self.callgrind(out)
        else:
            with io.open(filename, 'w', errors='replace') as out:
                self.annotate(out)
    def print_stats(self):
        """
        Similar to profile.Profile.print_stats .
        Returns None.
        """
        self.annotate(EncodeOrReplaceWriter(sys.stdout))
class ProfileRunnerBase(object):
    """
    Mixin providing profile/cProfile-like entry points for running code under
    a profiler. Subclasses must implement the context-manager protocol.
    """
    def __call__(self):
        # Allows using a single instance as "with prof():".
        return self
    def __enter__(self):
        raise NotImplementedError
    def __exit__(self, exc_type, exc_val, exc_tb):
        raise NotImplementedError
    # profile/cProfile-like API
    def runctx(self, cmd, globals, locals):
        """Similar to profile.Profile.runctx ."""
        with self():
            exec(cmd, globals, locals)
        return self
    def runcall(self, func, *args, **kw):
        """Similar to profile.Profile.runcall ."""
        with self():
            return func(*args, **kw)
    def runfile(self, fd, argv, fd_name='<unknown>', compile_flags=0,
            dont_inherit=1, globals={}):
        """
        Compile and run code from file-ish "fd" with profiling enabled,
        with sys.argv temporarily replaced by "argv".
        Note: the mutable default for "globals" is never mutated here, it is
        only copied below.
        """
        with fd:
            code = compile(fd.read(), fd_name, 'exec', flags=compile_flags,
                dont_inherit=dont_inherit)
        original_sys_argv = list(sys.argv)
        ctx_globals = globals.copy()
        ctx_globals['__file__'] = fd_name
        ctx_globals['__name__'] = '__main__'
        ctx_globals['__package__'] = None
        try:
            sys.argv[:] = argv
            return self.runctx(code, ctx_globals, None)
        finally:
            sys.argv[:] = original_sys_argv
    def runpath(self, path, argv):
        """Run the python script at "path" with profiling enabled."""
        original_sys_path = list(sys.path)
        try:
            # Mimic script execution: script directory first on sys.path.
            sys.path.insert(0, os.path.dirname(path))
            return self.runfile(open(path, 'rb'), argv, fd_name=path)
        finally:
            sys.path[:] = original_sys_path
    def runmodule(self, module, argv):
        """Run a module as "__main__" (like python -m) with profiling enabled."""
        original_sys_argv = list(sys.argv)
        original_sys_path0 = sys.path[0]
        try:
            sys.path[0] = os.getcwd()
            sys.argv[:] = argv
            with self():
                runpy.run_module(module, run_name='__main__', alter_sys=True)
        finally:
            sys.argv[:] = original_sys_argv
            sys.path[0] = original_sys_path0
        return self
class Profile(ProfileBase, ProfileRunnerBase):
"""
Deterministic, recursive, line-granularity, profiling class.
Does not require any source code change to work.
If the performance hit is too large, it can benefit from some
integration (calling enable/disable around selected code chunks).
The sum of time spent in all profiled lines is less than the total
profiled time reported. This is (part of) profiling overhead.
This also mans that sum of time-spent-on-line percentage is less than 100%.
All times are "internal time", ie they do not count time spent inside
called (profilable, so python) functions.
"""
__slots__ = (
'_global_trace',
'_local_trace',
'stack',
'enabled_start',
)
def __init__(self, verbose=False):
super(Profile, self).__init__()
if verbose:
self._global_trace = _verboseProfileDecorator(self)(
self._real_global_trace)
self._local_trace = _verboseProfileDecorator(self)(
self._real_local_trace)
else:
self._global_trace = self._real_global_trace
self._local_trace = self._real_local_trace
self.stack = None
self.enabled_start = None
def _enable(self):
"""
Overload this method when subclassing. Called before actually
enabling trace.
"""
self.stack = _initStack()
self.enabled_start = time()
def enable(self):
"""
Enable profiling.
"""
if self.enabled_start:
warn('Duplicate "enable" call')
else:
self._enable()
sys.settrace(self._global_trace)
def _disable(self):
"""
Overload this method when subclassing. Called after actually disabling
trace.
"""
self.total_time += time() - self.enabled_start
self.enabled_start = None
del self.stack
def disable(self):
"""
Disable profiling.
"""
if self.enabled_start:
sys.settrace(None)
self._disable()
else:
warn('Duplicate "disable" call')
def __enter__(self):
"""
__enter__() -> self
"""
self.enable()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""
__exit__(*excinfo) -> None. Disables profiling.
"""
self.disable()
def _traceEvent(self, frame, event):
f_code = frame.f_code
lineno = frame.f_lineno
print('%10.6f%s%s %s:%s %s+%s' % (
time() - self.enabled_start,
' ' * len(self.stack[0]),
event,
f_code.co_filename,
lineno,
f_code.co_name,
lineno - f_code.co_firstlineno,
), file=sys.stderr)
def _real_global_trace(self, frame, event, arg):
local_trace = self._local_trace
if local_trace is not None:
event_time = time()
callee_entry = [event_time, 0, frame.f_lineno, event_time, 0]
stack, callee_dict = self.stack
try:
caller_entry = stack[-1]
except IndexError:
pass
else:
# Suspend caller frame
frame_time, frame_discount, lineno, line_time, line_duration = caller_entry
caller_entry[4] = event_time - line_time + line_duration
callee_dict[(frame.f_back.f_code, frame.f_code)].append(callee_entry)
stack.append(callee_entry)
return local_trace
def _real_local_trace(self, frame, event, arg):
    # Local (per-frame) trace function: accounts time for 'line' and
    # 'return' events. Profiling choke-point: keep it cheap.
    if event == 'line' or event == 'return':
        event_time = time()
        stack, callee_dict = self.stack
        try:
            stack_entry = stack[-1]
        except IndexError:
            warn('Profiling stack underflow, disabling.')
            self.disable()
            return None
        frame_time, frame_discount, lineno, line_time, line_duration = stack_entry
        file_timing = self._getFileTiming(frame)
        # Credit the just-finished line with its active time plus any
        # duration banked before nested calls suspended it.
        file_timing.hit(frame.f_code, lineno,
            event_time - line_time + line_duration)
        if event == 'line':
            # Start a new line
            stack_entry[2] = frame.f_lineno
            stack_entry[3] = event_time
            stack_entry[4] = 0
        else:
            # 'return' event, <frame> is still callee
            # Resume caller frame
            stack.pop()
            stack[-1][3] = event_time
            caller_frame = frame.f_back
            caller_code = caller_frame.f_code
            callee_code = frame.f_code
            callee_entry_list = callee_dict[(caller_code, callee_code)]
            callee_entry_list.pop()
            call_duration = event_time - frame_time
            if callee_entry_list:
                # Callee is also somewhere up the stack, so discount this
                # call duration from it.
                callee_entry_list[-1][1] += call_duration
            # Record the caller->callee edge, net of recursion discount.
            self._getFileTiming(caller_frame).call(
                caller_code, caller_frame.f_lineno,
                file_timing,
                callee_code, call_duration - frame_discount,
                frame,
            )
    return self._local_trace
# profile/cProfile-like API
def run(self, cmd):
    """
    Profile a statement, like profile.Profile.run.

    cmd is executed with __main__'s namespace serving as both globals
    and locals, mirroring the stdlib profiler API.
    """
    import __main__
    namespace = __main__.__dict__
    return self.runctx(cmd, namespace, namespace)
class ThreadProfile(Profile):
    """
    threading.Thread-aware version of Profile class.
    Threads started after enable() call will be profiled.
    After disable() call, threads will need to be switched into and trigger a
    trace event (typically a "line" event) before they can notice the
    disabling.
    """
    __slots__ = ('_local_trace_backup', )
    # Thread-local descriptors: each thread gets its own profiling stack
    # and global-dict cache, so concurrent threads do not clobber
    # each other's state.
    stack = LocalDescriptor(_initStack)
    global_dict = LocalDescriptor(dict)
    def __init__(self, **kw):
        super(ThreadProfile, self).__init__(**kw)
        # Remember the local trace function so it can be restored after
        # _disable() set self._local_trace to None.
        self._local_trace_backup = self._local_trace
    def _enable(self):
        # Restore the local trace possibly cleared by a prior _disable().
        self._local_trace = self._local_trace_backup
        # Install the trace for threads started from now on, before the
        # base class starts the clock.
        threading.settrace(self._global_trace)
        super(ThreadProfile, self)._enable()
    def _disable(self):
        super(ThreadProfile, self)._disable()
        # Stop tracing newly-started threads...
        threading.settrace(None)
        # ...and make already-running threads drop profiling at their
        # next trace event (their global trace reads this attribute).
        self._local_trace = None
class StatisticProfile(ProfileBase, ProfileRunnerBase):
    """
    Statistic profiling class.

    This class does not gather its own samples by itself.
    Instead, it must be provided with call stacks (as returned by
    sys._getframe() or sys._current_frames()).
    """
    def __init__(self):
        super(StatisticProfile, self).__init__()
        # Non-zero placeholder; StatisticThread overwrites this with the
        # actual sampled wall-clock time.
        self.total_time = 1
    def sample(self, frame):
        """
        Account one stack sample: a zero-duration hit on the innermost
        line, plus a caller->callee edge for every level of the stack.
        """
        get_timing = self._getFileTiming
        callee_timing = get_timing(frame)
        callee_code = frame.f_code
        callee_timing.hit(callee_code, frame.f_lineno, 0)
        # Walk outwards from the sampled frame to the stack root.
        caller = frame.f_back
        while caller is not None:
            caller_timing = get_timing(caller)
            caller_code = caller.f_code
            caller_timing.call(
                caller_code, caller.f_lineno,
                callee_timing, callee_code, 0, frame,
            )
            # Shift one level outwards for the next edge.
            callee_timing = caller_timing
            callee_code = caller_code
            frame = caller
            caller = frame.f_back
    def __call__(self, period=.001, single=True, group=None, name=None):
        """
        Create a StatisticThread driving this profiler.

        >>> s_profile = StatisticProfile()
        >>> with s_profile(single=False):
        >>>     # Code to profile

        Is equivalent to:

        >>> s_profile = StatisticProfile()
        >>> s_thread = StatisticThread(profiler=s_profile, single=False)
        >>> with s_thread:
        >>>     # Code to profile
        """
        return StatisticThread(
            profiler=self, period=period, single=single, group=group,
            name=name,
        )
# BBB: backward-compatibility alias for the pre-rename class name.
StatisticalProfile = StatisticProfile
class StatisticThread(threading.Thread, ProfileRunnerBase):
    """
    Usage in a nutshell:
      with StatisticThread() as profiler_thread:
        # do stuff
      profiler_thread.profiler.print_stats()
    """
    # Note: threading.Thread provides __dict__, so attributes outside
    # __slots__ (like _stop_event) still work.
    __slots__ = (
        '_test',
        '_start_time',
        'clean_exit',
    )
    def __init__(self, profiler=None, period=.001, single=True, group=None, name=None):
        """
        profiler (None or StatisticProfile instance)
          Available on instances as the "profiler" read-only property.
          If None, a new profiler instance will be created.
        period (float)
          How many seconds to wait between consecutive samples.
          The smaller, the more profiling overhead, but the faster results
          become meaningful.
          The larger, the less profiling overhead, but requires long profiling
          session to get meaningful results.
        single (bool)
          Profile only the thread which created this instance.
        group, name
          See Python's threading.Thread API.
        """
        if profiler is None:
            profiler = StatisticProfile()
        if single:
            # Bind the *creating* thread's ident now, at definition time,
            # so only that thread gets sampled.
            self._test = lambda x, ident=threading.current_thread().ident: ident == x
        else:
            self._test = None
        super(StatisticThread, self).__init__(
            group=group,
            name=name,
        )
        self._stop_event = threading.Event()
        self._period = period
        self._profiler = profiler
        # Reset: total_time is accumulated by stop() below.
        profiler.total_time = 0
        # Daemon thread: never blocks interpreter exit.
        self.daemon = True
        self.clean_exit = False
    @property
    def profiler(self):
        # Read-only access to the driven profiler instance.
        return self._profiler
    def start(self):
        self.clean_exit = False
        self._can_run = True
        self._start_time = time()
        super(StatisticThread, self).start()
    def stop(self):
        """
        Request thread to stop.
        Does not wait for actual termination (use join() method).
        """
        if self.is_alive():
            self._can_run = False
            self._stop_event.set()
            # Account the wall-clock span during which sampling ran.
            self._profiler.total_time += time() - self._start_time
            self._start_time = None
    def __enter__(self):
        """
        __enter__() -> self
        """
        self.start()
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        """
        __exit__(*excinfo) -> None. Stops and joins profiling thread.
        """
        self.stop()
        self.join()
    def run(self):
        # Sampling loop, executed in this dedicated thread.
        current_frames = sys._current_frames
        test = self._test
        if test is None:
            # Multi-thread mode: sample every thread except this sampler
            # thread itself.
            test = lambda x, ident=threading.current_thread().ident: ident != x
        sample = self._profiler.sample
        stop_event = self._stop_event
        wait = partial(stop_event.wait, self._period)
        while self._can_run:
            for ident, frame in current_frames().iteritems():
                if test(ident):
                    sample(frame)
            # Drop the last frame reference promptly to avoid keeping
            # another thread's frame alive between samples.
            frame = None
            wait()
        stop_event.clear()
        self.clean_exit = True
    def callgrind(self, *args, **kw):
        # Deprecated pass-through: use self.profiler.callgrind instead.
        warn('deprecated', DeprecationWarning)
        return self._profiler.callgrind(*args, **kw)
    def annotate(self, *args, **kw):
        # Deprecated pass-through: use self.profiler.annotate instead.
        warn('deprecated', DeprecationWarning)
        return self._profiler.annotate(*args, **kw)
    def dump_stats(self, *args, **kw):
        # Deprecated pass-through: use self.profiler.dump_stats instead.
        warn('deprecated', DeprecationWarning)
        return self._profiler.dump_stats(*args, **kw)
    def print_stats(self, *args, **kw):
        # Deprecated pass-through: use self.profiler.print_stats instead.
        warn('deprecated', DeprecationWarning)
        return self._profiler.print_stats(*args, **kw)
    def iterSource(self, *args, **kw):
        # Deprecated pass-through: use self.profiler.iterSource instead.
        warn('deprecated', DeprecationWarning)
        return self._profiler.iterSource(*args, **kw)
# BBB: backward-compatibility alias for the pre-rename class name.
StatisticalThread = StatisticThread
# profile/cProfile-like API (no sort parameter !)
def _run(threads, verbose, func_name, filename, *args, **kw):
    """
    Shared implementation behind the module-level run*() helpers.

    Builds a profiler (thread-aware when threads is true), invokes the
    named runner method on it, and always reports results: annotated
    stats to stdout when filename is None, otherwise dumped to that file.
    """
    profiler_class = ThreadProfile if threads else Profile
    prof = profiler_class(verbose=verbose)
    try:
        try:
            getattr(prof, func_name)(*args, **kw)
        except SystemExit:
            # Profiled code may call sys.exit(); still report results.
            pass
    finally:
        if filename is None:
            prof.print_stats()
        else:
            prof.dump_stats(filename)
def run(cmd, filename=None, threads=True, verbose=False):
    """
    Similar to profile.run .

    cmd (str): statement to profile in __main__'s namespace.
    filename (str or None): dump stats to this file, or print to stdout
      when None.
    threads (bool): also profile threads spawned by cmd.
    verbose (bool): enable profiler-internal trace output.
    """
    _run(threads, verbose, 'run', filename, cmd)
def runctx(cmd, globals, locals, filename=None, threads=True, verbose=False):
    """
    Similar to profile.runctx .

    Executes cmd with the given globals/locals namespaces; see run()
    for the meaning of the remaining parameters.
    """
    _run(threads, verbose, 'runctx', filename, cmd, globals, locals)
def runfile(fd, argv, fd_name='<unknown>', compile_flags=0, dont_inherit=1,
        filename=None, threads=True, verbose=False):
    """
    Run code from given file descriptor with profiling enabled.
    Closes fd before executing contained code.

    argv becomes the profiled program's sys.argv; fd_name is used as the
    code's file name. See run() for filename/threads/verbose.
    """
    _run(threads, verbose, 'runfile', filename, fd, argv, fd_name,
        compile_flags, dont_inherit)
def runpath(path, argv, filename=None, threads=True, verbose=False):
    """
    Run code from open-accessible file path with profiling enabled.

    argv becomes the profiled program's sys.argv. See run() for
    filename/threads/verbose.
    """
    _run(threads, verbose, 'runpath', filename, path, argv)
# All path separator characters on this platform (primary + alternate, if any).
_allsep = os.sep + (os.altsep or '')
def _main(argv, stdin=None):
    """
    Command-line implementation: parse argv, profile the target, report.

    stdin, when not None, enables the undocumented "-m -" mode which
    reads the code to profile from that file object (used by the
    %%pprofile IPython magic).
    """
    # Maps output format name to the Profile method producing it.
    format_dict = {
        'text': 'annotate',
        'callgrind': 'callgrind',
    }
    parser = argparse.ArgumentParser(argv[0])
    parser.add_argument('script', help='Python script to execute (optionaly '
        'followed by its arguments)', nargs='?')
    parser.add_argument('argv', nargs=argparse.REMAINDER)
    parser.add_argument('-o', '--out', default='-',
        help='Write annotated sources to this file. Defaults to stdout.')
    parser.add_argument('-z', '--zipfile',
        help='Name of a zip file to generate from all involved source files. '
        'Useful with callgrind output.')
    parser.add_argument('-t', '--threads', default=1, type=int, help='If '
        'non-zero, trace threads spawned by program. Default: %(default)s')
    parser.add_argument('-f', '--format', choices=format_dict,
        help='Format in which output is generated. If not set, auto-detected '
        'from filename if provided, falling back to "text".')
    parser.add_argument('-v', '--verbose', action='store_true',
        help='Enable profiler internal tracing output. Cryptic and verbose.')
    parser.add_argument('-s', '--statistic', default=0, type=float,
        help='Use this period for statistic profiling, or use deterministic '
        'profiling when 0.')
    parser.add_argument('-m', dest='module',
        help='Searches sys.path for the named module and runs the '
        'corresponding .py file as a script. When given, positional arguments '
        'become sys.argv[1:]')
    group = parser.add_argument_group(
        title='Filtering',
        description='Allows excluding (and re-including) code from '
        '"file names" matching regular expressions. '
        '"file name" follows the semantics of python\'s "co_filename": '
        'it may be a valid path, of an existing or non-existing file, '
        'but it may be some arbitrary string too.'
    )
    group.add_argument('--exclude-syspath', action='store_true',
        help='Exclude all from default "sys.path". Beware: this will also '
        'exclude properly-installed non-standard modules, which may not be '
        'what you want.')
    group.add_argument('--exclude', action='append', default=[],
        help='Exclude files whose name starts with any pattern.')
    group.add_argument('--include', action='append', default=[],
        help='Include files whose name would have otherwise excluded. '
        'If no exclusion was specified, all paths are excluded first.')
    options = parser.parse_args(argv[1:])
    if options.exclude_syspath:
        options.exclude.extend('^' + re.escape(x) for x in sys.path)
    if options.include and not options.exclude:
        options.exclude.append('') # All-matching regex
    if options.verbose:
        if options.exclude:
            print('Excluding:', file=sys.stderr)
            for regex in options.exclude:
                print('\t' + regex, file=sys.stderr)
        if options.include:
            print('But including:', file=sys.stderr)
            for regex in options.include:
                print('\t' + regex, file=sys.stderr)
    # Select the runner method and its arguments from the invocation mode:
    # plain script, "-m -" (code from stdin), or "-m module".
    if options.module is None:
        if options.script is None:
            parser.error('too few arguments')
        args = [options.script] + options.argv
        runner_method_kw = {
            'path': args[0],
            'argv': args,
        }
        runner_method_id = 'runpath'
    elif stdin is not None and options.module == '-':
        # Undocumented way of using -m, used internaly by %%pprofile
        args = ['<stdin>']
        if options.script is not None:
            args.append(options.script)
        args.extend(options.argv)
        import __main__
        runner_method_kw = {
            'fd': stdin,
            'argv': args,
            'fd_name': '<stdin>',
            'globals': __main__.__dict__,
        }
        runner_method_id = 'runfile'
    else:
        args = [options.module]
        if options.script is not None:
            args.append(options.script)
        args.extend(options.argv)
        runner_method_kw = {
            'module': options.module,
            'argv': args,
        }
        runner_method_id = 'runmodule'
    # Auto-detect the output format from the output file name.
    if options.format is None:
        if _isCallgrindName(options.out):
            options.format = 'callgrind'
        else:
            options.format = 'text'
    relative_path = options.format == 'callgrind' and options.zipfile
    # Pick statistic vs deterministic (optionally thread-aware) profiler.
    if options.statistic:
        prof = StatisticalProfile()
        runner = StatisticalThread(
            profiler=prof,
            period=options.statistic,
            single=not options.threads,
        )
    else:
        if options.threads:
            klass = ThreadProfile
        else:
            klass = Profile
        prof = runner = klass(verbose=options.verbose)
    try:
        getattr(runner, runner_method_id)(**runner_method_kw)
    finally:
        # Always emit a report, even if the profiled code raised.
        if options.out == '-':
            out = EncodeOrReplaceWriter(sys.stdout)
            close = lambda: None
        else:
            out = io.open(options.out, 'w', errors='replace')
            close = out.close
        if options.exclude:
            exclusion_search_list = [
                re.compile(x).search for x in options.exclude
            ]
            include_search_list = [
                re.compile(x).search for x in options.include
            ]
            # Keep a file unless it matches an exclusion and no inclusion.
            filename_set = {
                x for x in prof.getFilenameSet()
                if not (
                    any(y(x) for y in exclusion_search_list) and
                    not any(y(x) for y in include_search_list)
                )
            }
        else:
            filename_set = None
        commandline = quoteCommandline(args)
        getattr(prof, format_dict[options.format])(
            out,
            filename=filename_set,
            # python2 repr returns bytes, python3 repr returns unicode
            commandline=getattr(
                commandline,
                'decode',
                lambda _: commandline,
            )('ascii'),
            relative_path=relative_path,
        )
        close()
        # Optionally archive all involved sources for later inspection
        # (typically alongside callgrind output).
        zip_path = options.zipfile
        if zip_path:
            if relative_path:
                convertPath = _relpath
            else:
                convertPath = lambda x: x
            with zipfile.ZipFile(
                zip_path,
                mode='w',
                compression=zipfile.ZIP_DEFLATED,
            ) as zip_file:
                for name, lines in prof.iterSource():
                    zip_file.writestr(
                        convertPath(name),
                        ''.join(lines)
                    )
    if options.statistic and not runner.clean_exit:
        # Mostly useful for regresion testing, as exceptions raised in threads
        # do not change exit status.
        sys.exit(1)
def pprofile(line, cell=None):
    """
    Profile line execution.

    Acts as an IPython line/cell magic: in line mode (cell is None) the
    line is profiled directly; in cell mode the cell body is fed to the
    command-line entry point through stdin, with the line providing
    command-line arguments.
    """
    if cell is None:
        # TODO: detect and use arguments (statistical profiling, ...) ?
        return run(line)
    argv = ['%%pprofile', '-m', '-'] + shlex.split(line)
    return _main(argv, io.StringIO(cell))
# Register the magic when running inside IPython; the name is deleted
# afterwards so it does not leak into the module's public namespace.
try:
    register_line_cell_magic(pprofile)
except Exception:
    # ipython can be imported, but may not be currently running.
    pass
del pprofile
def main():
    """Console-script entry point: forward the process argv to _main."""
    _main(sys.argv)
# Allow direct execution as a script ("python -m pprofile" / "python pprofile.py").
if __name__ == '__main__':
    main()
|
vpelletier/pprofile | pprofile.py | pprofile | python | def pprofile(line, cell=None):
if cell is None:
# TODO: detect and use arguments (statistical profiling, ...) ?
return run(line)
return _main(
['%%pprofile', '-m', '-'] + shlex.split(line),
io.StringIO(cell),
) | Profile line execution. | train | https://github.com/vpelletier/pprofile/blob/51a36896727565faf23e5abccc9204e5f935fe1e/pprofile.py#L1388-L1398 | [
"def run(cmd, filename=None, threads=True, verbose=False):\n \"\"\"Similar to profile.run .\"\"\"\n _run(threads, verbose, 'run', filename, cmd)\n",
"def _main(argv, stdin=None):\n format_dict = {\n 'text': 'annotate',\n 'callgrind': 'callgrind',\n }\n\n parser = argparse.ArgumentParser(argv[0])\n parser.add_argument('script', help='Python script to execute (optionaly '\n 'followed by its arguments)', nargs='?')\n parser.add_argument('argv', nargs=argparse.REMAINDER)\n parser.add_argument('-o', '--out', default='-',\n help='Write annotated sources to this file. Defaults to stdout.')\n parser.add_argument('-z', '--zipfile',\n help='Name of a zip file to generate from all involved source files. '\n 'Useful with callgrind output.')\n parser.add_argument('-t', '--threads', default=1, type=int, help='If '\n 'non-zero, trace threads spawned by program. Default: %(default)s')\n parser.add_argument('-f', '--format', choices=format_dict,\n help='Format in which output is generated. If not set, auto-detected '\n 'from filename if provided, falling back to \"text\".')\n parser.add_argument('-v', '--verbose', action='store_true',\n help='Enable profiler internal tracing output. Cryptic and verbose.')\n parser.add_argument('-s', '--statistic', default=0, type=float,\n help='Use this period for statistic profiling, or use deterministic '\n 'profiling when 0.')\n parser.add_argument('-m', dest='module',\n help='Searches sys.path for the named module and runs the '\n 'corresponding .py file as a script. When given, positional arguments '\n 'become sys.argv[1:]')\n\n group = parser.add_argument_group(\n title='Filtering',\n description='Allows excluding (and re-including) code from '\n '\"file names\" matching regular expressions. '\n '\"file name\" follows the semantics of python\\'s \"co_filename\": '\n 'it may be a valid path, of an existing or non-existing file, '\n 'but it may be some arbitrary string too.'\n )\n group.add_argument('--exclude-syspath', action='store_true',\n help='Exclude all from default \"sys.path\". 
Beware: this will also '\n 'exclude properly-installed non-standard modules, which may not be '\n 'what you want.')\n group.add_argument('--exclude', action='append', default=[],\n help='Exclude files whose name starts with any pattern.')\n group.add_argument('--include', action='append', default=[],\n help='Include files whose name would have otherwise excluded. '\n 'If no exclusion was specified, all paths are excluded first.')\n\n options = parser.parse_args(argv[1:])\n if options.exclude_syspath:\n options.exclude.extend('^' + re.escape(x) for x in sys.path)\n if options.include and not options.exclude:\n options.exclude.append('') # All-matching regex\n if options.verbose:\n if options.exclude:\n print('Excluding:', file=sys.stderr)\n for regex in options.exclude:\n print('\\t' + regex, file=sys.stderr)\n if options.include:\n print('But including:', file=sys.stderr)\n for regex in options.include:\n print('\\t' + regex, file=sys.stderr)\n\n if options.module is None:\n if options.script is None:\n parser.error('too few arguments')\n args = [options.script] + options.argv\n runner_method_kw = {\n 'path': args[0],\n 'argv': args,\n }\n runner_method_id = 'runpath'\n elif stdin is not None and options.module == '-':\n # Undocumented way of using -m, used internaly by %%pprofile\n args = ['<stdin>']\n if options.script is not None:\n args.append(options.script)\n args.extend(options.argv)\n import __main__\n runner_method_kw = {\n 'fd': stdin,\n 'argv': args,\n 'fd_name': '<stdin>',\n 'globals': __main__.__dict__,\n }\n runner_method_id = 'runfile'\n else:\n args = [options.module]\n if options.script is not None:\n args.append(options.script)\n args.extend(options.argv)\n runner_method_kw = {\n 'module': options.module,\n 'argv': args,\n }\n runner_method_id = 'runmodule'\n if options.format is None:\n if _isCallgrindName(options.out):\n options.format = 'callgrind'\n else:\n options.format = 'text'\n relative_path = options.format == 'callgrind' and 
options.zipfile\n if options.statistic:\n prof = StatisticalProfile()\n runner = StatisticalThread(\n profiler=prof,\n period=options.statistic,\n single=not options.threads,\n )\n else:\n if options.threads:\n klass = ThreadProfile\n else:\n klass = Profile\n prof = runner = klass(verbose=options.verbose)\n try:\n getattr(runner, runner_method_id)(**runner_method_kw)\n finally:\n if options.out == '-':\n out = EncodeOrReplaceWriter(sys.stdout)\n close = lambda: None\n else:\n out = io.open(options.out, 'w', errors='replace')\n close = out.close\n if options.exclude:\n exclusion_search_list = [\n re.compile(x).search for x in options.exclude\n ]\n include_search_list = [\n re.compile(x).search for x in options.include\n ]\n filename_set = {\n x for x in prof.getFilenameSet()\n if not (\n any(y(x) for y in exclusion_search_list) and\n not any(y(x) for y in include_search_list)\n )\n }\n else:\n filename_set = None\n commandline = quoteCommandline(args)\n getattr(prof, format_dict[options.format])(\n out,\n filename=filename_set,\n # python2 repr returns bytes, python3 repr returns unicode\n commandline=getattr(\n commandline,\n 'decode',\n lambda _: commandline,\n )('ascii'),\n relative_path=relative_path,\n )\n close()\n zip_path = options.zipfile\n if zip_path:\n if relative_path:\n convertPath = _relpath\n else:\n convertPath = lambda x: x\n with zipfile.ZipFile(\n zip_path,\n mode='w',\n compression=zipfile.ZIP_DEFLATED,\n ) as zip_file:\n for name, lines in prof.iterSource():\n zip_file.writestr(\n convertPath(name),\n ''.join(lines)\n )\n if options.statistic and not runner.clean_exit:\n # Mostly useful for regresion testing, as exceptions raised in threads\n # do not change exit status.\n sys.exit(1)\n"
] | #!/usr/bin/env python
# Copyright (C) 2013-2018 Vincent Pelletier <plr.vincent@gmail.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
pprofile - Line-granularity, thread-aware deterministic and statistic
pure-python profiler
Usage as a command line:
$ pprofile --exclude-syspath some_python_executable arg1 ...
$ pprofile --exclude-syspath -m some_python_module -- arg1 ...
$ python -m pprofile --exclude-syspath some_python_executable arg1 ...
$ python -m pprofile -m some_python_module -- arg1 ...
See --help for all options.
Usage as a python module:
Deterministic profiling:
>>> prof = pprofile.Profile()
>>> with prof():
>>> # Code to profile
>>> prof.print_stats()
Statistic profiling:
>>> prof = StatisticalProfile()
>>> with prof():
>>> # Code to profile
>>> prof.print_stats()
"""
from __future__ import print_function, division
from collections import defaultdict, deque
from functools import partial, wraps
# Note: use time, not clock.
# Clock, at least on linux, ignores time not spent executing code
# (ex: time.sleep()). The goal of pprofile is not to profile python code
# execution as such (ie, to improve python interpreter), but to profile a
# possibly complex application, with its (IO) waits, sleeps, (...) so a
# developper can understand what is slow rather than what keeps the cpu busy.
# So using the wall-clock as a way to measure time spent is more meaningful.
# XXX: This said, if time() lacks precision, a better but likely
# platform-dependent wall-clock time source must be identified and used.
from time import time
from warnings import warn
import argparse
import io
import inspect
from itertools import count
import linecache
import os
# not caught by 2to3, likely because pipes.quote is not documented in python 2
try:
from pipes import quote as shlex_quote # Python 2
except ImportError:
from shlex import quote as shlex_quote # Python 3
import platform
import re
import runpy
import shlex
from subprocess import list2cmdline as windows_list2cmdline
import sys
import threading
import zipfile
try:
from IPython.core.magic import register_line_cell_magic
except ImportError:
register_line_cell_magic = lambda x: x
__all__ = (
'ProfileBase', 'ProfileRunnerBase', 'Profile', 'ThreadProfile',
'StatisticProfile', 'StatisticThread', 'run', 'runctx', 'runfile',
'runpath',
)
class BaseLineIterator(object):
def __init__(self, getline, filename, global_dict):
self._getline = getline
self._filename = filename
self._global_dict = global_dict
self._lineno = 1
def __iter__(self):
return self
def next(self):
lineno = self._lineno
self._lineno += 1
return lineno, self._getline(self._filename, lineno, self._global_dict)
if sys.version_info < (3, ):
import codecs
# Find coding specification (see PEP-0263)
_matchCoding = re.compile(
r'^[ \t\f]*#.*?coding[:=][ \t]*([-_.a-zA-Z0-9]+)',
).match
class LineIterator(BaseLineIterator):
_encoding = None
def __init__(self, *args, **kw):
super(LineIterator, self).__init__(*args, **kw)
# Identify encoding.
first_line = self._getline(self._filename, 1, self._global_dict)
if isinstance(first_line, bytes):
# BOM - python2 only detects the (discouraged) UTF-8 BOM
if first_line.startswith(codecs.BOM_UTF8):
self._encoding = 'utf-8'
else:
# PEP-0263: "the first or second line must match [_matchCoding]"
match = _matchCoding(first_line)
if match is None:
match = _matchCoding(
self._getline(self._filename, 2, self._global_dict),
)
if match is None:
self._encoding = 'ascii'
else:
self._encoding = match.group(1)
# else, first line is unicode.
def next(self):
lineno, line = super(LineIterator, self).next()
if self._encoding:
line = line.decode(self._encoding, errors='replace')
return lineno, line
else:
# getline returns unicode objects, nothing to do
LineIterator = BaseLineIterator
if platform.system() == 'Windows':
quoteCommandline = windows_list2cmdline
else:
def quoteCommandline(commandline):
return ' '.join(shlex_quote(x) for x in commandline)
class EncodeOrReplaceWriter(object):
"""
Write-only file-ish object which replaces unsupported chars when
underlying file rejects them.
"""
def __init__(self, out):
self._encoding = getattr(out, 'encoding', None) or 'ascii'
self._write = out.write
def write(self, data):
try:
self._write(data)
except UnicodeEncodeError:
self._write(
data.encode(
self._encoding,
errors='replace',
).decode(self._encoding),
)
def _isCallgrindName(filepath):
return os.path.basename(filepath).startswith('cachegrind.out.')
class _FileTiming(object):
"""
Accumulation of profiling statistics (line and call durations) for a given
source "file" (unique global dict).
Subclasses should be aware that:
- this classes uses __slots__, mainly for cpu efficiency (property lookup
is in a list instead of a dict)
- it can access the BaseProfile instance which created any instace using
the "profiler" property, should they share some state across source
files.
- methods on this class are profiling choke-point - keep customisations
as cheap in CPU as you can !
"""
__slots__ = ('line_dict', 'call_dict', 'filename', 'global_dict',
'profiler')
def __init__(self, filename, global_dict, profiler):
self.filename = filename
self.global_dict = global_dict
self.line_dict = defaultdict(lambda: defaultdict(lambda: [0, 0]))
self.call_dict = {}
# Note: not used in this implementation, may be used by subclasses.
self.profiler = profiler
def hit(self, code, line, duration):
"""
A line has finished executing.
code (code)
container function's code object
line (int)
line number of just executed line
duration (float)
duration of the line, in seconds
"""
entry = self.line_dict[line][code]
entry[0] += 1
entry[1] += duration
def call(self, code, line, callee_file_timing, callee, duration, frame):
"""
A call originating from this file returned.
code (code)
caller's code object
line (int)
caller's line number
callee_file_timing (FileTiming)
callee's FileTiming
callee (code)
callee's code object
duration (float)
duration of the call, in seconds
frame (frame)
calle's entire frame as of its return
"""
try:
entry = self.call_dict[(code, line, callee)]
except KeyError:
self.call_dict[(code, line, callee)] = [callee_file_timing, 1, duration]
else:
entry[1] += 1
entry[2] += duration
def getHitStatsFor(self, line):
total_hits = total_duration = 0
for hits, duration in self.line_dict.get(line, {}).itervalues():
total_hits += hits
total_duration += duration
return total_hits, total_duration
def getLastLine(self):
return max(
max(self.line_dict) if self.line_dict else 0,
max(x for _, x, _ in self.call_dict) if self.call_dict else 0,
)
def iterHits(self):
for line, code_dict in self.line_dict.iteritems():
for code, (hits, duration) in code_dict.iteritems():
yield line, code, hits, duration
def iterCalls(self):
for (code, line, callee), (callee_file_timing, hit, duration) in \
self.call_dict.iteritems():
yield (
line,
code,
hit, duration,
callee_file_timing.filename, callee,
)
def getCallListByLine(self):
result = defaultdict(list)
for line, code, hit, duration, callee_filename, callee in self.iterCalls():
result[line].append((
code,
hit, duration,
callee_filename, callee,
))
return result
def getTotalTime(self):
return sum(
y[1]
for x in self.line_dict.itervalues()
for y in x.itervalues()
)
def getTotalHitCount(self):
return sum(
y[0]
for x in self.line_dict.itervalues()
for y in x.itervalues()
)
def getSortKey(self):
# total duration first, then total hit count for statistical profiling
result = [0, 0]
for entry in self.line_dict.itervalues():
for hit, duration in entry.itervalues():
result[0] += duration
result[1] += hit
return result
FileTiming = _FileTiming
class LocalDescriptor(threading.local):
"""
Implementation of descriptor API for thread-local properties.
"""
def __init__(self, func=None):
"""
func (callable)
If provided, called when a missing property is accessed
(ex: accessing thread never initialised that property).
If None, AttributeError is raised.
"""
super(LocalDescriptor, self).__init__()
if func is not None:
self.func = func
def __get__(self, instance, owner):
try:
return getattr(self, str(id(instance)))
except AttributeError:
# Raises AttributeError if func was not provided.
value = self.func()
setattr(self, str(id(instance)), value)
return value
def __set__(self, instance, value):
setattr(self, str(id(instance)), value)
def __delete__(self, instance):
try:
delattr(self, str(id(instance)))
except AttributeError:
pass
_ANNOTATE_HEADER = \
u'%6s|%10s|' \
u'%13s|%13s|%7s|' \
u'Source code' % (
u'Line #', u'Hits',
u'Time', u'Time per hit', u'%',
)
_ANNOTATE_HORIZONTAL_LINE = u''.join(x == u'|' and u'+' or u'-'
for x in _ANNOTATE_HEADER)
_ANNOTATE_FORMAT = \
u'%(lineno)6i|%(hits)10i|' \
u'%(time)13g|%(time_per_hit)13g|%(percent)6.2f%%|' \
u'%(line)s'
_ANNOTATE_CALL_FORMAT = \
u'(call)|%(hits)10i|' \
u'%(time)13g|%(time_per_hit)13g|%(percent)6.2f%%|' \
u'# %(callee_file)s:%(callee_line)s %(callee_name)s'
def _initStack():
# frame_time: when current frame execution started/resumed last
# frame_discount: time discounted from current frame, because it appeared
# lower in the call stack from the same callsite
# lineno: latest line which execution started
# line_time: time at which latest line started being executed
# line_duration: total time spent in current line up to last resume
now = time()
return (deque([[now, 0, None, now, 0]]), defaultdict(deque))
def _verboseProfileDecorator(self):
def decorator(func):
@wraps(func)
def wrapper(frame, event, arg):
self._traceEvent(frame, event)
return func(frame, event, arg)
return wrapper
return decorator
class ProfileBase(object):
"""
Methods common to deterministic and statistic profiling.
Subclasses can override the "FileTiming" property to use a different class.
"""
__slots__ = (
'file_dict',
'global_dict',
'total_time',
'__dict__',
'__weakref__',
'merged_file_dict',
)
FileTiming = _FileTiming
def __init__(self):
self.file_dict = {}
self.merged_file_dict = {}
self.global_dict = {}
self.total_time = 0
def _getFileTiming(self, frame):
try:
return self.global_dict[id(frame.f_globals)]
except KeyError:
f_globals = frame.f_globals
name = self._getFilename(frame)
self.global_dict[id(f_globals)] = file_timing = self.FileTiming(
name,
f_globals,
self,
)
# file_dict modifications must be thread-safe to not lose measures.
# setdefault is atomic, append is atomic.
self.file_dict.setdefault(name, []).append(file_timing)
return file_timing
@staticmethod
def _getFilename(frame):
"""
Overload in subclasses to customise filename generation.
"""
return frame.f_code.co_filename
@staticmethod
def _getline(filename, lineno, global_dict):
"""
Overload in subclasses to customise source retrieval.
"""
return linecache.getline(filename, lineno, global_dict)
def _mergeFileTiming(self, rebuild=False):
merged_file_dict = self.merged_file_dict
if merged_file_dict and not rebuild:
return merged_file_dict
merged_file_dict.clear()
# Regroup by module, to find all duplicates from other threads.
by_global_dict = defaultdict(list)
for file_timing_list in self.file_dict.itervalues():
for file_timing in file_timing_list:
by_global_dict[
id(file_timing.global_dict)
].append(
file_timing,
)
# Resolve name conflicts.
global_to_named_dict = {}
for global_dict_id, file_timing_list in by_global_dict.iteritems():
file_timing = file_timing_list[0]
name = file_timing.filename
if name in merged_file_dict:
counter = count()
base_name = name
while name in merged_file_dict:
name = base_name + '_%i' % next(counter)
global_to_named_dict[global_dict_id] = merged_file_dict[name] = FileTiming(
name,
file_timing.global_dict,
file_timing.profiler, # Note: should be self
)
# Add all file timings from one module together under its
# deduplicated name. This needs to happen after all names
# are generated and all empty file timings are created so
# call events cross-references can be remapped.
for merged_file_timing in merged_file_dict.itervalues():
line_dict = merged_file_timing.line_dict
for file_timing in by_global_dict[id(merged_file_timing.global_dict)]:
for line, other_code_dict in file_timing.line_dict.iteritems():
code_dict = line_dict[line]
for code, (
other_hits,
other_duration,
) in other_code_dict.iteritems():
entry = code_dict[code]
entry[0] += other_hits
entry[1] += other_duration
call_dict = merged_file_timing.call_dict
for key, (
other_callee_file_timing,
other_hits,
other_duration,
) in file_timing.call_dict.iteritems():
try:
entry = call_dict[key]
except KeyError:
entry = call_dict[key] = [
global_to_named_dict[
id(other_callee_file_timing.global_dict)
],
other_hits,
other_duration,
]
else:
entry[1] += other_hits
entry[2] += other_duration
return merged_file_dict
def getFilenameSet(self):
"""
Returns a set of profiled file names.
Note: "file name" is used loosely here. See python documentation for
co_filename, linecache module and PEP302. It may not be a valid
filesystem path.
"""
result = set(self._mergeFileTiming())
# Ignore profiling code. __file__ does not always provide consistent
# results with f_code.co_filename (ex: easy_install with zipped egg),
# so inspect current frame instead.
# Get current file from one of pprofile methods. Compatible with
# implementations that do not have the inspect.currentframe() method
# (e.g. IronPython).
# XXX: Assumes that all of pprofile code is in a single file.
# XXX: Assumes that _initStack exists in pprofile module.
result.discard(inspect.getsourcefile(_initStack))
return result
def _getFileNameList(self, filename, may_sort=True):
if filename is None:
filename = self.getFilenameSet()
elif isinstance(filename, basestring):
return [filename]
if may_sort:
try:
# Detect if filename is an ordered data type.
filename[:0]
except TypeError:
# Not ordered, sort.
file_dict = self._mergeFileTiming()
filename = sorted(filename, reverse=True,
key=lambda x: file_dict[x].getSortKey()
)
return filename
def callgrind(self, out, filename=None, commandline=None, relative_path=False):
"""
Dump statistics in callgrind format.
Contains:
- per-line hit count, time and time-per-hit
- call associations (call tree)
Note: hit count is not inclusive, in that it is not the sum of all
hits inside that call.
Time unit: microsecond (1e-6 second).
out (file-ish opened for writing)
Destination of callgrind profiling data.
filename (str, collection of str)
If provided, dump stats for given source file(s) only.
By default, list for all known files.
commandline (anything with __str__)
If provided, will be output as the command line used to generate
this profiling data.
relative_path (bool)
When True, absolute elements are stripped from path. Useful when
maintaining several copies of source trees with their own
profiling result, so kcachegrind does not look in system-wide
files which may not match with profiled code.
"""
print(u'# callgrind format', file=out)
print(u'version: 1', file=out)
print(u'creator: pprofile', file=out)
print(u'event: usphit :microseconds/hit', file=out)
print(u'events: hits microseconds usphit', file=out)
if commandline is not None:
print(u'cmd:', commandline, file=out)
file_dict = self._mergeFileTiming()
if relative_path:
convertPath = _relpath
else:
convertPath = lambda x: x
if os.path.sep != "/":
# qCacheGrind (windows build) needs at least one UNIX separator
# in path to find the file. Adapt here even if this is probably
# more of a qCacheGrind issue...
convertPath = lambda x, cascade=convertPath: cascade(
'/'.join(x.split(os.path.sep))
)
code_to_name_dict = {}
homonym_counter = {}
def getCodeName(filename, code):
# Tracks code objects globally, because callee information needs
# to be consistent accross files.
# Inside a file, grants unique names to each code object.
try:
return code_to_name_dict[code]
except KeyError:
name = code.co_name + ':%i' % code.co_firstlineno
key = (filename, name)
homonym_count = homonym_counter.get(key, 0)
if homonym_count:
name += '_%i' % homonym_count
homonym_counter[key] = homonym_count + 1
code_to_name_dict[code] = name
return name
for current_file in self._getFileNameList(filename, may_sort=False):
file_timing = file_dict[current_file]
print(u'fl=%s' % convertPath(current_file), file=out)
# When a local callable is created an immediately executed, this
# loop would start a new "fn=" section but would not end it before
# emitting "cfn=" lines, making the callee appear as not being
# called by interrupted "fn=" section.
# So dispatch all functions in a first pass, and build
# uninterrupted sections in a second pass.
# Note: cost line is a list just to be mutable. A single item is
# expected.
func_dict = defaultdict(lambda: defaultdict(lambda: ([], [])))
for lineno, code, hits, duration in file_timing.iterHits():
func_dict[getCodeName(current_file, code)][lineno][0].append(
(hits, int(duration * 1000000)),
)
for (
lineno,
caller,
call_hits, call_duration,
callee_file, callee,
) in file_timing.iterCalls():
call_ticks = int(call_duration * 1000000)
func_call_list = func_dict[
getCodeName(current_file, caller)
][lineno][1]
append = func_call_list.append
append(u'cfl=' + convertPath(callee_file))
append(u'cfn=' + getCodeName(callee_file, callee))
append(u'calls=%i %i' % (call_hits, callee.co_firstlineno))
append(u'%i %i %i %i' % (lineno, call_hits, call_ticks, call_ticks // call_hits))
for func_name, line_dict in func_dict.iteritems():
print(u'fn=%s' % func_name, file=out)
for lineno, (func_hit_list, func_call_list) in sorted(line_dict.iteritems()):
if func_hit_list:
# Multiple function objects may "reside" on the same
# line of the same file (same global dict).
# Sum these up and produce a single cachegrind event.
hits = sum(x for x, _ in func_hit_list)
ticks = sum(x for _, x in func_hit_list)
print(
u'%i %i %i %i' % (
lineno,
hits,
ticks,
ticks // hits,
),
file=out,
)
for line in func_call_list:
print(line, file=out)
def annotate(self, out, filename=None, commandline=None, relative_path=False):
"""
Dump annotated source code with current profiling statistics to "out"
file.
Time unit: second.
out (file-ish opened for writing)
Destination of annotated sources.
filename (str, collection of str)
If provided, dump stats for given source file(s) only.
If unordered collection, it will get sorted by decreasing total
file score (total time if available, then total hit count).
By default, list for all known files.
commandline (anything with __str__)
If provided, will be output as the command line used to generate
this annotation.
relative_path (bool)
For compatibility with callgrind. Ignored.
"""
file_dict = self._mergeFileTiming()
total_time = self.total_time
if commandline is not None:
print(u'Command line:', commandline, file=out)
print(u'Total duration: %gs' % total_time, file=out)
if not total_time:
return
def percent(value, scale):
if scale == 0:
return 0
return value * 100 / scale
for name in self._getFileNameList(filename):
file_timing = file_dict[name]
file_total_time = file_timing.getTotalTime()
call_list_by_line = file_timing.getCallListByLine()
print(u'File: %s' % name, file=out)
print(u'File duration: %gs (%.2f%%)' % (file_total_time,
percent(file_total_time, total_time)), file=out)
print(_ANNOTATE_HEADER, file=out)
print(_ANNOTATE_HORIZONTAL_LINE, file=out)
last_line = file_timing.getLastLine()
for lineno, line in LineIterator(
self._getline,
file_timing.filename,
file_timing.global_dict,
):
if not line and lineno > last_line:
break
hits, duration = file_timing.getHitStatsFor(lineno)
print(_ANNOTATE_FORMAT % {
u'lineno': lineno,
u'hits': hits,
u'time': duration,
u'time_per_hit': duration / hits if hits else 0,
u'percent': percent(duration, total_time),
u'line': (line or u'').rstrip(),
}, file=out)
for (
_,
call_hits, call_duration,
callee_file, callee,
) in call_list_by_line.get(lineno, ()):
print(_ANNOTATE_CALL_FORMAT % {
u'hits': call_hits,
u'time': call_duration,
u'time_per_hit': call_duration / call_hits,
u'percent': percent(call_duration, total_time),
u'callee_file': callee_file,
u'callee_line': callee.co_firstlineno,
u'callee_name': callee.co_name,
}, file=out)
def _iterRawFile(self, name):
file_timing = self._mergeFileTiming()[name]
for lineno in count(1):
line = self._getline(file_timing.filename, lineno,
file_timing.global_dict)
if not line:
break
yield line
def iterSource(self):
"""
Iterator over all involved files.
Yields 2-tuple composed of file path and an iterator over
(non-annotated) source lines.
Can be used to generate a file tree for use with kcachegrind, for
example.
"""
for name in self.getFilenameSet():
yield name, self._iterRawFile(name)
# profile/cProfile-like API
def dump_stats(self, filename):
"""
Similar to profile.Profile.dump_stats - but different output format !
"""
if _isCallgrindName(filename):
with open(filename, 'w') as out:
self.callgrind(out)
else:
with io.open(filename, 'w', errors='replace') as out:
self.annotate(out)
def print_stats(self):
"""
Similar to profile.Profile.print_stats .
Returns None.
"""
self.annotate(EncodeOrReplaceWriter(sys.stdout))
class ProfileRunnerBase(object):
def __call__(self):
return self
def __enter__(self):
raise NotImplementedError
def __exit__(self, exc_type, exc_val, exc_tb):
raise NotImplementedError
# profile/cProfile-like API
def runctx(self, cmd, globals, locals):
"""Similar to profile.Profile.runctx ."""
with self():
exec(cmd, globals, locals)
return self
def runcall(self, func, *args, **kw):
"""Similar to profile.Profile.runcall ."""
with self():
return func(*args, **kw)
def runfile(self, fd, argv, fd_name='<unknown>', compile_flags=0,
dont_inherit=1, globals={}):
with fd:
code = compile(fd.read(), fd_name, 'exec', flags=compile_flags,
dont_inherit=dont_inherit)
original_sys_argv = list(sys.argv)
ctx_globals = globals.copy()
ctx_globals['__file__'] = fd_name
ctx_globals['__name__'] = '__main__'
ctx_globals['__package__'] = None
try:
sys.argv[:] = argv
return self.runctx(code, ctx_globals, None)
finally:
sys.argv[:] = original_sys_argv
def runpath(self, path, argv):
original_sys_path = list(sys.path)
try:
sys.path.insert(0, os.path.dirname(path))
return self.runfile(open(path, 'rb'), argv, fd_name=path)
finally:
sys.path[:] = original_sys_path
def runmodule(self, module, argv):
original_sys_argv = list(sys.argv)
original_sys_path0 = sys.path[0]
try:
sys.path[0] = os.getcwd()
sys.argv[:] = argv
with self():
runpy.run_module(module, run_name='__main__', alter_sys=True)
finally:
sys.argv[:] = original_sys_argv
sys.path[0] = original_sys_path0
return self
class Profile(ProfileBase, ProfileRunnerBase):
"""
Deterministic, recursive, line-granularity, profiling class.
Does not require any source code change to work.
If the performance hit is too large, it can benefit from some
integration (calling enable/disable around selected code chunks).
The sum of time spent in all profiled lines is less than the total
profiled time reported. This is (part of) profiling overhead.
This also mans that sum of time-spent-on-line percentage is less than 100%.
All times are "internal time", ie they do not count time spent inside
called (profilable, so python) functions.
"""
__slots__ = (
'_global_trace',
'_local_trace',
'stack',
'enabled_start',
)
def __init__(self, verbose=False):
super(Profile, self).__init__()
if verbose:
self._global_trace = _verboseProfileDecorator(self)(
self._real_global_trace)
self._local_trace = _verboseProfileDecorator(self)(
self._real_local_trace)
else:
self._global_trace = self._real_global_trace
self._local_trace = self._real_local_trace
self.stack = None
self.enabled_start = None
def _enable(self):
"""
Overload this method when subclassing. Called before actually
enabling trace.
"""
self.stack = _initStack()
self.enabled_start = time()
def enable(self):
"""
Enable profiling.
"""
if self.enabled_start:
warn('Duplicate "enable" call')
else:
self._enable()
sys.settrace(self._global_trace)
def _disable(self):
"""
Overload this method when subclassing. Called after actually disabling
trace.
"""
self.total_time += time() - self.enabled_start
self.enabled_start = None
del self.stack
def disable(self):
"""
Disable profiling.
"""
if self.enabled_start:
sys.settrace(None)
self._disable()
else:
warn('Duplicate "disable" call')
def __enter__(self):
"""
__enter__() -> self
"""
self.enable()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""
__exit__(*excinfo) -> None. Disables profiling.
"""
self.disable()
def _traceEvent(self, frame, event):
f_code = frame.f_code
lineno = frame.f_lineno
print('%10.6f%s%s %s:%s %s+%s' % (
time() - self.enabled_start,
' ' * len(self.stack[0]),
event,
f_code.co_filename,
lineno,
f_code.co_name,
lineno - f_code.co_firstlineno,
), file=sys.stderr)
def _real_global_trace(self, frame, event, arg):
local_trace = self._local_trace
if local_trace is not None:
event_time = time()
callee_entry = [event_time, 0, frame.f_lineno, event_time, 0]
stack, callee_dict = self.stack
try:
caller_entry = stack[-1]
except IndexError:
pass
else:
# Suspend caller frame
frame_time, frame_discount, lineno, line_time, line_duration = caller_entry
caller_entry[4] = event_time - line_time + line_duration
callee_dict[(frame.f_back.f_code, frame.f_code)].append(callee_entry)
stack.append(callee_entry)
return local_trace
def _real_local_trace(self, frame, event, arg):
if event == 'line' or event == 'return':
event_time = time()
stack, callee_dict = self.stack
try:
stack_entry = stack[-1]
except IndexError:
warn('Profiling stack underflow, disabling.')
self.disable()
return None
frame_time, frame_discount, lineno, line_time, line_duration = stack_entry
file_timing = self._getFileTiming(frame)
file_timing.hit(frame.f_code, lineno,
event_time - line_time + line_duration)
if event == 'line':
# Start a new line
stack_entry[2] = frame.f_lineno
stack_entry[3] = event_time
stack_entry[4] = 0
else:
# 'return' event, <frame> is still callee
# Resume caller frame
stack.pop()
stack[-1][3] = event_time
caller_frame = frame.f_back
caller_code = caller_frame.f_code
callee_code = frame.f_code
callee_entry_list = callee_dict[(caller_code, callee_code)]
callee_entry_list.pop()
call_duration = event_time - frame_time
if callee_entry_list:
# Callee is also somewhere up the stack, so discount this
# call duration from it.
callee_entry_list[-1][1] += call_duration
self._getFileTiming(caller_frame).call(
caller_code, caller_frame.f_lineno,
file_timing,
callee_code, call_duration - frame_discount,
frame,
)
return self._local_trace
# profile/cProfile-like API
def run(self, cmd):
"""Similar to profile.Profile.run ."""
import __main__
dikt = __main__.__dict__
return self.runctx(cmd, dikt, dikt)
class ThreadProfile(Profile):
"""
threading.Thread-aware version of Profile class.
Threads started after enable() call will be profiled.
After disable() call, threads will need to be switched into and trigger a
trace event (typically a "line" event) before they can notice the
disabling.
"""
__slots__ = ('_local_trace_backup', )
stack = LocalDescriptor(_initStack)
global_dict = LocalDescriptor(dict)
def __init__(self, **kw):
super(ThreadProfile, self).__init__(**kw)
self._local_trace_backup = self._local_trace
def _enable(self):
self._local_trace = self._local_trace_backup
threading.settrace(self._global_trace)
super(ThreadProfile, self)._enable()
def _disable(self):
super(ThreadProfile, self)._disable()
threading.settrace(None)
self._local_trace = None
class StatisticProfile(ProfileBase, ProfileRunnerBase):
"""
Statistic profiling class.
This class does not gather its own samples by itself.
Instead, it must be provided with call stacks (as returned by
sys._getframe() or sys._current_frames()).
"""
def __init__(self):
super(StatisticProfile, self).__init__()
self.total_time = 1
def sample(self, frame):
getFileTiming = self._getFileTiming
called_timing = getFileTiming(frame)
called_code = frame.f_code
called_timing.hit(called_code, frame.f_lineno, 0)
while True:
caller = frame.f_back
if caller is None:
break
caller_timing = getFileTiming(caller)
caller_code = caller.f_code
caller_timing.call(caller_code, caller.f_lineno,
called_timing, called_code, 0, frame)
called_timing = caller_timing
frame = caller
called_code = caller_code
def __call__(self, period=.001, single=True, group=None, name=None):
"""
Instanciate StatisticThread.
>>> s_profile = StatisticProfile()
>>> with s_profile(single=False):
>>> # Code to profile
Is equivalent to:
>>> s_profile = StatisticProfile()
>>> s_thread = StatisticThread(profiler=s_profile, single=False)
>>> with s_thread:
>>> # Code to profile
"""
return StatisticThread(
profiler=self, period=period, single=single, group=group,
name=name,
)
# BBB
StatisticalProfile = StatisticProfile
class StatisticThread(threading.Thread, ProfileRunnerBase):
"""
Usage in a nutshell:
with StatisticThread() as profiler_thread:
# do stuff
profiler_thread.profiler.print_stats()
"""
__slots__ = (
'_test',
'_start_time',
'clean_exit',
)
def __init__(self, profiler=None, period=.001, single=True, group=None, name=None):
"""
profiler (None or StatisticProfile instance)
Available on instances as the "profiler" read-only property.
If None, a new profiler instance will be created.
period (float)
How many seconds to wait between consecutive samples.
The smaller, the more profiling overhead, but the faster results
become meaningful.
The larger, the less profiling overhead, but requires long profiling
session to get meaningful results.
single (bool)
Profile only the thread which created this instance.
group, name
See Python's threading.Thread API.
"""
if profiler is None:
profiler = StatisticProfile()
if single:
self._test = lambda x, ident=threading.current_thread().ident: ident == x
else:
self._test = None
super(StatisticThread, self).__init__(
group=group,
name=name,
)
self._stop_event = threading.Event()
self._period = period
self._profiler = profiler
profiler.total_time = 0
self.daemon = True
self.clean_exit = False
@property
def profiler(self):
return self._profiler
def start(self):
self.clean_exit = False
self._can_run = True
self._start_time = time()
super(StatisticThread, self).start()
def stop(self):
"""
Request thread to stop.
Does not wait for actual termination (use join() method).
"""
if self.is_alive():
self._can_run = False
self._stop_event.set()
self._profiler.total_time += time() - self._start_time
self._start_time = None
def __enter__(self):
"""
__enter__() -> self
"""
self.start()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""
__exit__(*excinfo) -> None. Stops and joins profiling thread.
"""
self.stop()
self.join()
def run(self):
current_frames = sys._current_frames
test = self._test
if test is None:
test = lambda x, ident=threading.current_thread().ident: ident != x
sample = self._profiler.sample
stop_event = self._stop_event
wait = partial(stop_event.wait, self._period)
while self._can_run:
for ident, frame in current_frames().iteritems():
if test(ident):
sample(frame)
frame = None
wait()
stop_event.clear()
self.clean_exit = True
def callgrind(self, *args, **kw):
warn('deprecated', DeprecationWarning)
return self._profiler.callgrind(*args, **kw)
def annotate(self, *args, **kw):
warn('deprecated', DeprecationWarning)
return self._profiler.annotate(*args, **kw)
def dump_stats(self, *args, **kw):
warn('deprecated', DeprecationWarning)
return self._profiler.dump_stats(*args, **kw)
def print_stats(self, *args, **kw):
warn('deprecated', DeprecationWarning)
return self._profiler.print_stats(*args, **kw)
def iterSource(self, *args, **kw):
warn('deprecated', DeprecationWarning)
return self._profiler.iterSource(*args, **kw)
# BBB
StatisticalThread = StatisticThread
# profile/cProfile-like API (no sort parameter !)
def _run(threads, verbose, func_name, filename, *args, **kw):
if threads:
klass = ThreadProfile
else:
klass = Profile
prof = klass(verbose=verbose)
try:
try:
getattr(prof, func_name)(*args, **kw)
except SystemExit:
pass
finally:
if filename is None:
prof.print_stats()
else:
prof.dump_stats(filename)
def run(cmd, filename=None, threads=True, verbose=False):
"""Similar to profile.run ."""
_run(threads, verbose, 'run', filename, cmd)
def runctx(cmd, globals, locals, filename=None, threads=True, verbose=False):
"""Similar to profile.runctx ."""
_run(threads, verbose, 'runctx', filename, cmd, globals, locals)
def runfile(fd, argv, fd_name='<unknown>', compile_flags=0, dont_inherit=1,
filename=None, threads=True, verbose=False):
"""
Run code from given file descriptor with profiling enabled.
Closes fd before executing contained code.
"""
_run(threads, verbose, 'runfile', filename, fd, argv, fd_name,
compile_flags, dont_inherit)
def runpath(path, argv, filename=None, threads=True, verbose=False):
"""
Run code from open-accessible file path with profiling enabled.
"""
_run(threads, verbose, 'runpath', filename, path, argv)
_allsep = os.sep + (os.altsep or '')
def _relpath(name):
"""
Strip absolute components from path.
Inspired from zipfile.write().
"""
return os.path.normpath(os.path.splitdrive(name)[1]).lstrip(_allsep)
def _main(argv, stdin=None):
format_dict = {
'text': 'annotate',
'callgrind': 'callgrind',
}
parser = argparse.ArgumentParser(argv[0])
parser.add_argument('script', help='Python script to execute (optionaly '
'followed by its arguments)', nargs='?')
parser.add_argument('argv', nargs=argparse.REMAINDER)
parser.add_argument('-o', '--out', default='-',
help='Write annotated sources to this file. Defaults to stdout.')
parser.add_argument('-z', '--zipfile',
help='Name of a zip file to generate from all involved source files. '
'Useful with callgrind output.')
parser.add_argument('-t', '--threads', default=1, type=int, help='If '
'non-zero, trace threads spawned by program. Default: %(default)s')
parser.add_argument('-f', '--format', choices=format_dict,
help='Format in which output is generated. If not set, auto-detected '
'from filename if provided, falling back to "text".')
parser.add_argument('-v', '--verbose', action='store_true',
help='Enable profiler internal tracing output. Cryptic and verbose.')
parser.add_argument('-s', '--statistic', default=0, type=float,
help='Use this period for statistic profiling, or use deterministic '
'profiling when 0.')
parser.add_argument('-m', dest='module',
help='Searches sys.path for the named module and runs the '
'corresponding .py file as a script. When given, positional arguments '
'become sys.argv[1:]')
group = parser.add_argument_group(
title='Filtering',
description='Allows excluding (and re-including) code from '
'"file names" matching regular expressions. '
'"file name" follows the semantics of python\'s "co_filename": '
'it may be a valid path, of an existing or non-existing file, '
'but it may be some arbitrary string too.'
)
group.add_argument('--exclude-syspath', action='store_true',
help='Exclude all from default "sys.path". Beware: this will also '
'exclude properly-installed non-standard modules, which may not be '
'what you want.')
group.add_argument('--exclude', action='append', default=[],
help='Exclude files whose name starts with any pattern.')
group.add_argument('--include', action='append', default=[],
help='Include files whose name would have otherwise excluded. '
'If no exclusion was specified, all paths are excluded first.')
options = parser.parse_args(argv[1:])
if options.exclude_syspath:
options.exclude.extend('^' + re.escape(x) for x in sys.path)
if options.include and not options.exclude:
options.exclude.append('') # All-matching regex
if options.verbose:
if options.exclude:
print('Excluding:', file=sys.stderr)
for regex in options.exclude:
print('\t' + regex, file=sys.stderr)
if options.include:
print('But including:', file=sys.stderr)
for regex in options.include:
print('\t' + regex, file=sys.stderr)
if options.module is None:
if options.script is None:
parser.error('too few arguments')
args = [options.script] + options.argv
runner_method_kw = {
'path': args[0],
'argv': args,
}
runner_method_id = 'runpath'
elif stdin is not None and options.module == '-':
# Undocumented way of using -m, used internaly by %%pprofile
args = ['<stdin>']
if options.script is not None:
args.append(options.script)
args.extend(options.argv)
import __main__
runner_method_kw = {
'fd': stdin,
'argv': args,
'fd_name': '<stdin>',
'globals': __main__.__dict__,
}
runner_method_id = 'runfile'
else:
args = [options.module]
if options.script is not None:
args.append(options.script)
args.extend(options.argv)
runner_method_kw = {
'module': options.module,
'argv': args,
}
runner_method_id = 'runmodule'
if options.format is None:
if _isCallgrindName(options.out):
options.format = 'callgrind'
else:
options.format = 'text'
relative_path = options.format == 'callgrind' and options.zipfile
if options.statistic:
prof = StatisticalProfile()
runner = StatisticalThread(
profiler=prof,
period=options.statistic,
single=not options.threads,
)
else:
if options.threads:
klass = ThreadProfile
else:
klass = Profile
prof = runner = klass(verbose=options.verbose)
try:
getattr(runner, runner_method_id)(**runner_method_kw)
finally:
if options.out == '-':
out = EncodeOrReplaceWriter(sys.stdout)
close = lambda: None
else:
out = io.open(options.out, 'w', errors='replace')
close = out.close
if options.exclude:
exclusion_search_list = [
re.compile(x).search for x in options.exclude
]
include_search_list = [
re.compile(x).search for x in options.include
]
filename_set = {
x for x in prof.getFilenameSet()
if not (
any(y(x) for y in exclusion_search_list) and
not any(y(x) for y in include_search_list)
)
}
else:
filename_set = None
commandline = quoteCommandline(args)
getattr(prof, format_dict[options.format])(
out,
filename=filename_set,
# python2 repr returns bytes, python3 repr returns unicode
commandline=getattr(
commandline,
'decode',
lambda _: commandline,
)('ascii'),
relative_path=relative_path,
)
close()
zip_path = options.zipfile
if zip_path:
if relative_path:
convertPath = _relpath
else:
convertPath = lambda x: x
with zipfile.ZipFile(
zip_path,
mode='w',
compression=zipfile.ZIP_DEFLATED,
) as zip_file:
for name, lines in prof.iterSource():
zip_file.writestr(
convertPath(name),
''.join(lines)
)
if options.statistic and not runner.clean_exit:
# Mostly useful for regresion testing, as exceptions raised in threads
# do not change exit status.
sys.exit(1)
try:
register_line_cell_magic(pprofile)
except Exception:
# ipython can be imported, but may not be currently running.
pass
del pprofile
def main():
_main(sys.argv)
if __name__ == '__main__':
main()
|
vpelletier/pprofile | pprofile.py | _FileTiming.hit | python | def hit(self, code, line, duration):
entry = self.line_dict[line][code]
entry[0] += 1
entry[1] += duration | A line has finished executing.
code (code)
container function's code object
line (int)
line number of just executed line
duration (float)
duration of the line, in seconds | train | https://github.com/vpelletier/pprofile/blob/51a36896727565faf23e5abccc9204e5f935fe1e/pprofile.py#L192-L205 | null | class _FileTiming(object):
"""
Accumulation of profiling statistics (line and call durations) for a given
source "file" (unique global dict).
Subclasses should be aware that:
- this classes uses __slots__, mainly for cpu efficiency (property lookup
is in a list instead of a dict)
- it can access the BaseProfile instance which created any instace using
the "profiler" property, should they share some state across source
files.
- methods on this class are profiling choke-point - keep customisations
as cheap in CPU as you can !
"""
__slots__ = ('line_dict', 'call_dict', 'filename', 'global_dict',
'profiler')
def __init__(self, filename, global_dict, profiler):
self.filename = filename
self.global_dict = global_dict
self.line_dict = defaultdict(lambda: defaultdict(lambda: [0, 0]))
self.call_dict = {}
# Note: not used in this implementation, may be used by subclasses.
self.profiler = profiler
def call(self, code, line, callee_file_timing, callee, duration, frame):
"""
A call originating from this file returned.
code (code)
caller's code object
line (int)
caller's line number
callee_file_timing (FileTiming)
callee's FileTiming
callee (code)
callee's code object
duration (float)
duration of the call, in seconds
frame (frame)
calle's entire frame as of its return
"""
try:
entry = self.call_dict[(code, line, callee)]
except KeyError:
self.call_dict[(code, line, callee)] = [callee_file_timing, 1, duration]
else:
entry[1] += 1
entry[2] += duration
def getHitStatsFor(self, line):
total_hits = total_duration = 0
for hits, duration in self.line_dict.get(line, {}).itervalues():
total_hits += hits
total_duration += duration
return total_hits, total_duration
def getLastLine(self):
return max(
max(self.line_dict) if self.line_dict else 0,
max(x for _, x, _ in self.call_dict) if self.call_dict else 0,
)
def iterHits(self):
for line, code_dict in self.line_dict.iteritems():
for code, (hits, duration) in code_dict.iteritems():
yield line, code, hits, duration
def iterCalls(self):
for (code, line, callee), (callee_file_timing, hit, duration) in \
self.call_dict.iteritems():
yield (
line,
code,
hit, duration,
callee_file_timing.filename, callee,
)
def getCallListByLine(self):
result = defaultdict(list)
for line, code, hit, duration, callee_filename, callee in self.iterCalls():
result[line].append((
code,
hit, duration,
callee_filename, callee,
))
return result
def getTotalTime(self):
return sum(
y[1]
for x in self.line_dict.itervalues()
for y in x.itervalues()
)
def getTotalHitCount(self):
return sum(
y[0]
for x in self.line_dict.itervalues()
for y in x.itervalues()
)
def getSortKey(self):
# total duration first, then total hit count for statistical profiling
result = [0, 0]
for entry in self.line_dict.itervalues():
for hit, duration in entry.itervalues():
result[0] += duration
result[1] += hit
return result
|
vpelletier/pprofile | pprofile.py | _FileTiming.call | python | def call(self, code, line, callee_file_timing, callee, duration, frame):
try:
entry = self.call_dict[(code, line, callee)]
except KeyError:
self.call_dict[(code, line, callee)] = [callee_file_timing, 1, duration]
else:
entry[1] += 1
entry[2] += duration | A call originating from this file returned.
code (code)
caller's code object
line (int)
caller's line number
callee_file_timing (FileTiming)
callee's FileTiming
callee (code)
callee's code object
duration (float)
duration of the call, in seconds
frame (frame)
calle's entire frame as of its return | train | https://github.com/vpelletier/pprofile/blob/51a36896727565faf23e5abccc9204e5f935fe1e/pprofile.py#L207-L230 | null | class _FileTiming(object):
"""
Accumulation of profiling statistics (line and call durations) for a given
source "file" (unique global dict).
Subclasses should be aware that:
- this classes uses __slots__, mainly for cpu efficiency (property lookup
is in a list instead of a dict)
- it can access the BaseProfile instance which created any instace using
the "profiler" property, should they share some state across source
files.
- methods on this class are profiling choke-point - keep customisations
as cheap in CPU as you can !
"""
__slots__ = ('line_dict', 'call_dict', 'filename', 'global_dict',
'profiler')
def __init__(self, filename, global_dict, profiler):
self.filename = filename
self.global_dict = global_dict
self.line_dict = defaultdict(lambda: defaultdict(lambda: [0, 0]))
self.call_dict = {}
# Note: not used in this implementation, may be used by subclasses.
self.profiler = profiler
def hit(self, code, line, duration):
"""
A line has finished executing.
code (code)
container function's code object
line (int)
line number of just executed line
duration (float)
duration of the line, in seconds
"""
entry = self.line_dict[line][code]
entry[0] += 1
entry[1] += duration
def getHitStatsFor(self, line):
total_hits = total_duration = 0
for hits, duration in self.line_dict.get(line, {}).itervalues():
total_hits += hits
total_duration += duration
return total_hits, total_duration
def getLastLine(self):
return max(
max(self.line_dict) if self.line_dict else 0,
max(x for _, x, _ in self.call_dict) if self.call_dict else 0,
)
def iterHits(self):
for line, code_dict in self.line_dict.iteritems():
for code, (hits, duration) in code_dict.iteritems():
yield line, code, hits, duration
def iterCalls(self):
for (code, line, callee), (callee_file_timing, hit, duration) in \
self.call_dict.iteritems():
yield (
line,
code,
hit, duration,
callee_file_timing.filename, callee,
)
def getCallListByLine(self):
result = defaultdict(list)
for line, code, hit, duration, callee_filename, callee in self.iterCalls():
result[line].append((
code,
hit, duration,
callee_filename, callee,
))
return result
def getTotalTime(self):
return sum(
y[1]
for x in self.line_dict.itervalues()
for y in x.itervalues()
)
def getTotalHitCount(self):
return sum(
y[0]
for x in self.line_dict.itervalues()
for y in x.itervalues()
)
def getSortKey(self):
    """Return [total_duration, total_hit_count] for this file.

    Duration comes first so deterministic profiles sort by time; the hit
    count breaks ties and serves statistical profiling, where durations
    are unavailable.
    """
    duration_total = 0
    hit_total = 0
    for per_code in self.line_dict.itervalues():
        for line_hits, line_duration in per_code.itervalues():
            duration_total += line_duration
            hit_total += line_hits
    return [duration_total, hit_total]
|
vpelletier/pprofile | pprofile.py | ProfileBase.getFilenameSet | python | def getFilenameSet(self):
result = set(self._mergeFileTiming())
# Ignore profiling code. __file__ does not always provide consistent
# results with f_code.co_filename (ex: easy_install with zipped egg),
# so inspect current frame instead.
# Get current file from one of pprofile methods. Compatible with
# implementations that do not have the inspect.currentframe() method
# (e.g. IronPython).
# XXX: Assumes that all of pprofile code is in a single file.
# XXX: Assumes that _initStack exists in pprofile module.
result.discard(inspect.getsourcefile(_initStack))
return result | Returns a set of profiled file names.
Note: "file name" is used loosely here. See python documentation for
co_filename, linecache module and PEP302. It may not be a valid
filesystem path. | train | https://github.com/vpelletier/pprofile/blob/51a36896727565faf23e5abccc9204e5f935fe1e/pprofile.py#L483-L501 | [
"def _mergeFileTiming(self, rebuild=False):\n merged_file_dict = self.merged_file_dict\n if merged_file_dict and not rebuild:\n return merged_file_dict\n merged_file_dict.clear()\n # Regroup by module, to find all duplicates from other threads.\n by_global_dict = defaultdict(list)\n for file_timing_list in self.file_dict.itervalues():\n for file_timing in file_timing_list:\n by_global_dict[\n id(file_timing.global_dict)\n ].append(\n file_timing,\n )\n # Resolve name conflicts.\n global_to_named_dict = {}\n for global_dict_id, file_timing_list in by_global_dict.iteritems():\n file_timing = file_timing_list[0]\n name = file_timing.filename\n if name in merged_file_dict:\n counter = count()\n base_name = name\n while name in merged_file_dict:\n name = base_name + '_%i' % next(counter)\n global_to_named_dict[global_dict_id] = merged_file_dict[name] = FileTiming(\n name,\n file_timing.global_dict,\n file_timing.profiler, # Note: should be self\n )\n # Add all file timings from one module together under its\n # deduplicated name. 
This needs to happen after all names\n # are generated and all empty file timings are created so\n # call events cross-references can be remapped.\n for merged_file_timing in merged_file_dict.itervalues():\n line_dict = merged_file_timing.line_dict\n for file_timing in by_global_dict[id(merged_file_timing.global_dict)]:\n for line, other_code_dict in file_timing.line_dict.iteritems():\n code_dict = line_dict[line]\n for code, (\n other_hits,\n other_duration,\n ) in other_code_dict.iteritems():\n entry = code_dict[code]\n entry[0] += other_hits\n entry[1] += other_duration\n call_dict = merged_file_timing.call_dict\n for key, (\n other_callee_file_timing,\n other_hits,\n other_duration,\n ) in file_timing.call_dict.iteritems():\n try:\n entry = call_dict[key]\n except KeyError:\n entry = call_dict[key] = [\n global_to_named_dict[\n id(other_callee_file_timing.global_dict)\n ],\n other_hits,\n other_duration,\n ]\n else:\n entry[1] += other_hits\n entry[2] += other_duration\n return merged_file_dict\n"
] | class ProfileBase(object):
"""
Methods common to deterministic and statistic profiling.
Subclasses can override the "FileTiming" property to use a different class.
"""
__slots__ = (
'file_dict',
'global_dict',
'total_time',
'__dict__',
'__weakref__',
'merged_file_dict',
)
FileTiming = _FileTiming
def __init__(self):
self.file_dict = {}
self.merged_file_dict = {}
self.global_dict = {}
self.total_time = 0
def _getFileTiming(self, frame):
try:
return self.global_dict[id(frame.f_globals)]
except KeyError:
f_globals = frame.f_globals
name = self._getFilename(frame)
self.global_dict[id(f_globals)] = file_timing = self.FileTiming(
name,
f_globals,
self,
)
# file_dict modifications must be thread-safe to not lose measures.
# setdefault is atomic, append is atomic.
self.file_dict.setdefault(name, []).append(file_timing)
return file_timing
@staticmethod
def _getFilename(frame):
"""
Overload in subclasses to customise filename generation.
"""
return frame.f_code.co_filename
@staticmethod
def _getline(filename, lineno, global_dict):
"""
Overload in subclasses to customise source retrieval.
"""
return linecache.getline(filename, lineno, global_dict)
def _mergeFileTiming(self, rebuild=False):
merged_file_dict = self.merged_file_dict
if merged_file_dict and not rebuild:
return merged_file_dict
merged_file_dict.clear()
# Regroup by module, to find all duplicates from other threads.
by_global_dict = defaultdict(list)
for file_timing_list in self.file_dict.itervalues():
for file_timing in file_timing_list:
by_global_dict[
id(file_timing.global_dict)
].append(
file_timing,
)
# Resolve name conflicts.
global_to_named_dict = {}
for global_dict_id, file_timing_list in by_global_dict.iteritems():
file_timing = file_timing_list[0]
name = file_timing.filename
if name in merged_file_dict:
counter = count()
base_name = name
while name in merged_file_dict:
name = base_name + '_%i' % next(counter)
global_to_named_dict[global_dict_id] = merged_file_dict[name] = FileTiming(
name,
file_timing.global_dict,
file_timing.profiler, # Note: should be self
)
# Add all file timings from one module together under its
# deduplicated name. This needs to happen after all names
# are generated and all empty file timings are created so
# call events cross-references can be remapped.
for merged_file_timing in merged_file_dict.itervalues():
line_dict = merged_file_timing.line_dict
for file_timing in by_global_dict[id(merged_file_timing.global_dict)]:
for line, other_code_dict in file_timing.line_dict.iteritems():
code_dict = line_dict[line]
for code, (
other_hits,
other_duration,
) in other_code_dict.iteritems():
entry = code_dict[code]
entry[0] += other_hits
entry[1] += other_duration
call_dict = merged_file_timing.call_dict
for key, (
other_callee_file_timing,
other_hits,
other_duration,
) in file_timing.call_dict.iteritems():
try:
entry = call_dict[key]
except KeyError:
entry = call_dict[key] = [
global_to_named_dict[
id(other_callee_file_timing.global_dict)
],
other_hits,
other_duration,
]
else:
entry[1] += other_hits
entry[2] += other_duration
return merged_file_dict
def _getFileNameList(self, filename, may_sort=True):
if filename is None:
filename = self.getFilenameSet()
elif isinstance(filename, basestring):
return [filename]
if may_sort:
try:
# Detect if filename is an ordered data type.
filename[:0]
except TypeError:
# Not ordered, sort.
file_dict = self._mergeFileTiming()
filename = sorted(filename, reverse=True,
key=lambda x: file_dict[x].getSortKey()
)
return filename
def callgrind(self, out, filename=None, commandline=None, relative_path=False):
"""
Dump statistics in callgrind format.
Contains:
- per-line hit count, time and time-per-hit
- call associations (call tree)
Note: hit count is not inclusive, in that it is not the sum of all
hits inside that call.
Time unit: microsecond (1e-6 second).
out (file-ish opened for writing)
Destination of callgrind profiling data.
filename (str, collection of str)
If provided, dump stats for given source file(s) only.
By default, list for all known files.
commandline (anything with __str__)
If provided, will be output as the command line used to generate
this profiling data.
relative_path (bool)
When True, absolute elements are stripped from path. Useful when
maintaining several copies of source trees with their own
profiling result, so kcachegrind does not look in system-wide
files which may not match with profiled code.
"""
print(u'# callgrind format', file=out)
print(u'version: 1', file=out)
print(u'creator: pprofile', file=out)
print(u'event: usphit :microseconds/hit', file=out)
print(u'events: hits microseconds usphit', file=out)
if commandline is not None:
print(u'cmd:', commandline, file=out)
file_dict = self._mergeFileTiming()
if relative_path:
convertPath = _relpath
else:
convertPath = lambda x: x
if os.path.sep != "/":
# qCacheGrind (windows build) needs at least one UNIX separator
# in path to find the file. Adapt here even if this is probably
# more of a qCacheGrind issue...
convertPath = lambda x, cascade=convertPath: cascade(
'/'.join(x.split(os.path.sep))
)
code_to_name_dict = {}
homonym_counter = {}
def getCodeName(filename, code):
# Tracks code objects globally, because callee information needs
# to be consistent accross files.
# Inside a file, grants unique names to each code object.
try:
return code_to_name_dict[code]
except KeyError:
name = code.co_name + ':%i' % code.co_firstlineno
key = (filename, name)
homonym_count = homonym_counter.get(key, 0)
if homonym_count:
name += '_%i' % homonym_count
homonym_counter[key] = homonym_count + 1
code_to_name_dict[code] = name
return name
for current_file in self._getFileNameList(filename, may_sort=False):
file_timing = file_dict[current_file]
print(u'fl=%s' % convertPath(current_file), file=out)
# When a local callable is created an immediately executed, this
# loop would start a new "fn=" section but would not end it before
# emitting "cfn=" lines, making the callee appear as not being
# called by interrupted "fn=" section.
# So dispatch all functions in a first pass, and build
# uninterrupted sections in a second pass.
# Note: cost line is a list just to be mutable. A single item is
# expected.
func_dict = defaultdict(lambda: defaultdict(lambda: ([], [])))
for lineno, code, hits, duration in file_timing.iterHits():
func_dict[getCodeName(current_file, code)][lineno][0].append(
(hits, int(duration * 1000000)),
)
for (
lineno,
caller,
call_hits, call_duration,
callee_file, callee,
) in file_timing.iterCalls():
call_ticks = int(call_duration * 1000000)
func_call_list = func_dict[
getCodeName(current_file, caller)
][lineno][1]
append = func_call_list.append
append(u'cfl=' + convertPath(callee_file))
append(u'cfn=' + getCodeName(callee_file, callee))
append(u'calls=%i %i' % (call_hits, callee.co_firstlineno))
append(u'%i %i %i %i' % (lineno, call_hits, call_ticks, call_ticks // call_hits))
for func_name, line_dict in func_dict.iteritems():
print(u'fn=%s' % func_name, file=out)
for lineno, (func_hit_list, func_call_list) in sorted(line_dict.iteritems()):
if func_hit_list:
# Multiple function objects may "reside" on the same
# line of the same file (same global dict).
# Sum these up and produce a single cachegrind event.
hits = sum(x for x, _ in func_hit_list)
ticks = sum(x for _, x in func_hit_list)
print(
u'%i %i %i %i' % (
lineno,
hits,
ticks,
ticks // hits,
),
file=out,
)
for line in func_call_list:
print(line, file=out)
def annotate(self, out, filename=None, commandline=None, relative_path=False):
"""
Dump annotated source code with current profiling statistics to "out"
file.
Time unit: second.
out (file-ish opened for writing)
Destination of annotated sources.
filename (str, collection of str)
If provided, dump stats for given source file(s) only.
If unordered collection, it will get sorted by decreasing total
file score (total time if available, then total hit count).
By default, list for all known files.
commandline (anything with __str__)
If provided, will be output as the command line used to generate
this annotation.
relative_path (bool)
For compatibility with callgrind. Ignored.
"""
file_dict = self._mergeFileTiming()
total_time = self.total_time
if commandline is not None:
print(u'Command line:', commandline, file=out)
print(u'Total duration: %gs' % total_time, file=out)
if not total_time:
return
def percent(value, scale):
if scale == 0:
return 0
return value * 100 / scale
for name in self._getFileNameList(filename):
file_timing = file_dict[name]
file_total_time = file_timing.getTotalTime()
call_list_by_line = file_timing.getCallListByLine()
print(u'File: %s' % name, file=out)
print(u'File duration: %gs (%.2f%%)' % (file_total_time,
percent(file_total_time, total_time)), file=out)
print(_ANNOTATE_HEADER, file=out)
print(_ANNOTATE_HORIZONTAL_LINE, file=out)
last_line = file_timing.getLastLine()
for lineno, line in LineIterator(
self._getline,
file_timing.filename,
file_timing.global_dict,
):
if not line and lineno > last_line:
break
hits, duration = file_timing.getHitStatsFor(lineno)
print(_ANNOTATE_FORMAT % {
u'lineno': lineno,
u'hits': hits,
u'time': duration,
u'time_per_hit': duration / hits if hits else 0,
u'percent': percent(duration, total_time),
u'line': (line or u'').rstrip(),
}, file=out)
for (
_,
call_hits, call_duration,
callee_file, callee,
) in call_list_by_line.get(lineno, ()):
print(_ANNOTATE_CALL_FORMAT % {
u'hits': call_hits,
u'time': call_duration,
u'time_per_hit': call_duration / call_hits,
u'percent': percent(call_duration, total_time),
u'callee_file': callee_file,
u'callee_line': callee.co_firstlineno,
u'callee_name': callee.co_name,
}, file=out)
def _iterRawFile(self, name):
file_timing = self._mergeFileTiming()[name]
for lineno in count(1):
line = self._getline(file_timing.filename, lineno,
file_timing.global_dict)
if not line:
break
yield line
def iterSource(self):
"""
Iterator over all involved files.
Yields 2-tuple composed of file path and an iterator over
(non-annotated) source lines.
Can be used to generate a file tree for use with kcachegrind, for
example.
"""
for name in self.getFilenameSet():
yield name, self._iterRawFile(name)
# profile/cProfile-like API
def dump_stats(self, filename):
"""
Similar to profile.Profile.dump_stats - but different output format !
"""
if _isCallgrindName(filename):
with open(filename, 'w') as out:
self.callgrind(out)
else:
with io.open(filename, 'w', errors='replace') as out:
self.annotate(out)
def print_stats(self):
"""
Similar to profile.Profile.print_stats .
Returns None.
"""
self.annotate(EncodeOrReplaceWriter(sys.stdout))
|
vpelletier/pprofile | pprofile.py | ProfileBase.callgrind | python | def callgrind(self, out, filename=None, commandline=None, relative_path=False):
print(u'# callgrind format', file=out)
print(u'version: 1', file=out)
print(u'creator: pprofile', file=out)
print(u'event: usphit :microseconds/hit', file=out)
print(u'events: hits microseconds usphit', file=out)
if commandline is not None:
print(u'cmd:', commandline, file=out)
file_dict = self._mergeFileTiming()
if relative_path:
convertPath = _relpath
else:
convertPath = lambda x: x
if os.path.sep != "/":
# qCacheGrind (windows build) needs at least one UNIX separator
# in path to find the file. Adapt here even if this is probably
# more of a qCacheGrind issue...
convertPath = lambda x, cascade=convertPath: cascade(
'/'.join(x.split(os.path.sep))
)
code_to_name_dict = {}
homonym_counter = {}
def getCodeName(filename, code):
# Tracks code objects globally, because callee information needs
# to be consistent accross files.
# Inside a file, grants unique names to each code object.
try:
return code_to_name_dict[code]
except KeyError:
name = code.co_name + ':%i' % code.co_firstlineno
key = (filename, name)
homonym_count = homonym_counter.get(key, 0)
if homonym_count:
name += '_%i' % homonym_count
homonym_counter[key] = homonym_count + 1
code_to_name_dict[code] = name
return name
for current_file in self._getFileNameList(filename, may_sort=False):
file_timing = file_dict[current_file]
print(u'fl=%s' % convertPath(current_file), file=out)
# When a local callable is created an immediately executed, this
# loop would start a new "fn=" section but would not end it before
# emitting "cfn=" lines, making the callee appear as not being
# called by interrupted "fn=" section.
# So dispatch all functions in a first pass, and build
# uninterrupted sections in a second pass.
# Note: cost line is a list just to be mutable. A single item is
# expected.
func_dict = defaultdict(lambda: defaultdict(lambda: ([], [])))
for lineno, code, hits, duration in file_timing.iterHits():
func_dict[getCodeName(current_file, code)][lineno][0].append(
(hits, int(duration * 1000000)),
)
for (
lineno,
caller,
call_hits, call_duration,
callee_file, callee,
) in file_timing.iterCalls():
call_ticks = int(call_duration * 1000000)
func_call_list = func_dict[
getCodeName(current_file, caller)
][lineno][1]
append = func_call_list.append
append(u'cfl=' + convertPath(callee_file))
append(u'cfn=' + getCodeName(callee_file, callee))
append(u'calls=%i %i' % (call_hits, callee.co_firstlineno))
append(u'%i %i %i %i' % (lineno, call_hits, call_ticks, call_ticks // call_hits))
for func_name, line_dict in func_dict.iteritems():
print(u'fn=%s' % func_name, file=out)
for lineno, (func_hit_list, func_call_list) in sorted(line_dict.iteritems()):
if func_hit_list:
# Multiple function objects may "reside" on the same
# line of the same file (same global dict).
# Sum these up and produce a single cachegrind event.
hits = sum(x for x, _ in func_hit_list)
ticks = sum(x for _, x in func_hit_list)
print(
u'%i %i %i %i' % (
lineno,
hits,
ticks,
ticks // hits,
),
file=out,
)
for line in func_call_list:
print(line, file=out) | Dump statistics in callgrind format.
Contains:
- per-line hit count, time and time-per-hit
- call associations (call tree)
Note: hit count is not inclusive, in that it is not the sum of all
hits inside that call.
Time unit: microsecond (1e-6 second).
out (file-ish opened for writing)
Destination of callgrind profiling data.
filename (str, collection of str)
If provided, dump stats for given source file(s) only.
By default, list for all known files.
commandline (anything with __str__)
If provided, will be output as the command line used to generate
this profiling data.
relative_path (bool)
When True, absolute elements are stripped from path. Useful when
maintaining several copies of source trees with their own
profiling result, so kcachegrind does not look in system-wide
files which may not match with profiled code. | train | https://github.com/vpelletier/pprofile/blob/51a36896727565faf23e5abccc9204e5f935fe1e/pprofile.py#L520-L629 | [
"def _relpath(name):\n \"\"\"\n Strip absolute components from path.\n Inspired from zipfile.write().\n \"\"\"\n return os.path.normpath(os.path.splitdrive(name)[1]).lstrip(_allsep)\n",
"def _mergeFileTiming(self, rebuild=False):\n merged_file_dict = self.merged_file_dict\n if merged_file_dict and not rebuild:\n return merged_file_dict\n merged_file_dict.clear()\n # Regroup by module, to find all duplicates from other threads.\n by_global_dict = defaultdict(list)\n for file_timing_list in self.file_dict.itervalues():\n for file_timing in file_timing_list:\n by_global_dict[\n id(file_timing.global_dict)\n ].append(\n file_timing,\n )\n # Resolve name conflicts.\n global_to_named_dict = {}\n for global_dict_id, file_timing_list in by_global_dict.iteritems():\n file_timing = file_timing_list[0]\n name = file_timing.filename\n if name in merged_file_dict:\n counter = count()\n base_name = name\n while name in merged_file_dict:\n name = base_name + '_%i' % next(counter)\n global_to_named_dict[global_dict_id] = merged_file_dict[name] = FileTiming(\n name,\n file_timing.global_dict,\n file_timing.profiler, # Note: should be self\n )\n # Add all file timings from one module together under its\n # deduplicated name. 
This needs to happen after all names\n # are generated and all empty file timings are created so\n # call events cross-references can be remapped.\n for merged_file_timing in merged_file_dict.itervalues():\n line_dict = merged_file_timing.line_dict\n for file_timing in by_global_dict[id(merged_file_timing.global_dict)]:\n for line, other_code_dict in file_timing.line_dict.iteritems():\n code_dict = line_dict[line]\n for code, (\n other_hits,\n other_duration,\n ) in other_code_dict.iteritems():\n entry = code_dict[code]\n entry[0] += other_hits\n entry[1] += other_duration\n call_dict = merged_file_timing.call_dict\n for key, (\n other_callee_file_timing,\n other_hits,\n other_duration,\n ) in file_timing.call_dict.iteritems():\n try:\n entry = call_dict[key]\n except KeyError:\n entry = call_dict[key] = [\n global_to_named_dict[\n id(other_callee_file_timing.global_dict)\n ],\n other_hits,\n other_duration,\n ]\n else:\n entry[1] += other_hits\n entry[2] += other_duration\n return merged_file_dict\n",
"def _getFileNameList(self, filename, may_sort=True):\n if filename is None:\n filename = self.getFilenameSet()\n elif isinstance(filename, basestring):\n return [filename]\n if may_sort:\n try:\n # Detect if filename is an ordered data type.\n filename[:0]\n except TypeError:\n # Not ordered, sort.\n file_dict = self._mergeFileTiming()\n filename = sorted(filename, reverse=True,\n key=lambda x: file_dict[x].getSortKey()\n )\n return filename\n",
"convertPath = lambda x: x\n",
"convertPath = lambda x, cascade=convertPath: cascade(\n '/'.join(x.split(os.path.sep))\n)\n",
"def getCodeName(filename, code):\n # Tracks code objects globally, because callee information needs\n # to be consistent accross files.\n # Inside a file, grants unique names to each code object.\n try:\n return code_to_name_dict[code]\n except KeyError:\n name = code.co_name + ':%i' % code.co_firstlineno\n key = (filename, name)\n homonym_count = homonym_counter.get(key, 0)\n if homonym_count:\n name += '_%i' % homonym_count\n homonym_counter[key] = homonym_count + 1\n code_to_name_dict[code] = name\n return name\n"
] | class ProfileBase(object):
"""
Methods common to deterministic and statistic profiling.
Subclasses can override the "FileTiming" property to use a different class.
"""
__slots__ = (
'file_dict',
'global_dict',
'total_time',
'__dict__',
'__weakref__',
'merged_file_dict',
)
FileTiming = _FileTiming
def __init__(self):
self.file_dict = {}
self.merged_file_dict = {}
self.global_dict = {}
self.total_time = 0
def _getFileTiming(self, frame):
try:
return self.global_dict[id(frame.f_globals)]
except KeyError:
f_globals = frame.f_globals
name = self._getFilename(frame)
self.global_dict[id(f_globals)] = file_timing = self.FileTiming(
name,
f_globals,
self,
)
# file_dict modifications must be thread-safe to not lose measures.
# setdefault is atomic, append is atomic.
self.file_dict.setdefault(name, []).append(file_timing)
return file_timing
@staticmethod
def _getFilename(frame):
"""
Overload in subclasses to customise filename generation.
"""
return frame.f_code.co_filename
@staticmethod
def _getline(filename, lineno, global_dict):
"""
Overload in subclasses to customise source retrieval.
"""
return linecache.getline(filename, lineno, global_dict)
def _mergeFileTiming(self, rebuild=False):
merged_file_dict = self.merged_file_dict
if merged_file_dict and not rebuild:
return merged_file_dict
merged_file_dict.clear()
# Regroup by module, to find all duplicates from other threads.
by_global_dict = defaultdict(list)
for file_timing_list in self.file_dict.itervalues():
for file_timing in file_timing_list:
by_global_dict[
id(file_timing.global_dict)
].append(
file_timing,
)
# Resolve name conflicts.
global_to_named_dict = {}
for global_dict_id, file_timing_list in by_global_dict.iteritems():
file_timing = file_timing_list[0]
name = file_timing.filename
if name in merged_file_dict:
counter = count()
base_name = name
while name in merged_file_dict:
name = base_name + '_%i' % next(counter)
global_to_named_dict[global_dict_id] = merged_file_dict[name] = FileTiming(
name,
file_timing.global_dict,
file_timing.profiler, # Note: should be self
)
# Add all file timings from one module together under its
# deduplicated name. This needs to happen after all names
# are generated and all empty file timings are created so
# call events cross-references can be remapped.
for merged_file_timing in merged_file_dict.itervalues():
line_dict = merged_file_timing.line_dict
for file_timing in by_global_dict[id(merged_file_timing.global_dict)]:
for line, other_code_dict in file_timing.line_dict.iteritems():
code_dict = line_dict[line]
for code, (
other_hits,
other_duration,
) in other_code_dict.iteritems():
entry = code_dict[code]
entry[0] += other_hits
entry[1] += other_duration
call_dict = merged_file_timing.call_dict
for key, (
other_callee_file_timing,
other_hits,
other_duration,
) in file_timing.call_dict.iteritems():
try:
entry = call_dict[key]
except KeyError:
entry = call_dict[key] = [
global_to_named_dict[
id(other_callee_file_timing.global_dict)
],
other_hits,
other_duration,
]
else:
entry[1] += other_hits
entry[2] += other_duration
return merged_file_dict
def getFilenameSet(self):
"""
Returns a set of profiled file names.
Note: "file name" is used loosely here. See python documentation for
co_filename, linecache module and PEP302. It may not be a valid
filesystem path.
"""
result = set(self._mergeFileTiming())
# Ignore profiling code. __file__ does not always provide consistent
# results with f_code.co_filename (ex: easy_install with zipped egg),
# so inspect current frame instead.
# Get current file from one of pprofile methods. Compatible with
# implementations that do not have the inspect.currentframe() method
# (e.g. IronPython).
# XXX: Assumes that all of pprofile code is in a single file.
# XXX: Assumes that _initStack exists in pprofile module.
result.discard(inspect.getsourcefile(_initStack))
return result
def _getFileNameList(self, filename, may_sort=True):
if filename is None:
filename = self.getFilenameSet()
elif isinstance(filename, basestring):
return [filename]
if may_sort:
try:
# Detect if filename is an ordered data type.
filename[:0]
except TypeError:
# Not ordered, sort.
file_dict = self._mergeFileTiming()
filename = sorted(filename, reverse=True,
key=lambda x: file_dict[x].getSortKey()
)
return filename
def annotate(self, out, filename=None, commandline=None, relative_path=False):
"""
Dump annotated source code with current profiling statistics to "out"
file.
Time unit: second.
out (file-ish opened for writing)
Destination of annotated sources.
filename (str, collection of str)
If provided, dump stats for given source file(s) only.
If unordered collection, it will get sorted by decreasing total
file score (total time if available, then total hit count).
By default, list for all known files.
commandline (anything with __str__)
If provided, will be output as the command line used to generate
this annotation.
relative_path (bool)
For compatibility with callgrind. Ignored.
"""
file_dict = self._mergeFileTiming()
total_time = self.total_time
if commandline is not None:
print(u'Command line:', commandline, file=out)
print(u'Total duration: %gs' % total_time, file=out)
if not total_time:
return
def percent(value, scale):
if scale == 0:
return 0
return value * 100 / scale
for name in self._getFileNameList(filename):
file_timing = file_dict[name]
file_total_time = file_timing.getTotalTime()
call_list_by_line = file_timing.getCallListByLine()
print(u'File: %s' % name, file=out)
print(u'File duration: %gs (%.2f%%)' % (file_total_time,
percent(file_total_time, total_time)), file=out)
print(_ANNOTATE_HEADER, file=out)
print(_ANNOTATE_HORIZONTAL_LINE, file=out)
last_line = file_timing.getLastLine()
for lineno, line in LineIterator(
self._getline,
file_timing.filename,
file_timing.global_dict,
):
if not line and lineno > last_line:
break
hits, duration = file_timing.getHitStatsFor(lineno)
print(_ANNOTATE_FORMAT % {
u'lineno': lineno,
u'hits': hits,
u'time': duration,
u'time_per_hit': duration / hits if hits else 0,
u'percent': percent(duration, total_time),
u'line': (line or u'').rstrip(),
}, file=out)
for (
_,
call_hits, call_duration,
callee_file, callee,
) in call_list_by_line.get(lineno, ()):
print(_ANNOTATE_CALL_FORMAT % {
u'hits': call_hits,
u'time': call_duration,
u'time_per_hit': call_duration / call_hits,
u'percent': percent(call_duration, total_time),
u'callee_file': callee_file,
u'callee_line': callee.co_firstlineno,
u'callee_name': callee.co_name,
}, file=out)
def _iterRawFile(self, name):
file_timing = self._mergeFileTiming()[name]
for lineno in count(1):
line = self._getline(file_timing.filename, lineno,
file_timing.global_dict)
if not line:
break
yield line
def iterSource(self):
"""
Iterator over all involved files.
Yields 2-tuple composed of file path and an iterator over
(non-annotated) source lines.
Can be used to generate a file tree for use with kcachegrind, for
example.
"""
for name in self.getFilenameSet():
yield name, self._iterRawFile(name)
# profile/cProfile-like API
def dump_stats(self, filename):
"""
Similar to profile.Profile.dump_stats - but different output format !
"""
if _isCallgrindName(filename):
with open(filename, 'w') as out:
self.callgrind(out)
else:
with io.open(filename, 'w', errors='replace') as out:
self.annotate(out)
def print_stats(self):
"""
Similar to profile.Profile.print_stats .
Returns None.
"""
self.annotate(EncodeOrReplaceWriter(sys.stdout))
|
vpelletier/pprofile | pprofile.py | ProfileBase.annotate | python | def annotate(self, out, filename=None, commandline=None, relative_path=False):
file_dict = self._mergeFileTiming()
total_time = self.total_time
if commandline is not None:
print(u'Command line:', commandline, file=out)
print(u'Total duration: %gs' % total_time, file=out)
if not total_time:
return
def percent(value, scale):
if scale == 0:
return 0
return value * 100 / scale
for name in self._getFileNameList(filename):
file_timing = file_dict[name]
file_total_time = file_timing.getTotalTime()
call_list_by_line = file_timing.getCallListByLine()
print(u'File: %s' % name, file=out)
print(u'File duration: %gs (%.2f%%)' % (file_total_time,
percent(file_total_time, total_time)), file=out)
print(_ANNOTATE_HEADER, file=out)
print(_ANNOTATE_HORIZONTAL_LINE, file=out)
last_line = file_timing.getLastLine()
for lineno, line in LineIterator(
self._getline,
file_timing.filename,
file_timing.global_dict,
):
if not line and lineno > last_line:
break
hits, duration = file_timing.getHitStatsFor(lineno)
print(_ANNOTATE_FORMAT % {
u'lineno': lineno,
u'hits': hits,
u'time': duration,
u'time_per_hit': duration / hits if hits else 0,
u'percent': percent(duration, total_time),
u'line': (line or u'').rstrip(),
}, file=out)
for (
_,
call_hits, call_duration,
callee_file, callee,
) in call_list_by_line.get(lineno, ()):
print(_ANNOTATE_CALL_FORMAT % {
u'hits': call_hits,
u'time': call_duration,
u'time_per_hit': call_duration / call_hits,
u'percent': percent(call_duration, total_time),
u'callee_file': callee_file,
u'callee_line': callee.co_firstlineno,
u'callee_name': callee.co_name,
}, file=out) | Dump annotated source code with current profiling statistics to "out"
file.
Time unit: second.
out (file-ish opened for writing)
Destination of annotated sources.
filename (str, collection of str)
If provided, dump stats for given source file(s) only.
If unordered collection, it will get sorted by decreasing total
file score (total time if available, then total hit count).
By default, list for all known files.
commandline (anything with __str__)
If provided, will be output as the command line used to generate
this annotation.
relative_path (bool)
For compatibility with callgrind. Ignored. | train | https://github.com/vpelletier/pprofile/blob/51a36896727565faf23e5abccc9204e5f935fe1e/pprofile.py#L631-L699 | [
"def _mergeFileTiming(self, rebuild=False):\n merged_file_dict = self.merged_file_dict\n if merged_file_dict and not rebuild:\n return merged_file_dict\n merged_file_dict.clear()\n # Regroup by module, to find all duplicates from other threads.\n by_global_dict = defaultdict(list)\n for file_timing_list in self.file_dict.itervalues():\n for file_timing in file_timing_list:\n by_global_dict[\n id(file_timing.global_dict)\n ].append(\n file_timing,\n )\n # Resolve name conflicts.\n global_to_named_dict = {}\n for global_dict_id, file_timing_list in by_global_dict.iteritems():\n file_timing = file_timing_list[0]\n name = file_timing.filename\n if name in merged_file_dict:\n counter = count()\n base_name = name\n while name in merged_file_dict:\n name = base_name + '_%i' % next(counter)\n global_to_named_dict[global_dict_id] = merged_file_dict[name] = FileTiming(\n name,\n file_timing.global_dict,\n file_timing.profiler, # Note: should be self\n )\n # Add all file timings from one module together under its\n # deduplicated name. 
This needs to happen after all names\n # are generated and all empty file timings are created so\n # call events cross-references can be remapped.\n for merged_file_timing in merged_file_dict.itervalues():\n line_dict = merged_file_timing.line_dict\n for file_timing in by_global_dict[id(merged_file_timing.global_dict)]:\n for line, other_code_dict in file_timing.line_dict.iteritems():\n code_dict = line_dict[line]\n for code, (\n other_hits,\n other_duration,\n ) in other_code_dict.iteritems():\n entry = code_dict[code]\n entry[0] += other_hits\n entry[1] += other_duration\n call_dict = merged_file_timing.call_dict\n for key, (\n other_callee_file_timing,\n other_hits,\n other_duration,\n ) in file_timing.call_dict.iteritems():\n try:\n entry = call_dict[key]\n except KeyError:\n entry = call_dict[key] = [\n global_to_named_dict[\n id(other_callee_file_timing.global_dict)\n ],\n other_hits,\n other_duration,\n ]\n else:\n entry[1] += other_hits\n entry[2] += other_duration\n return merged_file_dict\n",
"def _getFileNameList(self, filename, may_sort=True):\n if filename is None:\n filename = self.getFilenameSet()\n elif isinstance(filename, basestring):\n return [filename]\n if may_sort:\n try:\n # Detect if filename is an ordered data type.\n filename[:0]\n except TypeError:\n # Not ordered, sort.\n file_dict = self._mergeFileTiming()\n filename = sorted(filename, reverse=True,\n key=lambda x: file_dict[x].getSortKey()\n )\n return filename\n",
"def percent(value, scale):\n if scale == 0:\n return 0\n return value * 100 / scale\n"
] | class ProfileBase(object):
"""
Methods common to deterministic and statistic profiling.
Subclasses can override the "FileTiming" property to use a different class.
"""
__slots__ = (
'file_dict',
'global_dict',
'total_time',
'__dict__',
'__weakref__',
'merged_file_dict',
)
FileTiming = _FileTiming
def __init__(self):
self.file_dict = {}
self.merged_file_dict = {}
self.global_dict = {}
self.total_time = 0
def _getFileTiming(self, frame):
try:
return self.global_dict[id(frame.f_globals)]
except KeyError:
f_globals = frame.f_globals
name = self._getFilename(frame)
self.global_dict[id(f_globals)] = file_timing = self.FileTiming(
name,
f_globals,
self,
)
# file_dict modifications must be thread-safe to not lose measures.
# setdefault is atomic, append is atomic.
self.file_dict.setdefault(name, []).append(file_timing)
return file_timing
@staticmethod
def _getFilename(frame):
"""
Overload in subclasses to customise filename generation.
"""
return frame.f_code.co_filename
@staticmethod
def _getline(filename, lineno, global_dict):
"""
Overload in subclasses to customise source retrieval.
"""
return linecache.getline(filename, lineno, global_dict)
def _mergeFileTiming(self, rebuild=False):
merged_file_dict = self.merged_file_dict
if merged_file_dict and not rebuild:
return merged_file_dict
merged_file_dict.clear()
# Regroup by module, to find all duplicates from other threads.
by_global_dict = defaultdict(list)
for file_timing_list in self.file_dict.itervalues():
for file_timing in file_timing_list:
by_global_dict[
id(file_timing.global_dict)
].append(
file_timing,
)
# Resolve name conflicts.
global_to_named_dict = {}
for global_dict_id, file_timing_list in by_global_dict.iteritems():
file_timing = file_timing_list[0]
name = file_timing.filename
if name in merged_file_dict:
counter = count()
base_name = name
while name in merged_file_dict:
name = base_name + '_%i' % next(counter)
global_to_named_dict[global_dict_id] = merged_file_dict[name] = FileTiming(
name,
file_timing.global_dict,
file_timing.profiler, # Note: should be self
)
# Add all file timings from one module together under its
# deduplicated name. This needs to happen after all names
# are generated and all empty file timings are created so
# call events cross-references can be remapped.
for merged_file_timing in merged_file_dict.itervalues():
line_dict = merged_file_timing.line_dict
for file_timing in by_global_dict[id(merged_file_timing.global_dict)]:
for line, other_code_dict in file_timing.line_dict.iteritems():
code_dict = line_dict[line]
for code, (
other_hits,
other_duration,
) in other_code_dict.iteritems():
entry = code_dict[code]
entry[0] += other_hits
entry[1] += other_duration
call_dict = merged_file_timing.call_dict
for key, (
other_callee_file_timing,
other_hits,
other_duration,
) in file_timing.call_dict.iteritems():
try:
entry = call_dict[key]
except KeyError:
entry = call_dict[key] = [
global_to_named_dict[
id(other_callee_file_timing.global_dict)
],
other_hits,
other_duration,
]
else:
entry[1] += other_hits
entry[2] += other_duration
return merged_file_dict
def getFilenameSet(self):
"""
Returns a set of profiled file names.
Note: "file name" is used loosely here. See python documentation for
co_filename, linecache module and PEP302. It may not be a valid
filesystem path.
"""
result = set(self._mergeFileTiming())
# Ignore profiling code. __file__ does not always provide consistent
# results with f_code.co_filename (ex: easy_install with zipped egg),
# so inspect current frame instead.
# Get current file from one of pprofile methods. Compatible with
# implementations that do not have the inspect.currentframe() method
# (e.g. IronPython).
# XXX: Assumes that all of pprofile code is in a single file.
# XXX: Assumes that _initStack exists in pprofile module.
result.discard(inspect.getsourcefile(_initStack))
return result
def _getFileNameList(self, filename, may_sort=True):
if filename is None:
filename = self.getFilenameSet()
elif isinstance(filename, basestring):
return [filename]
if may_sort:
try:
# Detect if filename is an ordered data type.
filename[:0]
except TypeError:
# Not ordered, sort.
file_dict = self._mergeFileTiming()
filename = sorted(filename, reverse=True,
key=lambda x: file_dict[x].getSortKey()
)
return filename
def callgrind(self, out, filename=None, commandline=None, relative_path=False):
"""
Dump statistics in callgrind format.
Contains:
- per-line hit count, time and time-per-hit
- call associations (call tree)
Note: hit count is not inclusive, in that it is not the sum of all
hits inside that call.
Time unit: microsecond (1e-6 second).
out (file-ish opened for writing)
Destination of callgrind profiling data.
filename (str, collection of str)
If provided, dump stats for given source file(s) only.
By default, list for all known files.
commandline (anything with __str__)
If provided, will be output as the command line used to generate
this profiling data.
relative_path (bool)
When True, absolute elements are stripped from path. Useful when
maintaining several copies of source trees with their own
profiling result, so kcachegrind does not look in system-wide
files which may not match with profiled code.
"""
print(u'# callgrind format', file=out)
print(u'version: 1', file=out)
print(u'creator: pprofile', file=out)
print(u'event: usphit :microseconds/hit', file=out)
print(u'events: hits microseconds usphit', file=out)
if commandline is not None:
print(u'cmd:', commandline, file=out)
file_dict = self._mergeFileTiming()
if relative_path:
convertPath = _relpath
else:
convertPath = lambda x: x
if os.path.sep != "/":
# qCacheGrind (windows build) needs at least one UNIX separator
# in path to find the file. Adapt here even if this is probably
# more of a qCacheGrind issue...
convertPath = lambda x, cascade=convertPath: cascade(
'/'.join(x.split(os.path.sep))
)
code_to_name_dict = {}
homonym_counter = {}
def getCodeName(filename, code):
# Tracks code objects globally, because callee information needs
# to be consistent accross files.
# Inside a file, grants unique names to each code object.
try:
return code_to_name_dict[code]
except KeyError:
name = code.co_name + ':%i' % code.co_firstlineno
key = (filename, name)
homonym_count = homonym_counter.get(key, 0)
if homonym_count:
name += '_%i' % homonym_count
homonym_counter[key] = homonym_count + 1
code_to_name_dict[code] = name
return name
for current_file in self._getFileNameList(filename, may_sort=False):
file_timing = file_dict[current_file]
print(u'fl=%s' % convertPath(current_file), file=out)
# When a local callable is created an immediately executed, this
# loop would start a new "fn=" section but would not end it before
# emitting "cfn=" lines, making the callee appear as not being
# called by interrupted "fn=" section.
# So dispatch all functions in a first pass, and build
# uninterrupted sections in a second pass.
# Note: cost line is a list just to be mutable. A single item is
# expected.
func_dict = defaultdict(lambda: defaultdict(lambda: ([], [])))
for lineno, code, hits, duration in file_timing.iterHits():
func_dict[getCodeName(current_file, code)][lineno][0].append(
(hits, int(duration * 1000000)),
)
for (
lineno,
caller,
call_hits, call_duration,
callee_file, callee,
) in file_timing.iterCalls():
call_ticks = int(call_duration * 1000000)
func_call_list = func_dict[
getCodeName(current_file, caller)
][lineno][1]
append = func_call_list.append
append(u'cfl=' + convertPath(callee_file))
append(u'cfn=' + getCodeName(callee_file, callee))
append(u'calls=%i %i' % (call_hits, callee.co_firstlineno))
append(u'%i %i %i %i' % (lineno, call_hits, call_ticks, call_ticks // call_hits))
for func_name, line_dict in func_dict.iteritems():
print(u'fn=%s' % func_name, file=out)
for lineno, (func_hit_list, func_call_list) in sorted(line_dict.iteritems()):
if func_hit_list:
# Multiple function objects may "reside" on the same
# line of the same file (same global dict).
# Sum these up and produce a single cachegrind event.
hits = sum(x for x, _ in func_hit_list)
ticks = sum(x for _, x in func_hit_list)
print(
u'%i %i %i %i' % (
lineno,
hits,
ticks,
ticks // hits,
),
file=out,
)
for line in func_call_list:
print(line, file=out)
def _iterRawFile(self, name):
file_timing = self._mergeFileTiming()[name]
for lineno in count(1):
line = self._getline(file_timing.filename, lineno,
file_timing.global_dict)
if not line:
break
yield line
def iterSource(self):
"""
Iterator over all involved files.
Yields 2-tuple composed of file path and an iterator over
(non-annotated) source lines.
Can be used to generate a file tree for use with kcachegrind, for
example.
"""
for name in self.getFilenameSet():
yield name, self._iterRawFile(name)
# profile/cProfile-like API
def dump_stats(self, filename):
"""
Similar to profile.Profile.dump_stats - but different output format !
"""
if _isCallgrindName(filename):
with open(filename, 'w') as out:
self.callgrind(out)
else:
with io.open(filename, 'w', errors='replace') as out:
self.annotate(out)
def print_stats(self):
"""
Similar to profile.Profile.print_stats .
Returns None.
"""
self.annotate(EncodeOrReplaceWriter(sys.stdout))
|
vpelletier/pprofile | pprofile.py | ProfileBase.dump_stats | python | def dump_stats(self, filename):
if _isCallgrindName(filename):
with open(filename, 'w') as out:
self.callgrind(out)
else:
with io.open(filename, 'w', errors='replace') as out:
self.annotate(out) | Similar to profile.Profile.dump_stats - but different output format ! | train | https://github.com/vpelletier/pprofile/blob/51a36896727565faf23e5abccc9204e5f935fe1e/pprofile.py#L723-L732 | [
"def _isCallgrindName(filepath):\n return os.path.basename(filepath).startswith('cachegrind.out.')\n",
"def callgrind(self, out, filename=None, commandline=None, relative_path=False):\n \"\"\"\n Dump statistics in callgrind format.\n Contains:\n - per-line hit count, time and time-per-hit\n - call associations (call tree)\n Note: hit count is not inclusive, in that it is not the sum of all\n hits inside that call.\n Time unit: microsecond (1e-6 second).\n out (file-ish opened for writing)\n Destination of callgrind profiling data.\n filename (str, collection of str)\n If provided, dump stats for given source file(s) only.\n By default, list for all known files.\n commandline (anything with __str__)\n If provided, will be output as the command line used to generate\n this profiling data.\n relative_path (bool)\n When True, absolute elements are stripped from path. Useful when\n maintaining several copies of source trees with their own\n profiling result, so kcachegrind does not look in system-wide\n files which may not match with profiled code.\n \"\"\"\n print(u'# callgrind format', file=out)\n print(u'version: 1', file=out)\n print(u'creator: pprofile', file=out)\n print(u'event: usphit :microseconds/hit', file=out)\n print(u'events: hits microseconds usphit', file=out)\n if commandline is not None:\n print(u'cmd:', commandline, file=out)\n file_dict = self._mergeFileTiming()\n if relative_path:\n convertPath = _relpath\n else:\n convertPath = lambda x: x\n if os.path.sep != \"/\":\n # qCacheGrind (windows build) needs at least one UNIX separator\n # in path to find the file. 
Adapt here even if this is probably\n # more of a qCacheGrind issue...\n convertPath = lambda x, cascade=convertPath: cascade(\n '/'.join(x.split(os.path.sep))\n )\n code_to_name_dict = {}\n homonym_counter = {}\n def getCodeName(filename, code):\n # Tracks code objects globally, because callee information needs\n # to be consistent accross files.\n # Inside a file, grants unique names to each code object.\n try:\n return code_to_name_dict[code]\n except KeyError:\n name = code.co_name + ':%i' % code.co_firstlineno\n key = (filename, name)\n homonym_count = homonym_counter.get(key, 0)\n if homonym_count:\n name += '_%i' % homonym_count\n homonym_counter[key] = homonym_count + 1\n code_to_name_dict[code] = name\n return name\n for current_file in self._getFileNameList(filename, may_sort=False):\n file_timing = file_dict[current_file]\n print(u'fl=%s' % convertPath(current_file), file=out)\n # When a local callable is created an immediately executed, this\n # loop would start a new \"fn=\" section but would not end it before\n # emitting \"cfn=\" lines, making the callee appear as not being\n # called by interrupted \"fn=\" section.\n # So dispatch all functions in a first pass, and build\n # uninterrupted sections in a second pass.\n # Note: cost line is a list just to be mutable. 
A single item is\n # expected.\n func_dict = defaultdict(lambda: defaultdict(lambda: ([], [])))\n for lineno, code, hits, duration in file_timing.iterHits():\n func_dict[getCodeName(current_file, code)][lineno][0].append(\n (hits, int(duration * 1000000)),\n )\n for (\n lineno,\n caller,\n call_hits, call_duration,\n callee_file, callee,\n ) in file_timing.iterCalls():\n call_ticks = int(call_duration * 1000000)\n func_call_list = func_dict[\n getCodeName(current_file, caller)\n ][lineno][1]\n append = func_call_list.append\n append(u'cfl=' + convertPath(callee_file))\n append(u'cfn=' + getCodeName(callee_file, callee))\n append(u'calls=%i %i' % (call_hits, callee.co_firstlineno))\n append(u'%i %i %i %i' % (lineno, call_hits, call_ticks, call_ticks // call_hits))\n for func_name, line_dict in func_dict.iteritems():\n print(u'fn=%s' % func_name, file=out)\n for lineno, (func_hit_list, func_call_list) in sorted(line_dict.iteritems()):\n if func_hit_list:\n # Multiple function objects may \"reside\" on the same\n # line of the same file (same global dict).\n # Sum these up and produce a single cachegrind event.\n hits = sum(x for x, _ in func_hit_list)\n ticks = sum(x for _, x in func_hit_list)\n print(\n u'%i %i %i %i' % (\n lineno,\n hits,\n ticks,\n ticks // hits,\n ),\n file=out,\n )\n for line in func_call_list:\n print(line, file=out)\n",
"def annotate(self, out, filename=None, commandline=None, relative_path=False):\n \"\"\"\n Dump annotated source code with current profiling statistics to \"out\"\n file.\n Time unit: second.\n out (file-ish opened for writing)\n Destination of annotated sources.\n filename (str, collection of str)\n If provided, dump stats for given source file(s) only.\n If unordered collection, it will get sorted by decreasing total\n file score (total time if available, then total hit count).\n By default, list for all known files.\n commandline (anything with __str__)\n If provided, will be output as the command line used to generate\n this annotation.\n relative_path (bool)\n For compatibility with callgrind. Ignored.\n \"\"\"\n file_dict = self._mergeFileTiming()\n total_time = self.total_time\n if commandline is not None:\n print(u'Command line:', commandline, file=out)\n print(u'Total duration: %gs' % total_time, file=out)\n if not total_time:\n return\n def percent(value, scale):\n if scale == 0:\n return 0\n return value * 100 / scale\n for name in self._getFileNameList(filename):\n file_timing = file_dict[name]\n file_total_time = file_timing.getTotalTime()\n call_list_by_line = file_timing.getCallListByLine()\n print(u'File: %s' % name, file=out)\n print(u'File duration: %gs (%.2f%%)' % (file_total_time,\n percent(file_total_time, total_time)), file=out)\n print(_ANNOTATE_HEADER, file=out)\n print(_ANNOTATE_HORIZONTAL_LINE, file=out)\n last_line = file_timing.getLastLine()\n for lineno, line in LineIterator(\n self._getline,\n file_timing.filename,\n file_timing.global_dict,\n ):\n if not line and lineno > last_line:\n break\n hits, duration = file_timing.getHitStatsFor(lineno)\n print(_ANNOTATE_FORMAT % {\n u'lineno': lineno,\n u'hits': hits,\n u'time': duration,\n u'time_per_hit': duration / hits if hits else 0,\n u'percent': percent(duration, total_time),\n u'line': (line or u'').rstrip(),\n }, file=out)\n for (\n _,\n call_hits, call_duration,\n callee_file, 
callee,\n ) in call_list_by_line.get(lineno, ()):\n print(_ANNOTATE_CALL_FORMAT % {\n u'hits': call_hits,\n u'time': call_duration,\n u'time_per_hit': call_duration / call_hits,\n u'percent': percent(call_duration, total_time),\n u'callee_file': callee_file,\n u'callee_line': callee.co_firstlineno,\n u'callee_name': callee.co_name,\n }, file=out)\n"
] | class ProfileBase(object):
"""
Methods common to deterministic and statistic profiling.
Subclasses can override the "FileTiming" property to use a different class.
"""
__slots__ = (
'file_dict',
'global_dict',
'total_time',
'__dict__',
'__weakref__',
'merged_file_dict',
)
FileTiming = _FileTiming
def __init__(self):
self.file_dict = {}
self.merged_file_dict = {}
self.global_dict = {}
self.total_time = 0
def _getFileTiming(self, frame):
try:
return self.global_dict[id(frame.f_globals)]
except KeyError:
f_globals = frame.f_globals
name = self._getFilename(frame)
self.global_dict[id(f_globals)] = file_timing = self.FileTiming(
name,
f_globals,
self,
)
# file_dict modifications must be thread-safe to not lose measures.
# setdefault is atomic, append is atomic.
self.file_dict.setdefault(name, []).append(file_timing)
return file_timing
@staticmethod
def _getFilename(frame):
"""
Overload in subclasses to customise filename generation.
"""
return frame.f_code.co_filename
@staticmethod
def _getline(filename, lineno, global_dict):
"""
Overload in subclasses to customise source retrieval.
"""
return linecache.getline(filename, lineno, global_dict)
def _mergeFileTiming(self, rebuild=False):
merged_file_dict = self.merged_file_dict
if merged_file_dict and not rebuild:
return merged_file_dict
merged_file_dict.clear()
# Regroup by module, to find all duplicates from other threads.
by_global_dict = defaultdict(list)
for file_timing_list in self.file_dict.itervalues():
for file_timing in file_timing_list:
by_global_dict[
id(file_timing.global_dict)
].append(
file_timing,
)
# Resolve name conflicts.
global_to_named_dict = {}
for global_dict_id, file_timing_list in by_global_dict.iteritems():
file_timing = file_timing_list[0]
name = file_timing.filename
if name in merged_file_dict:
counter = count()
base_name = name
while name in merged_file_dict:
name = base_name + '_%i' % next(counter)
global_to_named_dict[global_dict_id] = merged_file_dict[name] = FileTiming(
name,
file_timing.global_dict,
file_timing.profiler, # Note: should be self
)
# Add all file timings from one module together under its
# deduplicated name. This needs to happen after all names
# are generated and all empty file timings are created so
# call events cross-references can be remapped.
for merged_file_timing in merged_file_dict.itervalues():
line_dict = merged_file_timing.line_dict
for file_timing in by_global_dict[id(merged_file_timing.global_dict)]:
for line, other_code_dict in file_timing.line_dict.iteritems():
code_dict = line_dict[line]
for code, (
other_hits,
other_duration,
) in other_code_dict.iteritems():
entry = code_dict[code]
entry[0] += other_hits
entry[1] += other_duration
call_dict = merged_file_timing.call_dict
for key, (
other_callee_file_timing,
other_hits,
other_duration,
) in file_timing.call_dict.iteritems():
try:
entry = call_dict[key]
except KeyError:
entry = call_dict[key] = [
global_to_named_dict[
id(other_callee_file_timing.global_dict)
],
other_hits,
other_duration,
]
else:
entry[1] += other_hits
entry[2] += other_duration
return merged_file_dict
def getFilenameSet(self):
"""
Returns a set of profiled file names.
Note: "file name" is used loosely here. See python documentation for
co_filename, linecache module and PEP302. It may not be a valid
filesystem path.
"""
result = set(self._mergeFileTiming())
# Ignore profiling code. __file__ does not always provide consistent
# results with f_code.co_filename (ex: easy_install with zipped egg),
# so inspect current frame instead.
# Get current file from one of pprofile methods. Compatible with
# implementations that do not have the inspect.currentframe() method
# (e.g. IronPython).
# XXX: Assumes that all of pprofile code is in a single file.
# XXX: Assumes that _initStack exists in pprofile module.
result.discard(inspect.getsourcefile(_initStack))
return result
def _getFileNameList(self, filename, may_sort=True):
if filename is None:
filename = self.getFilenameSet()
elif isinstance(filename, basestring):
return [filename]
if may_sort:
try:
# Detect if filename is an ordered data type.
filename[:0]
except TypeError:
# Not ordered, sort.
file_dict = self._mergeFileTiming()
filename = sorted(filename, reverse=True,
key=lambda x: file_dict[x].getSortKey()
)
return filename
def callgrind(self, out, filename=None, commandline=None, relative_path=False):
"""
Dump statistics in callgrind format.
Contains:
- per-line hit count, time and time-per-hit
- call associations (call tree)
Note: hit count is not inclusive, in that it is not the sum of all
hits inside that call.
Time unit: microsecond (1e-6 second).
out (file-ish opened for writing)
Destination of callgrind profiling data.
filename (str, collection of str)
If provided, dump stats for given source file(s) only.
By default, list for all known files.
commandline (anything with __str__)
If provided, will be output as the command line used to generate
this profiling data.
relative_path (bool)
When True, absolute elements are stripped from path. Useful when
maintaining several copies of source trees with their own
profiling result, so kcachegrind does not look in system-wide
files which may not match with profiled code.
"""
print(u'# callgrind format', file=out)
print(u'version: 1', file=out)
print(u'creator: pprofile', file=out)
print(u'event: usphit :microseconds/hit', file=out)
print(u'events: hits microseconds usphit', file=out)
if commandline is not None:
print(u'cmd:', commandline, file=out)
file_dict = self._mergeFileTiming()
if relative_path:
convertPath = _relpath
else:
convertPath = lambda x: x
if os.path.sep != "/":
# qCacheGrind (windows build) needs at least one UNIX separator
# in path to find the file. Adapt here even if this is probably
# more of a qCacheGrind issue...
convertPath = lambda x, cascade=convertPath: cascade(
'/'.join(x.split(os.path.sep))
)
code_to_name_dict = {}
homonym_counter = {}
def getCodeName(filename, code):
# Tracks code objects globally, because callee information needs
# to be consistent accross files.
# Inside a file, grants unique names to each code object.
try:
return code_to_name_dict[code]
except KeyError:
name = code.co_name + ':%i' % code.co_firstlineno
key = (filename, name)
homonym_count = homonym_counter.get(key, 0)
if homonym_count:
name += '_%i' % homonym_count
homonym_counter[key] = homonym_count + 1
code_to_name_dict[code] = name
return name
for current_file in self._getFileNameList(filename, may_sort=False):
file_timing = file_dict[current_file]
print(u'fl=%s' % convertPath(current_file), file=out)
# When a local callable is created an immediately executed, this
# loop would start a new "fn=" section but would not end it before
# emitting "cfn=" lines, making the callee appear as not being
# called by interrupted "fn=" section.
# So dispatch all functions in a first pass, and build
# uninterrupted sections in a second pass.
# Note: cost line is a list just to be mutable. A single item is
# expected.
func_dict = defaultdict(lambda: defaultdict(lambda: ([], [])))
for lineno, code, hits, duration in file_timing.iterHits():
func_dict[getCodeName(current_file, code)][lineno][0].append(
(hits, int(duration * 1000000)),
)
for (
lineno,
caller,
call_hits, call_duration,
callee_file, callee,
) in file_timing.iterCalls():
call_ticks = int(call_duration * 1000000)
func_call_list = func_dict[
getCodeName(current_file, caller)
][lineno][1]
append = func_call_list.append
append(u'cfl=' + convertPath(callee_file))
append(u'cfn=' + getCodeName(callee_file, callee))
append(u'calls=%i %i' % (call_hits, callee.co_firstlineno))
append(u'%i %i %i %i' % (lineno, call_hits, call_ticks, call_ticks // call_hits))
for func_name, line_dict in func_dict.iteritems():
print(u'fn=%s' % func_name, file=out)
for lineno, (func_hit_list, func_call_list) in sorted(line_dict.iteritems()):
if func_hit_list:
# Multiple function objects may "reside" on the same
# line of the same file (same global dict).
# Sum these up and produce a single cachegrind event.
hits = sum(x for x, _ in func_hit_list)
ticks = sum(x for _, x in func_hit_list)
print(
u'%i %i %i %i' % (
lineno,
hits,
ticks,
ticks // hits,
),
file=out,
)
for line in func_call_list:
print(line, file=out)
def annotate(self, out, filename=None, commandline=None, relative_path=False):
"""
Dump annotated source code with current profiling statistics to "out"
file.
Time unit: second.
out (file-ish opened for writing)
Destination of annotated sources.
filename (str, collection of str)
If provided, dump stats for given source file(s) only.
If unordered collection, it will get sorted by decreasing total
file score (total time if available, then total hit count).
By default, list for all known files.
commandline (anything with __str__)
If provided, will be output as the command line used to generate
this annotation.
relative_path (bool)
For compatibility with callgrind. Ignored.
"""
file_dict = self._mergeFileTiming()
total_time = self.total_time
if commandline is not None:
print(u'Command line:', commandline, file=out)
print(u'Total duration: %gs' % total_time, file=out)
if not total_time:
return
def percent(value, scale):
if scale == 0:
return 0
return value * 100 / scale
for name in self._getFileNameList(filename):
file_timing = file_dict[name]
file_total_time = file_timing.getTotalTime()
call_list_by_line = file_timing.getCallListByLine()
print(u'File: %s' % name, file=out)
print(u'File duration: %gs (%.2f%%)' % (file_total_time,
percent(file_total_time, total_time)), file=out)
print(_ANNOTATE_HEADER, file=out)
print(_ANNOTATE_HORIZONTAL_LINE, file=out)
last_line = file_timing.getLastLine()
for lineno, line in LineIterator(
self._getline,
file_timing.filename,
file_timing.global_dict,
):
if not line and lineno > last_line:
break
hits, duration = file_timing.getHitStatsFor(lineno)
print(_ANNOTATE_FORMAT % {
u'lineno': lineno,
u'hits': hits,
u'time': duration,
u'time_per_hit': duration / hits if hits else 0,
u'percent': percent(duration, total_time),
u'line': (line or u'').rstrip(),
}, file=out)
for (
_,
call_hits, call_duration,
callee_file, callee,
) in call_list_by_line.get(lineno, ()):
print(_ANNOTATE_CALL_FORMAT % {
u'hits': call_hits,
u'time': call_duration,
u'time_per_hit': call_duration / call_hits,
u'percent': percent(call_duration, total_time),
u'callee_file': callee_file,
u'callee_line': callee.co_firstlineno,
u'callee_name': callee.co_name,
}, file=out)
def _iterRawFile(self, name):
file_timing = self._mergeFileTiming()[name]
for lineno in count(1):
line = self._getline(file_timing.filename, lineno,
file_timing.global_dict)
if not line:
break
yield line
def iterSource(self):
"""
Iterator over all involved files.
Yields 2-tuple composed of file path and an iterator over
(non-annotated) source lines.
Can be used to generate a file tree for use with kcachegrind, for
example.
"""
for name in self.getFilenameSet():
yield name, self._iterRawFile(name)
# profile/cProfile-like API
def print_stats(self):
"""
Similar to profile.Profile.print_stats .
Returns None.
"""
self.annotate(EncodeOrReplaceWriter(sys.stdout))
|
vpelletier/pprofile | pprofile.py | ProfileRunnerBase.runctx | python | def runctx(self, cmd, globals, locals):
with self():
exec(cmd, globals, locals)
return self | Similar to profile.Profile.runctx . | train | https://github.com/vpelletier/pprofile/blob/51a36896727565faf23e5abccc9204e5f935fe1e/pprofile.py#L752-L756 | null | class ProfileRunnerBase(object):
def __call__(self):
return self
def __enter__(self):
raise NotImplementedError
def __exit__(self, exc_type, exc_val, exc_tb):
raise NotImplementedError
# profile/cProfile-like API
def runcall(self, func, *args, **kw):
"""Similar to profile.Profile.runcall ."""
with self():
return func(*args, **kw)
def runfile(self, fd, argv, fd_name='<unknown>', compile_flags=0,
dont_inherit=1, globals={}):
with fd:
code = compile(fd.read(), fd_name, 'exec', flags=compile_flags,
dont_inherit=dont_inherit)
original_sys_argv = list(sys.argv)
ctx_globals = globals.copy()
ctx_globals['__file__'] = fd_name
ctx_globals['__name__'] = '__main__'
ctx_globals['__package__'] = None
try:
sys.argv[:] = argv
return self.runctx(code, ctx_globals, None)
finally:
sys.argv[:] = original_sys_argv
def runpath(self, path, argv):
original_sys_path = list(sys.path)
try:
sys.path.insert(0, os.path.dirname(path))
return self.runfile(open(path, 'rb'), argv, fd_name=path)
finally:
sys.path[:] = original_sys_path
def runmodule(self, module, argv):
original_sys_argv = list(sys.argv)
original_sys_path0 = sys.path[0]
try:
sys.path[0] = os.getcwd()
sys.argv[:] = argv
with self():
runpy.run_module(module, run_name='__main__', alter_sys=True)
finally:
sys.argv[:] = original_sys_argv
sys.path[0] = original_sys_path0
return self
|
vpelletier/pprofile | pprofile.py | Profile.enable | python | def enable(self):
if self.enabled_start:
warn('Duplicate "enable" call')
else:
self._enable()
sys.settrace(self._global_trace) | Enable profiling. | train | https://github.com/vpelletier/pprofile/blob/51a36896727565faf23e5abccc9204e5f935fe1e/pprofile.py#L843-L851 | [
"def _enable(self):\n \"\"\"\n Overload this method when subclassing. Called before actually\n enabling trace.\n \"\"\"\n self.stack = _initStack()\n self.enabled_start = time()\n"
] | class Profile(ProfileBase, ProfileRunnerBase):
"""
Deterministic, recursive, line-granularity, profiling class.
Does not require any source code change to work.
If the performance hit is too large, it can benefit from some
integration (calling enable/disable around selected code chunks).
The sum of time spent in all profiled lines is less than the total
profiled time reported. This is (part of) profiling overhead.
This also mans that sum of time-spent-on-line percentage is less than 100%.
All times are "internal time", ie they do not count time spent inside
called (profilable, so python) functions.
"""
__slots__ = (
'_global_trace',
'_local_trace',
'stack',
'enabled_start',
)
def __init__(self, verbose=False):
super(Profile, self).__init__()
if verbose:
self._global_trace = _verboseProfileDecorator(self)(
self._real_global_trace)
self._local_trace = _verboseProfileDecorator(self)(
self._real_local_trace)
else:
self._global_trace = self._real_global_trace
self._local_trace = self._real_local_trace
self.stack = None
self.enabled_start = None
def _enable(self):
"""
Overload this method when subclassing. Called before actually
enabling trace.
"""
self.stack = _initStack()
self.enabled_start = time()
def _disable(self):
"""
Overload this method when subclassing. Called after actually disabling
trace.
"""
self.total_time += time() - self.enabled_start
self.enabled_start = None
del self.stack
def disable(self):
"""
Disable profiling.
"""
if self.enabled_start:
sys.settrace(None)
self._disable()
else:
warn('Duplicate "disable" call')
def __enter__(self):
"""
__enter__() -> self
"""
self.enable()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""
__exit__(*excinfo) -> None. Disables profiling.
"""
self.disable()
def _traceEvent(self, frame, event):
f_code = frame.f_code
lineno = frame.f_lineno
print('%10.6f%s%s %s:%s %s+%s' % (
time() - self.enabled_start,
' ' * len(self.stack[0]),
event,
f_code.co_filename,
lineno,
f_code.co_name,
lineno - f_code.co_firstlineno,
), file=sys.stderr)
def _real_global_trace(self, frame, event, arg):
local_trace = self._local_trace
if local_trace is not None:
event_time = time()
callee_entry = [event_time, 0, frame.f_lineno, event_time, 0]
stack, callee_dict = self.stack
try:
caller_entry = stack[-1]
except IndexError:
pass
else:
# Suspend caller frame
frame_time, frame_discount, lineno, line_time, line_duration = caller_entry
caller_entry[4] = event_time - line_time + line_duration
callee_dict[(frame.f_back.f_code, frame.f_code)].append(callee_entry)
stack.append(callee_entry)
return local_trace
def _real_local_trace(self, frame, event, arg):
if event == 'line' or event == 'return':
event_time = time()
stack, callee_dict = self.stack
try:
stack_entry = stack[-1]
except IndexError:
warn('Profiling stack underflow, disabling.')
self.disable()
return None
frame_time, frame_discount, lineno, line_time, line_duration = stack_entry
file_timing = self._getFileTiming(frame)
file_timing.hit(frame.f_code, lineno,
event_time - line_time + line_duration)
if event == 'line':
# Start a new line
stack_entry[2] = frame.f_lineno
stack_entry[3] = event_time
stack_entry[4] = 0
else:
# 'return' event, <frame> is still callee
# Resume caller frame
stack.pop()
stack[-1][3] = event_time
caller_frame = frame.f_back
caller_code = caller_frame.f_code
callee_code = frame.f_code
callee_entry_list = callee_dict[(caller_code, callee_code)]
callee_entry_list.pop()
call_duration = event_time - frame_time
if callee_entry_list:
# Callee is also somewhere up the stack, so discount this
# call duration from it.
callee_entry_list[-1][1] += call_duration
self._getFileTiming(caller_frame).call(
caller_code, caller_frame.f_lineno,
file_timing,
callee_code, call_duration - frame_discount,
frame,
)
return self._local_trace
# profile/cProfile-like API
def run(self, cmd):
"""Similar to profile.Profile.run ."""
import __main__
dikt = __main__.__dict__
return self.runctx(cmd, dikt, dikt)
|
vpelletier/pprofile | pprofile.py | Profile._disable | python | def _disable(self):
self.total_time += time() - self.enabled_start
self.enabled_start = None
del self.stack | Overload this method when subclassing. Called after actually disabling
trace. | train | https://github.com/vpelletier/pprofile/blob/51a36896727565faf23e5abccc9204e5f935fe1e/pprofile.py#L853-L860 | null | class Profile(ProfileBase, ProfileRunnerBase):
"""
Deterministic, recursive, line-granularity, profiling class.
Does not require any source code change to work.
If the performance hit is too large, it can benefit from some
integration (calling enable/disable around selected code chunks).
The sum of time spent in all profiled lines is less than the total
profiled time reported. This is (part of) profiling overhead.
This also mans that sum of time-spent-on-line percentage is less than 100%.
All times are "internal time", ie they do not count time spent inside
called (profilable, so python) functions.
"""
__slots__ = (
'_global_trace',
'_local_trace',
'stack',
'enabled_start',
)
def __init__(self, verbose=False):
super(Profile, self).__init__()
if verbose:
self._global_trace = _verboseProfileDecorator(self)(
self._real_global_trace)
self._local_trace = _verboseProfileDecorator(self)(
self._real_local_trace)
else:
self._global_trace = self._real_global_trace
self._local_trace = self._real_local_trace
self.stack = None
self.enabled_start = None
def _enable(self):
"""
Overload this method when subclassing. Called before actually
enabling trace.
"""
self.stack = _initStack()
self.enabled_start = time()
def enable(self):
"""
Enable profiling.
"""
if self.enabled_start:
warn('Duplicate "enable" call')
else:
self._enable()
sys.settrace(self._global_trace)
def disable(self):
"""
Disable profiling.
"""
if self.enabled_start:
sys.settrace(None)
self._disable()
else:
warn('Duplicate "disable" call')
def __enter__(self):
"""
__enter__() -> self
"""
self.enable()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""
__exit__(*excinfo) -> None. Disables profiling.
"""
self.disable()
def _traceEvent(self, frame, event):
f_code = frame.f_code
lineno = frame.f_lineno
print('%10.6f%s%s %s:%s %s+%s' % (
time() - self.enabled_start,
' ' * len(self.stack[0]),
event,
f_code.co_filename,
lineno,
f_code.co_name,
lineno - f_code.co_firstlineno,
), file=sys.stderr)
def _real_global_trace(self, frame, event, arg):
local_trace = self._local_trace
if local_trace is not None:
event_time = time()
callee_entry = [event_time, 0, frame.f_lineno, event_time, 0]
stack, callee_dict = self.stack
try:
caller_entry = stack[-1]
except IndexError:
pass
else:
# Suspend caller frame
frame_time, frame_discount, lineno, line_time, line_duration = caller_entry
caller_entry[4] = event_time - line_time + line_duration
callee_dict[(frame.f_back.f_code, frame.f_code)].append(callee_entry)
stack.append(callee_entry)
return local_trace
def _real_local_trace(self, frame, event, arg):
if event == 'line' or event == 'return':
event_time = time()
stack, callee_dict = self.stack
try:
stack_entry = stack[-1]
except IndexError:
warn('Profiling stack underflow, disabling.')
self.disable()
return None
frame_time, frame_discount, lineno, line_time, line_duration = stack_entry
file_timing = self._getFileTiming(frame)
file_timing.hit(frame.f_code, lineno,
event_time - line_time + line_duration)
if event == 'line':
# Start a new line
stack_entry[2] = frame.f_lineno
stack_entry[3] = event_time
stack_entry[4] = 0
else:
# 'return' event, <frame> is still callee
# Resume caller frame
stack.pop()
stack[-1][3] = event_time
caller_frame = frame.f_back
caller_code = caller_frame.f_code
callee_code = frame.f_code
callee_entry_list = callee_dict[(caller_code, callee_code)]
callee_entry_list.pop()
call_duration = event_time - frame_time
if callee_entry_list:
# Callee is also somewhere up the stack, so discount this
# call duration from it.
callee_entry_list[-1][1] += call_duration
self._getFileTiming(caller_frame).call(
caller_code, caller_frame.f_lineno,
file_timing,
callee_code, call_duration - frame_discount,
frame,
)
return self._local_trace
# profile/cProfile-like API
def run(self, cmd):
"""Similar to profile.Profile.run ."""
import __main__
dikt = __main__.__dict__
return self.runctx(cmd, dikt, dikt)
|
vpelletier/pprofile | pprofile.py | Profile.run | python | def run(self, cmd):
import __main__
dikt = __main__.__dict__
return self.runctx(cmd, dikt, dikt) | Similar to profile.Profile.run . | train | https://github.com/vpelletier/pprofile/blob/51a36896727565faf23e5abccc9204e5f935fe1e/pprofile.py#L959-L963 | [
"def runctx(self, cmd, globals, locals):\n \"\"\"Similar to profile.Profile.runctx .\"\"\"\n with self():\n exec(cmd, globals, locals)\n return self\n"
] | class Profile(ProfileBase, ProfileRunnerBase):
"""
Deterministic, recursive, line-granularity, profiling class.
Does not require any source code change to work.
If the performance hit is too large, it can benefit from some
integration (calling enable/disable around selected code chunks).
The sum of time spent in all profiled lines is less than the total
profiled time reported. This is (part of) profiling overhead.
This also mans that sum of time-spent-on-line percentage is less than 100%.
All times are "internal time", ie they do not count time spent inside
called (profilable, so python) functions.
"""
__slots__ = (
'_global_trace',
'_local_trace',
'stack',
'enabled_start',
)
def __init__(self, verbose=False):
super(Profile, self).__init__()
if verbose:
self._global_trace = _verboseProfileDecorator(self)(
self._real_global_trace)
self._local_trace = _verboseProfileDecorator(self)(
self._real_local_trace)
else:
self._global_trace = self._real_global_trace
self._local_trace = self._real_local_trace
self.stack = None
self.enabled_start = None
def _enable(self):
"""
Overload this method when subclassing. Called before actually
enabling trace.
"""
self.stack = _initStack()
self.enabled_start = time()
def enable(self):
"""
Enable profiling.
"""
if self.enabled_start:
warn('Duplicate "enable" call')
else:
self._enable()
sys.settrace(self._global_trace)
def _disable(self):
"""
Overload this method when subclassing. Called after actually disabling
trace.
"""
self.total_time += time() - self.enabled_start
self.enabled_start = None
del self.stack
def disable(self):
"""
Disable profiling.
"""
if self.enabled_start:
sys.settrace(None)
self._disable()
else:
warn('Duplicate "disable" call')
def __enter__(self):
"""
__enter__() -> self
"""
self.enable()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""
__exit__(*excinfo) -> None. Disables profiling.
"""
self.disable()
def _traceEvent(self, frame, event):
f_code = frame.f_code
lineno = frame.f_lineno
print('%10.6f%s%s %s:%s %s+%s' % (
time() - self.enabled_start,
' ' * len(self.stack[0]),
event,
f_code.co_filename,
lineno,
f_code.co_name,
lineno - f_code.co_firstlineno,
), file=sys.stderr)
def _real_global_trace(self, frame, event, arg):
local_trace = self._local_trace
if local_trace is not None:
event_time = time()
callee_entry = [event_time, 0, frame.f_lineno, event_time, 0]
stack, callee_dict = self.stack
try:
caller_entry = stack[-1]
except IndexError:
pass
else:
# Suspend caller frame
frame_time, frame_discount, lineno, line_time, line_duration = caller_entry
caller_entry[4] = event_time - line_time + line_duration
callee_dict[(frame.f_back.f_code, frame.f_code)].append(callee_entry)
stack.append(callee_entry)
return local_trace
def _real_local_trace(self, frame, event, arg):
if event == 'line' or event == 'return':
event_time = time()
stack, callee_dict = self.stack
try:
stack_entry = stack[-1]
except IndexError:
warn('Profiling stack underflow, disabling.')
self.disable()
return None
frame_time, frame_discount, lineno, line_time, line_duration = stack_entry
file_timing = self._getFileTiming(frame)
file_timing.hit(frame.f_code, lineno,
event_time - line_time + line_duration)
if event == 'line':
# Start a new line
stack_entry[2] = frame.f_lineno
stack_entry[3] = event_time
stack_entry[4] = 0
else:
# 'return' event, <frame> is still callee
# Resume caller frame
stack.pop()
stack[-1][3] = event_time
caller_frame = frame.f_back
caller_code = caller_frame.f_code
callee_code = frame.f_code
callee_entry_list = callee_dict[(caller_code, callee_code)]
callee_entry_list.pop()
call_duration = event_time - frame_time
if callee_entry_list:
# Callee is also somewhere up the stack, so discount this
# call duration from it.
callee_entry_list[-1][1] += call_duration
self._getFileTiming(caller_frame).call(
caller_code, caller_frame.f_lineno,
file_timing,
callee_code, call_duration - frame_discount,
frame,
)
return self._local_trace
# profile/cProfile-like API
|
vpelletier/pprofile | pprofile.py | StatisticThread.stop | python | def stop(self):
if self.is_alive():
self._can_run = False
self._stop_event.set()
self._profiler.total_time += time() - self._start_time
self._start_time = None | Request thread to stop.
Does not wait for actual termination (use join() method). | train | https://github.com/vpelletier/pprofile/blob/51a36896727565faf23e5abccc9204e5f935fe1e/pprofile.py#L1099-L1108 | null | class StatisticThread(threading.Thread, ProfileRunnerBase):
"""
Usage in a nutshell:
with StatisticThread() as profiler_thread:
# do stuff
profiler_thread.profiler.print_stats()
"""
__slots__ = (
'_test',
'_start_time',
'clean_exit',
)
def __init__(self, profiler=None, period=.001, single=True, group=None, name=None):
"""
profiler (None or StatisticProfile instance)
Available on instances as the "profiler" read-only property.
If None, a new profiler instance will be created.
period (float)
How many seconds to wait between consecutive samples.
The smaller, the more profiling overhead, but the faster results
become meaningful.
The larger, the less profiling overhead, but requires long profiling
session to get meaningful results.
single (bool)
Profile only the thread which created this instance.
group, name
See Python's threading.Thread API.
"""
if profiler is None:
profiler = StatisticProfile()
if single:
self._test = lambda x, ident=threading.current_thread().ident: ident == x
else:
self._test = None
super(StatisticThread, self).__init__(
group=group,
name=name,
)
self._stop_event = threading.Event()
self._period = period
self._profiler = profiler
profiler.total_time = 0
self.daemon = True
self.clean_exit = False
@property
def profiler(self):
return self._profiler
def start(self):
self.clean_exit = False
self._can_run = True
self._start_time = time()
super(StatisticThread, self).start()
def __enter__(self):
"""
__enter__() -> self
"""
self.start()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""
__exit__(*excinfo) -> None. Stops and joins profiling thread.
"""
self.stop()
self.join()
def run(self):
current_frames = sys._current_frames
test = self._test
if test is None:
test = lambda x, ident=threading.current_thread().ident: ident != x
sample = self._profiler.sample
stop_event = self._stop_event
wait = partial(stop_event.wait, self._period)
while self._can_run:
for ident, frame in current_frames().iteritems():
if test(ident):
sample(frame)
frame = None
wait()
stop_event.clear()
self.clean_exit = True
def callgrind(self, *args, **kw):
warn('deprecated', DeprecationWarning)
return self._profiler.callgrind(*args, **kw)
def annotate(self, *args, **kw):
warn('deprecated', DeprecationWarning)
return self._profiler.annotate(*args, **kw)
def dump_stats(self, *args, **kw):
warn('deprecated', DeprecationWarning)
return self._profiler.dump_stats(*args, **kw)
def print_stats(self, *args, **kw):
warn('deprecated', DeprecationWarning)
return self._profiler.print_stats(*args, **kw)
def iterSource(self, *args, **kw):
warn('deprecated', DeprecationWarning)
return self._profiler.iterSource(*args, **kw)
|
Sheeprider/BitBucket-api | bitbucket/bitbucket.py | Bitbucket.auth | python | def auth(self):
if self.oauth:
return self.oauth
return (self.username, self.password) | Return credentials for current Bitbucket user. | train | https://github.com/Sheeprider/BitBucket-api/blob/be45515d506d87f14807a676f3c2f20d79674b75/bitbucket/bitbucket.py#L71-L75 | null | class Bitbucket(object):
""" This class lets you interact with the bitbucket public API. """
def __init__(self, username='', password='', repo_name_or_slug=''):
self.username = username
self.password = password
self.repo_slug = repo_name_or_slug
self.repo_tree = {}
self.URLS = URLS
self.repository = Repository(self)
self.service = Service(self)
self.ssh = SSH(self)
self.issue = Issue(self)
self.deploy_key = DeployKey(self)
self.access_token = None
self.access_token_secret = None
self.consumer_key = None
self.consumer_secret = None
self.oauth = None
# ===================
# = Getters/Setters =
# ===================
@property
@property
def username(self):
"""Return your repository's username."""
return self._username
@username.setter
def username(self, value):
try:
if isinstance(value, basestring):
self._username = unicode(value)
except NameError:
self._username = value
if value is None:
self._username = None
@username.deleter
def username(self):
del self._username
@property
def password(self):
"""Return your repository's password."""
return self._password
@password.setter
def password(self, value):
try:
if isinstance(value, basestring):
self._password = unicode(value)
except NameError:
self._password = value
if value is None:
self._password = None
@password.deleter
def password(self):
del self._password
@property
def repo_slug(self):
"""Return your repository's slug name."""
return self._repo_slug
@repo_slug.setter
def repo_slug(self, value):
if value is None:
self._repo_slug = None
else:
try:
if isinstance(value, basestring):
value = unicode(value)
except NameError:
pass
value = value.lower()
self._repo_slug = re.sub(r'[^a-z0-9_-]+', '-', value)
@repo_slug.deleter
def repo_slug(self):
del self._repo_slug
# ========================
# = Oauth authentication =
# ========================
def authorize(self, consumer_key, consumer_secret, callback_url=None,
access_token=None, access_token_secret=None):
"""
Call this with your consumer key, secret and callback URL, to
generate a token for verification.
"""
self.consumer_key = consumer_key
self.consumer_secret = consumer_secret
if not access_token and not access_token_secret:
if not callback_url:
return (False, "Callback URL required")
oauth = OAuth1(
consumer_key,
client_secret=consumer_secret,
callback_uri=callback_url)
r = requests.post(self.url('REQUEST_TOKEN'), auth=oauth)
if r.status_code == 200:
creds = parse_qs(r.content)
self.access_token = creds.get('oauth_token')[0]
self.access_token_secret = creds.get('oauth_token_secret')[0]
else:
return (False, r.content)
else:
self.finalize_oauth(access_token, access_token_secret)
return (True, None)
def verify(self, verifier, consumer_key=None, consumer_secret=None,
access_token=None, access_token_secret=None):
"""
After converting the token into verifier, call this to finalize the
authorization.
"""
# Stored values can be supplied to verify
self.consumer_key = consumer_key or self.consumer_key
self.consumer_secret = consumer_secret or self.consumer_secret
self.access_token = access_token or self.access_token
self.access_token_secret = access_token_secret or self.access_token_secret
oauth = OAuth1(
self.consumer_key,
client_secret=self.consumer_secret,
resource_owner_key=self.access_token,
resource_owner_secret=self.access_token_secret,
verifier=verifier)
r = requests.post(self.url('ACCESS_TOKEN'), auth=oauth)
if r.status_code == 200:
creds = parse_qs(r.content)
else:
return (False, r.content)
self.finalize_oauth(creds.get('oauth_token')[0],
creds.get('oauth_token_secret')[0])
return (True, None)
def finalize_oauth(self, access_token, access_token_secret):
""" Called internally once auth process is complete. """
self.access_token = access_token
self.access_token_secret = access_token_secret
# Final OAuth object
self.oauth = OAuth1(
self.consumer_key,
client_secret=self.consumer_secret,
resource_owner_key=self.access_token,
resource_owner_secret=self.access_token_secret)
# ======================
# = High lvl functions =
# ======================
def dispatch(self, method, url, auth=None, params=None, **kwargs):
""" Send HTTP request, with given method,
credentials and data to the given URL,
and return the success and the result on success.
"""
r = Request(
method=method,
url=url,
auth=auth,
params=params,
data=kwargs)
s = Session()
resp = s.send(r.prepare())
status = resp.status_code
text = resp.text
error = resp.reason
if status >= 200 and status < 300:
if text:
try:
return (True, json.loads(text))
except TypeError:
pass
except ValueError:
pass
return (True, text)
elif status >= 300 and status < 400:
return (
False,
'Unauthorized access, '
'please check your credentials.')
elif status >= 400 and status < 500:
return (False, 'Service not found.')
elif status >= 500 and status < 600:
return (False, 'Server error.')
else:
return (False, error)
def url(self, action, **kwargs):
""" Construct and return the URL for a specific API service. """
# TODO : should be static method ?
return self.URLS['BASE'] % self.URLS[action] % kwargs
# =====================
# = General functions =
# =====================
def get_user(self, username=None):
""" Returns user informations.
If username is not defined, tries to return own informations.
"""
username = username or self.username or ''
url = self.url('GET_USER', username=username)
response = self.dispatch('GET', url)
try:
return (response[0], response[1]['user'])
except TypeError:
pass
return response
def get_tags(self, repo_slug=None):
""" Get a single repository on Bitbucket and return its tags."""
repo_slug = repo_slug or self.repo_slug or ''
url = self.url('GET_TAGS', username=self.username, repo_slug=repo_slug)
return self.dispatch('GET', url, auth=self.auth)
def get_branches(self, repo_slug=None):
""" Get a single repository on Bitbucket and return its branches."""
repo_slug = repo_slug or self.repo_slug or ''
url = self.url('GET_BRANCHES',
username=self.username,
repo_slug=repo_slug)
return self.dispatch('GET', url, auth=self.auth)
def get_privileges(self):
""" Get privledges for this user. """
url = self.url('GET_USER_PRIVILEGES')
return self.dispatch('GET', url, auth=self.auth)
|
Sheeprider/BitBucket-api | bitbucket/bitbucket.py | Bitbucket.authorize | python | def authorize(self, consumer_key, consumer_secret, callback_url=None,
access_token=None, access_token_secret=None):
self.consumer_key = consumer_key
self.consumer_secret = consumer_secret
if not access_token and not access_token_secret:
if not callback_url:
return (False, "Callback URL required")
oauth = OAuth1(
consumer_key,
client_secret=consumer_secret,
callback_uri=callback_url)
r = requests.post(self.url('REQUEST_TOKEN'), auth=oauth)
if r.status_code == 200:
creds = parse_qs(r.content)
self.access_token = creds.get('oauth_token')[0]
self.access_token_secret = creds.get('oauth_token_secret')[0]
else:
return (False, r.content)
else:
self.finalize_oauth(access_token, access_token_secret)
return (True, None) | Call this with your consumer key, secret and callback URL, to
generate a token for verification. | train | https://github.com/Sheeprider/BitBucket-api/blob/be45515d506d87f14807a676f3c2f20d79674b75/bitbucket/bitbucket.py#L143-L170 | [
"def finalize_oauth(self, access_token, access_token_secret):\n \"\"\" Called internally once auth process is complete. \"\"\"\n self.access_token = access_token\n self.access_token_secret = access_token_secret\n\n # Final OAuth object\n self.oauth = OAuth1(\n self.consumer_key,\n client_secret=self.consumer_secret,\n resource_owner_key=self.access_token,\n resource_owner_secret=self.access_token_secret)\n",
"def url(self, action, **kwargs):\n \"\"\" Construct and return the URL for a specific API service. \"\"\"\n # TODO : should be static method ?\n return self.URLS['BASE'] % self.URLS[action] % kwargs\n"
] | class Bitbucket(object):
""" This class lets you interact with the bitbucket public API. """
def __init__(self, username='', password='', repo_name_or_slug=''):
self.username = username
self.password = password
self.repo_slug = repo_name_or_slug
self.repo_tree = {}
self.URLS = URLS
self.repository = Repository(self)
self.service = Service(self)
self.ssh = SSH(self)
self.issue = Issue(self)
self.deploy_key = DeployKey(self)
self.access_token = None
self.access_token_secret = None
self.consumer_key = None
self.consumer_secret = None
self.oauth = None
# ===================
# = Getters/Setters =
# ===================
@property
def auth(self):
""" Return credentials for current Bitbucket user. """
if self.oauth:
return self.oauth
return (self.username, self.password)
@property
def username(self):
"""Return your repository's username."""
return self._username
@username.setter
def username(self, value):
try:
if isinstance(value, basestring):
self._username = unicode(value)
except NameError:
self._username = value
if value is None:
self._username = None
@username.deleter
def username(self):
del self._username
@property
def password(self):
"""Return your repository's password."""
return self._password
@password.setter
def password(self, value):
try:
if isinstance(value, basestring):
self._password = unicode(value)
except NameError:
self._password = value
if value is None:
self._password = None
@password.deleter
def password(self):
del self._password
@property
def repo_slug(self):
"""Return your repository's slug name."""
return self._repo_slug
@repo_slug.setter
def repo_slug(self, value):
if value is None:
self._repo_slug = None
else:
try:
if isinstance(value, basestring):
value = unicode(value)
except NameError:
pass
value = value.lower()
self._repo_slug = re.sub(r'[^a-z0-9_-]+', '-', value)
@repo_slug.deleter
def repo_slug(self):
del self._repo_slug
# ========================
# = Oauth authentication =
# ========================
def verify(self, verifier, consumer_key=None, consumer_secret=None,
access_token=None, access_token_secret=None):
"""
After converting the token into verifier, call this to finalize the
authorization.
"""
# Stored values can be supplied to verify
self.consumer_key = consumer_key or self.consumer_key
self.consumer_secret = consumer_secret or self.consumer_secret
self.access_token = access_token or self.access_token
self.access_token_secret = access_token_secret or self.access_token_secret
oauth = OAuth1(
self.consumer_key,
client_secret=self.consumer_secret,
resource_owner_key=self.access_token,
resource_owner_secret=self.access_token_secret,
verifier=verifier)
r = requests.post(self.url('ACCESS_TOKEN'), auth=oauth)
if r.status_code == 200:
creds = parse_qs(r.content)
else:
return (False, r.content)
self.finalize_oauth(creds.get('oauth_token')[0],
creds.get('oauth_token_secret')[0])
return (True, None)
def finalize_oauth(self, access_token, access_token_secret):
""" Called internally once auth process is complete. """
self.access_token = access_token
self.access_token_secret = access_token_secret
# Final OAuth object
self.oauth = OAuth1(
self.consumer_key,
client_secret=self.consumer_secret,
resource_owner_key=self.access_token,
resource_owner_secret=self.access_token_secret)
# ======================
# = High lvl functions =
# ======================
def dispatch(self, method, url, auth=None, params=None, **kwargs):
""" Send HTTP request, with given method,
credentials and data to the given URL,
and return the success and the result on success.
"""
r = Request(
method=method,
url=url,
auth=auth,
params=params,
data=kwargs)
s = Session()
resp = s.send(r.prepare())
status = resp.status_code
text = resp.text
error = resp.reason
if status >= 200 and status < 300:
if text:
try:
return (True, json.loads(text))
except TypeError:
pass
except ValueError:
pass
return (True, text)
elif status >= 300 and status < 400:
return (
False,
'Unauthorized access, '
'please check your credentials.')
elif status >= 400 and status < 500:
return (False, 'Service not found.')
elif status >= 500 and status < 600:
return (False, 'Server error.')
else:
return (False, error)
def url(self, action, **kwargs):
""" Construct and return the URL for a specific API service. """
# TODO : should be static method ?
return self.URLS['BASE'] % self.URLS[action] % kwargs
# =====================
# = General functions =
# =====================
def get_user(self, username=None):
""" Returns user informations.
If username is not defined, tries to return own informations.
"""
username = username or self.username or ''
url = self.url('GET_USER', username=username)
response = self.dispatch('GET', url)
try:
return (response[0], response[1]['user'])
except TypeError:
pass
return response
def get_tags(self, repo_slug=None):
""" Get a single repository on Bitbucket and return its tags."""
repo_slug = repo_slug or self.repo_slug or ''
url = self.url('GET_TAGS', username=self.username, repo_slug=repo_slug)
return self.dispatch('GET', url, auth=self.auth)
def get_branches(self, repo_slug=None):
""" Get a single repository on Bitbucket and return its branches."""
repo_slug = repo_slug or self.repo_slug or ''
url = self.url('GET_BRANCHES',
username=self.username,
repo_slug=repo_slug)
return self.dispatch('GET', url, auth=self.auth)
def get_privileges(self):
""" Get privledges for this user. """
url = self.url('GET_USER_PRIVILEGES')
return self.dispatch('GET', url, auth=self.auth)
|
Sheeprider/BitBucket-api | bitbucket/bitbucket.py | Bitbucket.verify | python | def verify(self, verifier, consumer_key=None, consumer_secret=None,
access_token=None, access_token_secret=None):
# Stored values can be supplied to verify
self.consumer_key = consumer_key or self.consumer_key
self.consumer_secret = consumer_secret or self.consumer_secret
self.access_token = access_token or self.access_token
self.access_token_secret = access_token_secret or self.access_token_secret
oauth = OAuth1(
self.consumer_key,
client_secret=self.consumer_secret,
resource_owner_key=self.access_token,
resource_owner_secret=self.access_token_secret,
verifier=verifier)
r = requests.post(self.url('ACCESS_TOKEN'), auth=oauth)
if r.status_code == 200:
creds = parse_qs(r.content)
else:
return (False, r.content)
self.finalize_oauth(creds.get('oauth_token')[0],
creds.get('oauth_token_secret')[0])
return (True, None) | After converting the token into verifier, call this to finalize the
authorization. | train | https://github.com/Sheeprider/BitBucket-api/blob/be45515d506d87f14807a676f3c2f20d79674b75/bitbucket/bitbucket.py#L172-L198 | [
"def finalize_oauth(self, access_token, access_token_secret):\n \"\"\" Called internally once auth process is complete. \"\"\"\n self.access_token = access_token\n self.access_token_secret = access_token_secret\n\n # Final OAuth object\n self.oauth = OAuth1(\n self.consumer_key,\n client_secret=self.consumer_secret,\n resource_owner_key=self.access_token,\n resource_owner_secret=self.access_token_secret)\n",
"def url(self, action, **kwargs):\n \"\"\" Construct and return the URL for a specific API service. \"\"\"\n # TODO : should be static method ?\n return self.URLS['BASE'] % self.URLS[action] % kwargs\n"
] | class Bitbucket(object):
""" This class lets you interact with the bitbucket public API. """
def __init__(self, username='', password='', repo_name_or_slug=''):
self.username = username
self.password = password
self.repo_slug = repo_name_or_slug
self.repo_tree = {}
self.URLS = URLS
self.repository = Repository(self)
self.service = Service(self)
self.ssh = SSH(self)
self.issue = Issue(self)
self.deploy_key = DeployKey(self)
self.access_token = None
self.access_token_secret = None
self.consumer_key = None
self.consumer_secret = None
self.oauth = None
# ===================
# = Getters/Setters =
# ===================
@property
def auth(self):
""" Return credentials for current Bitbucket user. """
if self.oauth:
return self.oauth
return (self.username, self.password)
@property
def username(self):
"""Return your repository's username."""
return self._username
@username.setter
def username(self, value):
try:
if isinstance(value, basestring):
self._username = unicode(value)
except NameError:
self._username = value
if value is None:
self._username = None
@username.deleter
def username(self):
del self._username
@property
def password(self):
"""Return your repository's password."""
return self._password
@password.setter
def password(self, value):
try:
if isinstance(value, basestring):
self._password = unicode(value)
except NameError:
self._password = value
if value is None:
self._password = None
@password.deleter
def password(self):
del self._password
@property
def repo_slug(self):
"""Return your repository's slug name."""
return self._repo_slug
@repo_slug.setter
def repo_slug(self, value):
if value is None:
self._repo_slug = None
else:
try:
if isinstance(value, basestring):
value = unicode(value)
except NameError:
pass
value = value.lower()
self._repo_slug = re.sub(r'[^a-z0-9_-]+', '-', value)
@repo_slug.deleter
def repo_slug(self):
del self._repo_slug
# ========================
# = Oauth authentication =
# ========================
def authorize(self, consumer_key, consumer_secret, callback_url=None,
access_token=None, access_token_secret=None):
"""
Call this with your consumer key, secret and callback URL, to
generate a token for verification.
"""
self.consumer_key = consumer_key
self.consumer_secret = consumer_secret
if not access_token and not access_token_secret:
if not callback_url:
return (False, "Callback URL required")
oauth = OAuth1(
consumer_key,
client_secret=consumer_secret,
callback_uri=callback_url)
r = requests.post(self.url('REQUEST_TOKEN'), auth=oauth)
if r.status_code == 200:
creds = parse_qs(r.content)
self.access_token = creds.get('oauth_token')[0]
self.access_token_secret = creds.get('oauth_token_secret')[0]
else:
return (False, r.content)
else:
self.finalize_oauth(access_token, access_token_secret)
return (True, None)
def finalize_oauth(self, access_token, access_token_secret):
""" Called internally once auth process is complete. """
self.access_token = access_token
self.access_token_secret = access_token_secret
# Final OAuth object
self.oauth = OAuth1(
self.consumer_key,
client_secret=self.consumer_secret,
resource_owner_key=self.access_token,
resource_owner_secret=self.access_token_secret)
# ======================
# = High lvl functions =
# ======================
def dispatch(self, method, url, auth=None, params=None, **kwargs):
""" Send HTTP request, with given method,
credentials and data to the given URL,
and return the success and the result on success.
"""
r = Request(
method=method,
url=url,
auth=auth,
params=params,
data=kwargs)
s = Session()
resp = s.send(r.prepare())
status = resp.status_code
text = resp.text
error = resp.reason
if status >= 200 and status < 300:
if text:
try:
return (True, json.loads(text))
except TypeError:
pass
except ValueError:
pass
return (True, text)
elif status >= 300 and status < 400:
return (
False,
'Unauthorized access, '
'please check your credentials.')
elif status >= 400 and status < 500:
return (False, 'Service not found.')
elif status >= 500 and status < 600:
return (False, 'Server error.')
else:
return (False, error)
def url(self, action, **kwargs):
""" Construct and return the URL for a specific API service. """
# TODO : should be static method ?
return self.URLS['BASE'] % self.URLS[action] % kwargs
# =====================
# = General functions =
# =====================
def get_user(self, username=None):
""" Returns user informations.
If username is not defined, tries to return own informations.
"""
username = username or self.username or ''
url = self.url('GET_USER', username=username)
response = self.dispatch('GET', url)
try:
return (response[0], response[1]['user'])
except TypeError:
pass
return response
def get_tags(self, repo_slug=None):
""" Get a single repository on Bitbucket and return its tags."""
repo_slug = repo_slug or self.repo_slug or ''
url = self.url('GET_TAGS', username=self.username, repo_slug=repo_slug)
return self.dispatch('GET', url, auth=self.auth)
def get_branches(self, repo_slug=None):
""" Get a single repository on Bitbucket and return its branches."""
repo_slug = repo_slug or self.repo_slug or ''
url = self.url('GET_BRANCHES',
username=self.username,
repo_slug=repo_slug)
return self.dispatch('GET', url, auth=self.auth)
def get_privileges(self):
""" Get privledges for this user. """
url = self.url('GET_USER_PRIVILEGES')
return self.dispatch('GET', url, auth=self.auth)
|
Sheeprider/BitBucket-api | bitbucket/bitbucket.py | Bitbucket.finalize_oauth | python | def finalize_oauth(self, access_token, access_token_secret):
self.access_token = access_token
self.access_token_secret = access_token_secret
# Final OAuth object
self.oauth = OAuth1(
self.consumer_key,
client_secret=self.consumer_secret,
resource_owner_key=self.access_token,
resource_owner_secret=self.access_token_secret) | Called internally once auth process is complete. | train | https://github.com/Sheeprider/BitBucket-api/blob/be45515d506d87f14807a676f3c2f20d79674b75/bitbucket/bitbucket.py#L200-L210 | null | class Bitbucket(object):
""" This class lets you interact with the bitbucket public API. """
def __init__(self, username='', password='', repo_name_or_slug=''):
self.username = username
self.password = password
self.repo_slug = repo_name_or_slug
self.repo_tree = {}
self.URLS = URLS
self.repository = Repository(self)
self.service = Service(self)
self.ssh = SSH(self)
self.issue = Issue(self)
self.deploy_key = DeployKey(self)
self.access_token = None
self.access_token_secret = None
self.consumer_key = None
self.consumer_secret = None
self.oauth = None
# ===================
# = Getters/Setters =
# ===================
@property
def auth(self):
""" Return credentials for current Bitbucket user. """
if self.oauth:
return self.oauth
return (self.username, self.password)
@property
def username(self):
"""Return your repository's username."""
return self._username
@username.setter
def username(self, value):
try:
if isinstance(value, basestring):
self._username = unicode(value)
except NameError:
self._username = value
if value is None:
self._username = None
@username.deleter
def username(self):
del self._username
@property
def password(self):
"""Return your repository's password."""
return self._password
@password.setter
def password(self, value):
try:
if isinstance(value, basestring):
self._password = unicode(value)
except NameError:
self._password = value
if value is None:
self._password = None
@password.deleter
def password(self):
del self._password
@property
def repo_slug(self):
"""Return your repository's slug name."""
return self._repo_slug
@repo_slug.setter
def repo_slug(self, value):
if value is None:
self._repo_slug = None
else:
try:
if isinstance(value, basestring):
value = unicode(value)
except NameError:
pass
value = value.lower()
self._repo_slug = re.sub(r'[^a-z0-9_-]+', '-', value)
@repo_slug.deleter
def repo_slug(self):
del self._repo_slug
# ========================
# = Oauth authentication =
# ========================
def authorize(self, consumer_key, consumer_secret, callback_url=None,
access_token=None, access_token_secret=None):
"""
Call this with your consumer key, secret and callback URL, to
generate a token for verification.
"""
self.consumer_key = consumer_key
self.consumer_secret = consumer_secret
if not access_token and not access_token_secret:
if not callback_url:
return (False, "Callback URL required")
oauth = OAuth1(
consumer_key,
client_secret=consumer_secret,
callback_uri=callback_url)
r = requests.post(self.url('REQUEST_TOKEN'), auth=oauth)
if r.status_code == 200:
creds = parse_qs(r.content)
self.access_token = creds.get('oauth_token')[0]
self.access_token_secret = creds.get('oauth_token_secret')[0]
else:
return (False, r.content)
else:
self.finalize_oauth(access_token, access_token_secret)
return (True, None)
def verify(self, verifier, consumer_key=None, consumer_secret=None,
access_token=None, access_token_secret=None):
"""
After converting the token into verifier, call this to finalize the
authorization.
"""
# Stored values can be supplied to verify
self.consumer_key = consumer_key or self.consumer_key
self.consumer_secret = consumer_secret or self.consumer_secret
self.access_token = access_token or self.access_token
self.access_token_secret = access_token_secret or self.access_token_secret
oauth = OAuth1(
self.consumer_key,
client_secret=self.consumer_secret,
resource_owner_key=self.access_token,
resource_owner_secret=self.access_token_secret,
verifier=verifier)
r = requests.post(self.url('ACCESS_TOKEN'), auth=oauth)
if r.status_code == 200:
creds = parse_qs(r.content)
else:
return (False, r.content)
self.finalize_oauth(creds.get('oauth_token')[0],
creds.get('oauth_token_secret')[0])
return (True, None)
# ======================
# = High lvl functions =
# ======================
def dispatch(self, method, url, auth=None, params=None, **kwargs):
""" Send HTTP request, with given method,
credentials and data to the given URL,
and return the success and the result on success.
"""
r = Request(
method=method,
url=url,
auth=auth,
params=params,
data=kwargs)
s = Session()
resp = s.send(r.prepare())
status = resp.status_code
text = resp.text
error = resp.reason
if status >= 200 and status < 300:
if text:
try:
return (True, json.loads(text))
except TypeError:
pass
except ValueError:
pass
return (True, text)
elif status >= 300 and status < 400:
return (
False,
'Unauthorized access, '
'please check your credentials.')
elif status >= 400 and status < 500:
return (False, 'Service not found.')
elif status >= 500 and status < 600:
return (False, 'Server error.')
else:
return (False, error)
def url(self, action, **kwargs):
""" Construct and return the URL for a specific API service. """
# TODO : should be static method ?
return self.URLS['BASE'] % self.URLS[action] % kwargs
# =====================
# = General functions =
# =====================
def get_user(self, username=None):
""" Returns user informations.
If username is not defined, tries to return own informations.
"""
username = username or self.username or ''
url = self.url('GET_USER', username=username)
response = self.dispatch('GET', url)
try:
return (response[0], response[1]['user'])
except TypeError:
pass
return response
def get_tags(self, repo_slug=None):
""" Get a single repository on Bitbucket and return its tags."""
repo_slug = repo_slug or self.repo_slug or ''
url = self.url('GET_TAGS', username=self.username, repo_slug=repo_slug)
return self.dispatch('GET', url, auth=self.auth)
def get_branches(self, repo_slug=None):
""" Get a single repository on Bitbucket and return its branches."""
repo_slug = repo_slug or self.repo_slug or ''
url = self.url('GET_BRANCHES',
username=self.username,
repo_slug=repo_slug)
return self.dispatch('GET', url, auth=self.auth)
def get_privileges(self):
""" Get privledges for this user. """
url = self.url('GET_USER_PRIVILEGES')
return self.dispatch('GET', url, auth=self.auth)
|
Sheeprider/BitBucket-api | bitbucket/bitbucket.py | Bitbucket.dispatch | python | def dispatch(self, method, url, auth=None, params=None, **kwargs):
r = Request(
method=method,
url=url,
auth=auth,
params=params,
data=kwargs)
s = Session()
resp = s.send(r.prepare())
status = resp.status_code
text = resp.text
error = resp.reason
if status >= 200 and status < 300:
if text:
try:
return (True, json.loads(text))
except TypeError:
pass
except ValueError:
pass
return (True, text)
elif status >= 300 and status < 400:
return (
False,
'Unauthorized access, '
'please check your credentials.')
elif status >= 400 and status < 500:
return (False, 'Service not found.')
elif status >= 500 and status < 600:
return (False, 'Server error.')
else:
return (False, error) | Send HTTP request, with given method,
credentials and data to the given URL,
and return the success and the result on success. | train | https://github.com/Sheeprider/BitBucket-api/blob/be45515d506d87f14807a676f3c2f20d79674b75/bitbucket/bitbucket.py#L216-L251 | null | class Bitbucket(object):
""" This class lets you interact with the bitbucket public API. """
def __init__(self, username='', password='', repo_name_or_slug=''):
self.username = username
self.password = password
self.repo_slug = repo_name_or_slug
self.repo_tree = {}
self.URLS = URLS
self.repository = Repository(self)
self.service = Service(self)
self.ssh = SSH(self)
self.issue = Issue(self)
self.deploy_key = DeployKey(self)
self.access_token = None
self.access_token_secret = None
self.consumer_key = None
self.consumer_secret = None
self.oauth = None
# ===================
# = Getters/Setters =
# ===================
@property
def auth(self):
""" Return credentials for current Bitbucket user. """
if self.oauth:
return self.oauth
return (self.username, self.password)
@property
def username(self):
"""Return your repository's username."""
return self._username
@username.setter
def username(self, value):
try:
if isinstance(value, basestring):
self._username = unicode(value)
except NameError:
self._username = value
if value is None:
self._username = None
@username.deleter
def username(self):
del self._username
@property
def password(self):
"""Return your repository's password."""
return self._password
@password.setter
def password(self, value):
try:
if isinstance(value, basestring):
self._password = unicode(value)
except NameError:
self._password = value
if value is None:
self._password = None
@password.deleter
def password(self):
del self._password
@property
def repo_slug(self):
"""Return your repository's slug name."""
return self._repo_slug
@repo_slug.setter
def repo_slug(self, value):
if value is None:
self._repo_slug = None
else:
try:
if isinstance(value, basestring):
value = unicode(value)
except NameError:
pass
value = value.lower()
self._repo_slug = re.sub(r'[^a-z0-9_-]+', '-', value)
@repo_slug.deleter
def repo_slug(self):
del self._repo_slug
# ========================
# = Oauth authentication =
# ========================
def authorize(self, consumer_key, consumer_secret, callback_url=None,
access_token=None, access_token_secret=None):
"""
Call this with your consumer key, secret and callback URL, to
generate a token for verification.
"""
self.consumer_key = consumer_key
self.consumer_secret = consumer_secret
if not access_token and not access_token_secret:
if not callback_url:
return (False, "Callback URL required")
oauth = OAuth1(
consumer_key,
client_secret=consumer_secret,
callback_uri=callback_url)
r = requests.post(self.url('REQUEST_TOKEN'), auth=oauth)
if r.status_code == 200:
creds = parse_qs(r.content)
self.access_token = creds.get('oauth_token')[0]
self.access_token_secret = creds.get('oauth_token_secret')[0]
else:
return (False, r.content)
else:
self.finalize_oauth(access_token, access_token_secret)
return (True, None)
def verify(self, verifier, consumer_key=None, consumer_secret=None,
access_token=None, access_token_secret=None):
"""
After converting the token into verifier, call this to finalize the
authorization.
"""
# Stored values can be supplied to verify
self.consumer_key = consumer_key or self.consumer_key
self.consumer_secret = consumer_secret or self.consumer_secret
self.access_token = access_token or self.access_token
self.access_token_secret = access_token_secret or self.access_token_secret
oauth = OAuth1(
self.consumer_key,
client_secret=self.consumer_secret,
resource_owner_key=self.access_token,
resource_owner_secret=self.access_token_secret,
verifier=verifier)
r = requests.post(self.url('ACCESS_TOKEN'), auth=oauth)
if r.status_code == 200:
creds = parse_qs(r.content)
else:
return (False, r.content)
self.finalize_oauth(creds.get('oauth_token')[0],
creds.get('oauth_token_secret')[0])
return (True, None)
def finalize_oauth(self, access_token, access_token_secret):
""" Called internally once auth process is complete. """
self.access_token = access_token
self.access_token_secret = access_token_secret
# Final OAuth object
self.oauth = OAuth1(
self.consumer_key,
client_secret=self.consumer_secret,
resource_owner_key=self.access_token,
resource_owner_secret=self.access_token_secret)
# ======================
# = High lvl functions =
# ======================
def url(self, action, **kwargs):
""" Construct and return the URL for a specific API service. """
# TODO : should be static method ?
return self.URLS['BASE'] % self.URLS[action] % kwargs
# =====================
# = General functions =
# =====================
def get_user(self, username=None):
""" Returns user informations.
If username is not defined, tries to return own informations.
"""
username = username or self.username or ''
url = self.url('GET_USER', username=username)
response = self.dispatch('GET', url)
try:
return (response[0], response[1]['user'])
except TypeError:
pass
return response
def get_tags(self, repo_slug=None):
""" Get a single repository on Bitbucket and return its tags."""
repo_slug = repo_slug or self.repo_slug or ''
url = self.url('GET_TAGS', username=self.username, repo_slug=repo_slug)
return self.dispatch('GET', url, auth=self.auth)
def get_branches(self, repo_slug=None):
""" Get a single repository on Bitbucket and return its branches."""
repo_slug = repo_slug or self.repo_slug or ''
url = self.url('GET_BRANCHES',
username=self.username,
repo_slug=repo_slug)
return self.dispatch('GET', url, auth=self.auth)
def get_privileges(self):
""" Get privledges for this user. """
url = self.url('GET_USER_PRIVILEGES')
return self.dispatch('GET', url, auth=self.auth)
|
Sheeprider/BitBucket-api | bitbucket/bitbucket.py | Bitbucket.url | python | def url(self, action, **kwargs):
# TODO : should be static method ?
return self.URLS['BASE'] % self.URLS[action] % kwargs | Construct and return the URL for a specific API service. | train | https://github.com/Sheeprider/BitBucket-api/blob/be45515d506d87f14807a676f3c2f20d79674b75/bitbucket/bitbucket.py#L253-L256 | null | class Bitbucket(object):
""" This class lets you interact with the bitbucket public API. """
def __init__(self, username='', password='', repo_name_or_slug=''):
self.username = username
self.password = password
self.repo_slug = repo_name_or_slug
self.repo_tree = {}
self.URLS = URLS
self.repository = Repository(self)
self.service = Service(self)
self.ssh = SSH(self)
self.issue = Issue(self)
self.deploy_key = DeployKey(self)
self.access_token = None
self.access_token_secret = None
self.consumer_key = None
self.consumer_secret = None
self.oauth = None
# ===================
# = Getters/Setters =
# ===================
@property
def auth(self):
""" Return credentials for current Bitbucket user. """
if self.oauth:
return self.oauth
return (self.username, self.password)
@property
def username(self):
"""Return your repository's username."""
return self._username
@username.setter
def username(self, value):
try:
if isinstance(value, basestring):
self._username = unicode(value)
except NameError:
self._username = value
if value is None:
self._username = None
@username.deleter
def username(self):
del self._username
@property
def password(self):
"""Return your repository's password."""
return self._password
@password.setter
def password(self, value):
try:
if isinstance(value, basestring):
self._password = unicode(value)
except NameError:
self._password = value
if value is None:
self._password = None
@password.deleter
def password(self):
del self._password
@property
def repo_slug(self):
"""Return your repository's slug name."""
return self._repo_slug
@repo_slug.setter
def repo_slug(self, value):
if value is None:
self._repo_slug = None
else:
try:
if isinstance(value, basestring):
value = unicode(value)
except NameError:
pass
value = value.lower()
self._repo_slug = re.sub(r'[^a-z0-9_-]+', '-', value)
@repo_slug.deleter
def repo_slug(self):
del self._repo_slug
# ========================
# = Oauth authentication =
# ========================
def authorize(self, consumer_key, consumer_secret, callback_url=None,
access_token=None, access_token_secret=None):
"""
Call this with your consumer key, secret and callback URL, to
generate a token for verification.
"""
self.consumer_key = consumer_key
self.consumer_secret = consumer_secret
if not access_token and not access_token_secret:
if not callback_url:
return (False, "Callback URL required")
oauth = OAuth1(
consumer_key,
client_secret=consumer_secret,
callback_uri=callback_url)
r = requests.post(self.url('REQUEST_TOKEN'), auth=oauth)
if r.status_code == 200:
creds = parse_qs(r.content)
self.access_token = creds.get('oauth_token')[0]
self.access_token_secret = creds.get('oauth_token_secret')[0]
else:
return (False, r.content)
else:
self.finalize_oauth(access_token, access_token_secret)
return (True, None)
def verify(self, verifier, consumer_key=None, consumer_secret=None,
access_token=None, access_token_secret=None):
"""
After converting the token into verifier, call this to finalize the
authorization.
"""
# Stored values can be supplied to verify
self.consumer_key = consumer_key or self.consumer_key
self.consumer_secret = consumer_secret or self.consumer_secret
self.access_token = access_token or self.access_token
self.access_token_secret = access_token_secret or self.access_token_secret
oauth = OAuth1(
self.consumer_key,
client_secret=self.consumer_secret,
resource_owner_key=self.access_token,
resource_owner_secret=self.access_token_secret,
verifier=verifier)
r = requests.post(self.url('ACCESS_TOKEN'), auth=oauth)
if r.status_code == 200:
creds = parse_qs(r.content)
else:
return (False, r.content)
self.finalize_oauth(creds.get('oauth_token')[0],
creds.get('oauth_token_secret')[0])
return (True, None)
def finalize_oauth(self, access_token, access_token_secret):
""" Called internally once auth process is complete. """
self.access_token = access_token
self.access_token_secret = access_token_secret
# Final OAuth object
self.oauth = OAuth1(
self.consumer_key,
client_secret=self.consumer_secret,
resource_owner_key=self.access_token,
resource_owner_secret=self.access_token_secret)
# ======================
# = High lvl functions =
# ======================
def dispatch(self, method, url, auth=None, params=None, **kwargs):
""" Send HTTP request, with given method,
credentials and data to the given URL,
and return the success and the result on success.
"""
r = Request(
method=method,
url=url,
auth=auth,
params=params,
data=kwargs)
s = Session()
resp = s.send(r.prepare())
status = resp.status_code
text = resp.text
error = resp.reason
if status >= 200 and status < 300:
if text:
try:
return (True, json.loads(text))
except TypeError:
pass
except ValueError:
pass
return (True, text)
elif status >= 300 and status < 400:
return (
False,
'Unauthorized access, '
'please check your credentials.')
elif status >= 400 and status < 500:
return (False, 'Service not found.')
elif status >= 500 and status < 600:
return (False, 'Server error.')
else:
return (False, error)
# =====================
# = General functions =
# =====================
def get_user(self, username=None):
""" Returns user informations.
If username is not defined, tries to return own informations.
"""
username = username or self.username or ''
url = self.url('GET_USER', username=username)
response = self.dispatch('GET', url)
try:
return (response[0], response[1]['user'])
except TypeError:
pass
return response
def get_tags(self, repo_slug=None):
""" Get a single repository on Bitbucket and return its tags."""
repo_slug = repo_slug or self.repo_slug or ''
url = self.url('GET_TAGS', username=self.username, repo_slug=repo_slug)
return self.dispatch('GET', url, auth=self.auth)
def get_branches(self, repo_slug=None):
""" Get a single repository on Bitbucket and return its branches."""
repo_slug = repo_slug or self.repo_slug or ''
url = self.url('GET_BRANCHES',
username=self.username,
repo_slug=repo_slug)
return self.dispatch('GET', url, auth=self.auth)
def get_privileges(self):
""" Get privledges for this user. """
url = self.url('GET_USER_PRIVILEGES')
return self.dispatch('GET', url, auth=self.auth)
|
Sheeprider/BitBucket-api | bitbucket/bitbucket.py | Bitbucket.get_user | python | def get_user(self, username=None):
username = username or self.username or ''
url = self.url('GET_USER', username=username)
response = self.dispatch('GET', url)
try:
return (response[0], response[1]['user'])
except TypeError:
pass
return response | Returns user informations.
If username is not defined, tries to return own informations. | train | https://github.com/Sheeprider/BitBucket-api/blob/be45515d506d87f14807a676f3c2f20d79674b75/bitbucket/bitbucket.py#L262-L273 | [
"def dispatch(self, method, url, auth=None, params=None, **kwargs):\n \"\"\" Send HTTP request, with given method,\n credentials and data to the given URL,\n and return the success and the result on success.\n \"\"\"\n r = Request(\n method=method,\n url=url,\n auth=auth,\n params=params,\n data=kwargs)\n s = Session()\n resp = s.send(r.prepare())\n status = resp.status_code\n text = resp.text\n error = resp.reason\n if status >= 200 and status < 300:\n if text:\n try:\n return (True, json.loads(text))\n except TypeError:\n pass\n except ValueError:\n pass\n return (True, text)\n elif status >= 300 and status < 400:\n return (\n False,\n 'Unauthorized access, '\n 'please check your credentials.')\n elif status >= 400 and status < 500:\n return (False, 'Service not found.')\n elif status >= 500 and status < 600:\n return (False, 'Server error.')\n else:\n return (False, error)\n",
"def url(self, action, **kwargs):\n \"\"\" Construct and return the URL for a specific API service. \"\"\"\n # TODO : should be static method ?\n return self.URLS['BASE'] % self.URLS[action] % kwargs\n"
] | class Bitbucket(object):
""" This class lets you interact with the bitbucket public API. """
def __init__(self, username='', password='', repo_name_or_slug=''):
self.username = username
self.password = password
self.repo_slug = repo_name_or_slug
self.repo_tree = {}
self.URLS = URLS
self.repository = Repository(self)
self.service = Service(self)
self.ssh = SSH(self)
self.issue = Issue(self)
self.deploy_key = DeployKey(self)
self.access_token = None
self.access_token_secret = None
self.consumer_key = None
self.consumer_secret = None
self.oauth = None
# ===================
# = Getters/Setters =
# ===================
@property
def auth(self):
""" Return credentials for current Bitbucket user. """
if self.oauth:
return self.oauth
return (self.username, self.password)
@property
def username(self):
"""Return your repository's username."""
return self._username
@username.setter
def username(self, value):
try:
if isinstance(value, basestring):
self._username = unicode(value)
except NameError:
self._username = value
if value is None:
self._username = None
@username.deleter
def username(self):
del self._username
@property
def password(self):
"""Return your repository's password."""
return self._password
@password.setter
def password(self, value):
try:
if isinstance(value, basestring):
self._password = unicode(value)
except NameError:
self._password = value
if value is None:
self._password = None
@password.deleter
def password(self):
del self._password
@property
def repo_slug(self):
"""Return your repository's slug name."""
return self._repo_slug
@repo_slug.setter
def repo_slug(self, value):
if value is None:
self._repo_slug = None
else:
try:
if isinstance(value, basestring):
value = unicode(value)
except NameError:
pass
value = value.lower()
self._repo_slug = re.sub(r'[^a-z0-9_-]+', '-', value)
@repo_slug.deleter
def repo_slug(self):
del self._repo_slug
# ========================
# = Oauth authentication =
# ========================
def authorize(self, consumer_key, consumer_secret, callback_url=None,
access_token=None, access_token_secret=None):
"""
Call this with your consumer key, secret and callback URL, to
generate a token for verification.
"""
self.consumer_key = consumer_key
self.consumer_secret = consumer_secret
if not access_token and not access_token_secret:
if not callback_url:
return (False, "Callback URL required")
oauth = OAuth1(
consumer_key,
client_secret=consumer_secret,
callback_uri=callback_url)
r = requests.post(self.url('REQUEST_TOKEN'), auth=oauth)
if r.status_code == 200:
creds = parse_qs(r.content)
self.access_token = creds.get('oauth_token')[0]
self.access_token_secret = creds.get('oauth_token_secret')[0]
else:
return (False, r.content)
else:
self.finalize_oauth(access_token, access_token_secret)
return (True, None)
def verify(self, verifier, consumer_key=None, consumer_secret=None,
access_token=None, access_token_secret=None):
"""
After converting the token into verifier, call this to finalize the
authorization.
"""
# Stored values can be supplied to verify
self.consumer_key = consumer_key or self.consumer_key
self.consumer_secret = consumer_secret or self.consumer_secret
self.access_token = access_token or self.access_token
self.access_token_secret = access_token_secret or self.access_token_secret
oauth = OAuth1(
self.consumer_key,
client_secret=self.consumer_secret,
resource_owner_key=self.access_token,
resource_owner_secret=self.access_token_secret,
verifier=verifier)
r = requests.post(self.url('ACCESS_TOKEN'), auth=oauth)
if r.status_code == 200:
creds = parse_qs(r.content)
else:
return (False, r.content)
self.finalize_oauth(creds.get('oauth_token')[0],
creds.get('oauth_token_secret')[0])
return (True, None)
def finalize_oauth(self, access_token, access_token_secret):
""" Called internally once auth process is complete. """
self.access_token = access_token
self.access_token_secret = access_token_secret
# Final OAuth object
self.oauth = OAuth1(
self.consumer_key,
client_secret=self.consumer_secret,
resource_owner_key=self.access_token,
resource_owner_secret=self.access_token_secret)
# ======================
# = High lvl functions =
# ======================
def dispatch(self, method, url, auth=None, params=None, **kwargs):
""" Send HTTP request, with given method,
credentials and data to the given URL,
and return the success and the result on success.
"""
r = Request(
method=method,
url=url,
auth=auth,
params=params,
data=kwargs)
s = Session()
resp = s.send(r.prepare())
status = resp.status_code
text = resp.text
error = resp.reason
if status >= 200 and status < 300:
if text:
try:
return (True, json.loads(text))
except TypeError:
pass
except ValueError:
pass
return (True, text)
elif status >= 300 and status < 400:
return (
False,
'Unauthorized access, '
'please check your credentials.')
elif status >= 400 and status < 500:
return (False, 'Service not found.')
elif status >= 500 and status < 600:
return (False, 'Server error.')
else:
return (False, error)
def url(self, action, **kwargs):
""" Construct and return the URL for a specific API service. """
# TODO : should be static method ?
return self.URLS['BASE'] % self.URLS[action] % kwargs
# =====================
# = General functions =
# =====================
def get_tags(self, repo_slug=None):
""" Get a single repository on Bitbucket and return its tags."""
repo_slug = repo_slug or self.repo_slug or ''
url = self.url('GET_TAGS', username=self.username, repo_slug=repo_slug)
return self.dispatch('GET', url, auth=self.auth)
def get_branches(self, repo_slug=None):
""" Get a single repository on Bitbucket and return its branches."""
repo_slug = repo_slug or self.repo_slug or ''
url = self.url('GET_BRANCHES',
username=self.username,
repo_slug=repo_slug)
return self.dispatch('GET', url, auth=self.auth)
def get_privileges(self):
""" Get privledges for this user. """
url = self.url('GET_USER_PRIVILEGES')
return self.dispatch('GET', url, auth=self.auth)
|
Sheeprider/BitBucket-api | bitbucket/bitbucket.py | Bitbucket.get_tags | python | def get_tags(self, repo_slug=None):
repo_slug = repo_slug or self.repo_slug or ''
url = self.url('GET_TAGS', username=self.username, repo_slug=repo_slug)
return self.dispatch('GET', url, auth=self.auth) | Get a single repository on Bitbucket and return its tags. | train | https://github.com/Sheeprider/BitBucket-api/blob/be45515d506d87f14807a676f3c2f20d79674b75/bitbucket/bitbucket.py#L275-L279 | [
"def dispatch(self, method, url, auth=None, params=None, **kwargs):\n \"\"\" Send HTTP request, with given method,\n credentials and data to the given URL,\n and return the success and the result on success.\n \"\"\"\n r = Request(\n method=method,\n url=url,\n auth=auth,\n params=params,\n data=kwargs)\n s = Session()\n resp = s.send(r.prepare())\n status = resp.status_code\n text = resp.text\n error = resp.reason\n if status >= 200 and status < 300:\n if text:\n try:\n return (True, json.loads(text))\n except TypeError:\n pass\n except ValueError:\n pass\n return (True, text)\n elif status >= 300 and status < 400:\n return (\n False,\n 'Unauthorized access, '\n 'please check your credentials.')\n elif status >= 400 and status < 500:\n return (False, 'Service not found.')\n elif status >= 500 and status < 600:\n return (False, 'Server error.')\n else:\n return (False, error)\n",
"def url(self, action, **kwargs):\n \"\"\" Construct and return the URL for a specific API service. \"\"\"\n # TODO : should be static method ?\n return self.URLS['BASE'] % self.URLS[action] % kwargs\n"
] | class Bitbucket(object):
""" This class lets you interact with the bitbucket public API. """
def __init__(self, username='', password='', repo_name_or_slug=''):
self.username = username
self.password = password
self.repo_slug = repo_name_or_slug
self.repo_tree = {}
self.URLS = URLS
self.repository = Repository(self)
self.service = Service(self)
self.ssh = SSH(self)
self.issue = Issue(self)
self.deploy_key = DeployKey(self)
self.access_token = None
self.access_token_secret = None
self.consumer_key = None
self.consumer_secret = None
self.oauth = None
# ===================
# = Getters/Setters =
# ===================
@property
def auth(self):
""" Return credentials for current Bitbucket user. """
if self.oauth:
return self.oauth
return (self.username, self.password)
@property
def username(self):
"""Return your repository's username."""
return self._username
@username.setter
def username(self, value):
try:
if isinstance(value, basestring):
self._username = unicode(value)
except NameError:
self._username = value
if value is None:
self._username = None
@username.deleter
def username(self):
del self._username
@property
def password(self):
"""Return your repository's password."""
return self._password
@password.setter
def password(self, value):
try:
if isinstance(value, basestring):
self._password = unicode(value)
except NameError:
self._password = value
if value is None:
self._password = None
@password.deleter
def password(self):
del self._password
@property
def repo_slug(self):
"""Return your repository's slug name."""
return self._repo_slug
@repo_slug.setter
def repo_slug(self, value):
if value is None:
self._repo_slug = None
else:
try:
if isinstance(value, basestring):
value = unicode(value)
except NameError:
pass
value = value.lower()
self._repo_slug = re.sub(r'[^a-z0-9_-]+', '-', value)
@repo_slug.deleter
def repo_slug(self):
del self._repo_slug
# ========================
# = Oauth authentication =
# ========================
def authorize(self, consumer_key, consumer_secret, callback_url=None,
access_token=None, access_token_secret=None):
"""
Call this with your consumer key, secret and callback URL, to
generate a token for verification.
"""
self.consumer_key = consumer_key
self.consumer_secret = consumer_secret
if not access_token and not access_token_secret:
if not callback_url:
return (False, "Callback URL required")
oauth = OAuth1(
consumer_key,
client_secret=consumer_secret,
callback_uri=callback_url)
r = requests.post(self.url('REQUEST_TOKEN'), auth=oauth)
if r.status_code == 200:
creds = parse_qs(r.content)
self.access_token = creds.get('oauth_token')[0]
self.access_token_secret = creds.get('oauth_token_secret')[0]
else:
return (False, r.content)
else:
self.finalize_oauth(access_token, access_token_secret)
return (True, None)
def verify(self, verifier, consumer_key=None, consumer_secret=None,
access_token=None, access_token_secret=None):
"""
After converting the token into verifier, call this to finalize the
authorization.
"""
# Stored values can be supplied to verify
self.consumer_key = consumer_key or self.consumer_key
self.consumer_secret = consumer_secret or self.consumer_secret
self.access_token = access_token or self.access_token
self.access_token_secret = access_token_secret or self.access_token_secret
oauth = OAuth1(
self.consumer_key,
client_secret=self.consumer_secret,
resource_owner_key=self.access_token,
resource_owner_secret=self.access_token_secret,
verifier=verifier)
r = requests.post(self.url('ACCESS_TOKEN'), auth=oauth)
if r.status_code == 200:
creds = parse_qs(r.content)
else:
return (False, r.content)
self.finalize_oauth(creds.get('oauth_token')[0],
creds.get('oauth_token_secret')[0])
return (True, None)
def finalize_oauth(self, access_token, access_token_secret):
""" Called internally once auth process is complete. """
self.access_token = access_token
self.access_token_secret = access_token_secret
# Final OAuth object
self.oauth = OAuth1(
self.consumer_key,
client_secret=self.consumer_secret,
resource_owner_key=self.access_token,
resource_owner_secret=self.access_token_secret)
# ======================
# = High lvl functions =
# ======================
def dispatch(self, method, url, auth=None, params=None, **kwargs):
""" Send HTTP request, with given method,
credentials and data to the given URL,
and return the success and the result on success.
"""
r = Request(
method=method,
url=url,
auth=auth,
params=params,
data=kwargs)
s = Session()
resp = s.send(r.prepare())
status = resp.status_code
text = resp.text
error = resp.reason
if status >= 200 and status < 300:
if text:
try:
return (True, json.loads(text))
except TypeError:
pass
except ValueError:
pass
return (True, text)
elif status >= 300 and status < 400:
return (
False,
'Unauthorized access, '
'please check your credentials.')
elif status >= 400 and status < 500:
return (False, 'Service not found.')
elif status >= 500 and status < 600:
return (False, 'Server error.')
else:
return (False, error)
def url(self, action, **kwargs):
""" Construct and return the URL for a specific API service. """
# TODO : should be static method ?
return self.URLS['BASE'] % self.URLS[action] % kwargs
# =====================
# = General functions =
# =====================
def get_user(self, username=None):
""" Returns user informations.
If username is not defined, tries to return own informations.
"""
username = username or self.username or ''
url = self.url('GET_USER', username=username)
response = self.dispatch('GET', url)
try:
return (response[0], response[1]['user'])
except TypeError:
pass
return response
def get_branches(self, repo_slug=None):
""" Get a single repository on Bitbucket and return its branches."""
repo_slug = repo_slug or self.repo_slug or ''
url = self.url('GET_BRANCHES',
username=self.username,
repo_slug=repo_slug)
return self.dispatch('GET', url, auth=self.auth)
def get_privileges(self):
""" Get privledges for this user. """
url = self.url('GET_USER_PRIVILEGES')
return self.dispatch('GET', url, auth=self.auth)
|
Sheeprider/BitBucket-api | bitbucket/bitbucket.py | Bitbucket.get_branches | python | def get_branches(self, repo_slug=None):
repo_slug = repo_slug or self.repo_slug or ''
url = self.url('GET_BRANCHES',
username=self.username,
repo_slug=repo_slug)
return self.dispatch('GET', url, auth=self.auth) | Get a single repository on Bitbucket and return its branches. | train | https://github.com/Sheeprider/BitBucket-api/blob/be45515d506d87f14807a676f3c2f20d79674b75/bitbucket/bitbucket.py#L281-L287 | [
"def dispatch(self, method, url, auth=None, params=None, **kwargs):\n \"\"\" Send HTTP request, with given method,\n credentials and data to the given URL,\n and return the success and the result on success.\n \"\"\"\n r = Request(\n method=method,\n url=url,\n auth=auth,\n params=params,\n data=kwargs)\n s = Session()\n resp = s.send(r.prepare())\n status = resp.status_code\n text = resp.text\n error = resp.reason\n if status >= 200 and status < 300:\n if text:\n try:\n return (True, json.loads(text))\n except TypeError:\n pass\n except ValueError:\n pass\n return (True, text)\n elif status >= 300 and status < 400:\n return (\n False,\n 'Unauthorized access, '\n 'please check your credentials.')\n elif status >= 400 and status < 500:\n return (False, 'Service not found.')\n elif status >= 500 and status < 600:\n return (False, 'Server error.')\n else:\n return (False, error)\n",
"def url(self, action, **kwargs):\n \"\"\" Construct and return the URL for a specific API service. \"\"\"\n # TODO : should be static method ?\n return self.URLS['BASE'] % self.URLS[action] % kwargs\n"
] | class Bitbucket(object):
""" This class lets you interact with the bitbucket public API. """
def __init__(self, username='', password='', repo_name_or_slug=''):
self.username = username
self.password = password
self.repo_slug = repo_name_or_slug
self.repo_tree = {}
self.URLS = URLS
self.repository = Repository(self)
self.service = Service(self)
self.ssh = SSH(self)
self.issue = Issue(self)
self.deploy_key = DeployKey(self)
self.access_token = None
self.access_token_secret = None
self.consumer_key = None
self.consumer_secret = None
self.oauth = None
# ===================
# = Getters/Setters =
# ===================
@property
def auth(self):
""" Return credentials for current Bitbucket user. """
if self.oauth:
return self.oauth
return (self.username, self.password)
@property
def username(self):
"""Return your repository's username."""
return self._username
@username.setter
def username(self, value):
try:
if isinstance(value, basestring):
self._username = unicode(value)
except NameError:
self._username = value
if value is None:
self._username = None
@username.deleter
def username(self):
del self._username
@property
def password(self):
"""Return your repository's password."""
return self._password
@password.setter
def password(self, value):
try:
if isinstance(value, basestring):
self._password = unicode(value)
except NameError:
self._password = value
if value is None:
self._password = None
@password.deleter
def password(self):
del self._password
@property
def repo_slug(self):
"""Return your repository's slug name."""
return self._repo_slug
@repo_slug.setter
def repo_slug(self, value):
if value is None:
self._repo_slug = None
else:
try:
if isinstance(value, basestring):
value = unicode(value)
except NameError:
pass
value = value.lower()
self._repo_slug = re.sub(r'[^a-z0-9_-]+', '-', value)
@repo_slug.deleter
def repo_slug(self):
del self._repo_slug
# ========================
# = Oauth authentication =
# ========================
def authorize(self, consumer_key, consumer_secret, callback_url=None,
access_token=None, access_token_secret=None):
"""
Call this with your consumer key, secret and callback URL, to
generate a token for verification.
"""
self.consumer_key = consumer_key
self.consumer_secret = consumer_secret
if not access_token and not access_token_secret:
if not callback_url:
return (False, "Callback URL required")
oauth = OAuth1(
consumer_key,
client_secret=consumer_secret,
callback_uri=callback_url)
r = requests.post(self.url('REQUEST_TOKEN'), auth=oauth)
if r.status_code == 200:
creds = parse_qs(r.content)
self.access_token = creds.get('oauth_token')[0]
self.access_token_secret = creds.get('oauth_token_secret')[0]
else:
return (False, r.content)
else:
self.finalize_oauth(access_token, access_token_secret)
return (True, None)
def verify(self, verifier, consumer_key=None, consumer_secret=None,
access_token=None, access_token_secret=None):
"""
After converting the token into verifier, call this to finalize the
authorization.
"""
# Stored values can be supplied to verify
self.consumer_key = consumer_key or self.consumer_key
self.consumer_secret = consumer_secret or self.consumer_secret
self.access_token = access_token or self.access_token
self.access_token_secret = access_token_secret or self.access_token_secret
oauth = OAuth1(
self.consumer_key,
client_secret=self.consumer_secret,
resource_owner_key=self.access_token,
resource_owner_secret=self.access_token_secret,
verifier=verifier)
r = requests.post(self.url('ACCESS_TOKEN'), auth=oauth)
if r.status_code == 200:
creds = parse_qs(r.content)
else:
return (False, r.content)
self.finalize_oauth(creds.get('oauth_token')[0],
creds.get('oauth_token_secret')[0])
return (True, None)
def finalize_oauth(self, access_token, access_token_secret):
""" Called internally once auth process is complete. """
self.access_token = access_token
self.access_token_secret = access_token_secret
# Final OAuth object
self.oauth = OAuth1(
self.consumer_key,
client_secret=self.consumer_secret,
resource_owner_key=self.access_token,
resource_owner_secret=self.access_token_secret)
# ======================
# = High lvl functions =
# ======================
def dispatch(self, method, url, auth=None, params=None, **kwargs):
""" Send HTTP request, with given method,
credentials and data to the given URL,
and return the success and the result on success.
"""
r = Request(
method=method,
url=url,
auth=auth,
params=params,
data=kwargs)
s = Session()
resp = s.send(r.prepare())
status = resp.status_code
text = resp.text
error = resp.reason
if status >= 200 and status < 300:
if text:
try:
return (True, json.loads(text))
except TypeError:
pass
except ValueError:
pass
return (True, text)
elif status >= 300 and status < 400:
return (
False,
'Unauthorized access, '
'please check your credentials.')
elif status >= 400 and status < 500:
return (False, 'Service not found.')
elif status >= 500 and status < 600:
return (False, 'Server error.')
else:
return (False, error)
def url(self, action, **kwargs):
""" Construct and return the URL for a specific API service. """
# TODO : should be static method ?
return self.URLS['BASE'] % self.URLS[action] % kwargs
# =====================
# = General functions =
# =====================
def get_user(self, username=None):
""" Returns user informations.
If username is not defined, tries to return own informations.
"""
username = username or self.username or ''
url = self.url('GET_USER', username=username)
response = self.dispatch('GET', url)
try:
return (response[0], response[1]['user'])
except TypeError:
pass
return response
def get_tags(self, repo_slug=None):
""" Get a single repository on Bitbucket and return its tags."""
repo_slug = repo_slug or self.repo_slug or ''
url = self.url('GET_TAGS', username=self.username, repo_slug=repo_slug)
return self.dispatch('GET', url, auth=self.auth)
def get_privileges(self):
""" Get privledges for this user. """
url = self.url('GET_USER_PRIVILEGES')
return self.dispatch('GET', url, auth=self.auth)
|
Sheeprider/BitBucket-api | bitbucket/bitbucket.py | Bitbucket.get_privileges | python | def get_privileges(self):
url = self.url('GET_USER_PRIVILEGES')
return self.dispatch('GET', url, auth=self.auth) | Get privledges for this user. | train | https://github.com/Sheeprider/BitBucket-api/blob/be45515d506d87f14807a676f3c2f20d79674b75/bitbucket/bitbucket.py#L289-L292 | [
"def dispatch(self, method, url, auth=None, params=None, **kwargs):\n \"\"\" Send HTTP request, with given method,\n credentials and data to the given URL,\n and return the success and the result on success.\n \"\"\"\n r = Request(\n method=method,\n url=url,\n auth=auth,\n params=params,\n data=kwargs)\n s = Session()\n resp = s.send(r.prepare())\n status = resp.status_code\n text = resp.text\n error = resp.reason\n if status >= 200 and status < 300:\n if text:\n try:\n return (True, json.loads(text))\n except TypeError:\n pass\n except ValueError:\n pass\n return (True, text)\n elif status >= 300 and status < 400:\n return (\n False,\n 'Unauthorized access, '\n 'please check your credentials.')\n elif status >= 400 and status < 500:\n return (False, 'Service not found.')\n elif status >= 500 and status < 600:\n return (False, 'Server error.')\n else:\n return (False, error)\n",
"def url(self, action, **kwargs):\n \"\"\" Construct and return the URL for a specific API service. \"\"\"\n # TODO : should be static method ?\n return self.URLS['BASE'] % self.URLS[action] % kwargs\n"
] | class Bitbucket(object):
""" This class lets you interact with the bitbucket public API. """
def __init__(self, username='', password='', repo_name_or_slug=''):
self.username = username
self.password = password
self.repo_slug = repo_name_or_slug
self.repo_tree = {}
self.URLS = URLS
self.repository = Repository(self)
self.service = Service(self)
self.ssh = SSH(self)
self.issue = Issue(self)
self.deploy_key = DeployKey(self)
self.access_token = None
self.access_token_secret = None
self.consumer_key = None
self.consumer_secret = None
self.oauth = None
# ===================
# = Getters/Setters =
# ===================
@property
def auth(self):
""" Return credentials for current Bitbucket user. """
if self.oauth:
return self.oauth
return (self.username, self.password)
@property
def username(self):
"""Return your repository's username."""
return self._username
@username.setter
def username(self, value):
try:
if isinstance(value, basestring):
self._username = unicode(value)
except NameError:
self._username = value
if value is None:
self._username = None
@username.deleter
def username(self):
del self._username
@property
def password(self):
"""Return your repository's password."""
return self._password
@password.setter
def password(self, value):
try:
if isinstance(value, basestring):
self._password = unicode(value)
except NameError:
self._password = value
if value is None:
self._password = None
@password.deleter
def password(self):
del self._password
@property
def repo_slug(self):
"""Return your repository's slug name."""
return self._repo_slug
@repo_slug.setter
def repo_slug(self, value):
if value is None:
self._repo_slug = None
else:
try:
if isinstance(value, basestring):
value = unicode(value)
except NameError:
pass
value = value.lower()
self._repo_slug = re.sub(r'[^a-z0-9_-]+', '-', value)
@repo_slug.deleter
def repo_slug(self):
del self._repo_slug
# ========================
# = Oauth authentication =
# ========================
def authorize(self, consumer_key, consumer_secret, callback_url=None,
access_token=None, access_token_secret=None):
"""
Call this with your consumer key, secret and callback URL, to
generate a token for verification.
"""
self.consumer_key = consumer_key
self.consumer_secret = consumer_secret
if not access_token and not access_token_secret:
if not callback_url:
return (False, "Callback URL required")
oauth = OAuth1(
consumer_key,
client_secret=consumer_secret,
callback_uri=callback_url)
r = requests.post(self.url('REQUEST_TOKEN'), auth=oauth)
if r.status_code == 200:
creds = parse_qs(r.content)
self.access_token = creds.get('oauth_token')[0]
self.access_token_secret = creds.get('oauth_token_secret')[0]
else:
return (False, r.content)
else:
self.finalize_oauth(access_token, access_token_secret)
return (True, None)
def verify(self, verifier, consumer_key=None, consumer_secret=None,
access_token=None, access_token_secret=None):
"""
After converting the token into verifier, call this to finalize the
authorization.
"""
# Stored values can be supplied to verify
self.consumer_key = consumer_key or self.consumer_key
self.consumer_secret = consumer_secret or self.consumer_secret
self.access_token = access_token or self.access_token
self.access_token_secret = access_token_secret or self.access_token_secret
oauth = OAuth1(
self.consumer_key,
client_secret=self.consumer_secret,
resource_owner_key=self.access_token,
resource_owner_secret=self.access_token_secret,
verifier=verifier)
r = requests.post(self.url('ACCESS_TOKEN'), auth=oauth)
if r.status_code == 200:
creds = parse_qs(r.content)
else:
return (False, r.content)
self.finalize_oauth(creds.get('oauth_token')[0],
creds.get('oauth_token_secret')[0])
return (True, None)
def finalize_oauth(self, access_token, access_token_secret):
""" Called internally once auth process is complete. """
self.access_token = access_token
self.access_token_secret = access_token_secret
# Final OAuth object
self.oauth = OAuth1(
self.consumer_key,
client_secret=self.consumer_secret,
resource_owner_key=self.access_token,
resource_owner_secret=self.access_token_secret)
# ======================
# = High lvl functions =
# ======================
def dispatch(self, method, url, auth=None, params=None, **kwargs):
""" Send HTTP request, with given method,
credentials and data to the given URL,
and return the success and the result on success.
"""
r = Request(
method=method,
url=url,
auth=auth,
params=params,
data=kwargs)
s = Session()
resp = s.send(r.prepare())
status = resp.status_code
text = resp.text
error = resp.reason
if status >= 200 and status < 300:
if text:
try:
return (True, json.loads(text))
except TypeError:
pass
except ValueError:
pass
return (True, text)
elif status >= 300 and status < 400:
return (
False,
'Unauthorized access, '
'please check your credentials.')
elif status >= 400 and status < 500:
return (False, 'Service not found.')
elif status >= 500 and status < 600:
return (False, 'Server error.')
else:
return (False, error)
def url(self, action, **kwargs):
""" Construct and return the URL for a specific API service. """
# TODO : should be static method ?
return self.URLS['BASE'] % self.URLS[action] % kwargs
# =====================
# = General functions =
# =====================
def get_user(self, username=None):
""" Returns user informations.
If username is not defined, tries to return own informations.
"""
username = username or self.username or ''
url = self.url('GET_USER', username=username)
response = self.dispatch('GET', url)
try:
return (response[0], response[1]['user'])
except TypeError:
pass
return response
def get_tags(self, repo_slug=None):
""" Get a single repository on Bitbucket and return its tags."""
repo_slug = repo_slug or self.repo_slug or ''
url = self.url('GET_TAGS', username=self.username, repo_slug=repo_slug)
return self.dispatch('GET', url, auth=self.auth)
def get_branches(self, repo_slug=None):
""" Get a single repository on Bitbucket and return its branches."""
repo_slug = repo_slug or self.repo_slug or ''
url = self.url('GET_BRANCHES',
username=self.username,
repo_slug=repo_slug)
return self.dispatch('GET', url, auth=self.auth)
|
Sheeprider/BitBucket-api | bitbucket/deploy_key.py | DeployKey.create | python | def create(self, repo_slug=None, key=None, label=None):
key = '%s' % key
repo_slug = repo_slug or self.bitbucket.repo_slug or ''
url = self.bitbucket.url('SET_DEPLOY_KEY',
username=self.bitbucket.username,
repo_slug=repo_slug)
return self.bitbucket.dispatch('POST',
url,
auth=self.bitbucket.auth,
key=key,
label=label) | Associate an ssh key with your repo and return it. | train | https://github.com/Sheeprider/BitBucket-api/blob/be45515d506d87f14807a676f3c2f20d79674b75/bitbucket/deploy_key.py#L37-L49 | null | class DeployKey(object):
""" This class provide services-related methods to Bitbucket objects."""
def __init__(self, bitbucket):
self.bitbucket = bitbucket
self.bitbucket.URLS.update(URLS)
def all(self, repo_slug=None):
""" Get all ssh keys associated with a repo
"""
repo_slug = repo_slug or self.bitbucket.repo_slug or ''
url = self.bitbucket.url('GET_DEPLOY_KEYS',
username=self.bitbucket.username,
repo_slug=repo_slug)
return self.bitbucket.dispatch('GET', url, auth=self.bitbucket.auth)
def get(self, repo_slug=None, key_id=None):
""" Get one of the ssh keys associated with this repo
"""
repo_slug = repo_slug or self.bitbucket.repo_slug or ''
url = self.bitbucket.url('GET_DEPLOY_KEY',
key_id=key_id,
username=self.bitbucket.username,
repo_slug=repo_slug)
return self.bitbucket.dispatch('GET', url, auth=self.bitbucket.auth)
def delete(self, repo_slug=None, key_id=None):
""" Delete one of the ssh keys associated with your repo.
Please use with caution as there is NO confimation and NO undo.
"""
repo_slug = repo_slug or self.bitbucket.repo_slug or ''
url = self.bitbucket.url('DELETE_DEPLOY_KEY',
key_id=key_id,
username=self.bitbucket.username,
repo_slug=repo_slug)
return self.bitbucket.dispatch('DELETE', url, auth=self.bitbucket.auth)
|
Sheeprider/BitBucket-api | bitbucket/repository.py | Repository.public | python | def public(self, username=None):
username = username or self.bitbucket.username or ''
url = self.bitbucket.url('GET_USER', username=username)
response = self.bitbucket.dispatch('GET', url)
try:
return (response[0], response[1]['repositories'])
except TypeError:
pass
return response | Returns all public repositories from an user.
If username is not defined, tries to return own public repos. | train | https://github.com/Sheeprider/BitBucket-api/blob/be45515d506d87f14807a676f3c2f20d79674b75/bitbucket/repository.py#L50-L61 | null | class Repository(object):
""" This class provide repository-related methods to Bitbucket objects."""
def __init__(self, bitbucket):
self.bitbucket = bitbucket
self.bitbucket.URLS.update(URLS)
def _get_files_in_dir(self, repo_slug=None, dir='/'):
repo_slug = repo_slug or self.bitbucket.repo_slug or ''
dir = dir.lstrip('/')
url = self.bitbucket.url(
'GET_ARCHIVE',
username=self.bitbucket.username,
repo_slug=repo_slug,
format='src')
dir_url = url + dir
response = self.bitbucket.dispatch('GET', dir_url, auth=self.bitbucket.auth)
if response[0] and isinstance(response[1], dict):
repo_tree = response[1]
url = self.bitbucket.url(
'GET_ARCHIVE',
username=self.bitbucket.username,
repo_slug=repo_slug,
format='raw')
# Download all files in dir
for file in repo_tree['files']:
file_url = url + '/'.join((file['path'],))
response = self.bitbucket.dispatch('GET', file_url, auth=self.bitbucket.auth)
self.bitbucket.repo_tree[file['path']] = response[1]
# recursively download in dirs
for directory in repo_tree['directories']:
dir_path = '/'.join((dir, directory))
self._get_files_in_dir(repo_slug=repo_slug, dir=dir_path)
def all(self):
""" Return own repositories."""
url = self.bitbucket.url('GET_USER', username=self.bitbucket.username)
response = self.bitbucket.dispatch('GET', url, auth=self.bitbucket.auth)
try:
return (response[0], response[1]['repositories'])
except TypeError:
pass
return response
def get(self, repo_slug=None):
""" Get a single repository on Bitbucket and return it."""
repo_slug = repo_slug or self.bitbucket.repo_slug or ''
url = self.bitbucket.url('GET_REPO', username=self.bitbucket.username, repo_slug=repo_slug)
return self.bitbucket.dispatch('GET', url, auth=self.bitbucket.auth)
def create(self, repo_name, scm='git', private=True, **kwargs):
""" Creates a new repository on own Bitbucket account and return it."""
url = self.bitbucket.url('CREATE_REPO')
return self.bitbucket.dispatch('POST', url, auth=self.bitbucket.auth, name=repo_name, scm=scm, is_private=private, **kwargs)
def update(self, repo_slug=None, **kwargs):
""" Updates repository on own Bitbucket account and return it."""
repo_slug = repo_slug or self.bitbucket.repo_slug or ''
url = self.bitbucket.url('UPDATE_REPO', username=self.bitbucket.username, repo_slug=repo_slug)
return self.bitbucket.dispatch('PUT', url, auth=self.bitbucket.auth, **kwargs)
def delete(self, repo_slug=None):
""" Delete a repository on own Bitbucket account.
Please use with caution as there is NO confimation and NO undo.
"""
repo_slug = repo_slug or self.bitbucket.repo_slug or ''
url = self.bitbucket.url('DELETE_REPO', username=self.bitbucket.username, repo_slug=repo_slug)
return self.bitbucket.dispatch('DELETE', url, auth=self.bitbucket.auth)
def archive(self, repo_slug=None, format='zip', prefix=''):
""" Get one of your repositories and compress it as an archive.
Return the path of the archive.
format parameter is curently not supported.
"""
prefix = '%s'.lstrip('/') % prefix
self._get_files_in_dir(repo_slug=repo_slug, dir='/')
if self.bitbucket.repo_tree:
with NamedTemporaryFile(delete=False) as archive:
with ZipFile(archive, 'w') as zip_archive:
for name, file in self.bitbucket.repo_tree.items():
with NamedTemporaryFile(delete=False) as temp_file:
temp_file.write(file.encode('utf-8'))
zip_archive.write(temp_file.name, prefix + name)
return (True, archive.name)
return (False, 'Could not archive your project.')
|
Sheeprider/BitBucket-api | bitbucket/repository.py | Repository.all | python | def all(self):
url = self.bitbucket.url('GET_USER', username=self.bitbucket.username)
response = self.bitbucket.dispatch('GET', url, auth=self.bitbucket.auth)
try:
return (response[0], response[1]['repositories'])
except TypeError:
pass
return response | Return own repositories. | train | https://github.com/Sheeprider/BitBucket-api/blob/be45515d506d87f14807a676f3c2f20d79674b75/bitbucket/repository.py#L63-L71 | null | class Repository(object):
""" This class provide repository-related methods to Bitbucket objects."""
def __init__(self, bitbucket):
self.bitbucket = bitbucket
self.bitbucket.URLS.update(URLS)
def _get_files_in_dir(self, repo_slug=None, dir='/'):
repo_slug = repo_slug or self.bitbucket.repo_slug or ''
dir = dir.lstrip('/')
url = self.bitbucket.url(
'GET_ARCHIVE',
username=self.bitbucket.username,
repo_slug=repo_slug,
format='src')
dir_url = url + dir
response = self.bitbucket.dispatch('GET', dir_url, auth=self.bitbucket.auth)
if response[0] and isinstance(response[1], dict):
repo_tree = response[1]
url = self.bitbucket.url(
'GET_ARCHIVE',
username=self.bitbucket.username,
repo_slug=repo_slug,
format='raw')
# Download all files in dir
for file in repo_tree['files']:
file_url = url + '/'.join((file['path'],))
response = self.bitbucket.dispatch('GET', file_url, auth=self.bitbucket.auth)
self.bitbucket.repo_tree[file['path']] = response[1]
# recursively download in dirs
for directory in repo_tree['directories']:
dir_path = '/'.join((dir, directory))
self._get_files_in_dir(repo_slug=repo_slug, dir=dir_path)
def public(self, username=None):
""" Returns all public repositories from an user.
If username is not defined, tries to return own public repos.
"""
username = username or self.bitbucket.username or ''
url = self.bitbucket.url('GET_USER', username=username)
response = self.bitbucket.dispatch('GET', url)
try:
return (response[0], response[1]['repositories'])
except TypeError:
pass
return response
def get(self, repo_slug=None):
""" Get a single repository on Bitbucket and return it."""
repo_slug = repo_slug or self.bitbucket.repo_slug or ''
url = self.bitbucket.url('GET_REPO', username=self.bitbucket.username, repo_slug=repo_slug)
return self.bitbucket.dispatch('GET', url, auth=self.bitbucket.auth)
def create(self, repo_name, scm='git', private=True, **kwargs):
""" Creates a new repository on own Bitbucket account and return it."""
url = self.bitbucket.url('CREATE_REPO')
return self.bitbucket.dispatch('POST', url, auth=self.bitbucket.auth, name=repo_name, scm=scm, is_private=private, **kwargs)
def update(self, repo_slug=None, **kwargs):
""" Updates repository on own Bitbucket account and return it."""
repo_slug = repo_slug or self.bitbucket.repo_slug or ''
url = self.bitbucket.url('UPDATE_REPO', username=self.bitbucket.username, repo_slug=repo_slug)
return self.bitbucket.dispatch('PUT', url, auth=self.bitbucket.auth, **kwargs)
def delete(self, repo_slug=None):
""" Delete a repository on own Bitbucket account.
Please use with caution as there is NO confimation and NO undo.
"""
repo_slug = repo_slug or self.bitbucket.repo_slug or ''
url = self.bitbucket.url('DELETE_REPO', username=self.bitbucket.username, repo_slug=repo_slug)
return self.bitbucket.dispatch('DELETE', url, auth=self.bitbucket.auth)
def archive(self, repo_slug=None, format='zip', prefix=''):
""" Get one of your repositories and compress it as an archive.
Return the path of the archive.
format parameter is curently not supported.
"""
prefix = '%s'.lstrip('/') % prefix
self._get_files_in_dir(repo_slug=repo_slug, dir='/')
if self.bitbucket.repo_tree:
with NamedTemporaryFile(delete=False) as archive:
with ZipFile(archive, 'w') as zip_archive:
for name, file in self.bitbucket.repo_tree.items():
with NamedTemporaryFile(delete=False) as temp_file:
temp_file.write(file.encode('utf-8'))
zip_archive.write(temp_file.name, prefix + name)
return (True, archive.name)
return (False, 'Could not archive your project.')
|
Sheeprider/BitBucket-api | bitbucket/repository.py | Repository.create | python | def create(self, repo_name, scm='git', private=True, **kwargs):
url = self.bitbucket.url('CREATE_REPO')
return self.bitbucket.dispatch('POST', url, auth=self.bitbucket.auth, name=repo_name, scm=scm, is_private=private, **kwargs) | Creates a new repository on own Bitbucket account and return it. | train | https://github.com/Sheeprider/BitBucket-api/blob/be45515d506d87f14807a676f3c2f20d79674b75/bitbucket/repository.py#L79-L82 | null | class Repository(object):
""" This class provide repository-related methods to Bitbucket objects."""
def __init__(self, bitbucket):
self.bitbucket = bitbucket
self.bitbucket.URLS.update(URLS)
def _get_files_in_dir(self, repo_slug=None, dir='/'):
repo_slug = repo_slug or self.bitbucket.repo_slug or ''
dir = dir.lstrip('/')
url = self.bitbucket.url(
'GET_ARCHIVE',
username=self.bitbucket.username,
repo_slug=repo_slug,
format='src')
dir_url = url + dir
response = self.bitbucket.dispatch('GET', dir_url, auth=self.bitbucket.auth)
if response[0] and isinstance(response[1], dict):
repo_tree = response[1]
url = self.bitbucket.url(
'GET_ARCHIVE',
username=self.bitbucket.username,
repo_slug=repo_slug,
format='raw')
# Download all files in dir
for file in repo_tree['files']:
file_url = url + '/'.join((file['path'],))
response = self.bitbucket.dispatch('GET', file_url, auth=self.bitbucket.auth)
self.bitbucket.repo_tree[file['path']] = response[1]
# recursively download in dirs
for directory in repo_tree['directories']:
dir_path = '/'.join((dir, directory))
self._get_files_in_dir(repo_slug=repo_slug, dir=dir_path)
def public(self, username=None):
""" Returns all public repositories from an user.
If username is not defined, tries to return own public repos.
"""
username = username or self.bitbucket.username or ''
url = self.bitbucket.url('GET_USER', username=username)
response = self.bitbucket.dispatch('GET', url)
try:
return (response[0], response[1]['repositories'])
except TypeError:
pass
return response
def all(self):
""" Return own repositories."""
url = self.bitbucket.url('GET_USER', username=self.bitbucket.username)
response = self.bitbucket.dispatch('GET', url, auth=self.bitbucket.auth)
try:
return (response[0], response[1]['repositories'])
except TypeError:
pass
return response
def get(self, repo_slug=None):
""" Get a single repository on Bitbucket and return it."""
repo_slug = repo_slug or self.bitbucket.repo_slug or ''
url = self.bitbucket.url('GET_REPO', username=self.bitbucket.username, repo_slug=repo_slug)
return self.bitbucket.dispatch('GET', url, auth=self.bitbucket.auth)
def update(self, repo_slug=None, **kwargs):
""" Updates repository on own Bitbucket account and return it."""
repo_slug = repo_slug or self.bitbucket.repo_slug or ''
url = self.bitbucket.url('UPDATE_REPO', username=self.bitbucket.username, repo_slug=repo_slug)
return self.bitbucket.dispatch('PUT', url, auth=self.bitbucket.auth, **kwargs)
def delete(self, repo_slug=None):
""" Delete a repository on own Bitbucket account.
Please use with caution as there is NO confirmation and NO undo.
"""
repo_slug = repo_slug or self.bitbucket.repo_slug or ''
url = self.bitbucket.url('DELETE_REPO', username=self.bitbucket.username, repo_slug=repo_slug)
return self.bitbucket.dispatch('DELETE', url, auth=self.bitbucket.auth)
def archive(self, repo_slug=None, format='zip', prefix=''):
""" Get one of your repositories and compress it as an archive.
Return the path of the archive.
format parameter is currently not supported.
"""
prefix = '%s'.lstrip('/') % prefix
self._get_files_in_dir(repo_slug=repo_slug, dir='/')
if self.bitbucket.repo_tree:
with NamedTemporaryFile(delete=False) as archive:
with ZipFile(archive, 'w') as zip_archive:
for name, file in self.bitbucket.repo_tree.items():
with NamedTemporaryFile(delete=False) as temp_file:
temp_file.write(file.encode('utf-8'))
zip_archive.write(temp_file.name, prefix + name)
return (True, archive.name)
return (False, 'Could not archive your project.')
|
Sheeprider/BitBucket-api | bitbucket/repository.py | Repository.archive | python | def archive(self, repo_slug=None, format='zip', prefix=''):
prefix = '%s'.lstrip('/') % prefix
self._get_files_in_dir(repo_slug=repo_slug, dir='/')
if self.bitbucket.repo_tree:
with NamedTemporaryFile(delete=False) as archive:
with ZipFile(archive, 'w') as zip_archive:
for name, file in self.bitbucket.repo_tree.items():
with NamedTemporaryFile(delete=False) as temp_file:
temp_file.write(file.encode('utf-8'))
zip_archive.write(temp_file.name, prefix + name)
return (True, archive.name)
return (False, 'Could not archive your project.') | Get one of your repositories and compress it as an archive.
Return the path of the archive.
format parameter is currently not supported. | train | https://github.com/Sheeprider/BitBucket-api/blob/be45515d506d87f14807a676f3c2f20d79674b75/bitbucket/repository.py#L98-L114 | [
"def _get_files_in_dir(self, repo_slug=None, dir='/'):\n repo_slug = repo_slug or self.bitbucket.repo_slug or ''\n dir = dir.lstrip('/')\n url = self.bitbucket.url(\n 'GET_ARCHIVE',\n username=self.bitbucket.username,\n repo_slug=repo_slug,\n format='src')\n dir_url = url + dir\n response = self.bitbucket.dispatch('GET', dir_url, auth=self.bitbucket.auth)\n if response[0] and isinstance(response[1], dict):\n repo_tree = response[1]\n url = self.bitbucket.url(\n 'GET_ARCHIVE',\n username=self.bitbucket.username,\n repo_slug=repo_slug,\n format='raw')\n # Download all files in dir\n for file in repo_tree['files']:\n file_url = url + '/'.join((file['path'],))\n response = self.bitbucket.dispatch('GET', file_url, auth=self.bitbucket.auth)\n self.bitbucket.repo_tree[file['path']] = response[1]\n # recursively download in dirs\n for directory in repo_tree['directories']:\n dir_path = '/'.join((dir, directory))\n self._get_files_in_dir(repo_slug=repo_slug, dir=dir_path)\n"
] | class Repository(object):
""" This class provides repository-related methods to Bitbucket objects."""
def __init__(self, bitbucket):
self.bitbucket = bitbucket
self.bitbucket.URLS.update(URLS)
def _get_files_in_dir(self, repo_slug=None, dir='/'):
repo_slug = repo_slug or self.bitbucket.repo_slug or ''
dir = dir.lstrip('/')
url = self.bitbucket.url(
'GET_ARCHIVE',
username=self.bitbucket.username,
repo_slug=repo_slug,
format='src')
dir_url = url + dir
response = self.bitbucket.dispatch('GET', dir_url, auth=self.bitbucket.auth)
if response[0] and isinstance(response[1], dict):
repo_tree = response[1]
url = self.bitbucket.url(
'GET_ARCHIVE',
username=self.bitbucket.username,
repo_slug=repo_slug,
format='raw')
# Download all files in dir
for file in repo_tree['files']:
file_url = url + '/'.join((file['path'],))
response = self.bitbucket.dispatch('GET', file_url, auth=self.bitbucket.auth)
self.bitbucket.repo_tree[file['path']] = response[1]
# recursively download in dirs
for directory in repo_tree['directories']:
dir_path = '/'.join((dir, directory))
self._get_files_in_dir(repo_slug=repo_slug, dir=dir_path)
def public(self, username=None):
""" Returns all public repositories from a user.
If username is not defined, tries to return own public repos.
"""
username = username or self.bitbucket.username or ''
url = self.bitbucket.url('GET_USER', username=username)
response = self.bitbucket.dispatch('GET', url)
try:
return (response[0], response[1]['repositories'])
except TypeError:
pass
return response
def all(self):
""" Return own repositories."""
url = self.bitbucket.url('GET_USER', username=self.bitbucket.username)
response = self.bitbucket.dispatch('GET', url, auth=self.bitbucket.auth)
try:
return (response[0], response[1]['repositories'])
except TypeError:
pass
return response
def get(self, repo_slug=None):
""" Get a single repository on Bitbucket and return it."""
repo_slug = repo_slug or self.bitbucket.repo_slug or ''
url = self.bitbucket.url('GET_REPO', username=self.bitbucket.username, repo_slug=repo_slug)
return self.bitbucket.dispatch('GET', url, auth=self.bitbucket.auth)
def create(self, repo_name, scm='git', private=True, **kwargs):
""" Creates a new repository on own Bitbucket account and return it."""
url = self.bitbucket.url('CREATE_REPO')
return self.bitbucket.dispatch('POST', url, auth=self.bitbucket.auth, name=repo_name, scm=scm, is_private=private, **kwargs)
def update(self, repo_slug=None, **kwargs):
""" Updates repository on own Bitbucket account and return it."""
repo_slug = repo_slug or self.bitbucket.repo_slug or ''
url = self.bitbucket.url('UPDATE_REPO', username=self.bitbucket.username, repo_slug=repo_slug)
return self.bitbucket.dispatch('PUT', url, auth=self.bitbucket.auth, **kwargs)
def delete(self, repo_slug=None):
""" Delete a repository on own Bitbucket account.
Please use with caution as there is NO confirmation and NO undo.
"""
repo_slug = repo_slug or self.bitbucket.repo_slug or ''
url = self.bitbucket.url('DELETE_REPO', username=self.bitbucket.username, repo_slug=repo_slug)
return self.bitbucket.dispatch('DELETE', url, auth=self.bitbucket.auth)
|
Sheeprider/BitBucket-api | bitbucket/issue_comment.py | IssueComment.create | python | def create(self, issue_id=None, repo_slug=None, **kwargs):
issue_id = issue_id or self.issue_id
repo_slug = repo_slug or self.bitbucket.repo_slug or ''
url = self.bitbucket.url('CREATE_COMMENT',
username=self.bitbucket.username,
repo_slug=repo_slug,
issue_id=issue_id)
return self.bitbucket.dispatch('POST', url, auth=self.bitbucket.auth, **kwargs) | Add an issue comment to one of your repositories.
Each issue comment requires only the content data field;
the system autopopulates the rest. | train | https://github.com/Sheeprider/BitBucket-api/blob/be45515d506d87f14807a676f3c2f20d79674b75/bitbucket/issue_comment.py#L44-L55 | null | class IssueComment(object):
""" This class provides issue-comment-related methods to Bitbucket objects."""
def __init__(self, issue):
self.issue = issue
self.bitbucket = self.issue.bitbucket
self.bitbucket.URLS.update(URLS)
self.issue_id = issue.issue_id
def all(self, issue_id=None, repo_slug=None):
""" Get issue comments from one of your repositories.
"""
issue_id = issue_id or self.issue_id
repo_slug = repo_slug or self.bitbucket.repo_slug or ''
url = self.bitbucket.url('GET_COMMENTS',
username=self.bitbucket.username,
repo_slug=repo_slug,
issue_id=issue_id)
return self.bitbucket.dispatch('GET', url, auth=self.bitbucket.auth)
def get(self, comment_id, issue_id=None, repo_slug=None):
""" Get an issue from one of your repositories.
"""
issue_id = issue_id or self.issue_id
repo_slug = repo_slug or self.bitbucket.repo_slug or ''
url = self.bitbucket.url('GET_COMMENT',
username=self.bitbucket.username,
repo_slug=repo_slug,
issue_id=issue_id,
comment_id=comment_id)
return self.bitbucket.dispatch('GET', url, auth=self.bitbucket.auth)
def update(self, comment_id, issue_id=None, repo_slug=None, **kwargs):
""" Update an issue comment in one of your repositories.
Each issue comment requires only the content data field;
the system autopopulates the rest.
"""
issue_id = issue_id or self.issue_id
repo_slug = repo_slug or self.bitbucket.repo_slug or ''
url = self.bitbucket.url('UPDATE_COMMENT',
username=self.bitbucket.username,
repo_slug=repo_slug,
issue_id=issue_id,
comment_id=comment_id)
return self.bitbucket.dispatch('PUT', url, auth=self.bitbucket.auth, **kwargs)
def delete(self, comment_id, issue_id=None, repo_slug=None):
""" Delete an issue from one of your repositories.
"""
issue_id = issue_id or self.issue_id
repo_slug = repo_slug or self.bitbucket.repo_slug or ''
url = self.bitbucket.url('DELETE_COMMENT',
username=self.bitbucket.username,
repo_slug=repo_slug,
issue_id=issue_id,
comment_id=comment_id)
return self.bitbucket.dispatch('DELETE', url, auth=self.bitbucket.auth)
|
Sheeprider/BitBucket-api | bitbucket/issue_comment.py | IssueComment.delete | python | def delete(self, comment_id, issue_id=None, repo_slug=None):
issue_id = issue_id or self.issue_id
repo_slug = repo_slug or self.bitbucket.repo_slug or ''
url = self.bitbucket.url('DELETE_COMMENT',
username=self.bitbucket.username,
repo_slug=repo_slug,
issue_id=issue_id,
comment_id=comment_id)
return self.bitbucket.dispatch('DELETE', url, auth=self.bitbucket.auth) | Delete an issue from one of your repositories. | train | https://github.com/Sheeprider/BitBucket-api/blob/be45515d506d87f14807a676f3c2f20d79674b75/bitbucket/issue_comment.py#L71-L81 | null | class IssueComment(object):
""" This class provides issue-comment-related methods to Bitbucket objects."""
def __init__(self, issue):
self.issue = issue
self.bitbucket = self.issue.bitbucket
self.bitbucket.URLS.update(URLS)
self.issue_id = issue.issue_id
def all(self, issue_id=None, repo_slug=None):
""" Get issue comments from one of your repositories.
"""
issue_id = issue_id or self.issue_id
repo_slug = repo_slug or self.bitbucket.repo_slug or ''
url = self.bitbucket.url('GET_COMMENTS',
username=self.bitbucket.username,
repo_slug=repo_slug,
issue_id=issue_id)
return self.bitbucket.dispatch('GET', url, auth=self.bitbucket.auth)
def get(self, comment_id, issue_id=None, repo_slug=None):
""" Get an issue from one of your repositories.
"""
issue_id = issue_id or self.issue_id
repo_slug = repo_slug or self.bitbucket.repo_slug or ''
url = self.bitbucket.url('GET_COMMENT',
username=self.bitbucket.username,
repo_slug=repo_slug,
issue_id=issue_id,
comment_id=comment_id)
return self.bitbucket.dispatch('GET', url, auth=self.bitbucket.auth)
def create(self, issue_id=None, repo_slug=None, **kwargs):
""" Add an issue comment to one of your repositories.
Each issue comment require only the content data field
the system autopopulate the rest.
"""
issue_id = issue_id or self.issue_id
repo_slug = repo_slug or self.bitbucket.repo_slug or ''
url = self.bitbucket.url('CREATE_COMMENT',
username=self.bitbucket.username,
repo_slug=repo_slug,
issue_id=issue_id)
return self.bitbucket.dispatch('POST', url, auth=self.bitbucket.auth, **kwargs)
def update(self, comment_id, issue_id=None, repo_slug=None, **kwargs):
""" Update an issue comment in one of your repositories.
Each issue comment requires only the content data field;
the system autopopulates the rest.
"""
issue_id = issue_id or self.issue_id
repo_slug = repo_slug or self.bitbucket.repo_slug or ''
url = self.bitbucket.url('UPDATE_COMMENT',
username=self.bitbucket.username,
repo_slug=repo_slug,
issue_id=issue_id,
comment_id=comment_id)
return self.bitbucket.dispatch('PUT', url, auth=self.bitbucket.auth, **kwargs)
|
Sheeprider/BitBucket-api | bitbucket/ssh.py | SSH.all | python | def all(self):
url = self.bitbucket.url('GET_SSH_KEYS')
return self.bitbucket.dispatch('GET', url, auth=self.bitbucket.auth) | Get all ssh keys associated with your account. | train | https://github.com/Sheeprider/BitBucket-api/blob/be45515d506d87f14807a676f3c2f20d79674b75/bitbucket/ssh.py#L18-L22 | null | class SSH(object):
""" This class provides ssh-related methods to Bitbucket objects."""
def __init__(self, bitbucket):
self.bitbucket = bitbucket
self.bitbucket.URLS.update(URLS)
def get(self, key_id=None):
""" Get one of the ssh keys associated with your account.
"""
url = self.bitbucket.url('GET_SSH_KEY', key_id=key_id)
return self.bitbucket.dispatch('GET', url, auth=self.bitbucket.auth)
def create(self, key=None, label=None):
""" Associate an ssh key with your account and return it.
"""
key = '%s' % key
url = self.bitbucket.url('SET_SSH_KEY')
return self.bitbucket.dispatch('POST', url, auth=self.bitbucket.auth, key=key, label=label)
def delete(self, key_id=None):
""" Delete one of the ssh keys associated with your account.
Please use with caution as there is NO confirmation and NO undo.
"""
url = self.bitbucket.url('DELETE_SSH_KEY', key_id=key_id)
return self.bitbucket.dispatch('DELETE', url, auth=self.bitbucket.auth)
|
Sheeprider/BitBucket-api | bitbucket/ssh.py | SSH.get | python | def get(self, key_id=None):
url = self.bitbucket.url('GET_SSH_KEY', key_id=key_id)
return self.bitbucket.dispatch('GET', url, auth=self.bitbucket.auth) | Get one of the ssh keys associated with your account. | train | https://github.com/Sheeprider/BitBucket-api/blob/be45515d506d87f14807a676f3c2f20d79674b75/bitbucket/ssh.py#L24-L28 | null | class SSH(object):
""" This class provides ssh-related methods to Bitbucket objects."""
def __init__(self, bitbucket):
self.bitbucket = bitbucket
self.bitbucket.URLS.update(URLS)
def all(self):
""" Get all ssh keys associated with your account.
"""
url = self.bitbucket.url('GET_SSH_KEYS')
return self.bitbucket.dispatch('GET', url, auth=self.bitbucket.auth)
def create(self, key=None, label=None):
""" Associate an ssh key with your account and return it.
"""
key = '%s' % key
url = self.bitbucket.url('SET_SSH_KEY')
return self.bitbucket.dispatch('POST', url, auth=self.bitbucket.auth, key=key, label=label)
def delete(self, key_id=None):
""" Delete one of the ssh keys associated with your account.
Please use with caution as there is NO confirmation and NO undo.
"""
url = self.bitbucket.url('DELETE_SSH_KEY', key_id=key_id)
return self.bitbucket.dispatch('DELETE', url, auth=self.bitbucket.auth)
|
Sheeprider/BitBucket-api | bitbucket/ssh.py | SSH.create | python | def create(self, key=None, label=None):
key = '%s' % key
url = self.bitbucket.url('SET_SSH_KEY')
return self.bitbucket.dispatch('POST', url, auth=self.bitbucket.auth, key=key, label=label) | Associate an ssh key with your account and return it. | train | https://github.com/Sheeprider/BitBucket-api/blob/be45515d506d87f14807a676f3c2f20d79674b75/bitbucket/ssh.py#L30-L35 | null | class SSH(object):
""" This class provides ssh-related methods to Bitbucket objects."""
def __init__(self, bitbucket):
self.bitbucket = bitbucket
self.bitbucket.URLS.update(URLS)
def all(self):
""" Get all ssh keys associated with your account.
"""
url = self.bitbucket.url('GET_SSH_KEYS')
return self.bitbucket.dispatch('GET', url, auth=self.bitbucket.auth)
def get(self, key_id=None):
""" Get one of the ssh keys associated with your account.
"""
url = self.bitbucket.url('GET_SSH_KEY', key_id=key_id)
return self.bitbucket.dispatch('GET', url, auth=self.bitbucket.auth)
def delete(self, key_id=None):
""" Delete one of the ssh keys associated with your account.
Please use with caution as there is NO confirmation and NO undo.
"""
url = self.bitbucket.url('DELETE_SSH_KEY', key_id=key_id)
return self.bitbucket.dispatch('DELETE', url, auth=self.bitbucket.auth)
|
Sheeprider/BitBucket-api | bitbucket/ssh.py | SSH.delete | python | def delete(self, key_id=None):
url = self.bitbucket.url('DELETE_SSH_KEY', key_id=key_id)
return self.bitbucket.dispatch('DELETE', url, auth=self.bitbucket.auth) | Delete one of the ssh keys associated with your account.
Please use with caution as there is NO confirmation and NO undo. | train | https://github.com/Sheeprider/BitBucket-api/blob/be45515d506d87f14807a676f3c2f20d79674b75/bitbucket/ssh.py#L37-L42 | null | class SSH(object):
""" This class provides ssh-related methods to Bitbucket objects."""
def __init__(self, bitbucket):
self.bitbucket = bitbucket
self.bitbucket.URLS.update(URLS)
def all(self):
""" Get all ssh keys associated with your account.
"""
url = self.bitbucket.url('GET_SSH_KEYS')
return self.bitbucket.dispatch('GET', url, auth=self.bitbucket.auth)
def get(self, key_id=None):
""" Get one of the ssh keys associated with your account.
"""
url = self.bitbucket.url('GET_SSH_KEY', key_id=key_id)
return self.bitbucket.dispatch('GET', url, auth=self.bitbucket.auth)
def create(self, key=None, label=None):
""" Associate an ssh key with your account and return it.
"""
key = '%s' % key
url = self.bitbucket.url('SET_SSH_KEY')
return self.bitbucket.dispatch('POST', url, auth=self.bitbucket.auth, key=key, label=label)
|
dmcc/PyStanfordDependencies | StanfordDependencies/StanfordDependencies.py | StanfordDependencies.convert_trees | python | def convert_trees(self, ptb_trees, representation='basic', universal=True,
include_punct=True, include_erased=False, **kwargs):
kwargs.update(representation=representation, universal=universal,
include_punct=include_punct,
include_erased=include_erased)
return Corpus(self.convert_tree(ptb_tree, **kwargs)
for ptb_tree in ptb_trees) | Convert a list of Penn Treebank formatted strings (ptb_trees)
into Stanford Dependencies. The dependencies are represented
as a list of sentences (CoNLL.Corpus), where each sentence
(CoNLL.Sentence) is itself a list of CoNLL.Token objects.
Currently supported representations are 'basic', 'collapsed',
'CCprocessed', and 'collapsedTree' which behave the same as they do
in the CoreNLP command line tools. (note that in the online
CoreNLP demo, 'collapsed' is called 'enhanced')
Additional arguments: universal (if True, use universal
dependencies if they're available), include_punct (if False,
punctuation tokens will not be included), and include_erased
(if False and your representation might erase tokens, those
tokens will be omitted from the output).
See documentation on your backend to see if it supports
further options. | train | https://github.com/dmcc/PyStanfordDependencies/blob/43d8f38a19e40087f273330087918c87df6d4d8f/StanfordDependencies/StanfordDependencies.py#L92-L116 | null | class StanfordDependencies:
"""Abstract base class for converting Penn Treebank trees to Stanford
Dependencies. To actually use this, you'll want to instantiate one
of the backends. The easiest way to do this is via the get_instance()
helper method.
If you do not currently have the appropriate Java jar files, the
download_if_missing flag in the constructor will help you fetch them.
In this case, you can set version to a string with the CoreNLP version
you'd like. If unset, it will default to DEFAULT_CORENLP_VERSION.
Subclasses should (at minimum) override the convert_tree method. They
may also want to override convert_trees if they require batch
operation. They may also add their own custom keyword arguments to
__init__(), convert_tree(), and convert_trees()."""
__metaclass__ = ABCMeta
def __init__(self, jar_filename=None, download_if_missing=False,
version=None):
"""jar_filename should be the path to a Java jar file with
classfiles from Stanford CoreNLP or Stanford Parser.
If download_if_missing is True, it will automatically download
a jar file and store it locally. By default it will use
DEFAULT_CORENLP_VERSION but will use the version flag if
that argument is specified."""
if not (jar_filename or version is not None or download_if_missing):
raise ValueError("Must set either jar_filename, version, "
"or download_if_missing to True.")
self.jar_filename = jar_filename
if not self.jar_filename:
if version is None:
version = DEFAULT_CORENLP_VERSION
filename = 'stanford-corenlp-%s.jar' % version
self.jar_filename = self.setup_and_get_default_path(filename)
if download_if_missing:
self.download_if_missing(version)
@abstractmethod
def convert_tree(self, ptb_tree, representation='basic', **kwargs):
"""Converts a single Penn Treebank format tree to Stanford
Dependencies. With some backends, this can be considerably
slower than using convert_trees, so consider that if you're
doing a batch conversion. See convert_trees for more details
and a listing of possible kwargs."""
def setup_and_get_default_path(self, jar_base_filename):
"""Determine the user-specific install path for the Stanford
Dependencies jar if the jar_url is not specified and ensure that
it is writable (that is, make sure the directory exists). Returns
the full path for where the jar file should be installed."""
import os
import errno
install_dir = os.path.expanduser(INSTALL_DIR)
try:
os.makedirs(install_dir)
except OSError as ose:
if ose.errno != errno.EEXIST:
raise ose
jar_filename = os.path.join(install_dir, jar_base_filename)
return jar_filename
def download_if_missing(self, version=None, verbose=True):
"""Download the jar for version into the jar_filename specified
in the constructor. Will not overwrite jar_filename if it already
exists. version defaults to DEFAULT_CORENLP_VERSION (ideally the
latest but we can't guarantee that since PyStanfordDependencies
is distributed separately)."""
if os.path.exists(self.jar_filename):
return
jar_url = self.get_jar_url(version)
if verbose:
print("Downloading %r -> %r" % (jar_url, self.jar_filename))
opener = ErrorAwareURLOpener()
opener.retrieve(jar_url, filename=self.jar_filename)
@staticmethod
def _raise_on_bad_representation(representation):
"""Ensure that representation is a known Stanford Dependency
representation (raises a ValueError if the representation is
invalid)."""
if representation not in REPRESENTATIONS:
repr_desc = ', '.join(map(repr, REPRESENTATIONS))
raise ValueError("Unknown representation: %r (should be one "
"of %s)" % (representation, repr_desc))
@staticmethod
def _raise_on_bad_input(ptb_tree):
"""Ensure that ptb_tree is a valid Penn Treebank datatype or
raises a TypeError. Currently, this requires that ptb_tree is
a str or basestring (depending on Python version)."""
if not isinstance(ptb_tree, string_type):
raise TypeError("ptb_tree is not a string: %r" % ptb_tree)
@staticmethod
def _raise_on_bad_jar_filename(jar_filename):
"""Ensure that jar_filename is a valid path to a jar file."""
if jar_filename is None:
return
if not isinstance(jar_filename, string_type):
raise TypeError("jar_filename is not a string: %r" % jar_filename)
if not os.path.exists(jar_filename):
raise ValueError("jar_filename does not exist: %r" % jar_filename)
@staticmethod
def get_jar_url(version=None):
"""Get the URL to a Stanford CoreNLP jar file with a specific
version. These jars come from Maven since the Maven version is
smaller than the full CoreNLP distributions. Defaults to
DEFAULT_CORENLP_VERSION."""
if version is None:
version = DEFAULT_CORENLP_VERSION
try:
string_type = basestring
except NameError:
string_type = str
if not isinstance(version, string_type):
raise TypeError("Version must be a string or None (got %r)." %
version)
jar_filename = 'stanford-corenlp-%s.jar' % version
return 'http://search.maven.org/remotecontent?filepath=' + \
'edu/stanford/nlp/stanford-corenlp/%s/%s' % (version,
jar_filename)
@staticmethod
def get_instance(jar_filename=None, version=None,
download_if_missing=True, backend='jpype',
**extra_args):
"""This is the typical mechanism of constructing a
StanfordDependencies instance. The backend parameter determines
which backend to load (currently can be 'subprocess' or 'jpype').
To determine which jar file is used, you must specify
jar_filename, download_if_missing=True, and/or version.
- If jar_filename is specified, that jar is used and the other two
flags are ignored.
- Otherwise, if download_if_missing, we will download a jar file
from the Maven repository. This jar file will be the latest
known version of CoreNLP unless the version flag is specified
(e.g., version='3.4.1') in which case we'll attempt to download
and use that version. Once downloaded, it will be stored in
your home directory and not downloaded again.
- If jar_filename and download_if_missing are not specified,
version must be set to a version previously downloaded in the
above step.
All remaining keyword arguments are passed on to the
StanfordDependencies backend constructor.
If the above options are confusing, don't panic! You can leave
them all blank -- get_instance() is designed to provide the best
and latest available conversion settings by default."""
StanfordDependencies._raise_on_bad_jar_filename(jar_filename)
extra_args.update(jar_filename=jar_filename,
download_if_missing=download_if_missing,
version=version)
if backend == 'jpype':
try:
from .JPypeBackend import JPypeBackend
return JPypeBackend(**extra_args)
except ImportError:
warnings.warn('Error importing JPypeBackend, '
'falling back to SubprocessBackend.')
backend = 'subprocess'
except RuntimeError as r:
warnings.warn('RuntimeError with JPypeBackend (%s), '
'falling back to SubprocessBackend.' % r[0])
backend = 'subprocess'
except TypeError as t:
warnings.warn('TypeError with JPypeBackend (%s), '
'falling back to SubprocessBackend.' % t[0])
backend = 'subprocess'
if backend == 'subprocess':
from .SubprocessBackend import SubprocessBackend
return SubprocessBackend(**extra_args)
raise ValueError("Unknown backend: %r (known backends: "
"'subprocess' and 'jpype')" % backend)
|
dmcc/PyStanfordDependencies | StanfordDependencies/StanfordDependencies.py | StanfordDependencies.setup_and_get_default_path | python | def setup_and_get_default_path(self, jar_base_filename):
import os
import errno
install_dir = os.path.expanduser(INSTALL_DIR)
try:
os.makedirs(install_dir)
except OSError as ose:
if ose.errno != errno.EEXIST:
raise ose
jar_filename = os.path.join(install_dir, jar_base_filename)
return jar_filename | Determine the user-specific install path for the Stanford
Dependencies jar if the jar_url is not specified and ensure that
it is writable (that is, make sure the directory exists). Returns
the full path for where the jar file should be installed. | train | https://github.com/dmcc/PyStanfordDependencies/blob/43d8f38a19e40087f273330087918c87df6d4d8f/StanfordDependencies/StanfordDependencies.py#L126-L140 | null | class StanfordDependencies:
"""Abstract base class for converting Penn Treebank trees to Stanford
Dependencies. To actually use this, you'll want to instantiate one
of the backends. The easiest way to do this is via the get_instance()
helper method.
If you do not currently have the appropriate Java jar files, the
download_if_missing flag in the constructor will help you fetch them.
In this case, you can set version to a string with the CoreNLP version
you'd like. If unset, it will default to DEFAULT_CORENLP_VERSION.
Subclasses should (at minimum) override the convert_tree method. They
may also want to override convert_trees if they require batch
operation. They may also add their own custom keyword arguments to
__init__(), convert_tree(), and convert_trees()."""
__metaclass__ = ABCMeta
def __init__(self, jar_filename=None, download_if_missing=False,
version=None):
"""jar_filename should be the path to a Java jar file with
classfiles from Stanford CoreNLP or Stanford Parser.
If download_if_missing is True, it will automatically download
a jar file and store it locally. By default it will use
DEFAULT_CORENLP_VERSION but will use the version flag if
that argument is specified."""
if not (jar_filename or version is not None or download_if_missing):
raise ValueError("Must set either jar_filename, version, "
"or download_if_missing to True.")
self.jar_filename = jar_filename
if not self.jar_filename:
if version is None:
version = DEFAULT_CORENLP_VERSION
filename = 'stanford-corenlp-%s.jar' % version
self.jar_filename = self.setup_and_get_default_path(filename)
if download_if_missing:
self.download_if_missing(version)
def convert_trees(self, ptb_trees, representation='basic', universal=True,
include_punct=True, include_erased=False, **kwargs):
"""Convert a list of Penn Treebank formatted strings (ptb_trees)
into Stanford Dependencies. The dependencies are represented
as a list of sentences (CoNLL.Corpus), where each sentence
(CoNLL.Sentence) is itself a list of CoNLL.Token objects.
Currently supported representations are 'basic', 'collapsed',
'CCprocessed', and 'collapsedTree' which behave the same as they do
in the CoreNLP command line tools. (note that in the online
CoreNLP demo, 'collapsed' is called 'enhanced')
Additional arguments: universal (if True, use universal
dependencies if they're available), include_punct (if False,
punctuation tokens will not be included), and include_erased
(if False and your representation might erase tokens, those
tokens will be omitted from the output).
See documentation on your backend to see if it supports
further options."""
kwargs.update(representation=representation, universal=universal,
include_punct=include_punct,
include_erased=include_erased)
return Corpus(self.convert_tree(ptb_tree, **kwargs)
for ptb_tree in ptb_trees)
@abstractmethod
def convert_tree(self, ptb_tree, representation='basic', **kwargs):
"""Converts a single Penn Treebank format tree to Stanford
Dependencies. With some backends, this can be considerably
slower than using convert_trees, so consider that if you're
doing a batch conversion. See convert_trees for more details
and a listing of possible kwargs."""
def download_if_missing(self, version=None, verbose=True):
"""Download the jar for version into the jar_filename specified
in the constructor. Will not overwrite jar_filename if it already
exists. version defaults to DEFAULT_CORENLP_VERSION (ideally the
latest but we can't guarantee that since PyStanfordDependencies
is distributed separately)."""
if os.path.exists(self.jar_filename):
return
jar_url = self.get_jar_url(version)
if verbose:
print("Downloading %r -> %r" % (jar_url, self.jar_filename))
opener = ErrorAwareURLOpener()
opener.retrieve(jar_url, filename=self.jar_filename)
@staticmethod
def _raise_on_bad_representation(representation):
"""Ensure that representation is a known Stanford Dependency
representation (raises a ValueError if the representation is
invalid)."""
if representation not in REPRESENTATIONS:
repr_desc = ', '.join(map(repr, REPRESENTATIONS))
raise ValueError("Unknown representation: %r (should be one "
"of %s)" % (representation, repr_desc))
@staticmethod
def _raise_on_bad_input(ptb_tree):
"""Ensure that ptb_tree is a valid Penn Treebank datatype or
raises a TypeError. Currently, this requires that ptb_tree is
a str or basestring (depending on Python version)."""
if not isinstance(ptb_tree, string_type):
raise TypeError("ptb_tree is not a string: %r" % ptb_tree)
@staticmethod
def _raise_on_bad_jar_filename(jar_filename):
"""Ensure that jar_filename is a valid path to a jar file."""
if jar_filename is None:
return
if not isinstance(jar_filename, string_type):
raise TypeError("jar_filename is not a string: %r" % jar_filename)
if not os.path.exists(jar_filename):
raise ValueError("jar_filename does not exist: %r" % jar_filename)
@staticmethod
def get_jar_url(version=None):
    """Return the Maven Central download URL for the Stanford CoreNLP
    jar of ``version``.

    Maven artifacts are used because they are much smaller than the
    full CoreNLP distributions. ``version=None`` selects
    DEFAULT_CORENLP_VERSION. Raises TypeError when ``version`` is
    neither None nor a string.
    """
    if version is None:
        version = DEFAULT_CORENLP_VERSION
    # basestring exists only on Python 2; fall back to str on Python 3.
    try:
        string_type = basestring
    except NameError:
        string_type = str
    if not isinstance(version, string_type):
        raise TypeError("Version must be a string or None (got %r)." %
                        version)
    artifact = 'stanford-corenlp-%s.jar' % version
    prefix = 'http://search.maven.org/remotecontent?filepath='
    path = 'edu/stanford/nlp/stanford-corenlp/%s/%s' % (version, artifact)
    return prefix + path
@staticmethod
def get_instance(jar_filename=None, version=None,
                 download_if_missing=True, backend='jpype',
                 **extra_args):
    """This is the typical mechanism of constructing a
    StanfordDependencies instance. The backend parameter determines
    which backend to load (currently can be 'subprocess' or 'jpype').
    To determine which jar file is used, you must specify
    jar_filename, download_if_missing=True, and/or version.

    - If jar_filename is specified, that jar is used and the other two
      flags are ignored.
    - Otherwise, if download_if_missing, we will download a jar file
      from the Maven repository. This jar file will be the latest
      known version of CoreNLP unless the version flag is specified
      (e.g., version='3.4.1') in which case we'll attempt to download
      and use that version. Once downloaded, it will be stored in
      your home directory and not downloaded again.
    - If jar_filename and download_if_missing are not specified,
      version must be set to a version previously downloaded in the
      above step.

    All remaining keyword arguments are passed on to the
    StanfordDependencies backend constructor.

    If the above options are confusing, don't panic! You can leave
    them all blank -- get_instance() is designed to provide the best
    and latest available conversion settings by default.

    Raises ValueError for an unknown backend name."""
    StanfordDependencies._raise_on_bad_jar_filename(jar_filename)
    extra_args.update(jar_filename=jar_filename,
                      download_if_missing=download_if_missing,
                      version=version)
    if backend == 'jpype':
        try:
            from .JPypeBackend import JPypeBackend
            return JPypeBackend(**extra_args)
        except ImportError:
            warnings.warn('Error importing JPypeBackend, '
                          'falling back to SubprocessBackend.')
            backend = 'subprocess'
        except RuntimeError as r:
            # Bug fix: exception instances are not subscriptable in
            # Python 3, so the original `r[0]` itself raised TypeError.
            # Use .args[0] (falling back to the exception) instead.
            warnings.warn('RuntimeError with JPypeBackend (%s), '
                          'falling back to SubprocessBackend.' %
                          (r.args[0] if r.args else r))
            backend = 'subprocess'
        except TypeError as t:
            # Same Python 3 fix as above for `t[0]`.
            warnings.warn('TypeError with JPypeBackend (%s), '
                          'falling back to SubprocessBackend.' %
                          (t.args[0] if t.args else t))
            backend = 'subprocess'
    if backend == 'subprocess':
        from .SubprocessBackend import SubprocessBackend
        return SubprocessBackend(**extra_args)
    raise ValueError("Unknown backend: %r (known backends: "
                     "'subprocess' and 'jpype')" % backend)
|
dmcc/PyStanfordDependencies | StanfordDependencies/StanfordDependencies.py | StanfordDependencies.download_if_missing | python | def download_if_missing(self, version=None, verbose=True):
if os.path.exists(self.jar_filename):
return
jar_url = self.get_jar_url(version)
if verbose:
print("Downloading %r -> %r" % (jar_url, self.jar_filename))
opener = ErrorAwareURLOpener()
opener.retrieve(jar_url, filename=self.jar_filename) | Download the jar for version into the jar_filename specified
in the constructor. Will not overwrite jar_filename if it already
exists. version defaults to DEFAULT_CORENLP_VERSION (ideally the
latest but we can't guarantee that since PyStanfordDependencies
is distributed separately). | train | https://github.com/dmcc/PyStanfordDependencies/blob/43d8f38a19e40087f273330087918c87df6d4d8f/StanfordDependencies/StanfordDependencies.py#L141-L154 | null | class StanfordDependencies:
"""Abstract base class for converting Penn Treebank trees to Stanford
Dependencies. To actually use this, you'll want to instantiate one
of the backends. The easiest way to do this is via the get_instance()
helper method.
If you do not currently have the appropriate Java jar files, the
download_if_missing flag in the constructor will help you fetch them.
In this case, you can set version to a string with the CoreNLP version
you'd like. If unset, it will default to DEFAULT_CORENLP_VERSION.
Subclasses should (at minimum) override the convert_tree method. They
may also want to override convert_trees if they require batch
operation. They may also add their own custom keyword arguments to
__init__(), convert_tree(), and convert_trees()."""
__metaclass__ = ABCMeta
def __init__(self, jar_filename=None, download_if_missing=False,
version=None):
"""jar_filename should be the path to a Java jar file with
classfiles from Stanford CoreNLP or Stanford Parser.
If download_if_missing is True, it will automatically download
a jar file and store it locally. By default it will use
DEFAULT_CORENLP_VERSION but will use the version flag if
that argument is specified."""
if not (jar_filename or version is not None or download_if_missing):
raise ValueError("Must set either jar_filename, version, "
"or download_if_missing to True.")
self.jar_filename = jar_filename
if not self.jar_filename:
if version is None:
version = DEFAULT_CORENLP_VERSION
filename = 'stanford-corenlp-%s.jar' % version
self.jar_filename = self.setup_and_get_default_path(filename)
if download_if_missing:
self.download_if_missing(version)
def convert_trees(self, ptb_trees, representation='basic', universal=True,
include_punct=True, include_erased=False, **kwargs):
"""Convert a list of Penn Treebank formatted strings (ptb_trees)
into Stanford Dependencies. The dependencies are represented
as a list of sentences (CoNLL.Corpus), where each sentence
(CoNLL.Sentence) is itself a list of CoNLL.Token objects.
Currently supported representations are 'basic', 'collapsed',
'CCprocessed', and 'collapsedTree' which behave the same as they
in the CoreNLP command line tools. (note that in the online
CoreNLP demo, 'collapsed' is called 'enhanced')
Additional arguments: universal (if True, use universal
dependencies if they're available), include_punct (if False,
punctuation tokens will not be included), and include_erased
(if False and your representation might erase tokens, those
tokens will be omitted from the output).
See documentation on your backend to see if it supports
further options."""
kwargs.update(representation=representation, universal=universal,
include_punct=include_punct,
include_erased=include_erased)
return Corpus(self.convert_tree(ptb_tree, **kwargs)
for ptb_tree in ptb_trees)
@abstractmethod
def convert_tree(self, ptb_tree, representation='basic', **kwargs):
"""Converts a single Penn Treebank format tree to Stanford
Dependencies. With some backends, this can be considerably
slower than using convert_trees, so consider that if you're
doing a batch conversion. See convert_trees for more details
and a listing of possible kwargs."""
def setup_and_get_default_path(self, jar_base_filename):
"""Determine the user-specific install path for the Stanford
Dependencies jar if the jar_url is not specified and ensure that
it is writable (that is, make sure the directory exists). Returns
the full path for where the jar file should be installed."""
import os
import errno
install_dir = os.path.expanduser(INSTALL_DIR)
try:
os.makedirs(install_dir)
except OSError as ose:
if ose.errno != errno.EEXIST:
raise ose
jar_filename = os.path.join(install_dir, jar_base_filename)
return jar_filename
@staticmethod
def _raise_on_bad_representation(representation):
"""Ensure that representation is a known Stanford Dependency
representation (raises a ValueError if the representation is
invalid)."""
if representation not in REPRESENTATIONS:
repr_desc = ', '.join(map(repr, REPRESENTATIONS))
raise ValueError("Unknown representation: %r (should be one "
"of %s)" % (representation, repr_desc))
@staticmethod
def _raise_on_bad_input(ptb_tree):
"""Ensure that ptb_tree is a valid Penn Treebank datatype or
raises a TypeError. Currently, this requires that ptb_tree is
a str or basestring (depending on Python version)."""
if not isinstance(ptb_tree, string_type):
raise TypeError("ptb_tree is not a string: %r" % ptb_tree)
@staticmethod
def _raise_on_bad_jar_filename(jar_filename):
"""Ensure that jar_filename is a valid path to a jar file."""
if jar_filename is None:
return
if not isinstance(jar_filename, string_type):
raise TypeError("jar_filename is not a string: %r" % jar_filename)
if not os.path.exists(jar_filename):
raise ValueError("jar_filename does not exist: %r" % jar_filename)
@staticmethod
def get_jar_url(version=None):
"""Get the URL to a Stanford CoreNLP jar file with a specific
version. These jars come from Maven since the Maven version is
smaller than the full CoreNLP distributions. Defaults to
DEFAULT_CORENLP_VERSION."""
if version is None:
version = DEFAULT_CORENLP_VERSION
try:
string_type = basestring
except NameError:
string_type = str
if not isinstance(version, string_type):
raise TypeError("Version must be a string or None (got %r)." %
version)
jar_filename = 'stanford-corenlp-%s.jar' % version
return 'http://search.maven.org/remotecontent?filepath=' + \
'edu/stanford/nlp/stanford-corenlp/%s/%s' % (version,
jar_filename)
@staticmethod
def get_instance(jar_filename=None, version=None,
download_if_missing=True, backend='jpype',
**extra_args):
"""This is the typical mechanism of constructing a
StanfordDependencies instance. The backend parameter determines
which backend to load (currently can be 'subprocess' or 'jpype').
To determine which jar file is used, you must specify
jar_filename, download_if_missing=True, and/or version.
- If jar_filename is specified, that jar is used and the other two
flags are ignored.
- Otherwise, if download_if_missing, we will download a jar file
from the Maven repository. This jar file will be the latest
known version of CoreNLP unless the version flag is specified
(e.g., version='3.4.1') in which case we'll attempt to download
and use that version. Once downloaded, it will be stored in
your home directory and not downloaded again.
- If jar_filename and download_if_missing are not specified,
version must be set to a version previously downloaded in the
above step.
All remaining keyword arguments are passes on to the
StanfordDependencies backend constructor.
If the above options are confusing, don't panic! You can leave
them all blank -- get_instance() is designed to provide the best
and latest available conversion settings by default."""
StanfordDependencies._raise_on_bad_jar_filename(jar_filename)
extra_args.update(jar_filename=jar_filename,
download_if_missing=download_if_missing,
version=version)
if backend == 'jpype':
try:
from .JPypeBackend import JPypeBackend
return JPypeBackend(**extra_args)
except ImportError:
warnings.warn('Error importing JPypeBackend, '
'falling back to SubprocessBackend.')
backend = 'subprocess'
except RuntimeError as r:
warnings.warn('RuntimeError with JPypeBackend (%s), '
'falling back to SubprocessBackend.' % r[0])
backend = 'subprocess'
except TypeError as t:
warnings.warn('TypeError with JPypeBackend (%s), '
'falling back to SubprocessBackend.' % t[0])
backend = 'subprocess'
if backend == 'subprocess':
from .SubprocessBackend import SubprocessBackend
return SubprocessBackend(**extra_args)
raise ValueError("Unknown backend: %r (known backends: "
"'subprocess' and 'jpype')" % backend)
|
dmcc/PyStanfordDependencies | StanfordDependencies/StanfordDependencies.py | StanfordDependencies._raise_on_bad_representation | python | def _raise_on_bad_representation(representation):
if representation not in REPRESENTATIONS:
repr_desc = ', '.join(map(repr, REPRESENTATIONS))
raise ValueError("Unknown representation: %r (should be one "
"of %s)" % (representation, repr_desc)) | Ensure that representation is a known Stanford Dependency
representation (raises a ValueError if the representation is
invalid). | train | https://github.com/dmcc/PyStanfordDependencies/blob/43d8f38a19e40087f273330087918c87df6d4d8f/StanfordDependencies/StanfordDependencies.py#L157-L164 | null | class StanfordDependencies:
"""Abstract base class for converting Penn Treebank trees to Stanford
Dependencies. To actually use this, you'll want to instantiate one
of the backends. The easiest way to do this is via the get_instance()
helper method.
If you do not currently have the appropriate Java jar files, the
download_if_missing flag in the constructor will help you fetch them.
In this case, you can set version to a string with the CoreNLP version
you'd like. If unset, it will default to DEFAULT_CORENLP_VERSION.
Subclasses should (at minimum) override the convert_tree method. They
may also want to override convert_trees if they require batch
operation. They may also add their own custom keyword arguments to
__init__(), convert_tree(), and convert_trees()."""
__metaclass__ = ABCMeta
def __init__(self, jar_filename=None, download_if_missing=False,
version=None):
"""jar_filename should be the path to a Java jar file with
classfiles from Stanford CoreNLP or Stanford Parser.
If download_if_missing is True, it will automatically download
a jar file and store it locally. By default it will use
DEFAULT_CORENLP_VERSION but will use the version flag if
that argument is specified."""
if not (jar_filename or version is not None or download_if_missing):
raise ValueError("Must set either jar_filename, version, "
"or download_if_missing to True.")
self.jar_filename = jar_filename
if not self.jar_filename:
if version is None:
version = DEFAULT_CORENLP_VERSION
filename = 'stanford-corenlp-%s.jar' % version
self.jar_filename = self.setup_and_get_default_path(filename)
if download_if_missing:
self.download_if_missing(version)
def convert_trees(self, ptb_trees, representation='basic', universal=True,
include_punct=True, include_erased=False, **kwargs):
"""Convert a list of Penn Treebank formatted strings (ptb_trees)
into Stanford Dependencies. The dependencies are represented
as a list of sentences (CoNLL.Corpus), where each sentence
(CoNLL.Sentence) is itself a list of CoNLL.Token objects.
Currently supported representations are 'basic', 'collapsed',
'CCprocessed', and 'collapsedTree' which behave the same as they
in the CoreNLP command line tools. (note that in the online
CoreNLP demo, 'collapsed' is called 'enhanced')
Additional arguments: universal (if True, use universal
dependencies if they're available), include_punct (if False,
punctuation tokens will not be included), and include_erased
(if False and your representation might erase tokens, those
tokens will be omitted from the output).
See documentation on your backend to see if it supports
further options."""
kwargs.update(representation=representation, universal=universal,
include_punct=include_punct,
include_erased=include_erased)
return Corpus(self.convert_tree(ptb_tree, **kwargs)
for ptb_tree in ptb_trees)
@abstractmethod
def convert_tree(self, ptb_tree, representation='basic', **kwargs):
"""Converts a single Penn Treebank format tree to Stanford
Dependencies. With some backends, this can be considerably
slower than using convert_trees, so consider that if you're
doing a batch conversion. See convert_trees for more details
and a listing of possible kwargs."""
def setup_and_get_default_path(self, jar_base_filename):
"""Determine the user-specific install path for the Stanford
Dependencies jar if the jar_url is not specified and ensure that
it is writable (that is, make sure the directory exists). Returns
the full path for where the jar file should be installed."""
import os
import errno
install_dir = os.path.expanduser(INSTALL_DIR)
try:
os.makedirs(install_dir)
except OSError as ose:
if ose.errno != errno.EEXIST:
raise ose
jar_filename = os.path.join(install_dir, jar_base_filename)
return jar_filename
def download_if_missing(self, version=None, verbose=True):
"""Download the jar for version into the jar_filename specified
in the constructor. Will not overwrite jar_filename if it already
exists. version defaults to DEFAULT_CORENLP_VERSION (ideally the
latest but we can't guarantee that since PyStanfordDependencies
is distributed separately)."""
if os.path.exists(self.jar_filename):
return
jar_url = self.get_jar_url(version)
if verbose:
print("Downloading %r -> %r" % (jar_url, self.jar_filename))
opener = ErrorAwareURLOpener()
opener.retrieve(jar_url, filename=self.jar_filename)
@staticmethod
@staticmethod
def _raise_on_bad_input(ptb_tree):
"""Ensure that ptb_tree is a valid Penn Treebank datatype or
raises a TypeError. Currently, this requires that ptb_tree is
a str or basestring (depending on Python version)."""
if not isinstance(ptb_tree, string_type):
raise TypeError("ptb_tree is not a string: %r" % ptb_tree)
@staticmethod
def _raise_on_bad_jar_filename(jar_filename):
"""Ensure that jar_filename is a valid path to a jar file."""
if jar_filename is None:
return
if not isinstance(jar_filename, string_type):
raise TypeError("jar_filename is not a string: %r" % jar_filename)
if not os.path.exists(jar_filename):
raise ValueError("jar_filename does not exist: %r" % jar_filename)
@staticmethod
def get_jar_url(version=None):
"""Get the URL to a Stanford CoreNLP jar file with a specific
version. These jars come from Maven since the Maven version is
smaller than the full CoreNLP distributions. Defaults to
DEFAULT_CORENLP_VERSION."""
if version is None:
version = DEFAULT_CORENLP_VERSION
try:
string_type = basestring
except NameError:
string_type = str
if not isinstance(version, string_type):
raise TypeError("Version must be a string or None (got %r)." %
version)
jar_filename = 'stanford-corenlp-%s.jar' % version
return 'http://search.maven.org/remotecontent?filepath=' + \
'edu/stanford/nlp/stanford-corenlp/%s/%s' % (version,
jar_filename)
@staticmethod
def get_instance(jar_filename=None, version=None,
download_if_missing=True, backend='jpype',
**extra_args):
"""This is the typical mechanism of constructing a
StanfordDependencies instance. The backend parameter determines
which backend to load (currently can be 'subprocess' or 'jpype').
To determine which jar file is used, you must specify
jar_filename, download_if_missing=True, and/or version.
- If jar_filename is specified, that jar is used and the other two
flags are ignored.
- Otherwise, if download_if_missing, we will download a jar file
from the Maven repository. This jar file will be the latest
known version of CoreNLP unless the version flag is specified
(e.g., version='3.4.1') in which case we'll attempt to download
and use that version. Once downloaded, it will be stored in
your home directory and not downloaded again.
- If jar_filename and download_if_missing are not specified,
version must be set to a version previously downloaded in the
above step.
All remaining keyword arguments are passes on to the
StanfordDependencies backend constructor.
If the above options are confusing, don't panic! You can leave
them all blank -- get_instance() is designed to provide the best
and latest available conversion settings by default."""
StanfordDependencies._raise_on_bad_jar_filename(jar_filename)
extra_args.update(jar_filename=jar_filename,
download_if_missing=download_if_missing,
version=version)
if backend == 'jpype':
try:
from .JPypeBackend import JPypeBackend
return JPypeBackend(**extra_args)
except ImportError:
warnings.warn('Error importing JPypeBackend, '
'falling back to SubprocessBackend.')
backend = 'subprocess'
except RuntimeError as r:
warnings.warn('RuntimeError with JPypeBackend (%s), '
'falling back to SubprocessBackend.' % r[0])
backend = 'subprocess'
except TypeError as t:
warnings.warn('TypeError with JPypeBackend (%s), '
'falling back to SubprocessBackend.' % t[0])
backend = 'subprocess'
if backend == 'subprocess':
from .SubprocessBackend import SubprocessBackend
return SubprocessBackend(**extra_args)
raise ValueError("Unknown backend: %r (known backends: "
"'subprocess' and 'jpype')" % backend)
|
dmcc/PyStanfordDependencies | StanfordDependencies/StanfordDependencies.py | StanfordDependencies._raise_on_bad_jar_filename | python | def _raise_on_bad_jar_filename(jar_filename):
if jar_filename is None:
return
if not isinstance(jar_filename, string_type):
raise TypeError("jar_filename is not a string: %r" % jar_filename)
if not os.path.exists(jar_filename):
raise ValueError("jar_filename does not exist: %r" % jar_filename) | Ensure that jar_filename is a valid path to a jar file. | train | https://github.com/dmcc/PyStanfordDependencies/blob/43d8f38a19e40087f273330087918c87df6d4d8f/StanfordDependencies/StanfordDependencies.py#L175-L184 | null | class StanfordDependencies:
"""Abstract base class for converting Penn Treebank trees to Stanford
Dependencies. To actually use this, you'll want to instantiate one
of the backends. The easiest way to do this is via the get_instance()
helper method.
If you do not currently have the appropriate Java jar files, the
download_if_missing flag in the constructor will help you fetch them.
In this case, you can set version to a string with the CoreNLP version
you'd like. If unset, it will default to DEFAULT_CORENLP_VERSION.
Subclasses should (at minimum) override the convert_tree method. They
may also want to override convert_trees if they require batch
operation. They may also add their own custom keyword arguments to
__init__(), convert_tree(), and convert_trees()."""
__metaclass__ = ABCMeta
def __init__(self, jar_filename=None, download_if_missing=False,
version=None):
"""jar_filename should be the path to a Java jar file with
classfiles from Stanford CoreNLP or Stanford Parser.
If download_if_missing is True, it will automatically download
a jar file and store it locally. By default it will use
DEFAULT_CORENLP_VERSION but will use the version flag if
that argument is specified."""
if not (jar_filename or version is not None or download_if_missing):
raise ValueError("Must set either jar_filename, version, "
"or download_if_missing to True.")
self.jar_filename = jar_filename
if not self.jar_filename:
if version is None:
version = DEFAULT_CORENLP_VERSION
filename = 'stanford-corenlp-%s.jar' % version
self.jar_filename = self.setup_and_get_default_path(filename)
if download_if_missing:
self.download_if_missing(version)
def convert_trees(self, ptb_trees, representation='basic', universal=True,
include_punct=True, include_erased=False, **kwargs):
"""Convert a list of Penn Treebank formatted strings (ptb_trees)
into Stanford Dependencies. The dependencies are represented
as a list of sentences (CoNLL.Corpus), where each sentence
(CoNLL.Sentence) is itself a list of CoNLL.Token objects.
Currently supported representations are 'basic', 'collapsed',
'CCprocessed', and 'collapsedTree' which behave the same as they
in the CoreNLP command line tools. (note that in the online
CoreNLP demo, 'collapsed' is called 'enhanced')
Additional arguments: universal (if True, use universal
dependencies if they're available), include_punct (if False,
punctuation tokens will not be included), and include_erased
(if False and your representation might erase tokens, those
tokens will be omitted from the output).
See documentation on your backend to see if it supports
further options."""
kwargs.update(representation=representation, universal=universal,
include_punct=include_punct,
include_erased=include_erased)
return Corpus(self.convert_tree(ptb_tree, **kwargs)
for ptb_tree in ptb_trees)
@abstractmethod
def convert_tree(self, ptb_tree, representation='basic', **kwargs):
"""Converts a single Penn Treebank format tree to Stanford
Dependencies. With some backends, this can be considerably
slower than using convert_trees, so consider that if you're
doing a batch conversion. See convert_trees for more details
and a listing of possible kwargs."""
def setup_and_get_default_path(self, jar_base_filename):
"""Determine the user-specific install path for the Stanford
Dependencies jar if the jar_url is not specified and ensure that
it is writable (that is, make sure the directory exists). Returns
the full path for where the jar file should be installed."""
import os
import errno
install_dir = os.path.expanduser(INSTALL_DIR)
try:
os.makedirs(install_dir)
except OSError as ose:
if ose.errno != errno.EEXIST:
raise ose
jar_filename = os.path.join(install_dir, jar_base_filename)
return jar_filename
def download_if_missing(self, version=None, verbose=True):
"""Download the jar for version into the jar_filename specified
in the constructor. Will not overwrite jar_filename if it already
exists. version defaults to DEFAULT_CORENLP_VERSION (ideally the
latest but we can't guarantee that since PyStanfordDependencies
is distributed separately)."""
if os.path.exists(self.jar_filename):
return
jar_url = self.get_jar_url(version)
if verbose:
print("Downloading %r -> %r" % (jar_url, self.jar_filename))
opener = ErrorAwareURLOpener()
opener.retrieve(jar_url, filename=self.jar_filename)
@staticmethod
def _raise_on_bad_representation(representation):
"""Ensure that representation is a known Stanford Dependency
representation (raises a ValueError if the representation is
invalid)."""
if representation not in REPRESENTATIONS:
repr_desc = ', '.join(map(repr, REPRESENTATIONS))
raise ValueError("Unknown representation: %r (should be one "
"of %s)" % (representation, repr_desc))
@staticmethod
def _raise_on_bad_input(ptb_tree):
"""Ensure that ptb_tree is a valid Penn Treebank datatype or
raises a TypeError. Currently, this requires that ptb_tree is
a str or basestring (depending on Python version)."""
if not isinstance(ptb_tree, string_type):
raise TypeError("ptb_tree is not a string: %r" % ptb_tree)
@staticmethod
@staticmethod
def get_jar_url(version=None):
"""Get the URL to a Stanford CoreNLP jar file with a specific
version. These jars come from Maven since the Maven version is
smaller than the full CoreNLP distributions. Defaults to
DEFAULT_CORENLP_VERSION."""
if version is None:
version = DEFAULT_CORENLP_VERSION
try:
string_type = basestring
except NameError:
string_type = str
if not isinstance(version, string_type):
raise TypeError("Version must be a string or None (got %r)." %
version)
jar_filename = 'stanford-corenlp-%s.jar' % version
return 'http://search.maven.org/remotecontent?filepath=' + \
'edu/stanford/nlp/stanford-corenlp/%s/%s' % (version,
jar_filename)
@staticmethod
def get_instance(jar_filename=None, version=None,
download_if_missing=True, backend='jpype',
**extra_args):
"""This is the typical mechanism of constructing a
StanfordDependencies instance. The backend parameter determines
which backend to load (currently can be 'subprocess' or 'jpype').
To determine which jar file is used, you must specify
jar_filename, download_if_missing=True, and/or version.
- If jar_filename is specified, that jar is used and the other two
flags are ignored.
- Otherwise, if download_if_missing, we will download a jar file
from the Maven repository. This jar file will be the latest
known version of CoreNLP unless the version flag is specified
(e.g., version='3.4.1') in which case we'll attempt to download
and use that version. Once downloaded, it will be stored in
your home directory and not downloaded again.
- If jar_filename and download_if_missing are not specified,
version must be set to a version previously downloaded in the
above step.
All remaining keyword arguments are passes on to the
StanfordDependencies backend constructor.
If the above options are confusing, don't panic! You can leave
them all blank -- get_instance() is designed to provide the best
and latest available conversion settings by default."""
StanfordDependencies._raise_on_bad_jar_filename(jar_filename)
extra_args.update(jar_filename=jar_filename,
download_if_missing=download_if_missing,
version=version)
if backend == 'jpype':
try:
from .JPypeBackend import JPypeBackend
return JPypeBackend(**extra_args)
except ImportError:
warnings.warn('Error importing JPypeBackend, '
'falling back to SubprocessBackend.')
backend = 'subprocess'
except RuntimeError as r:
warnings.warn('RuntimeError with JPypeBackend (%s), '
'falling back to SubprocessBackend.' % r[0])
backend = 'subprocess'
except TypeError as t:
warnings.warn('TypeError with JPypeBackend (%s), '
'falling back to SubprocessBackend.' % t[0])
backend = 'subprocess'
if backend == 'subprocess':
from .SubprocessBackend import SubprocessBackend
return SubprocessBackend(**extra_args)
raise ValueError("Unknown backend: %r (known backends: "
"'subprocess' and 'jpype')" % backend)
|
dmcc/PyStanfordDependencies | StanfordDependencies/StanfordDependencies.py | StanfordDependencies.get_jar_url | python | def get_jar_url(version=None):
if version is None:
version = DEFAULT_CORENLP_VERSION
try:
string_type = basestring
except NameError:
string_type = str
if not isinstance(version, string_type):
raise TypeError("Version must be a string or None (got %r)." %
version)
jar_filename = 'stanford-corenlp-%s.jar' % version
return 'http://search.maven.org/remotecontent?filepath=' + \
'edu/stanford/nlp/stanford-corenlp/%s/%s' % (version,
jar_filename) | Get the URL to a Stanford CoreNLP jar file with a specific
version. These jars come from Maven since the Maven version is
smaller than the full CoreNLP distributions. Defaults to
DEFAULT_CORENLP_VERSION. | train | https://github.com/dmcc/PyStanfordDependencies/blob/43d8f38a19e40087f273330087918c87df6d4d8f/StanfordDependencies/StanfordDependencies.py#L187-L206 | null | class StanfordDependencies:
"""Abstract base class for converting Penn Treebank trees to Stanford
Dependencies. To actually use this, you'll want to instantiate one
of the backends. The easiest way to do this is via the get_instance()
helper method.
If you do not currently have the appropriate Java jar files, the
download_if_missing flag in the constructor will help you fetch them.
In this case, you can set version to a string with the CoreNLP version
you'd like. If unset, it will default to DEFAULT_CORENLP_VERSION.
Subclasses should (at minimum) override the convert_tree method. They
may also want to override convert_trees if they require batch
operation. They may also add their own custom keyword arguments to
__init__(), convert_tree(), and convert_trees()."""
__metaclass__ = ABCMeta
def __init__(self, jar_filename=None, download_if_missing=False,
version=None):
"""jar_filename should be the path to a Java jar file with
classfiles from Stanford CoreNLP or Stanford Parser.
If download_if_missing is True, it will automatically download
a jar file and store it locally. By default it will use
DEFAULT_CORENLP_VERSION but will use the version flag if
that argument is specified."""
if not (jar_filename or version is not None or download_if_missing):
raise ValueError("Must set either jar_filename, version, "
"or download_if_missing to True.")
self.jar_filename = jar_filename
if not self.jar_filename:
if version is None:
version = DEFAULT_CORENLP_VERSION
filename = 'stanford-corenlp-%s.jar' % version
self.jar_filename = self.setup_and_get_default_path(filename)
if download_if_missing:
self.download_if_missing(version)
def convert_trees(self, ptb_trees, representation='basic', universal=True,
include_punct=True, include_erased=False, **kwargs):
"""Convert a list of Penn Treebank formatted strings (ptb_trees)
into Stanford Dependencies. The dependencies are represented
as a list of sentences (CoNLL.Corpus), where each sentence
(CoNLL.Sentence) is itself a list of CoNLL.Token objects.
Currently supported representations are 'basic', 'collapsed',
'CCprocessed', and 'collapsedTree' which behave the same as they
in the CoreNLP command line tools. (note that in the online
CoreNLP demo, 'collapsed' is called 'enhanced')
Additional arguments: universal (if True, use universal
dependencies if they're available), include_punct (if False,
punctuation tokens will not be included), and include_erased
(if False and your representation might erase tokens, those
tokens will be omitted from the output).
See documentation on your backend to see if it supports
further options."""
kwargs.update(representation=representation, universal=universal,
include_punct=include_punct,
include_erased=include_erased)
return Corpus(self.convert_tree(ptb_tree, **kwargs)
for ptb_tree in ptb_trees)
@abstractmethod
def convert_tree(self, ptb_tree, representation='basic', **kwargs):
    """Converts a single Penn Treebank format tree to Stanford
    Dependencies. With some backends, this can be considerably
    slower than using convert_trees, so consider that if you're
    doing a batch conversion. See convert_trees for more details,
    the supported representation values, and a listing of possible
    kwargs. Subclasses must override this method; it is the single
    primitive that convert_trees is built on."""
def setup_and_get_default_path(self, jar_base_filename):
    """Return the user-specific default install path for a Stanford
    Dependencies jar, creating the install directory if needed.

    jar_base_filename is the bare jar filename (no directory part);
    the directory comes from the module-level INSTALL_DIR, expanded
    with os.path.expanduser. Returns the full path where the jar
    should be installed. Raises OSError if the directory cannot be
    created for any reason other than already existing.
    """
    import os
    import errno
    install_dir = os.path.expanduser(INSTALL_DIR)
    try:
        os.makedirs(install_dir)
    except OSError as ose:
        if ose.errno != errno.EEXIST:
            # A bare raise preserves the original traceback ('raise
            # ose' would reset it on Python 2, which this module still
            # supports -- see the basestring fallback elsewhere).
            raise
    jar_filename = os.path.join(install_dir, jar_base_filename)
    return jar_filename
def download_if_missing(self, version=None, verbose=True):
    """Download the CoreNLP jar for version into self.jar_filename,
    unless that file already exists (an existing jar is never
    overwritten). version defaults to DEFAULT_CORENLP_VERSION
    (ideally the latest, but PyStanfordDependencies is distributed
    separately so that can't be guaranteed). When verbose is True,
    the source URL and target path are printed before downloading."""
    if os.path.exists(self.jar_filename):
        # jar already present -- nothing to do
        return
    jar_url = self.get_jar_url(version)
    if verbose:
        print("Downloading %r -> %r" % (jar_url, self.jar_filename))
    ErrorAwareURLOpener().retrieve(jar_url, filename=self.jar_filename)
@staticmethod
def _raise_on_bad_representation(representation):
    """Validate that representation names a known Stanford
    Dependencies representation (one of the module-level
    REPRESENTATIONS); raises ValueError otherwise."""
    if representation in REPRESENTATIONS:
        return
    repr_desc = ', '.join(repr(known) for known in REPRESENTATIONS)
    raise ValueError("Unknown representation: %r (should be one "
                     "of %s)" % (representation, repr_desc))
@staticmethod
def _raise_on_bad_input(ptb_tree):
    """Validate that ptb_tree is a Penn Treebank tree in string form.

    Raises TypeError unless ptb_tree is an instance of string_type
    (str, or basestring on Python 2)."""
    if isinstance(ptb_tree, string_type):
        return
    raise TypeError("ptb_tree is not a string: %r" % ptb_tree)
@staticmethod
def _raise_on_bad_jar_filename(jar_filename):
    """Validate jar_filename: it must be None (meaning "use the
    default path") or the path of an existing file. Raises TypeError
    for non-string values and ValueError for missing paths."""
    if jar_filename is not None:
        if not isinstance(jar_filename, string_type):
            raise TypeError("jar_filename is not a string: %r" % jar_filename)
        if not os.path.exists(jar_filename):
            raise ValueError("jar_filename does not exist: %r" % jar_filename)
@staticmethod
def get_instance(jar_filename=None, version=None,
                 download_if_missing=True, backend='jpype',
                 **extra_args):
    """This is the typical mechanism of constructing a
    StanfordDependencies instance. The backend parameter determines
    which backend to load (currently can be 'subprocess' or 'jpype').

    To determine which jar file is used, you must specify
    jar_filename, download_if_missing=True, and/or version.
    - If jar_filename is specified, that jar is used and the other two
      flags are ignored.
    - Otherwise, if download_if_missing, we will download a jar file
      from the Maven repository. This jar file will be the latest
      known version of CoreNLP unless the version flag is specified
      (e.g., version='3.4.1') in which case we'll attempt to download
      and use that version. Once downloaded, it will be stored in
      your home directory and not downloaded again.
    - If jar_filename and download_if_missing are not specified,
      version must be set to a version previously downloaded in the
      above step.

    All remaining keyword arguments are passed on to the
    StanfordDependencies backend constructor.

    If the above options are confusing, don't panic! You can leave
    them all blank -- get_instance() is designed to provide the best
    and latest available conversion settings by default."""
    # (a second, duplicated @staticmethod decorator was removed here:
    # staticmethod(staticmethod(f)) is not callable before Python 3.10)
    StanfordDependencies._raise_on_bad_jar_filename(jar_filename)
    extra_args.update(jar_filename=jar_filename,
                      download_if_missing=download_if_missing,
                      version=version)
    if backend == 'jpype':
        try:
            from .JPypeBackend import JPypeBackend
            return JPypeBackend(**extra_args)
        except ImportError:
            warnings.warn('Error importing JPypeBackend, '
                          'falling back to SubprocessBackend.')
            backend = 'subprocess'
        except RuntimeError as r:
            # Format the exception itself rather than indexing it
            # (e[0] only works on Python 2; Exception.__getitem__ was
            # removed in Python 3).
            warnings.warn('RuntimeError with JPypeBackend (%s), '
                          'falling back to SubprocessBackend.' % r)
            backend = 'subprocess'
        except TypeError as t:
            warnings.warn('TypeError with JPypeBackend (%s), '
                          'falling back to SubprocessBackend.' % t)
            backend = 'subprocess'
    if backend == 'subprocess':
        from .SubprocessBackend import SubprocessBackend
        return SubprocessBackend(**extra_args)
    raise ValueError("Unknown backend: %r (known backends: "
                     "'subprocess' and 'jpype')" % backend)
|
dmcc/PyStanfordDependencies | StanfordDependencies/StanfordDependencies.py | StanfordDependencies.get_instance | python | def get_instance(jar_filename=None, version=None,
download_if_missing=True, backend='jpype',
**extra_args):
StanfordDependencies._raise_on_bad_jar_filename(jar_filename)
extra_args.update(jar_filename=jar_filename,
download_if_missing=download_if_missing,
version=version)
if backend == 'jpype':
try:
from .JPypeBackend import JPypeBackend
return JPypeBackend(**extra_args)
except ImportError:
warnings.warn('Error importing JPypeBackend, '
'falling back to SubprocessBackend.')
backend = 'subprocess'
except RuntimeError as r:
warnings.warn('RuntimeError with JPypeBackend (%s), '
'falling back to SubprocessBackend.' % r[0])
backend = 'subprocess'
except TypeError as t:
warnings.warn('TypeError with JPypeBackend (%s), '
'falling back to SubprocessBackend.' % t[0])
backend = 'subprocess'
if backend == 'subprocess':
from .SubprocessBackend import SubprocessBackend
return SubprocessBackend(**extra_args)
raise ValueError("Unknown backend: %r (known backends: "
"'subprocess' and 'jpype')" % backend) | This is the typical mechanism of constructing a
StanfordDependencies instance. The backend parameter determines
which backend to load (currently can be 'subprocess' or 'jpype').
To determine which jar file is used, you must specify
jar_filename, download_if_missing=True, and/or version.
- If jar_filename is specified, that jar is used and the other two
flags are ignored.
- Otherwise, if download_if_missing, we will download a jar file
from the Maven repository. This jar file will be the latest
known version of CoreNLP unless the version flag is specified
(e.g., version='3.4.1') in which case we'll attempt to download
and use that version. Once downloaded, it will be stored in
your home directory and not downloaded again.
- If jar_filename and download_if_missing are not specified,
version must be set to a version previously downloaded in the
above step.
All remaining keyword arguments are passes on to the
StanfordDependencies backend constructor.
If the above options are confusing, don't panic! You can leave
them all blank -- get_instance() is designed to provide the best
and latest available conversion settings by default. | train | https://github.com/dmcc/PyStanfordDependencies/blob/43d8f38a19e40087f273330087918c87df6d4d8f/StanfordDependencies/StanfordDependencies.py#L209-L262 | [
"def _raise_on_bad_jar_filename(jar_filename):\n \"\"\"Ensure that jar_filename is a valid path to a jar file.\"\"\"\n if jar_filename is None:\n return\n\n if not isinstance(jar_filename, string_type):\n raise TypeError(\"jar_filename is not a string: %r\" % jar_filename)\n\n if not os.path.exists(jar_filename):\n raise ValueError(\"jar_filename does not exist: %r\" % jar_filename)\n"
] | class StanfordDependencies:
"""Abstract base class for converting Penn Treebank trees to Stanford
Dependencies. To actually use this, you'll want to instantiate one
of the backends. The easiest way to do this is via the get_instance()
helper method.
If you do not currently have the appropriate Java jar files, the
download_if_missing flag in the constructor will help you fetch them.
In this case, you can set version to a string with the CoreNLP version
you'd like. If unset, it will default to DEFAULT_CORENLP_VERSION.
Subclasses should (at minimum) override the convert_tree method. They
may also want to override convert_trees if they require batch
operation. They may also add their own custom keyword arguments to
__init__(), convert_tree(), and convert_trees()."""
__metaclass__ = ABCMeta
def __init__(self, jar_filename=None, download_if_missing=False,
version=None):
"""jar_filename should be the path to a Java jar file with
classfiles from Stanford CoreNLP or Stanford Parser.
If download_if_missing is True, it will automatically download
a jar file and store it locally. By default it will use
DEFAULT_CORENLP_VERSION but will use the version flag if
that argument is specified."""
if not (jar_filename or version is not None or download_if_missing):
raise ValueError("Must set either jar_filename, version, "
"or download_if_missing to True.")
self.jar_filename = jar_filename
if not self.jar_filename:
if version is None:
version = DEFAULT_CORENLP_VERSION
filename = 'stanford-corenlp-%s.jar' % version
self.jar_filename = self.setup_and_get_default_path(filename)
if download_if_missing:
self.download_if_missing(version)
def convert_trees(self, ptb_trees, representation='basic', universal=True,
include_punct=True, include_erased=False, **kwargs):
"""Convert a list of Penn Treebank formatted strings (ptb_trees)
into Stanford Dependencies. The dependencies are represented
as a list of sentences (CoNLL.Corpus), where each sentence
(CoNLL.Sentence) is itself a list of CoNLL.Token objects.
Currently supported representations are 'basic', 'collapsed',
'CCprocessed', and 'collapsedTree' which behave the same as they
in the CoreNLP command line tools. (note that in the online
CoreNLP demo, 'collapsed' is called 'enhanced')
Additional arguments: universal (if True, use universal
dependencies if they're available), include_punct (if False,
punctuation tokens will not be included), and include_erased
(if False and your representation might erase tokens, those
tokens will be omitted from the output).
See documentation on your backend to see if it supports
further options."""
kwargs.update(representation=representation, universal=universal,
include_punct=include_punct,
include_erased=include_erased)
return Corpus(self.convert_tree(ptb_tree, **kwargs)
for ptb_tree in ptb_trees)
@abstractmethod
def convert_tree(self, ptb_tree, representation='basic', **kwargs):
"""Converts a single Penn Treebank format tree to Stanford
Dependencies. With some backends, this can be considerably
slower than using convert_trees, so consider that if you're
doing a batch conversion. See convert_trees for more details
and a listing of possible kwargs."""
def setup_and_get_default_path(self, jar_base_filename):
"""Determine the user-specific install path for the Stanford
Dependencies jar if the jar_url is not specified and ensure that
it is writable (that is, make sure the directory exists). Returns
the full path for where the jar file should be installed."""
import os
import errno
install_dir = os.path.expanduser(INSTALL_DIR)
try:
os.makedirs(install_dir)
except OSError as ose:
if ose.errno != errno.EEXIST:
raise ose
jar_filename = os.path.join(install_dir, jar_base_filename)
return jar_filename
def download_if_missing(self, version=None, verbose=True):
"""Download the jar for version into the jar_filename specified
in the constructor. Will not overwrite jar_filename if it already
exists. version defaults to DEFAULT_CORENLP_VERSION (ideally the
latest but we can't guarantee that since PyStanfordDependencies
is distributed separately)."""
if os.path.exists(self.jar_filename):
return
jar_url = self.get_jar_url(version)
if verbose:
print("Downloading %r -> %r" % (jar_url, self.jar_filename))
opener = ErrorAwareURLOpener()
opener.retrieve(jar_url, filename=self.jar_filename)
@staticmethod
def _raise_on_bad_representation(representation):
"""Ensure that representation is a known Stanford Dependency
representation (raises a ValueError if the representation is
invalid)."""
if representation not in REPRESENTATIONS:
repr_desc = ', '.join(map(repr, REPRESENTATIONS))
raise ValueError("Unknown representation: %r (should be one "
"of %s)" % (representation, repr_desc))
@staticmethod
def _raise_on_bad_input(ptb_tree):
"""Ensure that ptb_tree is a valid Penn Treebank datatype or
raises a TypeError. Currently, this requires that ptb_tree is
a str or basestring (depending on Python version)."""
if not isinstance(ptb_tree, string_type):
raise TypeError("ptb_tree is not a string: %r" % ptb_tree)
@staticmethod
def _raise_on_bad_jar_filename(jar_filename):
"""Ensure that jar_filename is a valid path to a jar file."""
if jar_filename is None:
return
if not isinstance(jar_filename, string_type):
raise TypeError("jar_filename is not a string: %r" % jar_filename)
if not os.path.exists(jar_filename):
raise ValueError("jar_filename does not exist: %r" % jar_filename)
@staticmethod
def get_jar_url(version=None):
"""Get the URL to a Stanford CoreNLP jar file with a specific
version. These jars come from Maven since the Maven version is
smaller than the full CoreNLP distributions. Defaults to
DEFAULT_CORENLP_VERSION."""
if version is None:
version = DEFAULT_CORENLP_VERSION
try:
string_type = basestring
except NameError:
string_type = str
if not isinstance(version, string_type):
raise TypeError("Version must be a string or None (got %r)." %
version)
jar_filename = 'stanford-corenlp-%s.jar' % version
return 'http://search.maven.org/remotecontent?filepath=' + \
'edu/stanford/nlp/stanford-corenlp/%s/%s' % (version,
jar_filename)
@staticmethod
|
dmcc/PyStanfordDependencies | StanfordDependencies/JPypeBackend.py | JPypeBackend.convert_tree | python | def convert_tree(self, ptb_tree, representation='basic',
include_punct=True, include_erased=False,
add_lemmas=False, universal=True):
self._raise_on_bad_input(ptb_tree)
self._raise_on_bad_representation(representation)
tree = self.treeReader(ptb_tree)
if tree is None:
raise ValueError("Invalid Penn Treebank tree: %r" % ptb_tree)
deps = self._get_deps(tree, include_punct, representation,
universal=universal)
tagged_yield = self._listify(tree.taggedYield())
indices_to_words = dict(enumerate(tagged_yield, 1))
sentence = Sentence()
covered_indices = set()
def add_token(index, form, head, deprel, extra):
tag = indices_to_words[index].tag()
if add_lemmas:
lemma = self.stem(form, tag)
else:
lemma = None
token = Token(index=index, form=form, lemma=lemma, cpos=tag,
pos=tag, feats=None, head=head, deprel=deprel,
phead=None, pdeprel=None, extra=extra)
sentence.append(token)
# add token for each dependency
for dep in deps:
index = dep.dep().index()
head = dep.gov().index()
deprel = dep.reln().toString()
form = indices_to_words[index].value()
dep_is_copy = dep.dep().copyCount()
gov_is_copy = dep.gov().copyCount()
if dep_is_copy or gov_is_copy:
extra = {}
if dep_is_copy:
extra['dep_is_copy'] = dep_is_copy
if gov_is_copy:
extra['gov_is_copy'] = gov_is_copy
else:
extra = None
add_token(index, form, head, deprel, extra)
covered_indices.add(index)
if include_erased:
# see if there are any tokens that were erased
# and add them as well
all_indices = set(indices_to_words.keys())
for index in all_indices - covered_indices:
form = indices_to_words[index].value()
if not include_punct and not self.puncFilter(form):
continue
add_token(index, form, head=0, deprel='erased', extra=None)
# erased generally disrupt the ordering of the sentence
sentence.sort()
if representation == 'basic':
sentence.renumber()
return sentence | Arguments are as in StanfordDependencies.convert_trees but with
the addition of add_lemmas. If add_lemmas=True, we will run the
Stanford CoreNLP lemmatizer and fill in the lemma field. | train | https://github.com/dmcc/PyStanfordDependencies/blob/43d8f38a19e40087f273330087918c87df6d4d8f/StanfordDependencies/JPypeBackend.py#L86-L149 | [
"def renumber(self):\n \"\"\"Destructively renumber the indices based on the actual tokens\n (e.g., if there are gaps between token indices, this will remove\n them). Old Token objects will still exist, so you'll need to\n update your references.\"\"\"\n mapping = {0: 0} # old index -> real index\n needs_renumbering = False\n for real_index, token in enumerate(self, 1):\n mapping[token.index] = real_index\n if token.index != real_index:\n needs_renumbering = True\n\n if needs_renumbering:\n # update all indices\n self[:] = [token._replace(index=mapping[token.index],\n head=mapping[token.head])\n for token in self]\n",
"def _get_deps(self, tree, include_punct, representation, universal):\n \"\"\"Get a list of dependencies from a Stanford Tree for a specific\n Stanford Dependencies representation.\"\"\"\n if universal:\n converter = self.universal_converter\n\n if self.universal_converter == self.converter:\n import warnings\n warnings.warn(\"This jar doesn't support universal \"\n \"dependencies, falling back to Stanford \"\n \"Dependencies. To suppress this message, \"\n \"call with universal=False\")\n else:\n converter = self.converter\n\n if include_punct:\n egs = converter(tree, self.acceptFilter)\n else:\n egs = converter(tree)\n\n if representation == 'basic':\n deps = egs.typedDependencies()\n elif representation == 'collapsed':\n deps = egs.typedDependenciesCollapsed(True)\n elif representation == 'CCprocessed':\n deps = egs.typedDependenciesCCprocessed(True)\n else:\n # _raise_on_bad_representation should ensure that this\n # assertion doesn't fail\n assert representation == 'collapsedTree'\n deps = egs.typedDependenciesCollapsedTree()\n return self._listify(deps)\n",
"def _listify(collection):\n \"\"\"This is a workaround where Collections are no longer iterable\n when using JPype.\"\"\"\n new_list = []\n for index in range(len(collection)):\n new_list.append(collection[index])\n return new_list\n",
"def _raise_on_bad_representation(representation):\n \"\"\"Ensure that representation is a known Stanford Dependency\n representation (raises a ValueError if the representation is\n invalid).\"\"\"\n if representation not in REPRESENTATIONS:\n repr_desc = ', '.join(map(repr, REPRESENTATIONS))\n raise ValueError(\"Unknown representation: %r (should be one \"\n \"of %s)\" % (representation, repr_desc))\n",
"def _raise_on_bad_input(ptb_tree):\n \"\"\"Ensure that ptb_tree is a valid Penn Treebank datatype or\n raises a TypeError. Currently, this requires that ptb_tree is\n a str or basestring (depending on Python version).\"\"\"\n if not isinstance(ptb_tree, string_type):\n raise TypeError(\"ptb_tree is not a string: %r\" % ptb_tree)\n",
"def add_token(index, form, head, deprel, extra):\n tag = indices_to_words[index].tag()\n if add_lemmas:\n lemma = self.stem(form, tag)\n else:\n lemma = None\n token = Token(index=index, form=form, lemma=lemma, cpos=tag,\n pos=tag, feats=None, head=head, deprel=deprel,\n phead=None, pdeprel=None, extra=extra)\n sentence.append(token)\n"
] | class JPypeBackend(StanfordDependencies):
"""Faster backend than SubprocessBackend but requires you to install
jpype ('pip install JPype1', not 'JPype'). May be less stable. There's
no speed benefit of using convert_trees() over convert_tree() for this
backend. In terms of output, should be identical to SubprocessBackend
except that all string fields will be unicode. Additionally, has
the option to run the lemmatizer (see convert_tree())."""
def __init__(self, jar_filename=None, download_if_missing=False,
version=None, extra_jvm_args=None, start_jpype=True,
jvm_path=None):
"""extra_jvm_args can be set to a list of strings which will
be passed to your JVM. If start_jpype is True, we will start
a JVM via JPype if one hasn't been started already. The user is
responsible for stopping the JVM (jpype.shutdownJVM()) when they
are done converting. Once the JVM has been shutdown, you'll need
to create a new JPypeBackend in order to convert after that.
jvm_path is the path to libjvm.so (if None, will use JPype's
default JRE path)."""
StanfordDependencies.__init__(self, jar_filename, download_if_missing,
version)
if start_jpype and not jpype.isJVMStarted():
jpype.startJVM(jvm_path or jpype.getDefaultJVMPath(),
'-ea',
'-Djava.class.path=' + self.jar_filename,
*(extra_jvm_args or []))
self.corenlp = jpype.JPackage('edu').stanford.nlp
try:
self.acceptFilter = self.corenlp.util.Filters.acceptFilter()
except TypeError:
# this appears to be caused by a mismatch between CoreNLP and JRE
# versions since this method changed to return a Predicate.
version = jpype.java.lang.System.getProperty("java.version")
self._report_version_error(version)
trees = self.corenlp.trees
self.treeReader = trees.Trees.readTree
self.converter = trees.EnglishGrammaticalStructure
self.universal_converter = trees.UniversalEnglishGrammaticalStructure
# we now need to test whether we can actually create a universal
# converter -- we'll call it with invalid number of arguments
# since we don't want create a tree just for this
try:
self.universal_converter()
except TypeError:
# this is JPype's way of saying that it doesn't exist so we
# fall back to the original converter
self.universal_converter = self.converter
except RuntimeError as re:
# this means it exists but wanted a different number of arguments
# (in other words, we have a universal converter)
assert "No matching overloads found" in str(re)
try:
self.stemmer = \
self.corenlp.process.Morphology.stemStaticSynchronized
except AttributeError:
# stemStaticSynchronized was renamed in CoreNLP 3.6.0 to stemStatic
self.stemmer = \
self.corenlp.process.Morphology.stemStatic
puncFilterInstance = trees.PennTreebankLanguagePack(). \
punctuationWordRejectFilter()
try:
self.puncFilter = puncFilterInstance.test
except AttributeError:
self.puncFilter = puncFilterInstance.accept
self.lemma_cache = {}
def stem(self, form, tag):
"""Returns the stem of word with specific form and part-of-speech
tag according to the Stanford lemmatizer. Lemmas are cached."""
key = (form, tag)
if key not in self.lemma_cache:
lemma = self.stemmer(*key).word()
self.lemma_cache[key] = lemma
return self.lemma_cache[key]
def _get_deps(self, tree, include_punct, representation, universal):
"""Get a list of dependencies from a Stanford Tree for a specific
Stanford Dependencies representation."""
if universal:
converter = self.universal_converter
if self.universal_converter == self.converter:
import warnings
warnings.warn("This jar doesn't support universal "
"dependencies, falling back to Stanford "
"Dependencies. To suppress this message, "
"call with universal=False")
else:
converter = self.converter
if include_punct:
egs = converter(tree, self.acceptFilter)
else:
egs = converter(tree)
if representation == 'basic':
deps = egs.typedDependencies()
elif representation == 'collapsed':
deps = egs.typedDependenciesCollapsed(True)
elif representation == 'CCprocessed':
deps = egs.typedDependenciesCCprocessed(True)
else:
# _raise_on_bad_representation should ensure that this
# assertion doesn't fail
assert representation == 'collapsedTree'
deps = egs.typedDependenciesCollapsedTree()
return self._listify(deps)
@staticmethod
def _listify(collection):
"""This is a workaround where Collections are no longer iterable
when using JPype."""
new_list = []
for index in range(len(collection)):
new_list.append(collection[index])
return new_list
@staticmethod
def _report_version_error(version):
print("Your Java version:", version)
if version.split('.')[:2] < ['1', '8']:
print("The last CoreNLP release for Java 1.6/1.7 was 3.4.1")
print("Try using: StanfordDependencies.get_instance("
"backend='jpype', version='3.4.1')")
print()
raise JavaRuntimeVersionError()
|
dmcc/PyStanfordDependencies | StanfordDependencies/JPypeBackend.py | JPypeBackend.stem | python | def stem(self, form, tag):
key = (form, tag)
if key not in self.lemma_cache:
lemma = self.stemmer(*key).word()
self.lemma_cache[key] = lemma
return self.lemma_cache[key] | Returns the stem of word with specific form and part-of-speech
tag according to the Stanford lemmatizer. Lemmas are cached. | train | https://github.com/dmcc/PyStanfordDependencies/blob/43d8f38a19e40087f273330087918c87df6d4d8f/StanfordDependencies/JPypeBackend.py#L150-L157 | null | class JPypeBackend(StanfordDependencies):
"""Faster backend than SubprocessBackend but requires you to install
jpype ('pip install JPype1', not 'JPype'). May be less stable. There's
no speed benefit of using convert_trees() over convert_tree() for this
backend. In terms of output, should be identical to SubprocessBackend
except that all string fields will be unicode. Additionally, has
the option to run the lemmatizer (see convert_tree())."""
def __init__(self, jar_filename=None, download_if_missing=False,
version=None, extra_jvm_args=None, start_jpype=True,
jvm_path=None):
"""extra_jvm_args can be set to a list of strings which will
be passed to your JVM. If start_jpype is True, we will start
a JVM via JPype if one hasn't been started already. The user is
responsible for stopping the JVM (jpype.shutdownJVM()) when they
are done converting. Once the JVM has been shutdown, you'll need
to create a new JPypeBackend in order to convert after that.
jvm_path is the path to libjvm.so (if None, will use JPype's
default JRE path)."""
StanfordDependencies.__init__(self, jar_filename, download_if_missing,
version)
if start_jpype and not jpype.isJVMStarted():
jpype.startJVM(jvm_path or jpype.getDefaultJVMPath(),
'-ea',
'-Djava.class.path=' + self.jar_filename,
*(extra_jvm_args or []))
self.corenlp = jpype.JPackage('edu').stanford.nlp
try:
self.acceptFilter = self.corenlp.util.Filters.acceptFilter()
except TypeError:
# this appears to be caused by a mismatch between CoreNLP and JRE
# versions since this method changed to return a Predicate.
version = jpype.java.lang.System.getProperty("java.version")
self._report_version_error(version)
trees = self.corenlp.trees
self.treeReader = trees.Trees.readTree
self.converter = trees.EnglishGrammaticalStructure
self.universal_converter = trees.UniversalEnglishGrammaticalStructure
# we now need to test whether we can actually create a universal
# converter -- we'll call it with invalid number of arguments
# since we don't want create a tree just for this
try:
self.universal_converter()
except TypeError:
# this is JPype's way of saying that it doesn't exist so we
# fall back to the original converter
self.universal_converter = self.converter
except RuntimeError as re:
# this means it exists but wanted a different number of arguments
# (in other words, we have a universal converter)
assert "No matching overloads found" in str(re)
try:
self.stemmer = \
self.corenlp.process.Morphology.stemStaticSynchronized
except AttributeError:
# stemStaticSynchronized was renamed in CoreNLP 3.6.0 to stemStatic
self.stemmer = \
self.corenlp.process.Morphology.stemStatic
puncFilterInstance = trees.PennTreebankLanguagePack(). \
punctuationWordRejectFilter()
try:
self.puncFilter = puncFilterInstance.test
except AttributeError:
self.puncFilter = puncFilterInstance.accept
self.lemma_cache = {}
def convert_tree(self, ptb_tree, representation='basic',
include_punct=True, include_erased=False,
add_lemmas=False, universal=True):
"""Arguments are as in StanfordDependencies.convert_trees but with
the addition of add_lemmas. If add_lemmas=True, we will run the
Stanford CoreNLP lemmatizer and fill in the lemma field."""
self._raise_on_bad_input(ptb_tree)
self._raise_on_bad_representation(representation)
tree = self.treeReader(ptb_tree)
if tree is None:
raise ValueError("Invalid Penn Treebank tree: %r" % ptb_tree)
deps = self._get_deps(tree, include_punct, representation,
universal=universal)
tagged_yield = self._listify(tree.taggedYield())
indices_to_words = dict(enumerate(tagged_yield, 1))
sentence = Sentence()
covered_indices = set()
def add_token(index, form, head, deprel, extra):
tag = indices_to_words[index].tag()
if add_lemmas:
lemma = self.stem(form, tag)
else:
lemma = None
token = Token(index=index, form=form, lemma=lemma, cpos=tag,
pos=tag, feats=None, head=head, deprel=deprel,
phead=None, pdeprel=None, extra=extra)
sentence.append(token)
# add token for each dependency
for dep in deps:
index = dep.dep().index()
head = dep.gov().index()
deprel = dep.reln().toString()
form = indices_to_words[index].value()
dep_is_copy = dep.dep().copyCount()
gov_is_copy = dep.gov().copyCount()
if dep_is_copy or gov_is_copy:
extra = {}
if dep_is_copy:
extra['dep_is_copy'] = dep_is_copy
if gov_is_copy:
extra['gov_is_copy'] = gov_is_copy
else:
extra = None
add_token(index, form, head, deprel, extra)
covered_indices.add(index)
if include_erased:
# see if there are any tokens that were erased
# and add them as well
all_indices = set(indices_to_words.keys())
for index in all_indices - covered_indices:
form = indices_to_words[index].value()
if not include_punct and not self.puncFilter(form):
continue
add_token(index, form, head=0, deprel='erased', extra=None)
# erased generally disrupt the ordering of the sentence
sentence.sort()
if representation == 'basic':
sentence.renumber()
return sentence
def _get_deps(self, tree, include_punct, representation, universal):
"""Get a list of dependencies from a Stanford Tree for a specific
Stanford Dependencies representation."""
if universal:
converter = self.universal_converter
if self.universal_converter == self.converter:
import warnings
warnings.warn("This jar doesn't support universal "
"dependencies, falling back to Stanford "
"Dependencies. To suppress this message, "
"call with universal=False")
else:
converter = self.converter
if include_punct:
egs = converter(tree, self.acceptFilter)
else:
egs = converter(tree)
if representation == 'basic':
deps = egs.typedDependencies()
elif representation == 'collapsed':
deps = egs.typedDependenciesCollapsed(True)
elif representation == 'CCprocessed':
deps = egs.typedDependenciesCCprocessed(True)
else:
# _raise_on_bad_representation should ensure that this
# assertion doesn't fail
assert representation == 'collapsedTree'
deps = egs.typedDependenciesCollapsedTree()
return self._listify(deps)
@staticmethod
def _listify(collection):
"""This is a workaround where Collections are no longer iterable
when using JPype."""
new_list = []
for index in range(len(collection)):
new_list.append(collection[index])
return new_list
@staticmethod
def _report_version_error(version):
print("Your Java version:", version)
if version.split('.')[:2] < ['1', '8']:
print("The last CoreNLP release for Java 1.6/1.7 was 3.4.1")
print("Try using: StanfordDependencies.get_instance("
"backend='jpype', version='3.4.1')")
print()
raise JavaRuntimeVersionError()
|
dmcc/PyStanfordDependencies | StanfordDependencies/JPypeBackend.py | JPypeBackend._get_deps | python | def _get_deps(self, tree, include_punct, representation, universal):
if universal:
converter = self.universal_converter
if self.universal_converter == self.converter:
import warnings
warnings.warn("This jar doesn't support universal "
"dependencies, falling back to Stanford "
"Dependencies. To suppress this message, "
"call with universal=False")
else:
converter = self.converter
if include_punct:
egs = converter(tree, self.acceptFilter)
else:
egs = converter(tree)
if representation == 'basic':
deps = egs.typedDependencies()
elif representation == 'collapsed':
deps = egs.typedDependenciesCollapsed(True)
elif representation == 'CCprocessed':
deps = egs.typedDependenciesCCprocessed(True)
else:
# _raise_on_bad_representation should ensure that this
# assertion doesn't fail
assert representation == 'collapsedTree'
deps = egs.typedDependenciesCollapsedTree()
return self._listify(deps) | Get a list of dependencies from a Stanford Tree for a specific
Stanford Dependencies representation. | train | https://github.com/dmcc/PyStanfordDependencies/blob/43d8f38a19e40087f273330087918c87df6d4d8f/StanfordDependencies/JPypeBackend.py#L159-L190 | [
"def _listify(collection):\n \"\"\"This is a workaround where Collections are no longer iterable\n when using JPype.\"\"\"\n new_list = []\n for index in range(len(collection)):\n new_list.append(collection[index])\n return new_list\n"
] | class JPypeBackend(StanfordDependencies):
"""Faster backend than SubprocessBackend but requires you to install
jpype ('pip install JPype1', not 'JPype'). May be less stable. There's
no speed benefit of using convert_trees() over convert_tree() for this
backend. In terms of output, should be identical to SubprocessBackend
except that all string fields will be unicode. Additionally, has
the option to run the lemmatizer (see convert_tree())."""
def __init__(self, jar_filename=None, download_if_missing=False,
version=None, extra_jvm_args=None, start_jpype=True,
jvm_path=None):
"""extra_jvm_args can be set to a list of strings which will
be passed to your JVM. If start_jpype is True, we will start
a JVM via JPype if one hasn't been started already. The user is
responsible for stopping the JVM (jpype.shutdownJVM()) when they
are done converting. Once the JVM has been shutdown, you'll need
to create a new JPypeBackend in order to convert after that.
jvm_path is the path to libjvm.so (if None, will use JPype's
default JRE path)."""
StanfordDependencies.__init__(self, jar_filename, download_if_missing,
version)
if start_jpype and not jpype.isJVMStarted():
jpype.startJVM(jvm_path or jpype.getDefaultJVMPath(),
'-ea',
'-Djava.class.path=' + self.jar_filename,
*(extra_jvm_args or []))
self.corenlp = jpype.JPackage('edu').stanford.nlp
try:
self.acceptFilter = self.corenlp.util.Filters.acceptFilter()
except TypeError:
# this appears to be caused by a mismatch between CoreNLP and JRE
# versions since this method changed to return a Predicate.
version = jpype.java.lang.System.getProperty("java.version")
self._report_version_error(version)
trees = self.corenlp.trees
self.treeReader = trees.Trees.readTree
self.converter = trees.EnglishGrammaticalStructure
self.universal_converter = trees.UniversalEnglishGrammaticalStructure
# we now need to test whether we can actually create a universal
# converter -- we'll call it with invalid number of arguments
# since we don't want create a tree just for this
try:
self.universal_converter()
except TypeError:
# this is JPype's way of saying that it doesn't exist so we
# fall back to the original converter
self.universal_converter = self.converter
except RuntimeError as re:
# this means it exists but wanted a different number of arguments
# (in other words, we have a universal converter)
assert "No matching overloads found" in str(re)
try:
self.stemmer = \
self.corenlp.process.Morphology.stemStaticSynchronized
except AttributeError:
# stemStaticSynchronized was renamed in CoreNLP 3.6.0 to stemStatic
self.stemmer = \
self.corenlp.process.Morphology.stemStatic
puncFilterInstance = trees.PennTreebankLanguagePack(). \
punctuationWordRejectFilter()
try:
self.puncFilter = puncFilterInstance.test
except AttributeError:
self.puncFilter = puncFilterInstance.accept
self.lemma_cache = {}
def convert_tree(self, ptb_tree, representation='basic',
include_punct=True, include_erased=False,
add_lemmas=False, universal=True):
"""Arguments are as in StanfordDependencies.convert_trees but with
the addition of add_lemmas. If add_lemmas=True, we will run the
Stanford CoreNLP lemmatizer and fill in the lemma field."""
self._raise_on_bad_input(ptb_tree)
self._raise_on_bad_representation(representation)
tree = self.treeReader(ptb_tree)
if tree is None:
raise ValueError("Invalid Penn Treebank tree: %r" % ptb_tree)
deps = self._get_deps(tree, include_punct, representation,
universal=universal)
tagged_yield = self._listify(tree.taggedYield())
indices_to_words = dict(enumerate(tagged_yield, 1))
sentence = Sentence()
covered_indices = set()
def add_token(index, form, head, deprel, extra):
tag = indices_to_words[index].tag()
if add_lemmas:
lemma = self.stem(form, tag)
else:
lemma = None
token = Token(index=index, form=form, lemma=lemma, cpos=tag,
pos=tag, feats=None, head=head, deprel=deprel,
phead=None, pdeprel=None, extra=extra)
sentence.append(token)
# add token for each dependency
for dep in deps:
index = dep.dep().index()
head = dep.gov().index()
deprel = dep.reln().toString()
form = indices_to_words[index].value()
dep_is_copy = dep.dep().copyCount()
gov_is_copy = dep.gov().copyCount()
if dep_is_copy or gov_is_copy:
extra = {}
if dep_is_copy:
extra['dep_is_copy'] = dep_is_copy
if gov_is_copy:
extra['gov_is_copy'] = gov_is_copy
else:
extra = None
add_token(index, form, head, deprel, extra)
covered_indices.add(index)
if include_erased:
# see if there are any tokens that were erased
# and add them as well
all_indices = set(indices_to_words.keys())
for index in all_indices - covered_indices:
form = indices_to_words[index].value()
if not include_punct and not self.puncFilter(form):
continue
add_token(index, form, head=0, deprel='erased', extra=None)
# erased generally disrupt the ordering of the sentence
sentence.sort()
if representation == 'basic':
sentence.renumber()
return sentence
def stem(self, form, tag):
"""Returns the stem of word with specific form and part-of-speech
tag according to the Stanford lemmatizer. Lemmas are cached."""
key = (form, tag)
if key not in self.lemma_cache:
lemma = self.stemmer(*key).word()
self.lemma_cache[key] = lemma
return self.lemma_cache[key]
@staticmethod
def _listify(collection):
"""This is a workaround where Collections are no longer iterable
when using JPype."""
new_list = []
for index in range(len(collection)):
new_list.append(collection[index])
return new_list
@staticmethod
def _report_version_error(version):
print("Your Java version:", version)
if version.split('.')[:2] < ['1', '8']:
print("The last CoreNLP release for Java 1.6/1.7 was 3.4.1")
print("Try using: StanfordDependencies.get_instance("
"backend='jpype', version='3.4.1')")
print()
raise JavaRuntimeVersionError()
|
dmcc/PyStanfordDependencies | StanfordDependencies/JPypeBackend.py | JPypeBackend._listify | python | def _listify(collection):
new_list = []
for index in range(len(collection)):
new_list.append(collection[index])
return new_list | This is a workaround where Collections are no longer iterable
when using JPype. | train | https://github.com/dmcc/PyStanfordDependencies/blob/43d8f38a19e40087f273330087918c87df6d4d8f/StanfordDependencies/JPypeBackend.py#L193-L199 | null | class JPypeBackend(StanfordDependencies):
"""Faster backend than SubprocessBackend but requires you to install
jpype ('pip install JPype1', not 'JPype'). May be less stable. There's
no speed benefit of using convert_trees() over convert_tree() for this
backend. In terms of output, should be identical to SubprocessBackend
except that all string fields will be unicode. Additionally, has
the option to run the lemmatizer (see convert_tree())."""
def __init__(self, jar_filename=None, download_if_missing=False,
version=None, extra_jvm_args=None, start_jpype=True,
jvm_path=None):
"""extra_jvm_args can be set to a list of strings which will
be passed to your JVM. If start_jpype is True, we will start
a JVM via JPype if one hasn't been started already. The user is
responsible for stopping the JVM (jpype.shutdownJVM()) when they
are done converting. Once the JVM has been shutdown, you'll need
to create a new JPypeBackend in order to convert after that.
jvm_path is the path to libjvm.so (if None, will use JPype's
default JRE path)."""
StanfordDependencies.__init__(self, jar_filename, download_if_missing,
version)
if start_jpype and not jpype.isJVMStarted():
jpype.startJVM(jvm_path or jpype.getDefaultJVMPath(),
'-ea',
'-Djava.class.path=' + self.jar_filename,
*(extra_jvm_args or []))
self.corenlp = jpype.JPackage('edu').stanford.nlp
try:
self.acceptFilter = self.corenlp.util.Filters.acceptFilter()
except TypeError:
# this appears to be caused by a mismatch between CoreNLP and JRE
# versions since this method changed to return a Predicate.
version = jpype.java.lang.System.getProperty("java.version")
self._report_version_error(version)
trees = self.corenlp.trees
self.treeReader = trees.Trees.readTree
self.converter = trees.EnglishGrammaticalStructure
self.universal_converter = trees.UniversalEnglishGrammaticalStructure
# we now need to test whether we can actually create a universal
# converter -- we'll call it with invalid number of arguments
# since we don't want create a tree just for this
try:
self.universal_converter()
except TypeError:
# this is JPype's way of saying that it doesn't exist so we
# fall back to the original converter
self.universal_converter = self.converter
except RuntimeError as re:
# this means it exists but wanted a different number of arguments
# (in other words, we have a universal converter)
assert "No matching overloads found" in str(re)
try:
self.stemmer = \
self.corenlp.process.Morphology.stemStaticSynchronized
except AttributeError:
# stemStaticSynchronized was renamed in CoreNLP 3.6.0 to stemStatic
self.stemmer = \
self.corenlp.process.Morphology.stemStatic
puncFilterInstance = trees.PennTreebankLanguagePack(). \
punctuationWordRejectFilter()
try:
self.puncFilter = puncFilterInstance.test
except AttributeError:
self.puncFilter = puncFilterInstance.accept
self.lemma_cache = {}
def convert_tree(self, ptb_tree, representation='basic',
include_punct=True, include_erased=False,
add_lemmas=False, universal=True):
"""Arguments are as in StanfordDependencies.convert_trees but with
the addition of add_lemmas. If add_lemmas=True, we will run the
Stanford CoreNLP lemmatizer and fill in the lemma field."""
self._raise_on_bad_input(ptb_tree)
self._raise_on_bad_representation(representation)
tree = self.treeReader(ptb_tree)
if tree is None:
raise ValueError("Invalid Penn Treebank tree: %r" % ptb_tree)
deps = self._get_deps(tree, include_punct, representation,
universal=universal)
tagged_yield = self._listify(tree.taggedYield())
indices_to_words = dict(enumerate(tagged_yield, 1))
sentence = Sentence()
covered_indices = set()
def add_token(index, form, head, deprel, extra):
tag = indices_to_words[index].tag()
if add_lemmas:
lemma = self.stem(form, tag)
else:
lemma = None
token = Token(index=index, form=form, lemma=lemma, cpos=tag,
pos=tag, feats=None, head=head, deprel=deprel,
phead=None, pdeprel=None, extra=extra)
sentence.append(token)
# add token for each dependency
for dep in deps:
index = dep.dep().index()
head = dep.gov().index()
deprel = dep.reln().toString()
form = indices_to_words[index].value()
dep_is_copy = dep.dep().copyCount()
gov_is_copy = dep.gov().copyCount()
if dep_is_copy or gov_is_copy:
extra = {}
if dep_is_copy:
extra['dep_is_copy'] = dep_is_copy
if gov_is_copy:
extra['gov_is_copy'] = gov_is_copy
else:
extra = None
add_token(index, form, head, deprel, extra)
covered_indices.add(index)
if include_erased:
# see if there are any tokens that were erased
# and add them as well
all_indices = set(indices_to_words.keys())
for index in all_indices - covered_indices:
form = indices_to_words[index].value()
if not include_punct and not self.puncFilter(form):
continue
add_token(index, form, head=0, deprel='erased', extra=None)
# erased generally disrupt the ordering of the sentence
sentence.sort()
if representation == 'basic':
sentence.renumber()
return sentence
def stem(self, form, tag):
"""Returns the stem of word with specific form and part-of-speech
tag according to the Stanford lemmatizer. Lemmas are cached."""
key = (form, tag)
if key not in self.lemma_cache:
lemma = self.stemmer(*key).word()
self.lemma_cache[key] = lemma
return self.lemma_cache[key]
def _get_deps(self, tree, include_punct, representation, universal):
"""Get a list of dependencies from a Stanford Tree for a specific
Stanford Dependencies representation."""
if universal:
converter = self.universal_converter
if self.universal_converter == self.converter:
import warnings
warnings.warn("This jar doesn't support universal "
"dependencies, falling back to Stanford "
"Dependencies. To suppress this message, "
"call with universal=False")
else:
converter = self.converter
if include_punct:
egs = converter(tree, self.acceptFilter)
else:
egs = converter(tree)
if representation == 'basic':
deps = egs.typedDependencies()
elif representation == 'collapsed':
deps = egs.typedDependenciesCollapsed(True)
elif representation == 'CCprocessed':
deps = egs.typedDependenciesCCprocessed(True)
else:
# _raise_on_bad_representation should ensure that this
# assertion doesn't fail
assert representation == 'collapsedTree'
deps = egs.typedDependenciesCollapsedTree()
return self._listify(deps)
@staticmethod
@staticmethod
def _report_version_error(version):
print("Your Java version:", version)
if version.split('.')[:2] < ['1', '8']:
print("The last CoreNLP release for Java 1.6/1.7 was 3.4.1")
print("Try using: StanfordDependencies.get_instance("
"backend='jpype', version='3.4.1')")
print()
raise JavaRuntimeVersionError()
|
dmcc/PyStanfordDependencies | StanfordDependencies/CoNLL.py | Token.as_conll | python | def as_conll(self):
def get(field):
value = getattr(self, field)
if value is None:
value = '_'
elif field == 'feats':
value = '|'.join(value)
return str(value)
return '\t'.join([get(field) for field in FIELD_NAMES]) | Represent this Token as a line as a string in CoNLL-X format. | train | https://github.com/dmcc/PyStanfordDependencies/blob/43d8f38a19e40087f273330087918c87df6d4d8f/StanfordDependencies/CoNLL.py#L74-L83 | null | class Token(namedtuple('Token', FIELD_NAMES_PLUS)):
"""CoNLL-X style dependency token. Fields include:
- form (the word form)
- lemma (the word's base form or lemma) -- empty for SubprocessBackend
- pos (part of speech tag)
- index (index of the token in the sentence)
- head (index of the head of this token), and
- deprel (the dependency relation between this token and its head)
There are other fields but they typically won't be populated by
StanfordDependencies. Fields are immutable.
See CoNLL-X shared task on Multilingual Dependency Parsing by
Buchholz and Marsi(2006) (http://aclweb.org/anthology/W06-2920)
(Section 3: Data format, task definition)
for a complete description."""
def __lt__(self, other):
"""Provides an ordering over Tokens. Tokens are compared by each
field in order."""
if not isinstance(other, Token):
raise TypeError("unorderable types: %s < %s" %
(self.__class__.__name__,
other.__class__.__name__))
self_fields = self[:-1]
other_fields = other[:-1]
def get_extra(token):
if token.extra is None:
return []
else:
return sorted(token.extra.items())
if self_fields == other_fields:
return get_extra(self) < get_extra(other)
else:
return self_fields < other_fields
def __repr__(self):
"""Represent this Token as Python code. Note that the resulting
representation may not be a valid Python call since this skips
fields with empty values."""
# slightly different from the official tuple __repr__ in that
# we skip any fields with None as their value
items = [(field, getattr(self, field, None))
for field in FIELD_NAMES_PLUS]
fields = ['%s=%r' % (k, v) for k, v in items if v is not None]
return '%s(%s)' % (self.__class__.__name__, ', '.join(fields))
@classmethod
def from_conll(this_class, text):
"""Construct a Token from a line in CoNLL-X format."""
fields = text.split('\t')
fields[0] = int(fields[0]) # index
fields[6] = int(fields[6]) # head index
if fields[5] != '_': # feats
fields[5] = tuple(fields[5].split('|'))
fields = [value if value != '_' else None for value in fields]
fields.append(None) # for extra
return this_class(**dict(zip(FIELD_NAMES_PLUS, fields)))
|
dmcc/PyStanfordDependencies | StanfordDependencies/CoNLL.py | Token.from_conll | python | def from_conll(this_class, text):
fields = text.split('\t')
fields[0] = int(fields[0]) # index
fields[6] = int(fields[6]) # head index
if fields[5] != '_': # feats
fields[5] = tuple(fields[5].split('|'))
fields = [value if value != '_' else None for value in fields]
fields.append(None) # for extra
return this_class(**dict(zip(FIELD_NAMES_PLUS, fields))) | Construct a Token from a line in CoNLL-X format. | train | https://github.com/dmcc/PyStanfordDependencies/blob/43d8f38a19e40087f273330087918c87df6d4d8f/StanfordDependencies/CoNLL.py#L85-L94 | null | class Token(namedtuple('Token', FIELD_NAMES_PLUS)):
"""CoNLL-X style dependency token. Fields include:
- form (the word form)
- lemma (the word's base form or lemma) -- empty for SubprocessBackend
- pos (part of speech tag)
- index (index of the token in the sentence)
- head (index of the head of this token), and
- deprel (the dependency relation between this token and its head)
There are other fields but they typically won't be populated by
StanfordDependencies. Fields are immutable.
See CoNLL-X shared task on Multilingual Dependency Parsing by
Buchholz and Marsi(2006) (http://aclweb.org/anthology/W06-2920)
(Section 3: Data format, task definition)
for a complete description."""
def __lt__(self, other):
"""Provides an ordering over Tokens. Tokens are compared by each
field in order."""
if not isinstance(other, Token):
raise TypeError("unorderable types: %s < %s" %
(self.__class__.__name__,
other.__class__.__name__))
self_fields = self[:-1]
other_fields = other[:-1]
def get_extra(token):
if token.extra is None:
return []
else:
return sorted(token.extra.items())
if self_fields == other_fields:
return get_extra(self) < get_extra(other)
else:
return self_fields < other_fields
def __repr__(self):
"""Represent this Token as Python code. Note that the resulting
representation may not be a valid Python call since this skips
fields with empty values."""
# slightly different from the official tuple __repr__ in that
# we skip any fields with None as their value
items = [(field, getattr(self, field, None))
for field in FIELD_NAMES_PLUS]
fields = ['%s=%r' % (k, v) for k, v in items if v is not None]
return '%s(%s)' % (self.__class__.__name__, ', '.join(fields))
def as_conll(self):
"""Represent this Token as a line as a string in CoNLL-X format."""
def get(field):
value = getattr(self, field)
if value is None:
value = '_'
elif field == 'feats':
value = '|'.join(value)
return str(value)
return '\t'.join([get(field) for field in FIELD_NAMES])
@classmethod
|
dmcc/PyStanfordDependencies | StanfordDependencies/CoNLL.py | Sentence.renumber | python | def renumber(self):
mapping = {0: 0} # old index -> real index
needs_renumbering = False
for real_index, token in enumerate(self, 1):
mapping[token.index] = real_index
if token.index != real_index:
needs_renumbering = True
if needs_renumbering:
# update all indices
self[:] = [token._replace(index=mapping[token.index],
head=mapping[token.head])
for token in self] | Destructively renumber the indices based on the actual tokens
(e.g., if there are gaps between token indices, this will remove
them). Old Token objects will still exist, so you'll need to
update your references. | train | https://github.com/dmcc/PyStanfordDependencies/blob/43d8f38a19e40087f273330087918c87df6d4d8f/StanfordDependencies/CoNLL.py#L98-L114 | null | class Sentence(list):
"""Sequence of Token objects."""
def as_conll(self):
"""Represent this Sentence as a string in CoNLL-X format. Note
that this doesn't end in a newline. Also see Corpus.as_conll()
for converting multiple sentences."""
return '\n'.join(token.as_conll() for token in self)
def as_asciitree(self, str_func=None):
"""Represent this Sentence as an ASCII tree string. Requires
the asciitree package. A default token stringifier is provided
but for custom formatting, specify a str_func which should take
a single Token and return a string."""
import asciitree
from collections import defaultdict
children = defaultdict(list)
# since erased nodes may be missing, multiple tokens may have same
# index (CCprocessed), etc.
token_to_index = {}
roots = []
for token in self:
children[token.head].append(token)
token_to_index[token] = token.index
if token.head == 0:
roots.append(token)
assert roots, "Couldn't find root Token(s)"
if len(roots) > 1:
# multiple roots so we make a fake one to be their parent
root = Token(0, 'ROOT', 'ROOT-LEMMA', 'ROOT-CPOS', 'ROOT-POS',
None, None, 'ROOT-DEPREL', None, None, None)
token_to_index[root] = 0
children[0] = roots
else:
root = roots[0]
def child_func(token):
index = token_to_index[token]
return children[index]
if not str_func:
def str_func(token):
return ' %s [%s]' % (token.form, token.deprel)
return asciitree.draw_tree(root, child_func, str_func)
def as_dotgraph(self, digraph_kwargs=None, id_prefix=None,
node_formatter=None, edge_formatter=None):
"""Returns this sentence as a graphviz.Digraph. Requires the
graphviz Python package and graphviz itself. There are several
ways to customize. Graph level keyword arguments can be passed
as a dictionary to digraph_kwargs. If you're viewing multiple
Sentences in the same graph, you'll need to set a unique prefix
string in id_prefix. Lastly, you can change the formatting of
nodes and edges with node_formatter and edge_formatter. Both
take a single Token as an argument (for edge_formatter, the
Token represents the child token) and return a dictionary of
keyword arguments which are passed to the node and edge creation
functions in graphviz. The node_formatter will also be called
with None as its token when adding the root."""
digraph_kwargs = digraph_kwargs or {}
id_prefix = id_prefix or ''
node_formatter = node_formatter or (lambda token: {})
edge_formatter = edge_formatter or (lambda token: {})
import graphviz
graph = graphviz.Digraph(**digraph_kwargs)
# add root node
graph.node(id_prefix + '0', 'root', **node_formatter(None))
# add remaining nodes and edges
already_added = set()
for token in self:
token_id = id_prefix + str(token.index)
parent_id = id_prefix + str(token.head)
if token_id not in already_added:
graph.node(token_id, token.form, **node_formatter(token))
graph.edge(parent_id, token_id, label=token.deprel,
**edge_formatter(token))
already_added.add(token_id)
return graph
@classmethod
def from_conll(this_class, stream):
"""Construct a Sentence. stream is an iterable over strings where
each string is a line in CoNLL-X format. If there are multiple
sentences in this stream, we only return the first one."""
stream = iter(stream)
sentence = this_class()
for line in stream:
line = line.strip()
if line:
sentence.append(Token.from_conll(line))
elif sentence:
return sentence
return sentence
@classmethod
def from_stanford_dependencies(this_class, stream, tree,
include_erased=False, include_punct=True):
"""Construct a Sentence. stream is an iterable over strings
where each string is a line representing a Stanford Dependency
as in the output of the command line Stanford Dependency tool:
deprel(gov-index, dep-depindex)
The corresponding Penn Treebank formatted tree must be provided
as well."""
stream = iter(stream)
sentence = this_class()
covered_indices = set()
tags_and_words = ptb_tags_and_words_re.findall(tree)
# perform some basic cleanups
tags_and_words = [(tag, word.replace(r'\/', '/'))
for (tag, word) in tags_and_words if tag != '-NONE-']
for line in stream:
if not line.strip():
if sentence:
# empty line means the sentence is over
break
else:
continue
line = line.replace(r'\/', '/')
matches = deps_re.findall(line)
assert len(matches) == 1
(deprel, gov_form, head, gov_is_copy, form, index,
dep_is_copy) = matches[0]
index = int(index)
tag, word = tags_and_words[index - 1]
assert form == word
covered_indices.add(index)
if not include_punct and deprel == 'punct':
continue
if gov_is_copy or dep_is_copy:
extra = {}
if gov_is_copy:
extra['gov_is_copy'] = len(gov_is_copy)
if dep_is_copy:
extra['dep_is_copy'] = len(dep_is_copy)
else:
extra = None
token = Token(index, form, None, tag, tag, None, int(head),
deprel, None, None, extra)
sentence.append(token)
if include_erased:
# look through words in the tree to see if any of them
# were erased
for index, (tag, word) in enumerate(tags_and_words, 1):
if index in covered_indices:
continue
token = Token(index, word, None, tag, tag, None, 0,
'erased', None, None, None)
sentence.append(token)
sentence.sort()
return sentence
|
dmcc/PyStanfordDependencies | StanfordDependencies/CoNLL.py | Sentence.as_asciitree | python | def as_asciitree(self, str_func=None):
import asciitree
from collections import defaultdict
children = defaultdict(list)
# since erased nodes may be missing, multiple tokens may have same
# index (CCprocessed), etc.
token_to_index = {}
roots = []
for token in self:
children[token.head].append(token)
token_to_index[token] = token.index
if token.head == 0:
roots.append(token)
assert roots, "Couldn't find root Token(s)"
if len(roots) > 1:
# multiple roots so we make a fake one to be their parent
root = Token(0, 'ROOT', 'ROOT-LEMMA', 'ROOT-CPOS', 'ROOT-POS',
None, None, 'ROOT-DEPREL', None, None, None)
token_to_index[root] = 0
children[0] = roots
else:
root = roots[0]
def child_func(token):
index = token_to_index[token]
return children[index]
if not str_func:
def str_func(token):
return ' %s [%s]' % (token.form, token.deprel)
return asciitree.draw_tree(root, child_func, str_func) | Represent this Sentence as an ASCII tree string. Requires
the asciitree package. A default token stringifier is provided
but for custom formatting, specify a str_func which should take
a single Token and return a string. | train | https://github.com/dmcc/PyStanfordDependencies/blob/43d8f38a19e40087f273330087918c87df6d4d8f/StanfordDependencies/CoNLL.py#L120-L155 | null | class Sentence(list):
"""Sequence of Token objects."""
def renumber(self):
"""Destructively renumber the indices based on the actual tokens
(e.g., if there are gaps between token indices, this will remove
them). Old Token objects will still exist, so you'll need to
update your references."""
mapping = {0: 0} # old index -> real index
needs_renumbering = False
for real_index, token in enumerate(self, 1):
mapping[token.index] = real_index
if token.index != real_index:
needs_renumbering = True
if needs_renumbering:
# update all indices
self[:] = [token._replace(index=mapping[token.index],
head=mapping[token.head])
for token in self]
def as_conll(self):
"""Represent this Sentence as a string in CoNLL-X format. Note
that this doesn't end in a newline. Also see Corpus.as_conll()
for converting multiple sentences."""
return '\n'.join(token.as_conll() for token in self)
def as_dotgraph(self, digraph_kwargs=None, id_prefix=None,
node_formatter=None, edge_formatter=None):
"""Returns this sentence as a graphviz.Digraph. Requires the
graphviz Python package and graphviz itself. There are several
ways to customize. Graph level keyword arguments can be passed
as a dictionary to digraph_kwargs. If you're viewing multiple
Sentences in the same graph, you'll need to set a unique prefix
string in id_prefix. Lastly, you can change the formatting of
nodes and edges with node_formatter and edge_formatter. Both
take a single Token as an argument (for edge_formatter, the
Token represents the child token) and return a dictionary of
keyword arguments which are passed to the node and edge creation
functions in graphviz. The node_formatter will also be called
with None as its token when adding the root."""
digraph_kwargs = digraph_kwargs or {}
id_prefix = id_prefix or ''
node_formatter = node_formatter or (lambda token: {})
edge_formatter = edge_formatter or (lambda token: {})
import graphviz
graph = graphviz.Digraph(**digraph_kwargs)
# add root node
graph.node(id_prefix + '0', 'root', **node_formatter(None))
# add remaining nodes and edges
already_added = set()
for token in self:
token_id = id_prefix + str(token.index)
parent_id = id_prefix + str(token.head)
if token_id not in already_added:
graph.node(token_id, token.form, **node_formatter(token))
graph.edge(parent_id, token_id, label=token.deprel,
**edge_formatter(token))
already_added.add(token_id)
return graph
@classmethod
def from_conll(this_class, stream):
"""Construct a Sentence. stream is an iterable over strings where
each string is a line in CoNLL-X format. If there are multiple
sentences in this stream, we only return the first one."""
stream = iter(stream)
sentence = this_class()
for line in stream:
line = line.strip()
if line:
sentence.append(Token.from_conll(line))
elif sentence:
return sentence
return sentence
@classmethod
def from_stanford_dependencies(this_class, stream, tree,
include_erased=False, include_punct=True):
"""Construct a Sentence. stream is an iterable over strings
where each string is a line representing a Stanford Dependency
as in the output of the command line Stanford Dependency tool:
deprel(gov-index, dep-depindex)
The corresponding Penn Treebank formatted tree must be provided
as well."""
stream = iter(stream)
sentence = this_class()
covered_indices = set()
tags_and_words = ptb_tags_and_words_re.findall(tree)
# perform some basic cleanups
tags_and_words = [(tag, word.replace(r'\/', '/'))
for (tag, word) in tags_and_words if tag != '-NONE-']
for line in stream:
if not line.strip():
if sentence:
# empty line means the sentence is over
break
else:
continue
line = line.replace(r'\/', '/')
matches = deps_re.findall(line)
assert len(matches) == 1
(deprel, gov_form, head, gov_is_copy, form, index,
dep_is_copy) = matches[0]
index = int(index)
tag, word = tags_and_words[index - 1]
assert form == word
covered_indices.add(index)
if not include_punct and deprel == 'punct':
continue
if gov_is_copy or dep_is_copy:
extra = {}
if gov_is_copy:
extra['gov_is_copy'] = len(gov_is_copy)
if dep_is_copy:
extra['dep_is_copy'] = len(dep_is_copy)
else:
extra = None
token = Token(index, form, None, tag, tag, None, int(head),
deprel, None, None, extra)
sentence.append(token)
if include_erased:
# look through words in the tree to see if any of them
# were erased
for index, (tag, word) in enumerate(tags_and_words, 1):
if index in covered_indices:
continue
token = Token(index, word, None, tag, tag, None, 0,
'erased', None, None, None)
sentence.append(token)
sentence.sort()
return sentence
|
dmcc/PyStanfordDependencies | StanfordDependencies/CoNLL.py | Sentence.as_dotgraph | python | def as_dotgraph(self, digraph_kwargs=None, id_prefix=None,
node_formatter=None, edge_formatter=None):
digraph_kwargs = digraph_kwargs or {}
id_prefix = id_prefix or ''
node_formatter = node_formatter or (lambda token: {})
edge_formatter = edge_formatter or (lambda token: {})
import graphviz
graph = graphviz.Digraph(**digraph_kwargs)
# add root node
graph.node(id_prefix + '0', 'root', **node_formatter(None))
# add remaining nodes and edges
already_added = set()
for token in self:
token_id = id_prefix + str(token.index)
parent_id = id_prefix + str(token.head)
if token_id not in already_added:
graph.node(token_id, token.form, **node_formatter(token))
graph.edge(parent_id, token_id, label=token.deprel,
**edge_formatter(token))
already_added.add(token_id)
return graph | Returns this sentence as a graphviz.Digraph. Requires the
graphviz Python package and graphviz itself. There are several
ways to customize. Graph level keyword arguments can be passed
as a dictionary to digraph_kwargs. If you're viewing multiple
Sentences in the same graph, you'll need to set a unique prefix
string in id_prefix. Lastly, you can change the formatting of
nodes and edges with node_formatter and edge_formatter. Both
take a single Token as an argument (for edge_formatter, the
Token represents the child token) and return a dictionary of
keyword arguments which are passed to the node and edge creation
functions in graphviz. The node_formatter will also be called
with None as its token when adding the root. | train | https://github.com/dmcc/PyStanfordDependencies/blob/43d8f38a19e40087f273330087918c87df6d4d8f/StanfordDependencies/CoNLL.py#L156-L191 | null | class Sentence(list):
"""Sequence of Token objects."""
def renumber(self):
"""Destructively renumber the indices based on the actual tokens
(e.g., if there are gaps between token indices, this will remove
them). Old Token objects will still exist, so you'll need to
update your references."""
mapping = {0: 0} # old index -> real index
needs_renumbering = False
for real_index, token in enumerate(self, 1):
mapping[token.index] = real_index
if token.index != real_index:
needs_renumbering = True
if needs_renumbering:
# update all indices
self[:] = [token._replace(index=mapping[token.index],
head=mapping[token.head])
for token in self]
def as_conll(self):
"""Represent this Sentence as a string in CoNLL-X format. Note
that this doesn't end in a newline. Also see Corpus.as_conll()
for converting multiple sentences."""
return '\n'.join(token.as_conll() for token in self)
def as_asciitree(self, str_func=None):
"""Represent this Sentence as an ASCII tree string. Requires
the asciitree package. A default token stringifier is provided
but for custom formatting, specify a str_func which should take
a single Token and return a string."""
import asciitree
from collections import defaultdict
children = defaultdict(list)
# since erased nodes may be missing, multiple tokens may have same
# index (CCprocessed), etc.
token_to_index = {}
roots = []
for token in self:
children[token.head].append(token)
token_to_index[token] = token.index
if token.head == 0:
roots.append(token)
assert roots, "Couldn't find root Token(s)"
if len(roots) > 1:
# multiple roots so we make a fake one to be their parent
root = Token(0, 'ROOT', 'ROOT-LEMMA', 'ROOT-CPOS', 'ROOT-POS',
None, None, 'ROOT-DEPREL', None, None, None)
token_to_index[root] = 0
children[0] = roots
else:
root = roots[0]
def child_func(token):
index = token_to_index[token]
return children[index]
if not str_func:
def str_func(token):
return ' %s [%s]' % (token.form, token.deprel)
return asciitree.draw_tree(root, child_func, str_func)
@classmethod
def from_conll(this_class, stream):
"""Construct a Sentence. stream is an iterable over strings where
each string is a line in CoNLL-X format. If there are multiple
sentences in this stream, we only return the first one."""
stream = iter(stream)
sentence = this_class()
for line in stream:
line = line.strip()
if line:
sentence.append(Token.from_conll(line))
elif sentence:
return sentence
return sentence
@classmethod
def from_stanford_dependencies(this_class, stream, tree,
include_erased=False, include_punct=True):
"""Construct a Sentence. stream is an iterable over strings
where each string is a line representing a Stanford Dependency
as in the output of the command line Stanford Dependency tool:
deprel(gov-index, dep-depindex)
The corresponding Penn Treebank formatted tree must be provided
as well."""
stream = iter(stream)
sentence = this_class()
covered_indices = set()
tags_and_words = ptb_tags_and_words_re.findall(tree)
# perform some basic cleanups
tags_and_words = [(tag, word.replace(r'\/', '/'))
for (tag, word) in tags_and_words if tag != '-NONE-']
for line in stream:
if not line.strip():
if sentence:
# empty line means the sentence is over
break
else:
continue
line = line.replace(r'\/', '/')
matches = deps_re.findall(line)
assert len(matches) == 1
(deprel, gov_form, head, gov_is_copy, form, index,
dep_is_copy) = matches[0]
index = int(index)
tag, word = tags_and_words[index - 1]
assert form == word
covered_indices.add(index)
if not include_punct and deprel == 'punct':
continue
if gov_is_copy or dep_is_copy:
extra = {}
if gov_is_copy:
extra['gov_is_copy'] = len(gov_is_copy)
if dep_is_copy:
extra['dep_is_copy'] = len(dep_is_copy)
else:
extra = None
token = Token(index, form, None, tag, tag, None, int(head),
deprel, None, None, extra)
sentence.append(token)
if include_erased:
# look through words in the tree to see if any of them
# were erased
for index, (tag, word) in enumerate(tags_and_words, 1):
if index in covered_indices:
continue
token = Token(index, word, None, tag, tag, None, 0,
'erased', None, None, None)
sentence.append(token)
sentence.sort()
return sentence
|
dmcc/PyStanfordDependencies | StanfordDependencies/CoNLL.py | Sentence.from_conll | python | def from_conll(this_class, stream):
stream = iter(stream)
sentence = this_class()
for line in stream:
line = line.strip()
if line:
sentence.append(Token.from_conll(line))
elif sentence:
return sentence
return sentence | Construct a Sentence. stream is an iterable over strings where
each string is a line in CoNLL-X format. If there are multiple
sentences in this stream, we only return the first one. | train | https://github.com/dmcc/PyStanfordDependencies/blob/43d8f38a19e40087f273330087918c87df6d4d8f/StanfordDependencies/CoNLL.py#L194-L206 | null | class Sentence(list):
"""Sequence of Token objects."""
def renumber(self):
"""Destructively renumber the indices based on the actual tokens
(e.g., if there are gaps between token indices, this will remove
them). Old Token objects will still exist, so you'll need to
update your references."""
mapping = {0: 0} # old index -> real index
needs_renumbering = False
for real_index, token in enumerate(self, 1):
mapping[token.index] = real_index
if token.index != real_index:
needs_renumbering = True
if needs_renumbering:
# update all indices
self[:] = [token._replace(index=mapping[token.index],
head=mapping[token.head])
for token in self]
def as_conll(self):
"""Represent this Sentence as a string in CoNLL-X format. Note
that this doesn't end in a newline. Also see Corpus.as_conll()
for converting multiple sentences."""
return '\n'.join(token.as_conll() for token in self)
def as_asciitree(self, str_func=None):
"""Represent this Sentence as an ASCII tree string. Requires
the asciitree package. A default token stringifier is provided
but for custom formatting, specify a str_func which should take
a single Token and return a string."""
import asciitree
from collections import defaultdict
children = defaultdict(list)
# since erased nodes may be missing, multiple tokens may have same
# index (CCprocessed), etc.
token_to_index = {}
roots = []
for token in self:
children[token.head].append(token)
token_to_index[token] = token.index
if token.head == 0:
roots.append(token)
assert roots, "Couldn't find root Token(s)"
if len(roots) > 1:
# multiple roots so we make a fake one to be their parent
root = Token(0, 'ROOT', 'ROOT-LEMMA', 'ROOT-CPOS', 'ROOT-POS',
None, None, 'ROOT-DEPREL', None, None, None)
token_to_index[root] = 0
children[0] = roots
else:
root = roots[0]
def child_func(token):
index = token_to_index[token]
return children[index]
if not str_func:
def str_func(token):
return ' %s [%s]' % (token.form, token.deprel)
return asciitree.draw_tree(root, child_func, str_func)
def as_dotgraph(self, digraph_kwargs=None, id_prefix=None,
node_formatter=None, edge_formatter=None):
"""Returns this sentence as a graphviz.Digraph. Requires the
graphviz Python package and graphviz itself. There are several
ways to customize. Graph level keyword arguments can be passed
as a dictionary to digraph_kwargs. If you're viewing multiple
Sentences in the same graph, you'll need to set a unique prefix
string in id_prefix. Lastly, you can change the formatting of
nodes and edges with node_formatter and edge_formatter. Both
take a single Token as an argument (for edge_formatter, the
Token represents the child token) and return a dictionary of
keyword arguments which are passed to the node and edge creation
functions in graphviz. The node_formatter will also be called
with None as its token when adding the root."""
digraph_kwargs = digraph_kwargs or {}
id_prefix = id_prefix or ''
node_formatter = node_formatter or (lambda token: {})
edge_formatter = edge_formatter or (lambda token: {})
import graphviz
graph = graphviz.Digraph(**digraph_kwargs)
# add root node
graph.node(id_prefix + '0', 'root', **node_formatter(None))
# add remaining nodes and edges
already_added = set()
for token in self:
token_id = id_prefix + str(token.index)
parent_id = id_prefix + str(token.head)
if token_id not in already_added:
graph.node(token_id, token.form, **node_formatter(token))
graph.edge(parent_id, token_id, label=token.deprel,
**edge_formatter(token))
already_added.add(token_id)
return graph
@classmethod
@classmethod
def from_stanford_dependencies(this_class, stream, tree,
include_erased=False, include_punct=True):
"""Construct a Sentence. stream is an iterable over strings
where each string is a line representing a Stanford Dependency
as in the output of the command line Stanford Dependency tool:
deprel(gov-index, dep-depindex)
The corresponding Penn Treebank formatted tree must be provided
as well."""
stream = iter(stream)
sentence = this_class()
covered_indices = set()
tags_and_words = ptb_tags_and_words_re.findall(tree)
# perform some basic cleanups
tags_and_words = [(tag, word.replace(r'\/', '/'))
for (tag, word) in tags_and_words if tag != '-NONE-']
for line in stream:
if not line.strip():
if sentence:
# empty line means the sentence is over
break
else:
continue
line = line.replace(r'\/', '/')
matches = deps_re.findall(line)
assert len(matches) == 1
(deprel, gov_form, head, gov_is_copy, form, index,
dep_is_copy) = matches[0]
index = int(index)
tag, word = tags_and_words[index - 1]
assert form == word
covered_indices.add(index)
if not include_punct and deprel == 'punct':
continue
if gov_is_copy or dep_is_copy:
extra = {}
if gov_is_copy:
extra['gov_is_copy'] = len(gov_is_copy)
if dep_is_copy:
extra['dep_is_copy'] = len(dep_is_copy)
else:
extra = None
token = Token(index, form, None, tag, tag, None, int(head),
deprel, None, None, extra)
sentence.append(token)
if include_erased:
# look through words in the tree to see if any of them
# were erased
for index, (tag, word) in enumerate(tags_and_words, 1):
if index in covered_indices:
continue
token = Token(index, word, None, tag, tag, None, 0,
'erased', None, None, None)
sentence.append(token)
sentence.sort()
return sentence
|
dmcc/PyStanfordDependencies | StanfordDependencies/CoNLL.py | Sentence.from_stanford_dependencies | python | def from_stanford_dependencies(this_class, stream, tree,
include_erased=False, include_punct=True):
stream = iter(stream)
sentence = this_class()
covered_indices = set()
tags_and_words = ptb_tags_and_words_re.findall(tree)
# perform some basic cleanups
tags_and_words = [(tag, word.replace(r'\/', '/'))
for (tag, word) in tags_and_words if tag != '-NONE-']
for line in stream:
if not line.strip():
if sentence:
# empty line means the sentence is over
break
else:
continue
line = line.replace(r'\/', '/')
matches = deps_re.findall(line)
assert len(matches) == 1
(deprel, gov_form, head, gov_is_copy, form, index,
dep_is_copy) = matches[0]
index = int(index)
tag, word = tags_and_words[index - 1]
assert form == word
covered_indices.add(index)
if not include_punct and deprel == 'punct':
continue
if gov_is_copy or dep_is_copy:
extra = {}
if gov_is_copy:
extra['gov_is_copy'] = len(gov_is_copy)
if dep_is_copy:
extra['dep_is_copy'] = len(dep_is_copy)
else:
extra = None
token = Token(index, form, None, tag, tag, None, int(head),
deprel, None, None, extra)
sentence.append(token)
if include_erased:
# look through words in the tree to see if any of them
# were erased
for index, (tag, word) in enumerate(tags_and_words, 1):
if index in covered_indices:
continue
token = Token(index, word, None, tag, tag, None, 0,
'erased', None, None, None)
sentence.append(token)
sentence.sort()
return sentence | Construct a Sentence. stream is an iterable over strings
where each string is a line representing a Stanford Dependency
as in the output of the command line Stanford Dependency tool:
deprel(gov-index, dep-depindex)
The corresponding Penn Treebank formatted tree must be provided
as well. | train | https://github.com/dmcc/PyStanfordDependencies/blob/43d8f38a19e40087f273330087918c87df6d4d8f/StanfordDependencies/CoNLL.py#L208-L267 | null | class Sentence(list):
"""Sequence of Token objects."""
def renumber(self):
"""Destructively renumber the indices based on the actual tokens
(e.g., if there are gaps between token indices, this will remove
them). Old Token objects will still exist, so you'll need to
update your references."""
mapping = {0: 0} # old index -> real index
needs_renumbering = False
for real_index, token in enumerate(self, 1):
mapping[token.index] = real_index
if token.index != real_index:
needs_renumbering = True
if needs_renumbering:
# update all indices
self[:] = [token._replace(index=mapping[token.index],
head=mapping[token.head])
for token in self]
def as_conll(self):
"""Represent this Sentence as a string in CoNLL-X format. Note
that this doesn't end in a newline. Also see Corpus.as_conll()
for converting multiple sentences."""
return '\n'.join(token.as_conll() for token in self)
def as_asciitree(self, str_func=None):
"""Represent this Sentence as an ASCII tree string. Requires
the asciitree package. A default token stringifier is provided
but for custom formatting, specify a str_func which should take
a single Token and return a string."""
import asciitree
from collections import defaultdict
children = defaultdict(list)
# since erased nodes may be missing, multiple tokens may have same
# index (CCprocessed), etc.
token_to_index = {}
roots = []
for token in self:
children[token.head].append(token)
token_to_index[token] = token.index
if token.head == 0:
roots.append(token)
assert roots, "Couldn't find root Token(s)"
if len(roots) > 1:
# multiple roots so we make a fake one to be their parent
root = Token(0, 'ROOT', 'ROOT-LEMMA', 'ROOT-CPOS', 'ROOT-POS',
None, None, 'ROOT-DEPREL', None, None, None)
token_to_index[root] = 0
children[0] = roots
else:
root = roots[0]
def child_func(token):
index = token_to_index[token]
return children[index]
if not str_func:
def str_func(token):
return ' %s [%s]' % (token.form, token.deprel)
return asciitree.draw_tree(root, child_func, str_func)
def as_dotgraph(self, digraph_kwargs=None, id_prefix=None,
node_formatter=None, edge_formatter=None):
"""Returns this sentence as a graphviz.Digraph. Requires the
graphviz Python package and graphviz itself. There are several
ways to customize. Graph level keyword arguments can be passed
as a dictionary to digraph_kwargs. If you're viewing multiple
Sentences in the same graph, you'll need to set a unique prefix
string in id_prefix. Lastly, you can change the formatting of
nodes and edges with node_formatter and edge_formatter. Both
take a single Token as an argument (for edge_formatter, the
Token represents the child token) and return a dictionary of
keyword arguments which are passed to the node and edge creation
functions in graphviz. The node_formatter will also be called
with None as its token when adding the root."""
digraph_kwargs = digraph_kwargs or {}
id_prefix = id_prefix or ''
node_formatter = node_formatter or (lambda token: {})
edge_formatter = edge_formatter or (lambda token: {})
import graphviz
graph = graphviz.Digraph(**digraph_kwargs)
# add root node
graph.node(id_prefix + '0', 'root', **node_formatter(None))
# add remaining nodes and edges
already_added = set()
for token in self:
token_id = id_prefix + str(token.index)
parent_id = id_prefix + str(token.head)
if token_id not in already_added:
graph.node(token_id, token.form, **node_formatter(token))
graph.edge(parent_id, token_id, label=token.deprel,
**edge_formatter(token))
already_added.add(token_id)
return graph
@classmethod
def from_conll(this_class, stream):
"""Construct a Sentence. stream is an iterable over strings where
each string is a line in CoNLL-X format. If there are multiple
sentences in this stream, we only return the first one."""
stream = iter(stream)
sentence = this_class()
for line in stream:
line = line.strip()
if line:
sentence.append(Token.from_conll(line))
elif sentence:
return sentence
return sentence
@classmethod
|
dmcc/PyStanfordDependencies | StanfordDependencies/CoNLL.py | Corpus.from_conll | python | def from_conll(this_class, stream):
stream = iter(stream)
corpus = this_class()
while 1:
# read until we get an empty sentence
sentence = Sentence.from_conll(stream)
if sentence:
corpus.append(sentence)
else:
break
return corpus | Construct a Corpus. stream is an iterable over strings where
each string is a line in CoNLL-X format. | train | https://github.com/dmcc/PyStanfordDependencies/blob/43d8f38a19e40087f273330087918c87df6d4d8f/StanfordDependencies/CoNLL.py#L277-L289 | [
"def from_conll(this_class, stream):\n \"\"\"Construct a Sentence. stream is an iterable over strings where\n each string is a line in CoNLL-X format. If there are multiple\n sentences in this stream, we only return the first one.\"\"\"\n stream = iter(stream)\n sentence = this_class()\n for line in stream:\n line = line.strip()\n if line:\n sentence.append(Token.from_conll(line))\n elif sentence:\n return sentence\n return sentence\n"
] | class Corpus(list):
"""Sequence of Sentence objects."""
def as_conll(self):
"""Represent the entire corpus as a string in CoNLL-X format."""
if not self:
return ''
return '\n\n'.join(sentence.as_conll() for sentence in self) + '\n'
@classmethod
@classmethod
def from_stanford_dependencies(this_class, stream, trees,
include_erased=False, include_punct=True):
"""Construct a Corpus. stream is an iterable over strings where
each string is a line representing a Stanford Dependency as in
the output of the command line Stanford Dependency tool:
deprel(gov-index, dep-depindex)
Sentences are separated by blank lines. A corresponding list of
Penn Treebank formatted trees must be provided as well."""
stream = iter(stream)
corpus = this_class()
for tree in trees:
sentence = Sentence.from_stanford_dependencies(stream,
tree,
include_erased,
include_punct)
corpus.append(sentence)
return corpus
|
dmcc/PyStanfordDependencies | StanfordDependencies/CoNLL.py | Corpus.from_stanford_dependencies | python | def from_stanford_dependencies(this_class, stream, trees,
include_erased=False, include_punct=True):
stream = iter(stream)
corpus = this_class()
for tree in trees:
sentence = Sentence.from_stanford_dependencies(stream,
tree,
include_erased,
include_punct)
corpus.append(sentence)
return corpus | Construct a Corpus. stream is an iterable over strings where
each string is a line representing a Stanford Dependency as in
the output of the command line Stanford Dependency tool:
deprel(gov-index, dep-depindex)
Sentences are separated by blank lines. A corresponding list of
Penn Treebank formatted trees must be provided as well. | train | https://github.com/dmcc/PyStanfordDependencies/blob/43d8f38a19e40087f273330087918c87df6d4d8f/StanfordDependencies/CoNLL.py#L291-L309 | [
"def from_stanford_dependencies(this_class, stream, tree,\n include_erased=False, include_punct=True):\n \"\"\"Construct a Sentence. stream is an iterable over strings\n where each string is a line representing a Stanford Dependency\n as in the output of the command line Stanford Dependency tool:\n\n deprel(gov-index, dep-depindex)\n\n The corresponding Penn Treebank formatted tree must be provided\n as well.\"\"\"\n stream = iter(stream)\n sentence = this_class()\n covered_indices = set()\n tags_and_words = ptb_tags_and_words_re.findall(tree)\n # perform some basic cleanups\n tags_and_words = [(tag, word.replace(r'\\/', '/'))\n for (tag, word) in tags_and_words if tag != '-NONE-']\n for line in stream:\n if not line.strip():\n if sentence:\n # empty line means the sentence is over\n break\n else:\n continue\n line = line.replace(r'\\/', '/')\n matches = deps_re.findall(line)\n assert len(matches) == 1\n (deprel, gov_form, head, gov_is_copy, form, index,\n dep_is_copy) = matches[0]\n index = int(index)\n tag, word = tags_and_words[index - 1]\n assert form == word\n covered_indices.add(index)\n\n if not include_punct and deprel == 'punct':\n continue\n if gov_is_copy or dep_is_copy:\n extra = {}\n if gov_is_copy:\n extra['gov_is_copy'] = len(gov_is_copy)\n if dep_is_copy:\n extra['dep_is_copy'] = len(dep_is_copy)\n else:\n extra = None\n token = Token(index, form, None, tag, tag, None, int(head),\n deprel, None, None, extra)\n sentence.append(token)\n\n if include_erased:\n # look through words in the tree to see if any of them\n # were erased\n for index, (tag, word) in enumerate(tags_and_words, 1):\n if index in covered_indices:\n continue\n token = Token(index, word, None, tag, tag, None, 0,\n 'erased', None, None, None)\n sentence.append(token)\n\n sentence.sort()\n return sentence\n"
] | class Corpus(list):
"""Sequence of Sentence objects."""
def as_conll(self):
"""Represent the entire corpus as a string in CoNLL-X format."""
if not self:
return ''
return '\n\n'.join(sentence.as_conll() for sentence in self) + '\n'
@classmethod
def from_conll(this_class, stream):
"""Construct a Corpus. stream is an iterable over strings where
each string is a line in CoNLL-X format."""
stream = iter(stream)
corpus = this_class()
while 1:
# read until we get an empty sentence
sentence = Sentence.from_conll(stream)
if sentence:
corpus.append(sentence)
else:
break
return corpus
@classmethod
|
dmcc/PyStanfordDependencies | StanfordDependencies/SubprocessBackend.py | SubprocessBackend.convert_trees | python | def convert_trees(self, ptb_trees, representation='basic',
include_punct=True, include_erased=False, universal=True,
debug=False):
self._raise_on_bad_representation(representation)
input_file = tempfile.NamedTemporaryFile(delete=False)
try:
for ptb_tree in ptb_trees:
self._raise_on_bad_input(ptb_tree)
tree_with_line_break = ptb_tree + "\n"
input_file.write(tree_with_line_break.encode("utf-8"))
input_file.flush()
input_file.close()
command = [self.java_command,
'-ea',
'-cp', self.jar_filename,
JAVA_CLASS_NAME,
'-' + representation,
'-treeFile', input_file.name]
# if we're including erased, we want to include punctuation
# since otherwise we won't know what SD considers punctuation
if include_punct or include_erased:
command.append('-keepPunct')
if not universal:
command.append('-originalDependencies')
if debug:
print('Command:', ' '.join(command))
sd_process = subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True)
return_code = sd_process.wait()
stderr = sd_process.stderr.read()
stdout = sd_process.stdout.read()
if debug:
print("stdout: {%s}" % stdout)
print("stderr: {%s}" % stderr)
print('Exit code:', return_code)
self._raise_on_bad_exit_or_output(return_code, stderr)
finally:
os.remove(input_file.name)
try:
sentences = Corpus.from_stanford_dependencies(stdout.splitlines(),
ptb_trees,
include_erased,
include_punct)
for sentence, ptb_tree in zip(sentences, ptb_trees):
if len(sentence) == 0:
raise ValueError("Invalid PTB tree: %r" % ptb_tree)
except:
print("Error during conversion")
if not debug:
print("stdout: {%s}" % stdout)
print("stderr: {%s}" % stderr)
raise
assert len(sentences) == len(ptb_trees), \
"Only got %d sentences from Stanford Dependencies when " \
"given %d trees." % (len(sentences), len(ptb_trees))
return sentences | Convert a list of Penn Treebank formatted trees (ptb_trees)
into Stanford Dependencies. The dependencies are represented
as a list of sentences, where each sentence is itself a list of
Token objects.
Currently supported representations are 'basic', 'collapsed',
'CCprocessed', and 'collapsedTree' which behave the same as they
in the CoreNLP command line tools. (note that in the online
CoreNLP demo, 'collapsed' is called 'enhanced')
Setting debug=True will cause debugging information (including
the java command run to be printed. | train | https://github.com/dmcc/PyStanfordDependencies/blob/43d8f38a19e40087f273330087918c87df6d4d8f/StanfordDependencies/SubprocessBackend.py#L35-L108 | [
"def from_stanford_dependencies(this_class, stream, trees,\n include_erased=False, include_punct=True):\n \"\"\"Construct a Corpus. stream is an iterable over strings where\n each string is a line representing a Stanford Dependency as in\n the output of the command line Stanford Dependency tool:\n\n deprel(gov-index, dep-depindex)\n\n Sentences are separated by blank lines. A corresponding list of\n Penn Treebank formatted trees must be provided as well.\"\"\"\n stream = iter(stream)\n corpus = this_class()\n for tree in trees:\n sentence = Sentence.from_stanford_dependencies(stream,\n tree,\n include_erased,\n include_punct)\n corpus.append(sentence)\n return corpus\n",
"def _raise_on_bad_representation(representation):\n \"\"\"Ensure that representation is a known Stanford Dependency\n representation (raises a ValueError if the representation is\n invalid).\"\"\"\n if representation not in REPRESENTATIONS:\n repr_desc = ', '.join(map(repr, REPRESENTATIONS))\n raise ValueError(\"Unknown representation: %r (should be one \"\n \"of %s)\" % (representation, repr_desc))\n",
"def _raise_on_bad_input(ptb_tree):\n \"\"\"Ensure that ptb_tree is a valid Penn Treebank datatype or\n raises a TypeError. Currently, this requires that ptb_tree is\n a str or basestring (depending on Python version).\"\"\"\n if not isinstance(ptb_tree, string_type):\n raise TypeError(\"ptb_tree is not a string: %r\" % ptb_tree)\n",
"def _raise_on_bad_exit_or_output(return_code, stderr):\n if 'PennTreeReader: warning:' in stderr:\n raise ValueError(\"Tree(s) not in valid Penn Treebank format\")\n\n if return_code:\n if 'Unsupported major.minor version' in stderr:\n # Oracle Java error message\n raise JavaRuntimeVersionError()\n elif 'JVMCFRE003 bad major version' in stderr:\n # IBM Java error message\n raise JavaRuntimeVersionError()\n else:\n raise ValueError(\"Bad exit code from Stanford CoreNLP\")\n"
] | class SubprocessBackend(StanfordDependencies):
"""Interface to Stanford Dependencies via subprocesses. This means
that each call opens a pipe to Java. It has the advantage that it
should work out of the box if you have Java but it is slower than
other backends. As such, convert_trees() will be more efficient than
convert_tree() for this backend."""
def __init__(self, jar_filename=None, download_if_missing=False,
version=None, java_command='java'):
"""java_command is the path to a java binary."""
StanfordDependencies.__init__(self, jar_filename, download_if_missing,
version)
self.java_command = java_command
def convert_tree(self, ptb_tree, **kwargs):
"""Converts a single Penn Treebank formatted tree (a string)
to Stanford Dependencies. See convert_trees for more details."""
return self.convert_trees([ptb_tree], **kwargs)[0]
@staticmethod
def _raise_on_bad_exit_or_output(return_code, stderr):
if 'PennTreeReader: warning:' in stderr:
raise ValueError("Tree(s) not in valid Penn Treebank format")
if return_code:
if 'Unsupported major.minor version' in stderr:
# Oracle Java error message
raise JavaRuntimeVersionError()
elif 'JVMCFRE003 bad major version' in stderr:
# IBM Java error message
raise JavaRuntimeVersionError()
else:
raise ValueError("Bad exit code from Stanford CoreNLP")
|
ChristopherRabotin/bungiesearch | bungiesearch/utils.py | update_index | python | def update_index(model_items, model_name, action='index', bulk_size=100, num_docs=-1, start_date=None, end_date=None, refresh=True):
'''
Updates the index for the provided model_items.
:param model_items: a list of model_items (django Model instances, or proxy instances) which are to be indexed/updated or deleted.
If action is 'index', the model_items must be serializable objects. If action is 'delete', the model_items must be primary keys
corresponding to obects in the index.
:param model_name: doctype, which must also be the model name.
:param action: the action that you'd like to perform on this group of data. Must be in ('index', 'delete') and defaults to 'index.'
:param bulk_size: bulk size for indexing. Defaults to 100.
:param num_docs: maximum number of model_items from the provided list to be indexed.
:param start_date: start date for indexing. Must be as YYYY-MM-DD.
:param end_date: end date for indexing. Must be as YYYY-MM-DD.
:param refresh: a boolean that determines whether to refresh the index, making all operations performed since the last refresh
immediately available for search, instead of needing to wait for the scheduled Elasticsearch execution. Defaults to True.
:note: If model_items contain multiple models, then num_docs is applied to *each* model. For example, if bulk_size is set to 5,
and item contains models Article and Article2, then 5 model_items of Article *and* 5 model_items of Article2 will be indexed.
'''
src = Bungiesearch()
if action == 'delete' and not hasattr(model_items, '__iter__'):
raise ValueError("If action is 'delete', model_items must be an iterable of primary keys.")
logger.info('Getting index for model {}.'.format(model_name))
for index_name in src.get_index(model_name):
index_instance = src.get_model_index(model_name)
model = index_instance.get_model()
if num_docs == -1:
if isinstance(model_items, (list, tuple)):
num_docs = len(model_items)
else:
model_items = filter_model_items(index_instance, model_items, model_name, start_date, end_date)
num_docs = model_items.count()
if not model_items.ordered:
model_items = model_items.order_by('pk')
else:
logger.warning('Limiting the number of model_items to {} to {}.'.format(action, num_docs))
logger.info('{} {} documents on index {}'.format(action, num_docs, index_name))
prev_step = 0
max_docs = num_docs + bulk_size if num_docs > bulk_size else bulk_size + 1
for next_step in range(bulk_size, max_docs, bulk_size):
logger.info('{}: documents {} to {} of {} total on index {}.'.format(action.capitalize(), prev_step, next_step, num_docs, index_name))
data = create_indexed_document(index_instance, model_items[prev_step:next_step], action)
bulk_index(src.get_es_instance(), data, index=index_name, doc_type=model.__name__, raise_on_error=True)
prev_step = next_step
if refresh:
src.get_es_instance().indices.refresh(index=index_name) | Updates the index for the provided model_items.
:param model_items: a list of model_items (django Model instances, or proxy instances) which are to be indexed/updated or deleted.
If action is 'index', the model_items must be serializable objects. If action is 'delete', the model_items must be primary keys
corresponding to obects in the index.
:param model_name: doctype, which must also be the model name.
:param action: the action that you'd like to perform on this group of data. Must be in ('index', 'delete') and defaults to 'index.'
:param bulk_size: bulk size for indexing. Defaults to 100.
:param num_docs: maximum number of model_items from the provided list to be indexed.
:param start_date: start date for indexing. Must be as YYYY-MM-DD.
:param end_date: end date for indexing. Must be as YYYY-MM-DD.
:param refresh: a boolean that determines whether to refresh the index, making all operations performed since the last refresh
immediately available for search, instead of needing to wait for the scheduled Elasticsearch execution. Defaults to True.
:note: If model_items contain multiple models, then num_docs is applied to *each* model. For example, if bulk_size is set to 5,
and item contains models Article and Article2, then 5 model_items of Article *and* 5 model_items of Article2 will be indexed. | train | https://github.com/ChristopherRabotin/bungiesearch/blob/13768342bc2698b214eb0003c2d113b6e273c30d/bungiesearch/utils.py#L15-L64 | [
"def filter_model_items(index_instance, model_items, model_name, start_date, end_date):\n ''' Filters the model items queryset based on start and end date.'''\n if index_instance.updated_field is None:\n logger.warning(\"No updated date field found for {} - not restricting with start and end date\".format(model_name))\n else:\n if start_date:\n model_items = model_items.filter(**{'{}__gte'.format(index_instance.updated_field): __str_to_tzdate__(start_date)})\n if end_date:\n model_items = model_items.filter(**{'{}__lte'.format(index_instance.updated_field): __str_to_tzdate__(end_date)})\n\n return model_items\n",
"def create_indexed_document(index_instance, model_items, action):\n '''\n Creates the document that will be passed into the bulk index function.\n Either a list of serialized objects to index, or a a dictionary specifying the primary keys of items to be delete.\n '''\n data = []\n if action == 'delete':\n for pk in model_items:\n data.append({'_id': pk, '_op_type': action})\n else:\n for doc in model_items:\n if index_instance.matches_indexing_condition(doc):\n data.append(index_instance.serialize_object(doc))\n return data\n"
] | from dateutil.parser import parse as parsedt
from django.utils import timezone
from elasticsearch.exceptions import NotFoundError
from . import Bungiesearch
from .logger import logger
try:
from elasticsearch.helpers import bulk_index
except ImportError:
from elasticsearch.helpers import bulk as bulk_index
def delete_index_item(item, model_name, refresh=True):
    '''
    Removes a single document from every index associated with the given model.
    :param item: must be a serializable object.
    :param model_name: doctype, which must also be the model name.
    :param refresh: a boolean that determines whether to refresh the index, making all operations performed since the last refresh
    immediately available for search, instead of needing to wait for the scheduled Elasticsearch execution. Defaults to True.
    '''
    search = Bungiesearch()
    logger.info('Getting index for model {}.'.format(model_name))
    for index_name in search.get_index(model_name):
        model_index = search.get_model_index(model_name)
        # The ModelIndex's `_id` field knows how to derive the document id from the item.
        doc_id = model_index.fields['_id'].value(item)
        try:
            search.get_es_instance().delete(index_name, model_name, doc_id)
        except NotFoundError as exc:
            # A document missing from this index is not fatal; log it and keep going.
            logger.warning('NotFoundError: could not delete {}.{} from index {}: {}.'.format(model_name, doc_id, index_name, str(exc)))
        if refresh:
            search.get_es_instance().indices.refresh(index=index_name)
def create_indexed_document(index_instance, model_items, action):
    '''
    Builds the payload handed to the bulk index helper: a list of delete
    operations (one per primary key) when action is 'delete', otherwise a
    list of serialized documents for every item that passes the ModelIndex's
    indexing condition.
    '''
    if action == 'delete':
        # Deletions only need the primary key and the bulk op type.
        return [{'_id': pk, '_op_type': action} for pk in model_items]
    return [
        index_instance.serialize_object(item)
        for item in model_items
        if index_instance.matches_indexing_condition(item)
    ]
def filter_model_items(index_instance, model_items, model_name, start_date, end_date):
    ''' Filters the model items queryset based on start and end date.'''
    updated_field = index_instance.updated_field
    if updated_field is None:
        # Without a tracked "updated" field there is nothing to restrict on.
        logger.warning("No updated date field found for {} - not restricting with start and end date".format(model_name))
        return model_items
    if start_date:
        model_items = model_items.filter(**{'{}__gte'.format(updated_field): __str_to_tzdate__(start_date)})
    if end_date:
        model_items = model_items.filter(**{'{}__lte'.format(updated_field): __str_to_tzdate__(end_date)})
    return model_items
def __str_to_tzdate__(date_str):
    # Parse the date string and attach the current Django timezone so the
    # result can be compared against timezone-aware model fields.
    return timezone.make_aware(parsedt(date_str), timezone.get_current_timezone())
|
ChristopherRabotin/bungiesearch | bungiesearch/utils.py | delete_index_item | python | def delete_index_item(item, model_name, refresh=True):
'''
Deletes an item from the index.
:param item: must be a serializable object.
:param model_name: doctype, which must also be the model name.
:param refresh: a boolean that determines whether to refresh the index, making all operations performed since the last refresh
immediately available for search, instead of needing to wait for the scheduled Elasticsearch execution. Defaults to True.
'''
src = Bungiesearch()
logger.info('Getting index for model {}.'.format(model_name))
for index_name in src.get_index(model_name):
index_instance = src.get_model_index(model_name)
item_es_id = index_instance.fields['_id'].value(item)
try:
src.get_es_instance().delete(index_name, model_name, item_es_id)
except NotFoundError as e:
logger.warning('NotFoundError: could not delete {}.{} from index {}: {}.'.format(model_name, item_es_id, index_name, str(e)))
if refresh:
src.get_es_instance().indices.refresh(index=index_name) | Deletes an item from the index.
:param item: must be a serializable object.
:param model_name: doctype, which must also be the model name.
:param refresh: a boolean that determines whether to refresh the index, making all operations performed since the last refresh
immediately available for search, instead of needing to wait for the scheduled Elasticsearch execution. Defaults to True. | train | https://github.com/ChristopherRabotin/bungiesearch/blob/13768342bc2698b214eb0003c2d113b6e273c30d/bungiesearch/utils.py#L67-L87 | null | from dateutil.parser import parse as parsedt
from django.utils import timezone
from elasticsearch.exceptions import NotFoundError
from . import Bungiesearch
from .logger import logger
try:
from elasticsearch.helpers import bulk_index
except ImportError:
from elasticsearch.helpers import bulk as bulk_index
def update_index(model_items, model_name, action='index', bulk_size=100, num_docs=-1, start_date=None, end_date=None, refresh=True):
    '''
    Updates the index for the provided model_items.
    :param model_items: a list of model_items (django Model instances, or proxy instances) which are to be indexed/updated or deleted.
    If action is 'index', the model_items must be serializable objects. If action is 'delete', the model_items must be primary keys
    corresponding to objects in the index.
    :param model_name: doctype, which must also be the model name.
    :param action: the action that you'd like to perform on this group of data. Must be in ('index', 'delete') and defaults to 'index.'
    :param bulk_size: bulk size for indexing. Defaults to 100.
    :param num_docs: maximum number of model_items from the provided list to be indexed.
    :param start_date: start date for indexing. Must be as YYYY-MM-DD.
    :param end_date: end date for indexing. Must be as YYYY-MM-DD.
    :param refresh: a boolean that determines whether to refresh the index, making all operations performed since the last refresh
    immediately available for search, instead of needing to wait for the scheduled Elasticsearch execution. Defaults to True.
    :note: If model_items contain multiple models, then num_docs is applied to *each* model. For example, if bulk_size is set to 5,
    and item contains models Article and Article2, then 5 model_items of Article *and* 5 model_items of Article2 will be indexed.
    '''
    src = Bungiesearch()
    if action == 'delete' and not hasattr(model_items, '__iter__'):
        raise ValueError("If action is 'delete', model_items must be an iterable of primary keys.")
    logger.info('Getting index for model {}.'.format(model_name))
    for index_name in src.get_index(model_name):
        index_instance = src.get_model_index(model_name)
        model = index_instance.get_model()
        if num_docs == -1:
            # No explicit cap: index everything we were given.
            if isinstance(model_items, (list, tuple)):
                num_docs = len(model_items)
            else:
                # Otherwise assume a Django queryset: restrict it by the
                # date window, then count the remaining rows.
                model_items = filter_model_items(index_instance, model_items, model_name, start_date, end_date)
                num_docs = model_items.count()
                if not model_items.ordered:
                    # Slicing an unordered queryset is non-deterministic across
                    # batches; force a stable ordering by primary key.
                    model_items = model_items.order_by('pk')
        else:
            logger.warning('Limiting the number of model_items to {} to {}.'.format(action, num_docs))
        logger.info('{} {} documents on index {}'.format(action, num_docs, index_name))
        prev_step = 0
        # Upper bound chosen so range() still yields a step covering the final
        # (possibly partial) batch.
        max_docs = num_docs + bulk_size if num_docs > bulk_size else bulk_size + 1
        for next_step in range(bulk_size, max_docs, bulk_size):
            logger.info('{}: documents {} to {} of {} total on index {}.'.format(action.capitalize(), prev_step, next_step, num_docs, index_name))
            data = create_indexed_document(index_instance, model_items[prev_step:next_step], action)
            bulk_index(src.get_es_instance(), data, index=index_name, doc_type=model.__name__, raise_on_error=True)
            prev_step = next_step
        if refresh:
            # Make the freshly indexed/deleted documents visible to search immediately.
            src.get_es_instance().indices.refresh(index=index_name)
def create_indexed_document(index_instance, model_items, action):
    '''
    Creates the document that will be passed into the bulk index function.
    Either a list of serialized objects to index, or a list of delete
    operations keyed by primary key.
    '''
    data = []
    if action == 'delete':
        # Deletions only need the primary key and the bulk op type.
        for pk in model_items:
            data.append({'_id': pk, '_op_type': action})
    else:
        for doc in model_items:
            # Skip items the ModelIndex does not want indexed.
            if index_instance.matches_indexing_condition(doc):
                data.append(index_instance.serialize_object(doc))
    return data
def filter_model_items(index_instance, model_items, model_name, start_date, end_date):
    ''' Filters the model items queryset based on start and end date.'''
    if index_instance.updated_field is None:
        # Without a tracked "updated" field there is nothing to restrict on.
        logger.warning("No updated date field found for {} - not restricting with start and end date".format(model_name))
    else:
        if start_date:
            model_items = model_items.filter(**{'{}__gte'.format(index_instance.updated_field): __str_to_tzdate__(start_date)})
        if end_date:
            model_items = model_items.filter(**{'{}__lte'.format(index_instance.updated_field): __str_to_tzdate__(end_date)})
    return model_items
def __str_to_tzdate__(date_str):
    # Parse the date string and attach the current Django timezone so the
    # result can be compared against timezone-aware model fields.
    return timezone.make_aware(parsedt(date_str), timezone.get_current_timezone())
|
ChristopherRabotin/bungiesearch | bungiesearch/utils.py | create_indexed_document | python | def create_indexed_document(index_instance, model_items, action):
'''
Creates the document that will be passed into the bulk index function.
Either a list of serialized objects to index, or a a dictionary specifying the primary keys of items to be delete.
'''
data = []
if action == 'delete':
for pk in model_items:
data.append({'_id': pk, '_op_type': action})
else:
for doc in model_items:
if index_instance.matches_indexing_condition(doc):
data.append(index_instance.serialize_object(doc))
return data | Creates the document that will be passed into the bulk index function.
Either a list of serialized objects to index, or a a dictionary specifying the primary keys of items to be delete. | train | https://github.com/ChristopherRabotin/bungiesearch/blob/13768342bc2698b214eb0003c2d113b6e273c30d/bungiesearch/utils.py#L90-L103 | null | from dateutil.parser import parse as parsedt
from django.utils import timezone
from elasticsearch.exceptions import NotFoundError
from . import Bungiesearch
from .logger import logger
try:
from elasticsearch.helpers import bulk_index
except ImportError:
from elasticsearch.helpers import bulk as bulk_index
def update_index(model_items, model_name, action='index', bulk_size=100, num_docs=-1, start_date=None, end_date=None, refresh=True):
    '''
    Updates the index for the provided model_items.
    :param model_items: a list of model_items (django Model instances, or proxy instances) which are to be indexed/updated or deleted.
    If action is 'index', the model_items must be serializable objects. If action is 'delete', the model_items must be primary keys
    corresponding to objects in the index.
    :param model_name: doctype, which must also be the model name.
    :param action: the action that you'd like to perform on this group of data. Must be in ('index', 'delete') and defaults to 'index.'
    :param bulk_size: bulk size for indexing. Defaults to 100.
    :param num_docs: maximum number of model_items from the provided list to be indexed.
    :param start_date: start date for indexing. Must be as YYYY-MM-DD.
    :param end_date: end date for indexing. Must be as YYYY-MM-DD.
    :param refresh: a boolean that determines whether to refresh the index, making all operations performed since the last refresh
    immediately available for search, instead of needing to wait for the scheduled Elasticsearch execution. Defaults to True.
    :note: If model_items contain multiple models, then num_docs is applied to *each* model. For example, if bulk_size is set to 5,
    and item contains models Article and Article2, then 5 model_items of Article *and* 5 model_items of Article2 will be indexed.
    '''
    src = Bungiesearch()
    if action == 'delete' and not hasattr(model_items, '__iter__'):
        raise ValueError("If action is 'delete', model_items must be an iterable of primary keys.")
    logger.info('Getting index for model {}.'.format(model_name))
    for index_name in src.get_index(model_name):
        index_instance = src.get_model_index(model_name)
        model = index_instance.get_model()
        if num_docs == -1:
            # No explicit cap: index everything we were given.
            if isinstance(model_items, (list, tuple)):
                num_docs = len(model_items)
            else:
                # Otherwise assume a Django queryset: restrict it by the
                # date window, then count the remaining rows.
                model_items = filter_model_items(index_instance, model_items, model_name, start_date, end_date)
                num_docs = model_items.count()
                if not model_items.ordered:
                    # Slicing an unordered queryset is non-deterministic across
                    # batches; force a stable ordering by primary key.
                    model_items = model_items.order_by('pk')
        else:
            logger.warning('Limiting the number of model_items to {} to {}.'.format(action, num_docs))
        logger.info('{} {} documents on index {}'.format(action, num_docs, index_name))
        prev_step = 0
        # Upper bound chosen so range() still yields a step covering the final
        # (possibly partial) batch.
        max_docs = num_docs + bulk_size if num_docs > bulk_size else bulk_size + 1
        for next_step in range(bulk_size, max_docs, bulk_size):
            logger.info('{}: documents {} to {} of {} total on index {}.'.format(action.capitalize(), prev_step, next_step, num_docs, index_name))
            data = create_indexed_document(index_instance, model_items[prev_step:next_step], action)
            bulk_index(src.get_es_instance(), data, index=index_name, doc_type=model.__name__, raise_on_error=True)
            prev_step = next_step
        if refresh:
            # Make the freshly indexed/deleted documents visible to search immediately.
            src.get_es_instance().indices.refresh(index=index_name)
def delete_index_item(item, model_name, refresh=True):
    '''
    Deletes an item from the index.
    :param item: must be a serializable object.
    :param model_name: doctype, which must also be the model name.
    :param refresh: a boolean that determines whether to refresh the index, making all operations performed since the last refresh
    immediately available for search, instead of needing to wait for the scheduled Elasticsearch execution. Defaults to True.
    '''
    src = Bungiesearch()
    logger.info('Getting index for model {}.'.format(model_name))
    for index_name in src.get_index(model_name):
        index_instance = src.get_model_index(model_name)
        # The ModelIndex's `_id` field derives the elasticsearch document id.
        item_es_id = index_instance.fields['_id'].value(item)
        try:
            src.get_es_instance().delete(index_name, model_name, item_es_id)
        except NotFoundError as e:
            # Already gone from this index; log and continue with the others.
            logger.warning('NotFoundError: could not delete {}.{} from index {}: {}.'.format(model_name, item_es_id, index_name, str(e)))
        if refresh:
            src.get_es_instance().indices.refresh(index=index_name)
def filter_model_items(index_instance, model_items, model_name, start_date, end_date):
    ''' Filters the model items queryset based on start and end date.'''
    if index_instance.updated_field is None:
        # Without a tracked "updated" field there is nothing to restrict on.
        logger.warning("No updated date field found for {} - not restricting with start and end date".format(model_name))
    else:
        if start_date:
            model_items = model_items.filter(**{'{}__gte'.format(index_instance.updated_field): __str_to_tzdate__(start_date)})
        if end_date:
            model_items = model_items.filter(**{'{}__lte'.format(index_instance.updated_field): __str_to_tzdate__(end_date)})
    return model_items
def __str_to_tzdate__(date_str):
    # Parse the date string and attach the current Django timezone so the
    # result can be compared against timezone-aware model fields.
    return timezone.make_aware(parsedt(date_str), timezone.get_current_timezone())
|
ChristopherRabotin/bungiesearch | bungiesearch/utils.py | filter_model_items | python | def filter_model_items(index_instance, model_items, model_name, start_date, end_date):
''' Filters the model items queryset based on start and end date.'''
if index_instance.updated_field is None:
logger.warning("No updated date field found for {} - not restricting with start and end date".format(model_name))
else:
if start_date:
model_items = model_items.filter(**{'{}__gte'.format(index_instance.updated_field): __str_to_tzdate__(start_date)})
if end_date:
model_items = model_items.filter(**{'{}__lte'.format(index_instance.updated_field): __str_to_tzdate__(end_date)})
return model_items | Filters the model items queryset based on start and end date. | train | https://github.com/ChristopherRabotin/bungiesearch/blob/13768342bc2698b214eb0003c2d113b6e273c30d/bungiesearch/utils.py#L106-L116 | [
"def __str_to_tzdate__(date_str):\n return timezone.make_aware(parsedt(date_str), timezone.get_current_timezone())\n"
] | from dateutil.parser import parse as parsedt
from django.utils import timezone
from elasticsearch.exceptions import NotFoundError
from . import Bungiesearch
from .logger import logger
try:
from elasticsearch.helpers import bulk_index
except ImportError:
from elasticsearch.helpers import bulk as bulk_index
def update_index(model_items, model_name, action='index', bulk_size=100, num_docs=-1, start_date=None, end_date=None, refresh=True):
    '''
    Updates the index for the provided model_items.
    :param model_items: a list of model_items (django Model instances, or proxy instances) which are to be indexed/updated or deleted.
    If action is 'index', the model_items must be serializable objects. If action is 'delete', the model_items must be primary keys
    corresponding to objects in the index.
    :param model_name: doctype, which must also be the model name.
    :param action: the action that you'd like to perform on this group of data. Must be in ('index', 'delete') and defaults to 'index.'
    :param bulk_size: bulk size for indexing. Defaults to 100.
    :param num_docs: maximum number of model_items from the provided list to be indexed.
    :param start_date: start date for indexing. Must be as YYYY-MM-DD.
    :param end_date: end date for indexing. Must be as YYYY-MM-DD.
    :param refresh: a boolean that determines whether to refresh the index, making all operations performed since the last refresh
    immediately available for search, instead of needing to wait for the scheduled Elasticsearch execution. Defaults to True.
    :note: If model_items contain multiple models, then num_docs is applied to *each* model. For example, if bulk_size is set to 5,
    and item contains models Article and Article2, then 5 model_items of Article *and* 5 model_items of Article2 will be indexed.
    '''
    src = Bungiesearch()
    if action == 'delete' and not hasattr(model_items, '__iter__'):
        raise ValueError("If action is 'delete', model_items must be an iterable of primary keys.")
    logger.info('Getting index for model {}.'.format(model_name))
    for index_name in src.get_index(model_name):
        index_instance = src.get_model_index(model_name)
        model = index_instance.get_model()
        if num_docs == -1:
            # No explicit cap: index everything we were given.
            if isinstance(model_items, (list, tuple)):
                num_docs = len(model_items)
            else:
                # Otherwise assume a Django queryset: restrict it by the
                # date window, then count the remaining rows.
                model_items = filter_model_items(index_instance, model_items, model_name, start_date, end_date)
                num_docs = model_items.count()
                if not model_items.ordered:
                    # Slicing an unordered queryset is non-deterministic across
                    # batches; force a stable ordering by primary key.
                    model_items = model_items.order_by('pk')
        else:
            logger.warning('Limiting the number of model_items to {} to {}.'.format(action, num_docs))
        logger.info('{} {} documents on index {}'.format(action, num_docs, index_name))
        prev_step = 0
        # Upper bound chosen so range() still yields a step covering the final
        # (possibly partial) batch.
        max_docs = num_docs + bulk_size if num_docs > bulk_size else bulk_size + 1
        for next_step in range(bulk_size, max_docs, bulk_size):
            logger.info('{}: documents {} to {} of {} total on index {}.'.format(action.capitalize(), prev_step, next_step, num_docs, index_name))
            data = create_indexed_document(index_instance, model_items[prev_step:next_step], action)
            bulk_index(src.get_es_instance(), data, index=index_name, doc_type=model.__name__, raise_on_error=True)
            prev_step = next_step
        if refresh:
            # Make the freshly indexed/deleted documents visible to search immediately.
            src.get_es_instance().indices.refresh(index=index_name)
def delete_index_item(item, model_name, refresh=True):
    '''
    Deletes an item from the index.
    :param item: must be a serializable object.
    :param model_name: doctype, which must also be the model name.
    :param refresh: a boolean that determines whether to refresh the index, making all operations performed since the last refresh
    immediately available for search, instead of needing to wait for the scheduled Elasticsearch execution. Defaults to True.
    '''
    src = Bungiesearch()
    logger.info('Getting index for model {}.'.format(model_name))
    for index_name in src.get_index(model_name):
        index_instance = src.get_model_index(model_name)
        # The ModelIndex's `_id` field derives the elasticsearch document id.
        item_es_id = index_instance.fields['_id'].value(item)
        try:
            src.get_es_instance().delete(index_name, model_name, item_es_id)
        except NotFoundError as e:
            # Already gone from this index; log and continue with the others.
            logger.warning('NotFoundError: could not delete {}.{} from index {}: {}.'.format(model_name, item_es_id, index_name, str(e)))
        if refresh:
            src.get_es_instance().indices.refresh(index=index_name)
def create_indexed_document(index_instance, model_items, action):
    '''
    Creates the document that will be passed into the bulk index function.
    Either a list of serialized objects to index, or a list of delete
    operations keyed by primary key.
    '''
    data = []
    if action == 'delete':
        # Deletions only need the primary key and the bulk op type.
        for pk in model_items:
            data.append({'_id': pk, '_op_type': action})
    else:
        for doc in model_items:
            # Skip items the ModelIndex does not want indexed.
            if index_instance.matches_indexing_condition(doc):
                data.append(index_instance.serialize_object(doc))
    return data
def __str_to_tzdate__(date_str):
    # Parse the date string and attach the current Django timezone so the
    # result can be compared against timezone-aware model fields.
    return timezone.make_aware(parsedt(date_str), timezone.get_current_timezone())
|
ChristopherRabotin/bungiesearch | bungiesearch/__init__.py | Bungiesearch.get_index | python | def get_index(cls, model, via_class=False):
'''
Returns the index name (as a string) for the given model as a class or a string.
:param model: model name or model class if via_class set to True.
:param via_class: set to True if parameter model is a class.
:raise KeyError: If the provided model does not have any index associated.
'''
try:
return cls._model_to_index[model] if via_class else cls._model_name_to_index[model]
except KeyError:
raise KeyError('Could not find any index defined for model {}. Is the model in one of the model index modules of BUNGIESEARCH["INDICES"]?'.format(model)) | Returns the index name (as a string) for the given model as a class or a string.
:param model: model name or model class if via_class set to True.
:param via_class: set to True if parameter model is a class.
:raise KeyError: If the provided model does not have any index associated. | train | https://github.com/ChristopherRabotin/bungiesearch/blob/13768342bc2698b214eb0003c2d113b6e273c30d/bungiesearch/__init__.py#L98-L108 | null | class Bungiesearch(Search):
'''
This object is used to read Django settings and initialize the elasticsearch connection.
'''
DEFAULT_TIMEOUT = 5
BUNGIE = settings.BUNGIESEARCH
# The following code loads each model index_name module (as defined in the settings) and stores
# index_name name to model index_name, and index_name name to model. Settings shouldn't change between
# subsequent calls to Search(), which is why this is static code.
_cached_es_instances = {}
# Let's go through the settings in order to map each defined Model/ModelIndex to the elasticsearch index_name.
_model_to_index, _model_name_to_index, _model_name_to_model_idx = defaultdict(list), defaultdict(list), defaultdict(list)
_index_to_model, _idx_name_to_mdl_to_mdlidx = defaultdict(list), defaultdict(dict)
_model_name_to_default_index, _alias_hooks = {}, {}
_managed_models = []
__loaded_indices__ = False
@classmethod
def __load_settings__(cls):
if cls.__loaded_indices__:
return
cls.__loaded_indices__ = True
# Loading indices.
for index_name, module_str in iteritems(cls.BUNGIE['INDICES']):
index_module = import_module(module_str)
for index_obj in itervalues(index_module.__dict__):
try:
if issubclass(index_obj, ModelIndex) and index_obj != ModelIndex:
index_instance = index_obj()
assoc_model = index_instance.get_model()
cls._index_to_model[index_name].append(assoc_model)
cls._model_name_to_model_idx[assoc_model.__name__].append(index_instance)
cls._idx_name_to_mdl_to_mdlidx[index_name][assoc_model.__name__] = index_instance
if index_instance.is_default:
if assoc_model.__name__ in cls._model_name_to_default_index:
raise AttributeError('ModelIndex {} on index {} is marked as default, but {} was already set as default.'.format(index_instance, index_name, cls._model_name_to_default_index[assoc_model.__name__]))
cls._model_name_to_default_index[assoc_model.__name__] = index_instance
except TypeError:
pass # Oops, just attempted to get subclasses of a non-class.
# Create reverse maps in order to have O(1) access.
for index_name, models in iteritems(cls._index_to_model):
for model in models:
cls._model_to_index[model].append(index_name)
cls._model_name_to_index[model.__name__].append(index_name)
# Loading aliases.
for alias_prefix, module_str in iteritems(cls.BUNGIE.get('ALIASES', {})):
if alias_prefix is None:
alias_prefix = 'bungie'
if alias_prefix != '':
alias_prefix += '_'
alias_module = import_module(module_str)
for alias_obj in itervalues(alias_module.__dict__):
try:
if issubclass(alias_obj, SearchAlias) and alias_obj != SearchAlias:
alias_instance = alias_obj()
cls._alias_hooks[alias_prefix + alias_instance.alias_name] = alias_instance
except TypeError:
pass # Oops, just attempted to get subclasses of a non-class.
@classmethod
def _build_key(cls, urls, timeout, **settings):
# Order the settings by key and then turn it into a string with
# repr. There are a lot of edge cases here, but the worst that
# happens is that the key is different and so you get a new
# Elasticsearch. We'll probably have to tweak this.
settings = sorted(settings.items(), key=lambda item: item[0])
settings = repr([(k, v) for k, v in settings])
# elasticsearch allows URLs to be a string, so we make sure to
# account for that when converting whatever it is into a tuple.
if isinstance(urls, string_types):
urls = (urls,)
else:
urls = tuple(urls)
# Generate a tuple of all the bits and return that as the key
# because that's hashable.
key = (urls, timeout, settings)
return key
@classmethod
@classmethod
def get_model_index(cls, model, default=True):
'''
Returns the default model index for the given model, or the list of indices if default is False.
:param model: model name as a string.
:raise KeyError: If the provided model does not have any index associated.
'''
try:
if default:
return cls._model_name_to_default_index[model]
return cls._model_name_to_model_idx[model]
except KeyError:
raise KeyError('Could not find any model index defined for model {}.'.format(model))
@classmethod
def get_indices(cls):
'''
Returns the list of indices defined in the settings.
'''
return cls._idx_name_to_mdl_to_mdlidx.keys()
@classmethod
def get_models(cls, index, as_class=False):
'''
Returns the list of models defined for this index.
:param index: index name.
:param as_class: set to True to return the model as a model object instead of as a string.
'''
try:
return cls._index_to_model[index] if as_class else cls._idx_name_to_mdl_to_mdlidx[index].keys()
except KeyError:
raise KeyError('Could not find any index named {}. Is this index defined in BUNGIESEARCH["INDICES"]?'.format(index))
@classmethod
def get_model_indices(cls, index):
'''
Returns the list of model indices (i.e. ModelIndex objects) defined for this index.
:param index: index name.
'''
try:
return cls._idx_name_to_mdl_to_mdlidx[index].values()
except KeyError:
raise KeyError('Could not find any index named {}. Is this index defined in BUNGIESEARCH["INDICES"]?'.format(index))
@classmethod
def map_raw_results(cls, raw_results, instance=None):
'''
Maps raw results to database model objects.
:param raw_results: list raw results as returned from elasticsearch-dsl-py.
:param instance: Bungiesearch instance if you want to make use of `.only()` or `optmize_queries` as defined in the ModelIndex.
:return: list of mapped results in the *same* order as returned by elasticsearch.
'''
# Let's iterate over the results and determine the appropriate mapping.
model_results = defaultdict(list)
# Initializing the list to the number of returned results. This allows us to restore each item in its position.
if hasattr(raw_results, 'hits'):
results = [None] * len(raw_results.hits)
else:
results = [None] * len(raw_results)
found_results = {}
for pos, result in enumerate(raw_results):
model_name = result.meta.doc_type
if model_name not in Bungiesearch._model_name_to_index or result.meta.index not in Bungiesearch._model_name_to_index[model_name]:
logger.warning('Returned object of type {} ({}) is not defined in the settings, or is not associated to the same index as in the settings.'.format(model_name, result))
results[pos] = result
else:
meta = Bungiesearch.get_model_index(model_name).Meta
model_results['{}.{}'.format(result.meta.index, model_name)].append(result.meta.id)
found_results['{1.meta.index}.{0}.{1.meta.id}'.format(model_name, result)] = (pos, result.meta)
# Now that we have model ids per model name, let's fetch everything at once.
for ref_name, ids in iteritems(model_results):
index_name, model_name = ref_name.split('.')
model_idx = Bungiesearch._idx_name_to_mdl_to_mdlidx[index_name][model_name]
model_obj = model_idx.get_model()
items = model_obj.objects.filter(pk__in=ids)
if instance:
if instance._only == '__model' or model_idx.optimize_queries:
desired_fields = model_idx.fields_to_fetch
elif instance._only == '__fields':
desired_fields = instance._fields
else:
desired_fields = instance._only
if desired_fields: # Prevents setting the database fetch to __fields but not having specified any field to elasticsearch.
items = items.only(
*[field.name
for field in model_obj._meta.get_fields()
# For complete backwards compatibility, you may want to exclude
# GenericForeignKey from the results.
if field.name in desired_fields and \
not (field.many_to_one and field.related_model is None)
]
)
# Let's reposition each item in the results and set the _searchmeta meta information.
for item in items:
pos, meta = found_results['{}.{}.{}'.format(index_name, model_name, item.pk)]
item._searchmeta = meta
results[pos] = item
return results
def __init__(self, urls=None, timeout=None, force_new=False, raw_results=False, **kwargs):
'''
Creates a new ElasticSearch DSL object. Grabs the ElasticSearch connection from the pool
if it has already been initialized. Otherwise, creates a new one.
If no parameters are passed, everything is determined from the Django settings.
:param urls: A list of URLs, or a single string of URL (without leading `http://`), or None to read from settings.
:param idx: A list of indices or a single string representing an index_name name. Is optional. Will be merged with `idx_alias`.
:param idx_alias: A list of index_name aliases or a single string representing an index_name alias, as defined in the settings. Will be merged with `index_name`.
:param timeout: Timeout used in the connection.
:param force_new: Set to `True` to force a new elasticsearch connection. Otherwise will aggressively use any connection with the exact same settings.
:param **kwargs: Additional settings to pass to the low level elasticsearch client and to elasticsearch-sal-py.search.Search.
'''
Bungiesearch.__load_settings__()
urls = urls or Bungiesearch.BUNGIE['URLS']
if not timeout:
timeout = Bungiesearch.BUNGIE.get('TIMEOUT', Bungiesearch.DEFAULT_TIMEOUT)
search_keys = ['using', 'index', 'doc_type', 'extra']
search_settings, es_settings = {}, {}
for k, v in iteritems(kwargs):
if k in search_keys:
search_settings[k] = v
else:
es_settings[k] = v
if not es_settings:
# If there aren't any provided elasticsearch settings, let's see if it's defined in the settings.
es_settings = Bungiesearch.BUNGIE.get('ES_SETTINGS', {})
# Building a caching key to cache the es_instance for later use (and retrieved a previously cached es_instance).
cache_key = Bungiesearch._build_key(urls, timeout, **es_settings)
es_instance = None
if not force_new:
if cache_key in Bungiesearch._cached_es_instances:
es_instance = Bungiesearch._cached_es_instances[cache_key]
if not es_instance:
es_instance = Elasticsearch(urls, timeout=timeout, **es_settings)
Bungiesearch._cached_es_instances[cache_key] = es_instance
if 'using' not in search_settings:
search_settings['using'] = es_instance
super(Bungiesearch, self).__init__(**search_settings)
# Creating instance attributes.
self._only = [] # Stores the exact fields to fetch from the database when mapping.
self.results = [] # Store the mapped and unmapped results.
self._raw_results_only = raw_results
def _clone(self):
'''
Must clone additional fields to those cloned by elasticsearch-dsl-py.
'''
instance = super(Bungiesearch, self)._clone()
instance._raw_results_only = self._raw_results_only
return instance
def get_es_instance(self):
'''
Returns the low level elasticsearch instance to perform low level operations.
'''
return self._using
def execute_raw(self):
self.raw_results = super(Bungiesearch, self).execute()
def execute(self, return_results=True):
'''
Executes the query and attempts to create model objects from results.
'''
if self.results:
return self.results if return_results else None
self.execute_raw()
if self._raw_results_only:
self.results = self.raw_results
else:
self.map_results()
if return_results:
return self.results
def map_results(self):
'''
Maps raw results and store them.
'''
self.results = Bungiesearch.map_raw_results(self.raw_results, self)
def only(self, *fields):
'''
Restricts the fields to be fetched when mapping. Set to `__model` to fetch all fields define in the ModelIndex.
'''
s = self._clone()
if len(fields) == 1 and fields[0] == '__model':
s._only = '__model'
else:
s._only = fields
return s
def __iter__(self):
'''
Allows iterating on the response.
'''
self.execute()
return iter(self.results)
def __len__(self):
'''
Return elasticsearch-dsl-py count.
'''
return self.count()
def __getitem__(self, key):
'''
Overwriting the step in slice. It is used to set the results either as elasticsearch-dsl-py response object, or
attempt to fetch the Django model instance.
:warning: Getting an item will execute this search. Any search operation or field setting *must* be done prior to getting an item.
'''
if isinstance(key, slice):
if key.step is not None:
self._raw_results_only = key.step
if key.start is not None and key.stop is not None:
single_item = key.start - key.stop == -1
elif key.start is None and key.stop == 1:
single_item = True
else:
single_item = False
key = slice(key.start, key.stop)
else:
single_item = False
else:
single_item = True
results = super(Bungiesearch, self).__getitem__(key).execute()
if single_item:
try:
return results[0]
except IndexError:
return []
return results
def hook_alias(self, alias, model_obj=None):
'''
Returns the alias function, if it exists and if it can be applied to this model.
'''
try:
search_alias = self._alias_hooks[alias]
except KeyError:
raise AttributeError('Could not find search alias named {}. Is this alias defined in BUNGIESEARCH["ALIASES"]?'.format(alias))
else:
if search_alias._applicable_models and \
((model_obj and model_obj not in search_alias._applicable_models) or \
not any([app_model_obj.__name__ in self._doc_type for app_model_obj in search_alias._applicable_models])):
raise ValueError('Search alias {} is not applicable to model/doc_types {}.'.format(alias, model_obj if model_obj else self._doc_type))
return search_alias.prepare(self, model_obj).alias_for
def __getattr__(self, alias):
'''
Shortcut for search aliases. As explained in the docs (https://docs.python.org/2/reference/datamodel.html#object.__getattr__),
this is only called as a last resort in case the attribute is not found.
'''
return self.hook_alias(alias)
|
ChristopherRabotin/bungiesearch | bungiesearch/__init__.py | Bungiesearch.get_model_index | python | def get_model_index(cls, model, default=True):
'''
Returns the default model index for the given model, or the list of indices if default is False.
:param model: model name as a string.
:raise KeyError: If the provided model does not have any index associated.
'''
try:
if default:
return cls._model_name_to_default_index[model]
return cls._model_name_to_model_idx[model]
except KeyError:
raise KeyError('Could not find any model index defined for model {}.'.format(model)) | Returns the default model index for the given model, or the list of indices if default is False.
:param model: model name as a string.
:raise KeyError: If the provided model does not have any index associated. | train | https://github.com/ChristopherRabotin/bungiesearch/blob/13768342bc2698b214eb0003c2d113b6e273c30d/bungiesearch/__init__.py#L111-L122 | null | class Bungiesearch(Search):
'''
This object is used to read Django settings and initialize the elasticsearch connection.
'''
DEFAULT_TIMEOUT = 5
BUNGIE = settings.BUNGIESEARCH
# The following code loads each model index_name module (as defined in the settings) and stores
# index_name name to model index_name, and index_name name to model. Settings shouldn't change between
# subsequent calls to Search(), which is why this is static code.
_cached_es_instances = {}
# Let's go through the settings in order to map each defined Model/ModelIndex to the elasticsearch index_name.
_model_to_index, _model_name_to_index, _model_name_to_model_idx = defaultdict(list), defaultdict(list), defaultdict(list)
_index_to_model, _idx_name_to_mdl_to_mdlidx = defaultdict(list), defaultdict(dict)
_model_name_to_default_index, _alias_hooks = {}, {}
_managed_models = []
__loaded_indices__ = False
@classmethod
def __load_settings__(cls):
if cls.__loaded_indices__:
return
cls.__loaded_indices__ = True
# Loading indices.
for index_name, module_str in iteritems(cls.BUNGIE['INDICES']):
index_module = import_module(module_str)
for index_obj in itervalues(index_module.__dict__):
try:
if issubclass(index_obj, ModelIndex) and index_obj != ModelIndex:
index_instance = index_obj()
assoc_model = index_instance.get_model()
cls._index_to_model[index_name].append(assoc_model)
cls._model_name_to_model_idx[assoc_model.__name__].append(index_instance)
cls._idx_name_to_mdl_to_mdlidx[index_name][assoc_model.__name__] = index_instance
if index_instance.is_default:
if assoc_model.__name__ in cls._model_name_to_default_index:
raise AttributeError('ModelIndex {} on index {} is marked as default, but {} was already set as default.'.format(index_instance, index_name, cls._model_name_to_default_index[assoc_model.__name__]))
cls._model_name_to_default_index[assoc_model.__name__] = index_instance
except TypeError:
pass # Oops, just attempted to get subclasses of a non-class.
# Create reverse maps in order to have O(1) access.
for index_name, models in iteritems(cls._index_to_model):
for model in models:
cls._model_to_index[model].append(index_name)
cls._model_name_to_index[model.__name__].append(index_name)
# Loading aliases.
for alias_prefix, module_str in iteritems(cls.BUNGIE.get('ALIASES', {})):
if alias_prefix is None:
alias_prefix = 'bungie'
if alias_prefix != '':
alias_prefix += '_'
alias_module = import_module(module_str)
for alias_obj in itervalues(alias_module.__dict__):
try:
if issubclass(alias_obj, SearchAlias) and alias_obj != SearchAlias:
alias_instance = alias_obj()
cls._alias_hooks[alias_prefix + alias_instance.alias_name] = alias_instance
except TypeError:
pass # Oops, just attempted to get subclasses of a non-class.
@classmethod
def _build_key(cls, urls, timeout, **settings):
# Order the settings by key and then turn it into a string with
# repr. There are a lot of edge cases here, but the worst that
# happens is that the key is different and so you get a new
# Elasticsearch. We'll probably have to tweak this.
settings = sorted(settings.items(), key=lambda item: item[0])
settings = repr([(k, v) for k, v in settings])
# elasticsearch allows URLs to be a string, so we make sure to
# account for that when converting whatever it is into a tuple.
if isinstance(urls, string_types):
urls = (urls,)
else:
urls = tuple(urls)
# Generate a tuple of all the bits and return that as the key
# because that's hashable.
key = (urls, timeout, settings)
return key
@classmethod
def get_index(cls, model, via_class=False):
'''
Returns the index name (as a string) for the given model as a class or a string.
:param model: model name or model class if via_class set to True.
:param via_class: set to True if parameter model is a class.
:raise KeyError: If the provided model does not have any index associated.
'''
try:
return cls._model_to_index[model] if via_class else cls._model_name_to_index[model]
except KeyError:
raise KeyError('Could not find any index defined for model {}. Is the model in one of the model index modules of BUNGIESEARCH["INDICES"]?'.format(model))
@classmethod
@classmethod
def get_indices(cls):
'''
Returns the list of indices defined in the settings.
'''
return cls._idx_name_to_mdl_to_mdlidx.keys()
@classmethod
def get_models(cls, index, as_class=False):
'''
Returns the list of models defined for this index.
:param index: index name.
:param as_class: set to True to return the model as a model object instead of as a string.
'''
try:
return cls._index_to_model[index] if as_class else cls._idx_name_to_mdl_to_mdlidx[index].keys()
except KeyError:
raise KeyError('Could not find any index named {}. Is this index defined in BUNGIESEARCH["INDICES"]?'.format(index))
@classmethod
def get_model_indices(cls, index):
'''
Returns the list of model indices (i.e. ModelIndex objects) defined for this index.
:param index: index name.
'''
try:
return cls._idx_name_to_mdl_to_mdlidx[index].values()
except KeyError:
raise KeyError('Could not find any index named {}. Is this index defined in BUNGIESEARCH["INDICES"]?'.format(index))
@classmethod
def map_raw_results(cls, raw_results, instance=None):
'''
Maps raw results to database model objects.
:param raw_results: list raw results as returned from elasticsearch-dsl-py.
:param instance: Bungiesearch instance if you want to make use of `.only()` or `optmize_queries` as defined in the ModelIndex.
:return: list of mapped results in the *same* order as returned by elasticsearch.
'''
# Let's iterate over the results and determine the appropriate mapping.
model_results = defaultdict(list)
# Initializing the list to the number of returned results. This allows us to restore each item in its position.
if hasattr(raw_results, 'hits'):
results = [None] * len(raw_results.hits)
else:
results = [None] * len(raw_results)
found_results = {}
for pos, result in enumerate(raw_results):
model_name = result.meta.doc_type
if model_name not in Bungiesearch._model_name_to_index or result.meta.index not in Bungiesearch._model_name_to_index[model_name]:
logger.warning('Returned object of type {} ({}) is not defined in the settings, or is not associated to the same index as in the settings.'.format(model_name, result))
results[pos] = result
else:
meta = Bungiesearch.get_model_index(model_name).Meta
model_results['{}.{}'.format(result.meta.index, model_name)].append(result.meta.id)
found_results['{1.meta.index}.{0}.{1.meta.id}'.format(model_name, result)] = (pos, result.meta)
# Now that we have model ids per model name, let's fetch everything at once.
for ref_name, ids in iteritems(model_results):
index_name, model_name = ref_name.split('.')
model_idx = Bungiesearch._idx_name_to_mdl_to_mdlidx[index_name][model_name]
model_obj = model_idx.get_model()
items = model_obj.objects.filter(pk__in=ids)
if instance:
if instance._only == '__model' or model_idx.optimize_queries:
desired_fields = model_idx.fields_to_fetch
elif instance._only == '__fields':
desired_fields = instance._fields
else:
desired_fields = instance._only
if desired_fields: # Prevents setting the database fetch to __fields but not having specified any field to elasticsearch.
items = items.only(
*[field.name
for field in model_obj._meta.get_fields()
# For complete backwards compatibility, you may want to exclude
# GenericForeignKey from the results.
if field.name in desired_fields and \
not (field.many_to_one and field.related_model is None)
]
)
# Let's reposition each item in the results and set the _searchmeta meta information.
for item in items:
pos, meta = found_results['{}.{}.{}'.format(index_name, model_name, item.pk)]
item._searchmeta = meta
results[pos] = item
return results
def __init__(self, urls=None, timeout=None, force_new=False, raw_results=False, **kwargs):
'''
Creates a new ElasticSearch DSL object. Grabs the ElasticSearch connection from the pool
if it has already been initialized. Otherwise, creates a new one.
If no parameters are passed, everything is determined from the Django settings.
:param urls: A list of URLs, or a single string of URL (without leading `http://`), or None to read from settings.
:param idx: A list of indices or a single string representing an index_name name. Is optional. Will be merged with `idx_alias`.
:param idx_alias: A list of index_name aliases or a single string representing an index_name alias, as defined in the settings. Will be merged with `index_name`.
:param timeout: Timeout used in the connection.
:param force_new: Set to `True` to force a new elasticsearch connection. Otherwise will aggressively use any connection with the exact same settings.
:param **kwargs: Additional settings to pass to the low level elasticsearch client and to elasticsearch-sal-py.search.Search.
'''
Bungiesearch.__load_settings__()
urls = urls or Bungiesearch.BUNGIE['URLS']
if not timeout:
timeout = Bungiesearch.BUNGIE.get('TIMEOUT', Bungiesearch.DEFAULT_TIMEOUT)
search_keys = ['using', 'index', 'doc_type', 'extra']
search_settings, es_settings = {}, {}
for k, v in iteritems(kwargs):
if k in search_keys:
search_settings[k] = v
else:
es_settings[k] = v
if not es_settings:
# If there aren't any provided elasticsearch settings, let's see if it's defined in the settings.
es_settings = Bungiesearch.BUNGIE.get('ES_SETTINGS', {})
# Building a caching key to cache the es_instance for later use (and retrieved a previously cached es_instance).
cache_key = Bungiesearch._build_key(urls, timeout, **es_settings)
es_instance = None
if not force_new:
if cache_key in Bungiesearch._cached_es_instances:
es_instance = Bungiesearch._cached_es_instances[cache_key]
if not es_instance:
es_instance = Elasticsearch(urls, timeout=timeout, **es_settings)
Bungiesearch._cached_es_instances[cache_key] = es_instance
if 'using' not in search_settings:
search_settings['using'] = es_instance
super(Bungiesearch, self).__init__(**search_settings)
# Creating instance attributes.
self._only = [] # Stores the exact fields to fetch from the database when mapping.
self.results = [] # Store the mapped and unmapped results.
self._raw_results_only = raw_results
def _clone(self):
'''
Must clone additional fields to those cloned by elasticsearch-dsl-py.
'''
instance = super(Bungiesearch, self)._clone()
instance._raw_results_only = self._raw_results_only
return instance
def get_es_instance(self):
'''
Returns the low level elasticsearch instance to perform low level operations.
'''
return self._using
def execute_raw(self):
self.raw_results = super(Bungiesearch, self).execute()
def execute(self, return_results=True):
'''
Executes the query and attempts to create model objects from results.
'''
if self.results:
return self.results if return_results else None
self.execute_raw()
if self._raw_results_only:
self.results = self.raw_results
else:
self.map_results()
if return_results:
return self.results
def map_results(self):
'''
Maps raw results and store them.
'''
self.results = Bungiesearch.map_raw_results(self.raw_results, self)
def only(self, *fields):
'''
Restricts the fields to be fetched when mapping. Set to `__model` to fetch all fields define in the ModelIndex.
'''
s = self._clone()
if len(fields) == 1 and fields[0] == '__model':
s._only = '__model'
else:
s._only = fields
return s
def __iter__(self):
'''
Allows iterating on the response.
'''
self.execute()
return iter(self.results)
def __len__(self):
'''
Return elasticsearch-dsl-py count.
'''
return self.count()
def __getitem__(self, key):
'''
Overwriting the step in slice. It is used to set the results either as elasticsearch-dsl-py response object, or
attempt to fetch the Django model instance.
:warning: Getting an item will execute this search. Any search operation or field setting *must* be done prior to getting an item.
'''
if isinstance(key, slice):
if key.step is not None:
self._raw_results_only = key.step
if key.start is not None and key.stop is not None:
single_item = key.start - key.stop == -1
elif key.start is None and key.stop == 1:
single_item = True
else:
single_item = False
key = slice(key.start, key.stop)
else:
single_item = False
else:
single_item = True
results = super(Bungiesearch, self).__getitem__(key).execute()
if single_item:
try:
return results[0]
except IndexError:
return []
return results
def hook_alias(self, alias, model_obj=None):
'''
Returns the alias function, if it exists and if it can be applied to this model.
'''
try:
search_alias = self._alias_hooks[alias]
except KeyError:
raise AttributeError('Could not find search alias named {}. Is this alias defined in BUNGIESEARCH["ALIASES"]?'.format(alias))
else:
if search_alias._applicable_models and \
((model_obj and model_obj not in search_alias._applicable_models) or \
not any([app_model_obj.__name__ in self._doc_type for app_model_obj in search_alias._applicable_models])):
raise ValueError('Search alias {} is not applicable to model/doc_types {}.'.format(alias, model_obj if model_obj else self._doc_type))
return search_alias.prepare(self, model_obj).alias_for
def __getattr__(self, alias):
'''
Shortcut for search aliases. As explained in the docs (https://docs.python.org/2/reference/datamodel.html#object.__getattr__),
this is only called as a last resort in case the attribute is not found.
'''
return self.hook_alias(alias)
|
ChristopherRabotin/bungiesearch | bungiesearch/__init__.py | Bungiesearch.get_models | python | def get_models(cls, index, as_class=False):
'''
Returns the list of models defined for this index.
:param index: index name.
:param as_class: set to True to return the model as a model object instead of as a string.
'''
try:
return cls._index_to_model[index] if as_class else cls._idx_name_to_mdl_to_mdlidx[index].keys()
except KeyError:
raise KeyError('Could not find any index named {}. Is this index defined in BUNGIESEARCH["INDICES"]?'.format(index)) | Returns the list of models defined for this index.
:param index: index name.
:param as_class: set to True to return the model as a model object instead of as a string. | train | https://github.com/ChristopherRabotin/bungiesearch/blob/13768342bc2698b214eb0003c2d113b6e273c30d/bungiesearch/__init__.py#L132-L141 | null | class Bungiesearch(Search):
'''
This object is used to read Django settings and initialize the elasticsearch connection.
'''
DEFAULT_TIMEOUT = 5
BUNGIE = settings.BUNGIESEARCH
# The following code loads each model index_name module (as defined in the settings) and stores
# index_name name to model index_name, and index_name name to model. Settings shouldn't change between
# subsequent calls to Search(), which is why this is static code.
_cached_es_instances = {}
# Let's go through the settings in order to map each defined Model/ModelIndex to the elasticsearch index_name.
_model_to_index, _model_name_to_index, _model_name_to_model_idx = defaultdict(list), defaultdict(list), defaultdict(list)
_index_to_model, _idx_name_to_mdl_to_mdlidx = defaultdict(list), defaultdict(dict)
_model_name_to_default_index, _alias_hooks = {}, {}
_managed_models = []
__loaded_indices__ = False
@classmethod
def __load_settings__(cls):
if cls.__loaded_indices__:
return
cls.__loaded_indices__ = True
# Loading indices.
for index_name, module_str in iteritems(cls.BUNGIE['INDICES']):
index_module = import_module(module_str)
for index_obj in itervalues(index_module.__dict__):
try:
if issubclass(index_obj, ModelIndex) and index_obj != ModelIndex:
index_instance = index_obj()
assoc_model = index_instance.get_model()
cls._index_to_model[index_name].append(assoc_model)
cls._model_name_to_model_idx[assoc_model.__name__].append(index_instance)
cls._idx_name_to_mdl_to_mdlidx[index_name][assoc_model.__name__] = index_instance
if index_instance.is_default:
if assoc_model.__name__ in cls._model_name_to_default_index:
raise AttributeError('ModelIndex {} on index {} is marked as default, but {} was already set as default.'.format(index_instance, index_name, cls._model_name_to_default_index[assoc_model.__name__]))
cls._model_name_to_default_index[assoc_model.__name__] = index_instance
except TypeError:
pass # Oops, just attempted to get subclasses of a non-class.
# Create reverse maps in order to have O(1) access.
for index_name, models in iteritems(cls._index_to_model):
for model in models:
cls._model_to_index[model].append(index_name)
cls._model_name_to_index[model.__name__].append(index_name)
# Loading aliases.
for alias_prefix, module_str in iteritems(cls.BUNGIE.get('ALIASES', {})):
if alias_prefix is None:
alias_prefix = 'bungie'
if alias_prefix != '':
alias_prefix += '_'
alias_module = import_module(module_str)
for alias_obj in itervalues(alias_module.__dict__):
try:
if issubclass(alias_obj, SearchAlias) and alias_obj != SearchAlias:
alias_instance = alias_obj()
cls._alias_hooks[alias_prefix + alias_instance.alias_name] = alias_instance
except TypeError:
pass # Oops, just attempted to get subclasses of a non-class.
@classmethod
def _build_key(cls, urls, timeout, **settings):
# Order the settings by key and then turn it into a string with
# repr. There are a lot of edge cases here, but the worst that
# happens is that the key is different and so you get a new
# Elasticsearch. We'll probably have to tweak this.
settings = sorted(settings.items(), key=lambda item: item[0])
settings = repr([(k, v) for k, v in settings])
# elasticsearch allows URLs to be a string, so we make sure to
# account for that when converting whatever it is into a tuple.
if isinstance(urls, string_types):
urls = (urls,)
else:
urls = tuple(urls)
# Generate a tuple of all the bits and return that as the key
# because that's hashable.
key = (urls, timeout, settings)
return key
@classmethod
def get_index(cls, model, via_class=False):
'''
Returns the index name (as a string) for the given model as a class or a string.
:param model: model name or model class if via_class set to True.
:param via_class: set to True if parameter model is a class.
:raise KeyError: If the provided model does not have any index associated.
'''
try:
return cls._model_to_index[model] if via_class else cls._model_name_to_index[model]
except KeyError:
raise KeyError('Could not find any index defined for model {}. Is the model in one of the model index modules of BUNGIESEARCH["INDICES"]?'.format(model))
@classmethod
def get_model_index(cls, model, default=True):
'''
Returns the default model index for the given model, or the list of indices if default is False.
:param model: model name as a string.
:raise KeyError: If the provided model does not have any index associated.
'''
try:
if default:
return cls._model_name_to_default_index[model]
return cls._model_name_to_model_idx[model]
except KeyError:
raise KeyError('Could not find any model index defined for model {}.'.format(model))
@classmethod
def get_indices(cls):
'''
Returns the list of indices defined in the settings.
'''
return cls._idx_name_to_mdl_to_mdlidx.keys()
@classmethod
@classmethod
def get_model_indices(cls, index):
'''
Returns the list of model indices (i.e. ModelIndex objects) defined for this index.
:param index: index name.
'''
try:
return cls._idx_name_to_mdl_to_mdlidx[index].values()
except KeyError:
raise KeyError('Could not find any index named {}. Is this index defined in BUNGIESEARCH["INDICES"]?'.format(index))
@classmethod
def map_raw_results(cls, raw_results, instance=None):
'''
Maps raw results to database model objects.
:param raw_results: list raw results as returned from elasticsearch-dsl-py.
:param instance: Bungiesearch instance if you want to make use of `.only()` or `optmize_queries` as defined in the ModelIndex.
:return: list of mapped results in the *same* order as returned by elasticsearch.
'''
# Let's iterate over the results and determine the appropriate mapping.
model_results = defaultdict(list)
# Initializing the list to the number of returned results. This allows us to restore each item in its position.
if hasattr(raw_results, 'hits'):
results = [None] * len(raw_results.hits)
else:
results = [None] * len(raw_results)
found_results = {}
for pos, result in enumerate(raw_results):
model_name = result.meta.doc_type
if model_name not in Bungiesearch._model_name_to_index or result.meta.index not in Bungiesearch._model_name_to_index[model_name]:
logger.warning('Returned object of type {} ({}) is not defined in the settings, or is not associated to the same index as in the settings.'.format(model_name, result))
results[pos] = result
else:
meta = Bungiesearch.get_model_index(model_name).Meta
model_results['{}.{}'.format(result.meta.index, model_name)].append(result.meta.id)
found_results['{1.meta.index}.{0}.{1.meta.id}'.format(model_name, result)] = (pos, result.meta)
# Now that we have model ids per model name, let's fetch everything at once.
for ref_name, ids in iteritems(model_results):
index_name, model_name = ref_name.split('.')
model_idx = Bungiesearch._idx_name_to_mdl_to_mdlidx[index_name][model_name]
model_obj = model_idx.get_model()
items = model_obj.objects.filter(pk__in=ids)
if instance:
if instance._only == '__model' or model_idx.optimize_queries:
desired_fields = model_idx.fields_to_fetch
elif instance._only == '__fields':
desired_fields = instance._fields
else:
desired_fields = instance._only
if desired_fields: # Prevents setting the database fetch to __fields but not having specified any field to elasticsearch.
items = items.only(
*[field.name
for field in model_obj._meta.get_fields()
# For complete backwards compatibility, you may want to exclude
# GenericForeignKey from the results.
if field.name in desired_fields and \
not (field.many_to_one and field.related_model is None)
]
)
# Let's reposition each item in the results and set the _searchmeta meta information.
for item in items:
pos, meta = found_results['{}.{}.{}'.format(index_name, model_name, item.pk)]
item._searchmeta = meta
results[pos] = item
return results
def __init__(self, urls=None, timeout=None, force_new=False, raw_results=False, **kwargs):
'''
Creates a new ElasticSearch DSL object. Grabs the ElasticSearch connection from the pool
if it has already been initialized. Otherwise, creates a new one.
If no parameters are passed, everything is determined from the Django settings.
:param urls: A list of URLs, or a single string of URL (without leading `http://`), or None to read from settings.
:param idx: A list of indices or a single string representing an index_name name. Is optional. Will be merged with `idx_alias`.
:param idx_alias: A list of index_name aliases or a single string representing an index_name alias, as defined in the settings. Will be merged with `index_name`.
:param timeout: Timeout used in the connection.
:param force_new: Set to `True` to force a new elasticsearch connection. Otherwise will aggressively use any connection with the exact same settings.
:param **kwargs: Additional settings to pass to the low level elasticsearch client and to elasticsearch-sal-py.search.Search.
'''
Bungiesearch.__load_settings__()
urls = urls or Bungiesearch.BUNGIE['URLS']
if not timeout:
timeout = Bungiesearch.BUNGIE.get('TIMEOUT', Bungiesearch.DEFAULT_TIMEOUT)
search_keys = ['using', 'index', 'doc_type', 'extra']
search_settings, es_settings = {}, {}
for k, v in iteritems(kwargs):
if k in search_keys:
search_settings[k] = v
else:
es_settings[k] = v
if not es_settings:
# If there aren't any provided elasticsearch settings, let's see if it's defined in the settings.
es_settings = Bungiesearch.BUNGIE.get('ES_SETTINGS', {})
# Building a caching key to cache the es_instance for later use (and retrieved a previously cached es_instance).
cache_key = Bungiesearch._build_key(urls, timeout, **es_settings)
es_instance = None
if not force_new:
if cache_key in Bungiesearch._cached_es_instances:
es_instance = Bungiesearch._cached_es_instances[cache_key]
if not es_instance:
es_instance = Elasticsearch(urls, timeout=timeout, **es_settings)
Bungiesearch._cached_es_instances[cache_key] = es_instance
if 'using' not in search_settings:
search_settings['using'] = es_instance
super(Bungiesearch, self).__init__(**search_settings)
# Creating instance attributes.
self._only = [] # Stores the exact fields to fetch from the database when mapping.
self.results = [] # Store the mapped and unmapped results.
self._raw_results_only = raw_results
def _clone(self):
'''
Must clone additional fields to those cloned by elasticsearch-dsl-py.
'''
instance = super(Bungiesearch, self)._clone()
instance._raw_results_only = self._raw_results_only
return instance
def get_es_instance(self):
'''
Returns the low level elasticsearch instance to perform low level operations.
'''
return self._using
def execute_raw(self):
self.raw_results = super(Bungiesearch, self).execute()
def execute(self, return_results=True):
'''
Executes the query and attempts to create model objects from results.
'''
if self.results:
return self.results if return_results else None
self.execute_raw()
if self._raw_results_only:
self.results = self.raw_results
else:
self.map_results()
if return_results:
return self.results
def map_results(self):
'''
Maps raw results and store them.
'''
self.results = Bungiesearch.map_raw_results(self.raw_results, self)
def only(self, *fields):
'''
Restricts the fields to be fetched when mapping. Set to `__model` to fetch all fields define in the ModelIndex.
'''
s = self._clone()
if len(fields) == 1 and fields[0] == '__model':
s._only = '__model'
else:
s._only = fields
return s
def __iter__(self):
'''
Allows iterating on the response.
'''
self.execute()
return iter(self.results)
def __len__(self):
'''
Return elasticsearch-dsl-py count.
'''
return self.count()
def __getitem__(self, key):
'''
Overwriting the step in slice. It is used to set the results either as elasticsearch-dsl-py response object, or
attempt to fetch the Django model instance.
:warning: Getting an item will execute this search. Any search operation or field setting *must* be done prior to getting an item.
'''
if isinstance(key, slice):
if key.step is not None:
self._raw_results_only = key.step
if key.start is not None and key.stop is not None:
single_item = key.start - key.stop == -1
elif key.start is None and key.stop == 1:
single_item = True
else:
single_item = False
key = slice(key.start, key.stop)
else:
single_item = False
else:
single_item = True
results = super(Bungiesearch, self).__getitem__(key).execute()
if single_item:
try:
return results[0]
except IndexError:
return []
return results
def hook_alias(self, alias, model_obj=None):
'''
Returns the alias function, if it exists and if it can be applied to this model.
'''
try:
search_alias = self._alias_hooks[alias]
except KeyError:
raise AttributeError('Could not find search alias named {}. Is this alias defined in BUNGIESEARCH["ALIASES"]?'.format(alias))
else:
if search_alias._applicable_models and \
((model_obj and model_obj not in search_alias._applicable_models) or \
not any([app_model_obj.__name__ in self._doc_type for app_model_obj in search_alias._applicable_models])):
raise ValueError('Search alias {} is not applicable to model/doc_types {}.'.format(alias, model_obj if model_obj else self._doc_type))
return search_alias.prepare(self, model_obj).alias_for
def __getattr__(self, alias):
'''
Shortcut for search aliases. As explained in the docs (https://docs.python.org/2/reference/datamodel.html#object.__getattr__),
this is only called as a last resort in case the attribute is not found.
'''
return self.hook_alias(alias)
|
ChristopherRabotin/bungiesearch | bungiesearch/__init__.py | Bungiesearch.get_model_indices | python | def get_model_indices(cls, index):
'''
Returns the list of model indices (i.e. ModelIndex objects) defined for this index.
:param index: index name.
'''
try:
return cls._idx_name_to_mdl_to_mdlidx[index].values()
except KeyError:
raise KeyError('Could not find any index named {}. Is this index defined in BUNGIESEARCH["INDICES"]?'.format(index)) | Returns the list of model indices (i.e. ModelIndex objects) defined for this index.
:param index: index name. | train | https://github.com/ChristopherRabotin/bungiesearch/blob/13768342bc2698b214eb0003c2d113b6e273c30d/bungiesearch/__init__.py#L144-L152 | null | class Bungiesearch(Search):
'''
This object is used to read Django settings and initialize the elasticsearch connection.
'''
DEFAULT_TIMEOUT = 5
BUNGIE = settings.BUNGIESEARCH
# The following code loads each model index_name module (as defined in the settings) and stores
# index_name name to model index_name, and index_name name to model. Settings shouldn't change between
# subsequent calls to Search(), which is why this is static code.
_cached_es_instances = {}
# Let's go through the settings in order to map each defined Model/ModelIndex to the elasticsearch index_name.
_model_to_index, _model_name_to_index, _model_name_to_model_idx = defaultdict(list), defaultdict(list), defaultdict(list)
_index_to_model, _idx_name_to_mdl_to_mdlidx = defaultdict(list), defaultdict(dict)
_model_name_to_default_index, _alias_hooks = {}, {}
_managed_models = []
__loaded_indices__ = False
@classmethod
def __load_settings__(cls):
if cls.__loaded_indices__:
return
cls.__loaded_indices__ = True
# Loading indices.
for index_name, module_str in iteritems(cls.BUNGIE['INDICES']):
index_module = import_module(module_str)
for index_obj in itervalues(index_module.__dict__):
try:
if issubclass(index_obj, ModelIndex) and index_obj != ModelIndex:
index_instance = index_obj()
assoc_model = index_instance.get_model()
cls._index_to_model[index_name].append(assoc_model)
cls._model_name_to_model_idx[assoc_model.__name__].append(index_instance)
cls._idx_name_to_mdl_to_mdlidx[index_name][assoc_model.__name__] = index_instance
if index_instance.is_default:
if assoc_model.__name__ in cls._model_name_to_default_index:
raise AttributeError('ModelIndex {} on index {} is marked as default, but {} was already set as default.'.format(index_instance, index_name, cls._model_name_to_default_index[assoc_model.__name__]))
cls._model_name_to_default_index[assoc_model.__name__] = index_instance
except TypeError:
pass # Oops, just attempted to get subclasses of a non-class.
# Create reverse maps in order to have O(1) access.
for index_name, models in iteritems(cls._index_to_model):
for model in models:
cls._model_to_index[model].append(index_name)
cls._model_name_to_index[model.__name__].append(index_name)
# Loading aliases.
for alias_prefix, module_str in iteritems(cls.BUNGIE.get('ALIASES', {})):
if alias_prefix is None:
alias_prefix = 'bungie'
if alias_prefix != '':
alias_prefix += '_'
alias_module = import_module(module_str)
for alias_obj in itervalues(alias_module.__dict__):
try:
if issubclass(alias_obj, SearchAlias) and alias_obj != SearchAlias:
alias_instance = alias_obj()
cls._alias_hooks[alias_prefix + alias_instance.alias_name] = alias_instance
except TypeError:
pass # Oops, just attempted to get subclasses of a non-class.
@classmethod
def _build_key(cls, urls, timeout, **settings):
# Order the settings by key and then turn it into a string with
# repr. There are a lot of edge cases here, but the worst that
# happens is that the key is different and so you get a new
# Elasticsearch. We'll probably have to tweak this.
settings = sorted(settings.items(), key=lambda item: item[0])
settings = repr([(k, v) for k, v in settings])
# elasticsearch allows URLs to be a string, so we make sure to
# account for that when converting whatever it is into a tuple.
if isinstance(urls, string_types):
urls = (urls,)
else:
urls = tuple(urls)
# Generate a tuple of all the bits and return that as the key
# because that's hashable.
key = (urls, timeout, settings)
return key
@classmethod
def get_index(cls, model, via_class=False):
'''
Returns the index name (as a string) for the given model as a class or a string.
:param model: model name or model class if via_class set to True.
:param via_class: set to True if parameter model is a class.
:raise KeyError: If the provided model does not have any index associated.
'''
try:
return cls._model_to_index[model] if via_class else cls._model_name_to_index[model]
except KeyError:
raise KeyError('Could not find any index defined for model {}. Is the model in one of the model index modules of BUNGIESEARCH["INDICES"]?'.format(model))
@classmethod
def get_model_index(cls, model, default=True):
'''
Returns the default model index for the given model, or the list of indices if default is False.
:param model: model name as a string.
:raise KeyError: If the provided model does not have any index associated.
'''
try:
if default:
return cls._model_name_to_default_index[model]
return cls._model_name_to_model_idx[model]
except KeyError:
raise KeyError('Could not find any model index defined for model {}.'.format(model))
@classmethod
def get_indices(cls):
'''
Returns the list of indices defined in the settings.
'''
return cls._idx_name_to_mdl_to_mdlidx.keys()
@classmethod
def get_models(cls, index, as_class=False):
'''
Returns the list of models defined for this index.
:param index: index name.
:param as_class: set to True to return the model as a model object instead of as a string.
'''
try:
return cls._index_to_model[index] if as_class else cls._idx_name_to_mdl_to_mdlidx[index].keys()
except KeyError:
raise KeyError('Could not find any index named {}. Is this index defined in BUNGIESEARCH["INDICES"]?'.format(index))
@classmethod
@classmethod
def map_raw_results(cls, raw_results, instance=None):
'''
Maps raw results to database model objects.
:param raw_results: list raw results as returned from elasticsearch-dsl-py.
:param instance: Bungiesearch instance if you want to make use of `.only()` or `optmize_queries` as defined in the ModelIndex.
:return: list of mapped results in the *same* order as returned by elasticsearch.
'''
# Let's iterate over the results and determine the appropriate mapping.
model_results = defaultdict(list)
# Initializing the list to the number of returned results. This allows us to restore each item in its position.
if hasattr(raw_results, 'hits'):
results = [None] * len(raw_results.hits)
else:
results = [None] * len(raw_results)
found_results = {}
for pos, result in enumerate(raw_results):
model_name = result.meta.doc_type
if model_name not in Bungiesearch._model_name_to_index or result.meta.index not in Bungiesearch._model_name_to_index[model_name]:
logger.warning('Returned object of type {} ({}) is not defined in the settings, or is not associated to the same index as in the settings.'.format(model_name, result))
results[pos] = result
else:
meta = Bungiesearch.get_model_index(model_name).Meta
model_results['{}.{}'.format(result.meta.index, model_name)].append(result.meta.id)
found_results['{1.meta.index}.{0}.{1.meta.id}'.format(model_name, result)] = (pos, result.meta)
# Now that we have model ids per model name, let's fetch everything at once.
for ref_name, ids in iteritems(model_results):
index_name, model_name = ref_name.split('.')
model_idx = Bungiesearch._idx_name_to_mdl_to_mdlidx[index_name][model_name]
model_obj = model_idx.get_model()
items = model_obj.objects.filter(pk__in=ids)
if instance:
if instance._only == '__model' or model_idx.optimize_queries:
desired_fields = model_idx.fields_to_fetch
elif instance._only == '__fields':
desired_fields = instance._fields
else:
desired_fields = instance._only
if desired_fields: # Prevents setting the database fetch to __fields but not having specified any field to elasticsearch.
items = items.only(
*[field.name
for field in model_obj._meta.get_fields()
# For complete backwards compatibility, you may want to exclude
# GenericForeignKey from the results.
if field.name in desired_fields and \
not (field.many_to_one and field.related_model is None)
]
)
# Let's reposition each item in the results and set the _searchmeta meta information.
for item in items:
pos, meta = found_results['{}.{}.{}'.format(index_name, model_name, item.pk)]
item._searchmeta = meta
results[pos] = item
return results
def __init__(self, urls=None, timeout=None, force_new=False, raw_results=False, **kwargs):
'''
Creates a new ElasticSearch DSL object. Grabs the ElasticSearch connection from the pool
if it has already been initialized. Otherwise, creates a new one.
If no parameters are passed, everything is determined from the Django settings.
:param urls: A list of URLs, or a single string of URL (without leading `http://`), or None to read from settings.
:param idx: A list of indices or a single string representing an index_name name. Is optional. Will be merged with `idx_alias`.
:param idx_alias: A list of index_name aliases or a single string representing an index_name alias, as defined in the settings. Will be merged with `index_name`.
:param timeout: Timeout used in the connection.
:param force_new: Set to `True` to force a new elasticsearch connection. Otherwise will aggressively use any connection with the exact same settings.
:param **kwargs: Additional settings to pass to the low level elasticsearch client and to elasticsearch-sal-py.search.Search.
'''
Bungiesearch.__load_settings__()
urls = urls or Bungiesearch.BUNGIE['URLS']
if not timeout:
timeout = Bungiesearch.BUNGIE.get('TIMEOUT', Bungiesearch.DEFAULT_TIMEOUT)
search_keys = ['using', 'index', 'doc_type', 'extra']
search_settings, es_settings = {}, {}
for k, v in iteritems(kwargs):
if k in search_keys:
search_settings[k] = v
else:
es_settings[k] = v
if not es_settings:
# If there aren't any provided elasticsearch settings, let's see if it's defined in the settings.
es_settings = Bungiesearch.BUNGIE.get('ES_SETTINGS', {})
# Building a caching key to cache the es_instance for later use (and retrieved a previously cached es_instance).
cache_key = Bungiesearch._build_key(urls, timeout, **es_settings)
es_instance = None
if not force_new:
if cache_key in Bungiesearch._cached_es_instances:
es_instance = Bungiesearch._cached_es_instances[cache_key]
if not es_instance:
es_instance = Elasticsearch(urls, timeout=timeout, **es_settings)
Bungiesearch._cached_es_instances[cache_key] = es_instance
if 'using' not in search_settings:
search_settings['using'] = es_instance
super(Bungiesearch, self).__init__(**search_settings)
# Creating instance attributes.
self._only = [] # Stores the exact fields to fetch from the database when mapping.
self.results = [] # Store the mapped and unmapped results.
self._raw_results_only = raw_results
def _clone(self):
'''
Must clone additional fields to those cloned by elasticsearch-dsl-py.
'''
instance = super(Bungiesearch, self)._clone()
instance._raw_results_only = self._raw_results_only
return instance
def get_es_instance(self):
'''
Returns the low level elasticsearch instance to perform low level operations.
'''
return self._using
def execute_raw(self):
self.raw_results = super(Bungiesearch, self).execute()
def execute(self, return_results=True):
'''
Executes the query and attempts to create model objects from results.
'''
if self.results:
return self.results if return_results else None
self.execute_raw()
if self._raw_results_only:
self.results = self.raw_results
else:
self.map_results()
if return_results:
return self.results
def map_results(self):
'''
Maps raw results and store them.
'''
self.results = Bungiesearch.map_raw_results(self.raw_results, self)
def only(self, *fields):
'''
Restricts the fields to be fetched when mapping. Set to `__model` to fetch all fields define in the ModelIndex.
'''
s = self._clone()
if len(fields) == 1 and fields[0] == '__model':
s._only = '__model'
else:
s._only = fields
return s
def __iter__(self):
'''
Allows iterating on the response.
'''
self.execute()
return iter(self.results)
def __len__(self):
'''
Return elasticsearch-dsl-py count.
'''
return self.count()
def __getitem__(self, key):
'''
Overwriting the step in slice. It is used to set the results either as elasticsearch-dsl-py response object, or
attempt to fetch the Django model instance.
:warning: Getting an item will execute this search. Any search operation or field setting *must* be done prior to getting an item.
'''
if isinstance(key, slice):
if key.step is not None:
self._raw_results_only = key.step
if key.start is not None and key.stop is not None:
single_item = key.start - key.stop == -1
elif key.start is None and key.stop == 1:
single_item = True
else:
single_item = False
key = slice(key.start, key.stop)
else:
single_item = False
else:
single_item = True
results = super(Bungiesearch, self).__getitem__(key).execute()
if single_item:
try:
return results[0]
except IndexError:
return []
return results
def hook_alias(self, alias, model_obj=None):
'''
Returns the alias function, if it exists and if it can be applied to this model.
'''
try:
search_alias = self._alias_hooks[alias]
except KeyError:
raise AttributeError('Could not find search alias named {}. Is this alias defined in BUNGIESEARCH["ALIASES"]?'.format(alias))
else:
if search_alias._applicable_models and \
((model_obj and model_obj not in search_alias._applicable_models) or \
not any([app_model_obj.__name__ in self._doc_type for app_model_obj in search_alias._applicable_models])):
raise ValueError('Search alias {} is not applicable to model/doc_types {}.'.format(alias, model_obj if model_obj else self._doc_type))
return search_alias.prepare(self, model_obj).alias_for
def __getattr__(self, alias):
'''
Shortcut for search aliases. As explained in the docs (https://docs.python.org/2/reference/datamodel.html#object.__getattr__),
this is only called as a last resort in case the attribute is not found.
'''
return self.hook_alias(alias)
|
ChristopherRabotin/bungiesearch | bungiesearch/__init__.py | Bungiesearch.map_raw_results | python | def map_raw_results(cls, raw_results, instance=None):
'''
Maps raw results to database model objects.
:param raw_results: list raw results as returned from elasticsearch-dsl-py.
:param instance: Bungiesearch instance if you want to make use of `.only()` or `optmize_queries` as defined in the ModelIndex.
:return: list of mapped results in the *same* order as returned by elasticsearch.
'''
# Let's iterate over the results and determine the appropriate mapping.
model_results = defaultdict(list)
# Initializing the list to the number of returned results. This allows us to restore each item in its position.
if hasattr(raw_results, 'hits'):
results = [None] * len(raw_results.hits)
else:
results = [None] * len(raw_results)
found_results = {}
for pos, result in enumerate(raw_results):
model_name = result.meta.doc_type
if model_name not in Bungiesearch._model_name_to_index or result.meta.index not in Bungiesearch._model_name_to_index[model_name]:
logger.warning('Returned object of type {} ({}) is not defined in the settings, or is not associated to the same index as in the settings.'.format(model_name, result))
results[pos] = result
else:
meta = Bungiesearch.get_model_index(model_name).Meta
model_results['{}.{}'.format(result.meta.index, model_name)].append(result.meta.id)
found_results['{1.meta.index}.{0}.{1.meta.id}'.format(model_name, result)] = (pos, result.meta)
# Now that we have model ids per model name, let's fetch everything at once.
for ref_name, ids in iteritems(model_results):
index_name, model_name = ref_name.split('.')
model_idx = Bungiesearch._idx_name_to_mdl_to_mdlidx[index_name][model_name]
model_obj = model_idx.get_model()
items = model_obj.objects.filter(pk__in=ids)
if instance:
if instance._only == '__model' or model_idx.optimize_queries:
desired_fields = model_idx.fields_to_fetch
elif instance._only == '__fields':
desired_fields = instance._fields
else:
desired_fields = instance._only
if desired_fields: # Prevents setting the database fetch to __fields but not having specified any field to elasticsearch.
items = items.only(
*[field.name
for field in model_obj._meta.get_fields()
# For complete backwards compatibility, you may want to exclude
# GenericForeignKey from the results.
if field.name in desired_fields and \
not (field.many_to_one and field.related_model is None)
]
)
# Let's reposition each item in the results and set the _searchmeta meta information.
for item in items:
pos, meta = found_results['{}.{}.{}'.format(index_name, model_name, item.pk)]
item._searchmeta = meta
results[pos] = item
return results | Maps raw results to database model objects.
:param raw_results: list raw results as returned from elasticsearch-dsl-py.
:param instance: Bungiesearch instance if you want to make use of `.only()` or `optmize_queries` as defined in the ModelIndex.
:return: list of mapped results in the *same* order as returned by elasticsearch. | train | https://github.com/ChristopherRabotin/bungiesearch/blob/13768342bc2698b214eb0003c2d113b6e273c30d/bungiesearch/__init__.py#L155-L210 | null | class Bungiesearch(Search):
'''
This object is used to read Django settings and initialize the elasticsearch connection.
'''
DEFAULT_TIMEOUT = 5
BUNGIE = settings.BUNGIESEARCH
# The following code loads each model index_name module (as defined in the settings) and stores
# index_name name to model index_name, and index_name name to model. Settings shouldn't change between
# subsequent calls to Search(), which is why this is static code.
_cached_es_instances = {}
# Let's go through the settings in order to map each defined Model/ModelIndex to the elasticsearch index_name.
_model_to_index, _model_name_to_index, _model_name_to_model_idx = defaultdict(list), defaultdict(list), defaultdict(list)
_index_to_model, _idx_name_to_mdl_to_mdlidx = defaultdict(list), defaultdict(dict)
_model_name_to_default_index, _alias_hooks = {}, {}
_managed_models = []
__loaded_indices__ = False
@classmethod
def __load_settings__(cls):
if cls.__loaded_indices__:
return
cls.__loaded_indices__ = True
# Loading indices.
for index_name, module_str in iteritems(cls.BUNGIE['INDICES']):
index_module = import_module(module_str)
for index_obj in itervalues(index_module.__dict__):
try:
if issubclass(index_obj, ModelIndex) and index_obj != ModelIndex:
index_instance = index_obj()
assoc_model = index_instance.get_model()
cls._index_to_model[index_name].append(assoc_model)
cls._model_name_to_model_idx[assoc_model.__name__].append(index_instance)
cls._idx_name_to_mdl_to_mdlidx[index_name][assoc_model.__name__] = index_instance
if index_instance.is_default:
if assoc_model.__name__ in cls._model_name_to_default_index:
raise AttributeError('ModelIndex {} on index {} is marked as default, but {} was already set as default.'.format(index_instance, index_name, cls._model_name_to_default_index[assoc_model.__name__]))
cls._model_name_to_default_index[assoc_model.__name__] = index_instance
except TypeError:
pass # Oops, just attempted to get subclasses of a non-class.
# Create reverse maps in order to have O(1) access.
for index_name, models in iteritems(cls._index_to_model):
for model in models:
cls._model_to_index[model].append(index_name)
cls._model_name_to_index[model.__name__].append(index_name)
# Loading aliases.
for alias_prefix, module_str in iteritems(cls.BUNGIE.get('ALIASES', {})):
if alias_prefix is None:
alias_prefix = 'bungie'
if alias_prefix != '':
alias_prefix += '_'
alias_module = import_module(module_str)
for alias_obj in itervalues(alias_module.__dict__):
try:
if issubclass(alias_obj, SearchAlias) and alias_obj != SearchAlias:
alias_instance = alias_obj()
cls._alias_hooks[alias_prefix + alias_instance.alias_name] = alias_instance
except TypeError:
pass # Oops, just attempted to get subclasses of a non-class.
@classmethod
def _build_key(cls, urls, timeout, **settings):
# Order the settings by key and then turn it into a string with
# repr. There are a lot of edge cases here, but the worst that
# happens is that the key is different and so you get a new
# Elasticsearch. We'll probably have to tweak this.
settings = sorted(settings.items(), key=lambda item: item[0])
settings = repr([(k, v) for k, v in settings])
# elasticsearch allows URLs to be a string, so we make sure to
# account for that when converting whatever it is into a tuple.
if isinstance(urls, string_types):
urls = (urls,)
else:
urls = tuple(urls)
# Generate a tuple of all the bits and return that as the key
# because that's hashable.
key = (urls, timeout, settings)
return key
@classmethod
def get_index(cls, model, via_class=False):
'''
Returns the index name (as a string) for the given model as a class or a string.
:param model: model name or model class if via_class set to True.
:param via_class: set to True if parameter model is a class.
:raise KeyError: If the provided model does not have any index associated.
'''
try:
return cls._model_to_index[model] if via_class else cls._model_name_to_index[model]
except KeyError:
raise KeyError('Could not find any index defined for model {}. Is the model in one of the model index modules of BUNGIESEARCH["INDICES"]?'.format(model))
@classmethod
def get_model_index(cls, model, default=True):
'''
Returns the default model index for the given model, or the list of indices if default is False.
:param model: model name as a string.
:raise KeyError: If the provided model does not have any index associated.
'''
try:
if default:
return cls._model_name_to_default_index[model]
return cls._model_name_to_model_idx[model]
except KeyError:
raise KeyError('Could not find any model index defined for model {}.'.format(model))
@classmethod
def get_indices(cls):
'''
Returns the list of indices defined in the settings.
'''
return cls._idx_name_to_mdl_to_mdlidx.keys()
@classmethod
def get_models(cls, index, as_class=False):
'''
Returns the list of models defined for this index.
:param index: index name.
:param as_class: set to True to return the model as a model object instead of as a string.
'''
try:
return cls._index_to_model[index] if as_class else cls._idx_name_to_mdl_to_mdlidx[index].keys()
except KeyError:
raise KeyError('Could not find any index named {}. Is this index defined in BUNGIESEARCH["INDICES"]?'.format(index))
@classmethod
def get_model_indices(cls, index):
'''
Returns the list of model indices (i.e. ModelIndex objects) defined for this index.
:param index: index name.
'''
try:
return cls._idx_name_to_mdl_to_mdlidx[index].values()
except KeyError:
raise KeyError('Could not find any index named {}. Is this index defined in BUNGIESEARCH["INDICES"]?'.format(index))
@classmethod
def __init__(self, urls=None, timeout=None, force_new=False, raw_results=False, **kwargs):
'''
Creates a new ElasticSearch DSL object. Grabs the ElasticSearch connection from the pool
if it has already been initialized. Otherwise, creates a new one.
If no parameters are passed, everything is determined from the Django settings.
:param urls: A list of URLs, or a single string of URL (without leading `http://`), or None to read from settings.
:param idx: A list of indices or a single string representing an index_name name. Is optional. Will be merged with `idx_alias`.
:param idx_alias: A list of index_name aliases or a single string representing an index_name alias, as defined in the settings. Will be merged with `index_name`.
:param timeout: Timeout used in the connection.
:param force_new: Set to `True` to force a new elasticsearch connection. Otherwise will aggressively use any connection with the exact same settings.
:param **kwargs: Additional settings to pass to the low level elasticsearch client and to elasticsearch-sal-py.search.Search.
'''
Bungiesearch.__load_settings__()
urls = urls or Bungiesearch.BUNGIE['URLS']
if not timeout:
timeout = Bungiesearch.BUNGIE.get('TIMEOUT', Bungiesearch.DEFAULT_TIMEOUT)
search_keys = ['using', 'index', 'doc_type', 'extra']
search_settings, es_settings = {}, {}
for k, v in iteritems(kwargs):
if k in search_keys:
search_settings[k] = v
else:
es_settings[k] = v
if not es_settings:
# If there aren't any provided elasticsearch settings, let's see if it's defined in the settings.
es_settings = Bungiesearch.BUNGIE.get('ES_SETTINGS', {})
# Building a caching key to cache the es_instance for later use (and retrieved a previously cached es_instance).
cache_key = Bungiesearch._build_key(urls, timeout, **es_settings)
es_instance = None
if not force_new:
if cache_key in Bungiesearch._cached_es_instances:
es_instance = Bungiesearch._cached_es_instances[cache_key]
if not es_instance:
es_instance = Elasticsearch(urls, timeout=timeout, **es_settings)
Bungiesearch._cached_es_instances[cache_key] = es_instance
if 'using' not in search_settings:
search_settings['using'] = es_instance
super(Bungiesearch, self).__init__(**search_settings)
# Creating instance attributes.
self._only = [] # Stores the exact fields to fetch from the database when mapping.
self.results = [] # Store the mapped and unmapped results.
self._raw_results_only = raw_results
def _clone(self):
'''
Must clone additional fields to those cloned by elasticsearch-dsl-py.
'''
instance = super(Bungiesearch, self)._clone()
instance._raw_results_only = self._raw_results_only
return instance
def get_es_instance(self):
'''
Returns the low level elasticsearch instance to perform low level operations.
'''
return self._using
def execute_raw(self):
self.raw_results = super(Bungiesearch, self).execute()
def execute(self, return_results=True):
'''
Executes the query and attempts to create model objects from results.
'''
if self.results:
return self.results if return_results else None
self.execute_raw()
if self._raw_results_only:
self.results = self.raw_results
else:
self.map_results()
if return_results:
return self.results
def map_results(self):
'''
Maps raw results and store them.
'''
self.results = Bungiesearch.map_raw_results(self.raw_results, self)
def only(self, *fields):
'''
Restricts the fields to be fetched when mapping. Set to `__model` to fetch all fields define in the ModelIndex.
'''
s = self._clone()
if len(fields) == 1 and fields[0] == '__model':
s._only = '__model'
else:
s._only = fields
return s
def __iter__(self):
'''
Allows iterating on the response.
'''
self.execute()
return iter(self.results)
def __len__(self):
'''
Return elasticsearch-dsl-py count.
'''
return self.count()
def __getitem__(self, key):
'''
Overwriting the step in slice. It is used to set the results either as elasticsearch-dsl-py response object, or
attempt to fetch the Django model instance.
:warning: Getting an item will execute this search. Any search operation or field setting *must* be done prior to getting an item.
'''
if isinstance(key, slice):
if key.step is not None:
self._raw_results_only = key.step
if key.start is not None and key.stop is not None:
single_item = key.start - key.stop == -1
elif key.start is None and key.stop == 1:
single_item = True
else:
single_item = False
key = slice(key.start, key.stop)
else:
single_item = False
else:
single_item = True
results = super(Bungiesearch, self).__getitem__(key).execute()
if single_item:
try:
return results[0]
except IndexError:
return []
return results
def hook_alias(self, alias, model_obj=None):
'''
Returns the alias function, if it exists and if it can be applied to this model.
'''
try:
search_alias = self._alias_hooks[alias]
except KeyError:
raise AttributeError('Could not find search alias named {}. Is this alias defined in BUNGIESEARCH["ALIASES"]?'.format(alias))
else:
if search_alias._applicable_models and \
((model_obj and model_obj not in search_alias._applicable_models) or \
not any([app_model_obj.__name__ in self._doc_type for app_model_obj in search_alias._applicable_models])):
raise ValueError('Search alias {} is not applicable to model/doc_types {}.'.format(alias, model_obj if model_obj else self._doc_type))
return search_alias.prepare(self, model_obj).alias_for
def __getattr__(self, alias):
'''
Shortcut for search aliases. As explained in the docs (https://docs.python.org/2/reference/datamodel.html#object.__getattr__),
this is only called as a last resort in case the attribute is not found.
'''
return self.hook_alias(alias)
|
ChristopherRabotin/bungiesearch | bungiesearch/__init__.py | Bungiesearch._clone | python | def _clone(self):
'''
Must clone additional fields to those cloned by elasticsearch-dsl-py.
'''
instance = super(Bungiesearch, self)._clone()
instance._raw_results_only = self._raw_results_only
return instance | Must clone additional fields to those cloned by elasticsearch-dsl-py. | train | https://github.com/ChristopherRabotin/bungiesearch/blob/13768342bc2698b214eb0003c2d113b6e273c30d/bungiesearch/__init__.py#L266-L272 | null | class Bungiesearch(Search):
'''
This object is used to read Django settings and initialize the elasticsearch connection.
'''
DEFAULT_TIMEOUT = 5
BUNGIE = settings.BUNGIESEARCH
# The following code loads each model index_name module (as defined in the settings) and stores
# index_name name to model index_name, and index_name name to model. Settings shouldn't change between
# subsequent calls to Search(), which is why this is static code.
_cached_es_instances = {}
# Let's go through the settings in order to map each defined Model/ModelIndex to the elasticsearch index_name.
_model_to_index, _model_name_to_index, _model_name_to_model_idx = defaultdict(list), defaultdict(list), defaultdict(list)
_index_to_model, _idx_name_to_mdl_to_mdlidx = defaultdict(list), defaultdict(dict)
_model_name_to_default_index, _alias_hooks = {}, {}
_managed_models = []
__loaded_indices__ = False
@classmethod
def __load_settings__(cls):
if cls.__loaded_indices__:
return
cls.__loaded_indices__ = True
# Loading indices.
for index_name, module_str in iteritems(cls.BUNGIE['INDICES']):
index_module = import_module(module_str)
for index_obj in itervalues(index_module.__dict__):
try:
if issubclass(index_obj, ModelIndex) and index_obj != ModelIndex:
index_instance = index_obj()
assoc_model = index_instance.get_model()
cls._index_to_model[index_name].append(assoc_model)
cls._model_name_to_model_idx[assoc_model.__name__].append(index_instance)
cls._idx_name_to_mdl_to_mdlidx[index_name][assoc_model.__name__] = index_instance
if index_instance.is_default:
if assoc_model.__name__ in cls._model_name_to_default_index:
raise AttributeError('ModelIndex {} on index {} is marked as default, but {} was already set as default.'.format(index_instance, index_name, cls._model_name_to_default_index[assoc_model.__name__]))
cls._model_name_to_default_index[assoc_model.__name__] = index_instance
except TypeError:
pass # Oops, just attempted to get subclasses of a non-class.
# Create reverse maps in order to have O(1) access.
for index_name, models in iteritems(cls._index_to_model):
for model in models:
cls._model_to_index[model].append(index_name)
cls._model_name_to_index[model.__name__].append(index_name)
# Loading aliases.
for alias_prefix, module_str in iteritems(cls.BUNGIE.get('ALIASES', {})):
if alias_prefix is None:
alias_prefix = 'bungie'
if alias_prefix != '':
alias_prefix += '_'
alias_module = import_module(module_str)
for alias_obj in itervalues(alias_module.__dict__):
try:
if issubclass(alias_obj, SearchAlias) and alias_obj != SearchAlias:
alias_instance = alias_obj()
cls._alias_hooks[alias_prefix + alias_instance.alias_name] = alias_instance
except TypeError:
pass # Oops, just attempted to get subclasses of a non-class.
@classmethod
def _build_key(cls, urls, timeout, **settings):
# Order the settings by key and then turn it into a string with
# repr. There are a lot of edge cases here, but the worst that
# happens is that the key is different and so you get a new
# Elasticsearch. We'll probably have to tweak this.
settings = sorted(settings.items(), key=lambda item: item[0])
settings = repr([(k, v) for k, v in settings])
# elasticsearch allows URLs to be a string, so we make sure to
# account for that when converting whatever it is into a tuple.
if isinstance(urls, string_types):
urls = (urls,)
else:
urls = tuple(urls)
# Generate a tuple of all the bits and return that as the key
# because that's hashable.
key = (urls, timeout, settings)
return key
@classmethod
def get_index(cls, model, via_class=False):
'''
Returns the index name (as a string) for the given model as a class or a string.
:param model: model name or model class if via_class set to True.
:param via_class: set to True if parameter model is a class.
:raise KeyError: If the provided model does not have any index associated.
'''
try:
return cls._model_to_index[model] if via_class else cls._model_name_to_index[model]
except KeyError:
raise KeyError('Could not find any index defined for model {}. Is the model in one of the model index modules of BUNGIESEARCH["INDICES"]?'.format(model))
@classmethod
def get_model_index(cls, model, default=True):
'''
Returns the default model index for the given model, or the list of indices if default is False.
:param model: model name as a string.
:raise KeyError: If the provided model does not have any index associated.
'''
try:
if default:
return cls._model_name_to_default_index[model]
return cls._model_name_to_model_idx[model]
except KeyError:
raise KeyError('Could not find any model index defined for model {}.'.format(model))
@classmethod
def get_indices(cls):
'''
Returns the list of indices defined in the settings.
'''
return cls._idx_name_to_mdl_to_mdlidx.keys()
@classmethod
def get_models(cls, index, as_class=False):
'''
Returns the list of models defined for this index.
:param index: index name.
:param as_class: set to True to return the model as a model object instead of as a string.
'''
try:
return cls._index_to_model[index] if as_class else cls._idx_name_to_mdl_to_mdlidx[index].keys()
except KeyError:
raise KeyError('Could not find any index named {}. Is this index defined in BUNGIESEARCH["INDICES"]?'.format(index))
@classmethod
def get_model_indices(cls, index):
'''
Returns the list of model indices (i.e. ModelIndex objects) defined for this index.
:param index: index name.
'''
try:
return cls._idx_name_to_mdl_to_mdlidx[index].values()
except KeyError:
raise KeyError('Could not find any index named {}. Is this index defined in BUNGIESEARCH["INDICES"]?'.format(index))
    @classmethod
    def map_raw_results(cls, raw_results, instance=None):
        '''
        Maps raw results to database model objects.
        :param raw_results: list raw results as returned from elasticsearch-dsl-py.
        :param instance: Bungiesearch instance if you want to make use of `.only()` or `optimize_queries` as defined in the ModelIndex.
        :return: list of mapped results in the *same* order as returned by elasticsearch.
        '''
        # Let's iterate over the results and determine the appropriate mapping.
        model_results = defaultdict(list)
        # Initializing the list to the number of returned results. This allows us to restore each item in its position.
        if hasattr(raw_results, 'hits'):
            results = [None] * len(raw_results.hits)
        else:
            results = [None] * len(raw_results)
        found_results = {}
        for pos, result in enumerate(raw_results):
            model_name = result.meta.doc_type
            if model_name not in Bungiesearch._model_name_to_index or result.meta.index not in Bungiesearch._model_name_to_index[model_name]:
                # Unknown doc_type/index pairs are passed through unmapped so a
                # partially-configured deployment still returns something.
                logger.warning('Returned object of type {} ({}) is not defined in the settings, or is not associated to the same index as in the settings.'.format(model_name, result))
                results[pos] = result
            else:
                # NOTE(review): `meta` is never used afterwards (the name is
                # shadowed in the loop below); the call still implicitly checks
                # that a default ModelIndex with a Meta attribute exists --
                # confirm that side effect before removing this line.
                meta = Bungiesearch.get_model_index(model_name).Meta
                # Group the elasticsearch ids per (index, model) pair so each
                # model can be fetched from the database in a single query.
                model_results['{}.{}'.format(result.meta.index, model_name)].append(result.meta.id)
                found_results['{1.meta.index}.{0}.{1.meta.id}'.format(model_name, result)] = (pos, result.meta)
        # Now that we have model ids per model name, let's fetch everything at once.
        for ref_name, ids in iteritems(model_results):
            # NOTE(review): this assumes index names contain no '.', otherwise
            # the two-way split below breaks -- confirm against settings.
            index_name, model_name = ref_name.split('.')
            model_idx = Bungiesearch._idx_name_to_mdl_to_mdlidx[index_name][model_name]
            model_obj = model_idx.get_model()
            items = model_obj.objects.filter(pk__in=ids)
            if instance:
                # Work out which database columns to fetch: the ModelIndex's
                # own list, the fields sent to elasticsearch, or an explicit
                # list provided via .only().
                if instance._only == '__model' or model_idx.optimize_queries:
                    desired_fields = model_idx.fields_to_fetch
                elif instance._only == '__fields':
                    desired_fields = instance._fields
                else:
                    desired_fields = instance._only
                if desired_fields: # Prevents setting the database fetch to __fields but not having specified any field to elasticsearch.
                    items = items.only(
                        *[field.name
                          for field in model_obj._meta.get_fields()
                          # For complete backwards compatibility, you may want to exclude
                          # GenericForeignKey from the results.
                          if field.name in desired_fields and \
                          not (field.many_to_one and field.related_model is None)
                          ]
                        )
            # Let's reposition each item in the results and set the _searchmeta meta information.
            for item in items:
                pos, meta = found_results['{}.{}.{}'.format(index_name, model_name, item.pk)]
                item._searchmeta = meta
                results[pos] = item
        return results
    def __init__(self, urls=None, timeout=None, force_new=False, raw_results=False, **kwargs):
        '''
        Creates a new ElasticSearch DSL object. Grabs the ElasticSearch connection from the pool
        if it has already been initialized. Otherwise, creates a new one.
        If no parameters are passed, everything is determined from the Django settings.
        :param urls: A list of URLs, or a single string of URL (without leading `http://`), or None to read from settings.
        :param timeout: Timeout used in the connection.
        :param force_new: Set to `True` to force a new elasticsearch connection. Otherwise will aggressively use any connection with the exact same settings.
        :param raw_results: Set to `True` to skip mapping results to Django model instances (see `execute`).
        :param **kwargs: Additional settings to pass to the low level elasticsearch client and to elasticsearch-dsl-py's Search.
        '''
        # NOTE(review): the original docstring also described `idx`/`idx_alias`
        # parameters that do not exist in this signature -- presumably stale;
        # confirm against callers before reintroducing them.
        Bungiesearch.__load_settings__()
        urls = urls or Bungiesearch.BUNGIE['URLS']
        if not timeout:
            timeout = Bungiesearch.BUNGIE.get('TIMEOUT', Bungiesearch.DEFAULT_TIMEOUT)
        # Split kwargs: these keys belong to elasticsearch-dsl-py's Search,
        # everything else is forwarded to the low level Elasticsearch client.
        search_keys = ['using', 'index', 'doc_type', 'extra']
        search_settings, es_settings = {}, {}
        for k, v in iteritems(kwargs):
            if k in search_keys:
                search_settings[k] = v
            else:
                es_settings[k] = v
        if not es_settings:
            # If there aren't any provided elasticsearch settings, let's see if it's defined in the settings.
            es_settings = Bungiesearch.BUNGIE.get('ES_SETTINGS', {})
        # Building a caching key to cache the es_instance for later use (and retrieved a previously cached es_instance).
        cache_key = Bungiesearch._build_key(urls, timeout, **es_settings)
        es_instance = None
        if not force_new:
            if cache_key in Bungiesearch._cached_es_instances:
                es_instance = Bungiesearch._cached_es_instances[cache_key]
        if not es_instance:
            # Cache the fresh connection even when force_new is set, so later
            # non-forced constructions with the same settings can reuse it.
            es_instance = Elasticsearch(urls, timeout=timeout, **es_settings)
            Bungiesearch._cached_es_instances[cache_key] = es_instance
        if 'using' not in search_settings:
            search_settings['using'] = es_instance
        super(Bungiesearch, self).__init__(**search_settings)
        # Creating instance attributes.
        self._only = [] # Stores the exact fields to fetch from the database when mapping.
        self.results = [] # Store the mapped and unmapped results.
        self._raw_results_only = raw_results
def get_es_instance(self):
'''
Returns the low level elasticsearch instance to perform low level operations.
'''
return self._using
    def execute_raw(self):
        # Run the underlying elasticsearch-dsl-py query and keep the raw,
        # unmapped response around for later mapping or direct inspection.
        self.raw_results = super(Bungiesearch, self).execute()
def execute(self, return_results=True):
'''
Executes the query and attempts to create model objects from results.
'''
if self.results:
return self.results if return_results else None
self.execute_raw()
if self._raw_results_only:
self.results = self.raw_results
else:
self.map_results()
if return_results:
return self.results
def map_results(self):
'''
Maps raw results and store them.
'''
self.results = Bungiesearch.map_raw_results(self.raw_results, self)
def only(self, *fields):
'''
Restricts the fields to be fetched when mapping. Set to `__model` to fetch all fields define in the ModelIndex.
'''
s = self._clone()
if len(fields) == 1 and fields[0] == '__model':
s._only = '__model'
else:
s._only = fields
return s
    def __iter__(self):
        '''
        Allows iterating on the response.
        '''
        # execute() short-circuits when results are already populated, so
        # iterating repeatedly does not re-query elasticsearch.
        self.execute()
        return iter(self.results)
    def __len__(self):
        '''
        Return elasticsearch-dsl-py count.
        '''
        # NOTE(review): count() comes from the elasticsearch-dsl-py Search base
        # class -- presumably this issues a count query rather than measuring
        # locally cached results; confirm before relying on len() being cheap.
        return self.count()
    def __getitem__(self, key):
        '''
        Overwriting the step in slice. It is used to set the results either as elasticsearch-dsl-py response object, or
        attempt to fetch the Django model instance.
        :warning: Getting an item will execute this search. Any search operation or field setting *must* be done prior to getting an item.
        '''
        if isinstance(key, slice):
            # The slice step is hijacked as a flag: search[a:b:True] stores it
            # on _raw_results_only so the hits are returned unmapped.
            if key.step is not None:
                self._raw_results_only = key.step
                # A one-element window (stop - start == 1, or [:1]) unwraps the
                # single hit instead of returning a one-element list.
                if key.start is not None and key.stop is not None:
                    single_item = key.start - key.stop == -1
                elif key.start is None and key.stop == 1:
                    single_item = True
                else:
                    single_item = False
                # Strip the step before handing the slice to
                # elasticsearch-dsl-py, which does not expect our flag there.
                key = slice(key.start, key.stop)
            else:
                single_item = False
        else:
            # Integer indexing always unwraps to a single hit.
            single_item = True
        results = super(Bungiesearch, self).__getitem__(key).execute()
        if single_item:
            try:
                return results[0]
            except IndexError:
                # NOTE(review): an empty hit set returns [] instead of raising
                # IndexError like a normal sequence -- confirm callers rely on this.
                return []
        return results
def hook_alias(self, alias, model_obj=None):
'''
Returns the alias function, if it exists and if it can be applied to this model.
'''
try:
search_alias = self._alias_hooks[alias]
except KeyError:
raise AttributeError('Could not find search alias named {}. Is this alias defined in BUNGIESEARCH["ALIASES"]?'.format(alias))
else:
if search_alias._applicable_models and \
((model_obj and model_obj not in search_alias._applicable_models) or \
not any([app_model_obj.__name__ in self._doc_type for app_model_obj in search_alias._applicable_models])):
raise ValueError('Search alias {} is not applicable to model/doc_types {}.'.format(alias, model_obj if model_obj else self._doc_type))
return search_alias.prepare(self, model_obj).alias_for
    def __getattr__(self, alias):
        '''
        Shortcut for search aliases. As explained in the docs (https://docs.python.org/2/reference/datamodel.html#object.__getattr__),
        this is only called as a last resort in case the attribute is not found.
        '''
        # hook_alias raises AttributeError for unknown aliases, which keeps
        # normal attribute-lookup failure semantics (hasattr, getattr) intact.
        return self.hook_alias(alias)
|
ChristopherRabotin/bungiesearch | bungiesearch/__init__.py | Bungiesearch.execute | python | def execute(self, return_results=True):
'''
Executes the query and attempts to create model objects from results.
'''
if self.results:
return self.results if return_results else None
self.execute_raw()
if self._raw_results_only:
self.results = self.raw_results
else:
self.map_results()
if return_results:
return self.results | Executes the query and attempts to create model objects from results. | train | https://github.com/ChristopherRabotin/bungiesearch/blob/13768342bc2698b214eb0003c2d113b6e273c30d/bungiesearch/__init__.py#L283-L298 | null | class Bungiesearch(Search):
'''
This object is used to read Django settings and initialize the elasticsearch connection.
'''
DEFAULT_TIMEOUT = 5
BUNGIE = settings.BUNGIESEARCH
# The following code loads each model index_name module (as defined in the settings) and stores
# index_name name to model index_name, and index_name name to model. Settings shouldn't change between
# subsequent calls to Search(), which is why this is static code.
_cached_es_instances = {}
# Let's go through the settings in order to map each defined Model/ModelIndex to the elasticsearch index_name.
_model_to_index, _model_name_to_index, _model_name_to_model_idx = defaultdict(list), defaultdict(list), defaultdict(list)
_index_to_model, _idx_name_to_mdl_to_mdlidx = defaultdict(list), defaultdict(dict)
_model_name_to_default_index, _alias_hooks = {}, {}
_managed_models = []
__loaded_indices__ = False
    @classmethod
    def __load_settings__(cls):
        '''
        Lazily populates the class-level model/index/alias registries from the
        ``BUNGIESEARCH`` Django settings. Safe to call repeatedly: only the
        first call does any work.
        '''
        if cls.__loaded_indices__:
            return
        cls.__loaded_indices__ = True
        # Loading indices.
        for index_name, module_str in iteritems(cls.BUNGIE['INDICES']):
            index_module = import_module(module_str)
            # Scan every module-level name for ModelIndex subclasses; this
            # also sees classes merely imported into the module.
            for index_obj in itervalues(index_module.__dict__):
                try:
                    if issubclass(index_obj, ModelIndex) and index_obj != ModelIndex:
                        index_instance = index_obj()
                        assoc_model = index_instance.get_model()
                        cls._index_to_model[index_name].append(assoc_model)
                        cls._model_name_to_model_idx[assoc_model.__name__].append(index_instance)
                        cls._idx_name_to_mdl_to_mdlidx[index_name][assoc_model.__name__] = index_instance
                        if index_instance.is_default:
                            # At most one ModelIndex per model may be default.
                            if assoc_model.__name__ in cls._model_name_to_default_index:
                                raise AttributeError('ModelIndex {} on index {} is marked as default, but {} was already set as default.'.format(index_instance, index_name, cls._model_name_to_default_index[assoc_model.__name__]))
                            cls._model_name_to_default_index[assoc_model.__name__] = index_instance
                except TypeError:
                    pass # Oops, just attempted to get subclasses of a non-class.
        # Create reverse maps in order to have O(1) access.
        for index_name, models in iteritems(cls._index_to_model):
            for model in models:
                cls._model_to_index[model].append(index_name)
                cls._model_name_to_index[model.__name__].append(index_name)
        # Loading aliases.
        for alias_prefix, module_str in iteritems(cls.BUNGIE.get('ALIASES', {})):
            # A None key means the default 'bungie' prefix; an empty string
            # means no prefix at all. Any prefix is separated by '_'.
            if alias_prefix is None:
                alias_prefix = 'bungie'
            if alias_prefix != '':
                alias_prefix += '_'
            alias_module = import_module(module_str)
            for alias_obj in itervalues(alias_module.__dict__):
                try:
                    if issubclass(alias_obj, SearchAlias) and alias_obj != SearchAlias:
                        alias_instance = alias_obj()
                        cls._alias_hooks[alias_prefix + alias_instance.alias_name] = alias_instance
                except TypeError:
                    pass # Oops, just attempted to get subclasses of a non-class.
@classmethod
def _build_key(cls, urls, timeout, **settings):
# Order the settings by key and then turn it into a string with
# repr. There are a lot of edge cases here, but the worst that
# happens is that the key is different and so you get a new
# Elasticsearch. We'll probably have to tweak this.
settings = sorted(settings.items(), key=lambda item: item[0])
settings = repr([(k, v) for k, v in settings])
# elasticsearch allows URLs to be a string, so we make sure to
# account for that when converting whatever it is into a tuple.
if isinstance(urls, string_types):
urls = (urls,)
else:
urls = tuple(urls)
# Generate a tuple of all the bits and return that as the key
# because that's hashable.
key = (urls, timeout, settings)
return key
@classmethod
def get_index(cls, model, via_class=False):
'''
Returns the index name (as a string) for the given model as a class or a string.
:param model: model name or model class if via_class set to True.
:param via_class: set to True if parameter model is a class.
:raise KeyError: If the provided model does not have any index associated.
'''
try:
return cls._model_to_index[model] if via_class else cls._model_name_to_index[model]
except KeyError:
raise KeyError('Could not find any index defined for model {}. Is the model in one of the model index modules of BUNGIESEARCH["INDICES"]?'.format(model))
@classmethod
def get_model_index(cls, model, default=True):
'''
Returns the default model index for the given model, or the list of indices if default is False.
:param model: model name as a string.
:raise KeyError: If the provided model does not have any index associated.
'''
try:
if default:
return cls._model_name_to_default_index[model]
return cls._model_name_to_model_idx[model]
except KeyError:
raise KeyError('Could not find any model index defined for model {}.'.format(model))
@classmethod
def get_indices(cls):
'''
Returns the list of indices defined in the settings.
'''
return cls._idx_name_to_mdl_to_mdlidx.keys()
@classmethod
def get_models(cls, index, as_class=False):
'''
Returns the list of models defined for this index.
:param index: index name.
:param as_class: set to True to return the model as a model object instead of as a string.
'''
try:
return cls._index_to_model[index] if as_class else cls._idx_name_to_mdl_to_mdlidx[index].keys()
except KeyError:
raise KeyError('Could not find any index named {}. Is this index defined in BUNGIESEARCH["INDICES"]?'.format(index))
@classmethod
def get_model_indices(cls, index):
'''
Returns the list of model indices (i.e. ModelIndex objects) defined for this index.
:param index: index name.
'''
try:
return cls._idx_name_to_mdl_to_mdlidx[index].values()
except KeyError:
raise KeyError('Could not find any index named {}. Is this index defined in BUNGIESEARCH["INDICES"]?'.format(index))
@classmethod
def map_raw_results(cls, raw_results, instance=None):
'''
Maps raw results to database model objects.
:param raw_results: list raw results as returned from elasticsearch-dsl-py.
:param instance: Bungiesearch instance if you want to make use of `.only()` or `optmize_queries` as defined in the ModelIndex.
:return: list of mapped results in the *same* order as returned by elasticsearch.
'''
# Let's iterate over the results and determine the appropriate mapping.
model_results = defaultdict(list)
# Initializing the list to the number of returned results. This allows us to restore each item in its position.
if hasattr(raw_results, 'hits'):
results = [None] * len(raw_results.hits)
else:
results = [None] * len(raw_results)
found_results = {}
for pos, result in enumerate(raw_results):
model_name = result.meta.doc_type
if model_name not in Bungiesearch._model_name_to_index or result.meta.index not in Bungiesearch._model_name_to_index[model_name]:
logger.warning('Returned object of type {} ({}) is not defined in the settings, or is not associated to the same index as in the settings.'.format(model_name, result))
results[pos] = result
else:
meta = Bungiesearch.get_model_index(model_name).Meta
model_results['{}.{}'.format(result.meta.index, model_name)].append(result.meta.id)
found_results['{1.meta.index}.{0}.{1.meta.id}'.format(model_name, result)] = (pos, result.meta)
# Now that we have model ids per model name, let's fetch everything at once.
for ref_name, ids in iteritems(model_results):
index_name, model_name = ref_name.split('.')
model_idx = Bungiesearch._idx_name_to_mdl_to_mdlidx[index_name][model_name]
model_obj = model_idx.get_model()
items = model_obj.objects.filter(pk__in=ids)
if instance:
if instance._only == '__model' or model_idx.optimize_queries:
desired_fields = model_idx.fields_to_fetch
elif instance._only == '__fields':
desired_fields = instance._fields
else:
desired_fields = instance._only
if desired_fields: # Prevents setting the database fetch to __fields but not having specified any field to elasticsearch.
items = items.only(
*[field.name
for field in model_obj._meta.get_fields()
# For complete backwards compatibility, you may want to exclude
# GenericForeignKey from the results.
if field.name in desired_fields and \
not (field.many_to_one and field.related_model is None)
]
)
# Let's reposition each item in the results and set the _searchmeta meta information.
for item in items:
pos, meta = found_results['{}.{}.{}'.format(index_name, model_name, item.pk)]
item._searchmeta = meta
results[pos] = item
return results
def __init__(self, urls=None, timeout=None, force_new=False, raw_results=False, **kwargs):
'''
Creates a new ElasticSearch DSL object. Grabs the ElasticSearch connection from the pool
if it has already been initialized. Otherwise, creates a new one.
If no parameters are passed, everything is determined from the Django settings.
:param urls: A list of URLs, or a single string of URL (without leading `http://`), or None to read from settings.
:param idx: A list of indices or a single string representing an index_name name. Is optional. Will be merged with `idx_alias`.
:param idx_alias: A list of index_name aliases or a single string representing an index_name alias, as defined in the settings. Will be merged with `index_name`.
:param timeout: Timeout used in the connection.
:param force_new: Set to `True` to force a new elasticsearch connection. Otherwise will aggressively use any connection with the exact same settings.
:param **kwargs: Additional settings to pass to the low level elasticsearch client and to elasticsearch-sal-py.search.Search.
'''
Bungiesearch.__load_settings__()
urls = urls or Bungiesearch.BUNGIE['URLS']
if not timeout:
timeout = Bungiesearch.BUNGIE.get('TIMEOUT', Bungiesearch.DEFAULT_TIMEOUT)
search_keys = ['using', 'index', 'doc_type', 'extra']
search_settings, es_settings = {}, {}
for k, v in iteritems(kwargs):
if k in search_keys:
search_settings[k] = v
else:
es_settings[k] = v
if not es_settings:
# If there aren't any provided elasticsearch settings, let's see if it's defined in the settings.
es_settings = Bungiesearch.BUNGIE.get('ES_SETTINGS', {})
# Building a caching key to cache the es_instance for later use (and retrieved a previously cached es_instance).
cache_key = Bungiesearch._build_key(urls, timeout, **es_settings)
es_instance = None
if not force_new:
if cache_key in Bungiesearch._cached_es_instances:
es_instance = Bungiesearch._cached_es_instances[cache_key]
if not es_instance:
es_instance = Elasticsearch(urls, timeout=timeout, **es_settings)
Bungiesearch._cached_es_instances[cache_key] = es_instance
if 'using' not in search_settings:
search_settings['using'] = es_instance
super(Bungiesearch, self).__init__(**search_settings)
# Creating instance attributes.
self._only = [] # Stores the exact fields to fetch from the database when mapping.
self.results = [] # Store the mapped and unmapped results.
self._raw_results_only = raw_results
    def _clone(self):
        '''
        Must clone additional fields to those cloned by elasticsearch-dsl-py.
        '''
        instance = super(Bungiesearch, self)._clone()
        # The base class knows nothing about our extra flag, so carry it over.
        instance._raw_results_only = self._raw_results_only
        # NOTE(review): _only and results are not copied; only() re-assigns
        # _only right after cloning, but other clones start without these
        # attributes -- confirm that is intended.
        return instance
def get_es_instance(self):
'''
Returns the low level elasticsearch instance to perform low level operations.
'''
return self._using
def execute_raw(self):
self.raw_results = super(Bungiesearch, self).execute()
def map_results(self):
'''
Maps raw results and store them.
'''
self.results = Bungiesearch.map_raw_results(self.raw_results, self)
def only(self, *fields):
'''
Restricts the fields to be fetched when mapping. Set to `__model` to fetch all fields define in the ModelIndex.
'''
s = self._clone()
if len(fields) == 1 and fields[0] == '__model':
s._only = '__model'
else:
s._only = fields
return s
def __iter__(self):
'''
Allows iterating on the response.
'''
self.execute()
return iter(self.results)
def __len__(self):
'''
Return elasticsearch-dsl-py count.
'''
return self.count()
def __getitem__(self, key):
'''
Overwriting the step in slice. It is used to set the results either as elasticsearch-dsl-py response object, or
attempt to fetch the Django model instance.
:warning: Getting an item will execute this search. Any search operation or field setting *must* be done prior to getting an item.
'''
if isinstance(key, slice):
if key.step is not None:
self._raw_results_only = key.step
if key.start is not None and key.stop is not None:
single_item = key.start - key.stop == -1
elif key.start is None and key.stop == 1:
single_item = True
else:
single_item = False
key = slice(key.start, key.stop)
else:
single_item = False
else:
single_item = True
results = super(Bungiesearch, self).__getitem__(key).execute()
if single_item:
try:
return results[0]
except IndexError:
return []
return results
def hook_alias(self, alias, model_obj=None):
'''
Returns the alias function, if it exists and if it can be applied to this model.
'''
try:
search_alias = self._alias_hooks[alias]
except KeyError:
raise AttributeError('Could not find search alias named {}. Is this alias defined in BUNGIESEARCH["ALIASES"]?'.format(alias))
else:
if search_alias._applicable_models and \
((model_obj and model_obj not in search_alias._applicable_models) or \
not any([app_model_obj.__name__ in self._doc_type for app_model_obj in search_alias._applicable_models])):
raise ValueError('Search alias {} is not applicable to model/doc_types {}.'.format(alias, model_obj if model_obj else self._doc_type))
return search_alias.prepare(self, model_obj).alias_for
def __getattr__(self, alias):
'''
Shortcut for search aliases. As explained in the docs (https://docs.python.org/2/reference/datamodel.html#object.__getattr__),
this is only called as a last resort in case the attribute is not found.
'''
return self.hook_alias(alias)
|
ChristopherRabotin/bungiesearch | bungiesearch/__init__.py | Bungiesearch.only | python | def only(self, *fields):
'''
Restricts the fields to be fetched when mapping. Set to `__model` to fetch all fields define in the ModelIndex.
'''
s = self._clone()
if len(fields) == 1 and fields[0] == '__model':
s._only = '__model'
else:
s._only = fields
return s | Restricts the fields to be fetched when mapping. Set to `__model` to fetch all fields define in the ModelIndex. | train | https://github.com/ChristopherRabotin/bungiesearch/blob/13768342bc2698b214eb0003c2d113b6e273c30d/bungiesearch/__init__.py#L306-L315 | null | class Bungiesearch(Search):
'''
This object is used to read Django settings and initialize the elasticsearch connection.
'''
DEFAULT_TIMEOUT = 5
BUNGIE = settings.BUNGIESEARCH
# The following code loads each model index_name module (as defined in the settings) and stores
# index_name name to model index_name, and index_name name to model. Settings shouldn't change between
# subsequent calls to Search(), which is why this is static code.
_cached_es_instances = {}
# Let's go through the settings in order to map each defined Model/ModelIndex to the elasticsearch index_name.
_model_to_index, _model_name_to_index, _model_name_to_model_idx = defaultdict(list), defaultdict(list), defaultdict(list)
_index_to_model, _idx_name_to_mdl_to_mdlidx = defaultdict(list), defaultdict(dict)
_model_name_to_default_index, _alias_hooks = {}, {}
_managed_models = []
__loaded_indices__ = False
@classmethod
def __load_settings__(cls):
if cls.__loaded_indices__:
return
cls.__loaded_indices__ = True
# Loading indices.
for index_name, module_str in iteritems(cls.BUNGIE['INDICES']):
index_module = import_module(module_str)
for index_obj in itervalues(index_module.__dict__):
try:
if issubclass(index_obj, ModelIndex) and index_obj != ModelIndex:
index_instance = index_obj()
assoc_model = index_instance.get_model()
cls._index_to_model[index_name].append(assoc_model)
cls._model_name_to_model_idx[assoc_model.__name__].append(index_instance)
cls._idx_name_to_mdl_to_mdlidx[index_name][assoc_model.__name__] = index_instance
if index_instance.is_default:
if assoc_model.__name__ in cls._model_name_to_default_index:
raise AttributeError('ModelIndex {} on index {} is marked as default, but {} was already set as default.'.format(index_instance, index_name, cls._model_name_to_default_index[assoc_model.__name__]))
cls._model_name_to_default_index[assoc_model.__name__] = index_instance
except TypeError:
pass # Oops, just attempted to get subclasses of a non-class.
# Create reverse maps in order to have O(1) access.
for index_name, models in iteritems(cls._index_to_model):
for model in models:
cls._model_to_index[model].append(index_name)
cls._model_name_to_index[model.__name__].append(index_name)
# Loading aliases.
for alias_prefix, module_str in iteritems(cls.BUNGIE.get('ALIASES', {})):
if alias_prefix is None:
alias_prefix = 'bungie'
if alias_prefix != '':
alias_prefix += '_'
alias_module = import_module(module_str)
for alias_obj in itervalues(alias_module.__dict__):
try:
if issubclass(alias_obj, SearchAlias) and alias_obj != SearchAlias:
alias_instance = alias_obj()
cls._alias_hooks[alias_prefix + alias_instance.alias_name] = alias_instance
except TypeError:
pass # Oops, just attempted to get subclasses of a non-class.
@classmethod
def _build_key(cls, urls, timeout, **settings):
# Order the settings by key and then turn it into a string with
# repr. There are a lot of edge cases here, but the worst that
# happens is that the key is different and so you get a new
# Elasticsearch. We'll probably have to tweak this.
settings = sorted(settings.items(), key=lambda item: item[0])
settings = repr([(k, v) for k, v in settings])
# elasticsearch allows URLs to be a string, so we make sure to
# account for that when converting whatever it is into a tuple.
if isinstance(urls, string_types):
urls = (urls,)
else:
urls = tuple(urls)
# Generate a tuple of all the bits and return that as the key
# because that's hashable.
key = (urls, timeout, settings)
return key
@classmethod
def get_index(cls, model, via_class=False):
'''
Returns the index name (as a string) for the given model as a class or a string.
:param model: model name or model class if via_class set to True.
:param via_class: set to True if parameter model is a class.
:raise KeyError: If the provided model does not have any index associated.
'''
try:
return cls._model_to_index[model] if via_class else cls._model_name_to_index[model]
except KeyError:
raise KeyError('Could not find any index defined for model {}. Is the model in one of the model index modules of BUNGIESEARCH["INDICES"]?'.format(model))
@classmethod
def get_model_index(cls, model, default=True):
'''
Returns the default model index for the given model, or the list of indices if default is False.
:param model: model name as a string.
:raise KeyError: If the provided model does not have any index associated.
'''
try:
if default:
return cls._model_name_to_default_index[model]
return cls._model_name_to_model_idx[model]
except KeyError:
raise KeyError('Could not find any model index defined for model {}.'.format(model))
@classmethod
def get_indices(cls):
'''
Returns the list of indices defined in the settings.
'''
return cls._idx_name_to_mdl_to_mdlidx.keys()
@classmethod
def get_models(cls, index, as_class=False):
'''
Returns the list of models defined for this index.
:param index: index name.
:param as_class: set to True to return the model as a model object instead of as a string.
'''
try:
return cls._index_to_model[index] if as_class else cls._idx_name_to_mdl_to_mdlidx[index].keys()
except KeyError:
raise KeyError('Could not find any index named {}. Is this index defined in BUNGIESEARCH["INDICES"]?'.format(index))
@classmethod
def get_model_indices(cls, index):
'''
Returns the list of model indices (i.e. ModelIndex objects) defined for this index.
:param index: index name.
'''
try:
return cls._idx_name_to_mdl_to_mdlidx[index].values()
except KeyError:
raise KeyError('Could not find any index named {}. Is this index defined in BUNGIESEARCH["INDICES"]?'.format(index))
@classmethod
def map_raw_results(cls, raw_results, instance=None):
'''
Maps raw results to database model objects.
:param raw_results: list raw results as returned from elasticsearch-dsl-py.
:param instance: Bungiesearch instance if you want to make use of `.only()` or `optmize_queries` as defined in the ModelIndex.
:return: list of mapped results in the *same* order as returned by elasticsearch.
'''
# Let's iterate over the results and determine the appropriate mapping.
model_results = defaultdict(list)
# Initializing the list to the number of returned results. This allows us to restore each item in its position.
if hasattr(raw_results, 'hits'):
results = [None] * len(raw_results.hits)
else:
results = [None] * len(raw_results)
found_results = {}
for pos, result in enumerate(raw_results):
model_name = result.meta.doc_type
if model_name not in Bungiesearch._model_name_to_index or result.meta.index not in Bungiesearch._model_name_to_index[model_name]:
logger.warning('Returned object of type {} ({}) is not defined in the settings, or is not associated to the same index as in the settings.'.format(model_name, result))
results[pos] = result
else:
meta = Bungiesearch.get_model_index(model_name).Meta
model_results['{}.{}'.format(result.meta.index, model_name)].append(result.meta.id)
found_results['{1.meta.index}.{0}.{1.meta.id}'.format(model_name, result)] = (pos, result.meta)
# Now that we have model ids per model name, let's fetch everything at once.
for ref_name, ids in iteritems(model_results):
index_name, model_name = ref_name.split('.')
model_idx = Bungiesearch._idx_name_to_mdl_to_mdlidx[index_name][model_name]
model_obj = model_idx.get_model()
items = model_obj.objects.filter(pk__in=ids)
if instance:
if instance._only == '__model' or model_idx.optimize_queries:
desired_fields = model_idx.fields_to_fetch
elif instance._only == '__fields':
desired_fields = instance._fields
else:
desired_fields = instance._only
if desired_fields: # Prevents setting the database fetch to __fields but not having specified any field to elasticsearch.
items = items.only(
*[field.name
for field in model_obj._meta.get_fields()
# For complete backwards compatibility, you may want to exclude
# GenericForeignKey from the results.
if field.name in desired_fields and \
not (field.many_to_one and field.related_model is None)
]
)
# Let's reposition each item in the results and set the _searchmeta meta information.
for item in items:
pos, meta = found_results['{}.{}.{}'.format(index_name, model_name, item.pk)]
item._searchmeta = meta
results[pos] = item
return results
def __init__(self, urls=None, timeout=None, force_new=False, raw_results=False, **kwargs):
    '''
    Create an elasticsearch-dsl search bound to a (possibly pooled) Elasticsearch connection.

    Connections with identical settings are cached aggressively and reused, unless
    ``force_new`` is set. Any parameter left unspecified falls back to the Django settings.

    :param urls: a list of URLs, or a single string of URL (without leading `http://`), or None to read from settings.
    :param timeout: connection timeout; defaults to the BUNGIESEARCH setting or DEFAULT_TIMEOUT.
    :param force_new: set to True to bypass the connection cache and build a fresh client.
    :param raw_results: set to True to skip mapping results back to Django model instances.
    :param kwargs: extra options, split between elasticsearch-dsl-py's Search
        (keys 'using', 'index', 'doc_type', 'extra') and the low level elasticsearch client.
    '''
    Bungiesearch.__load_settings__()
    urls = urls or Bungiesearch.BUNGIE['URLS']
    if not timeout:
        timeout = Bungiesearch.BUNGIE.get('TIMEOUT', Bungiesearch.DEFAULT_TIMEOUT)
    # Partition the keyword arguments: Search-related keys go to elasticsearch-dsl-py,
    # everything else is forwarded to the low level client.
    search_keys = ('using', 'index', 'doc_type', 'extra')
    search_settings, es_settings = {}, {}
    for key, value in iteritems(kwargs):
        bucket = search_settings if key in search_keys else es_settings
        bucket[key] = value
    if not es_settings:
        # No explicit client settings were provided: fall back to the Django settings, if defined.
        es_settings = Bungiesearch.BUNGIE.get('ES_SETTINGS', {})
    # Reuse a previously built client with the exact same settings, unless forbidden.
    cache_key = Bungiesearch._build_key(urls, timeout, **es_settings)
    es_instance = None if force_new else Bungiesearch._cached_es_instances.get(cache_key)
    if not es_instance:
        es_instance = Elasticsearch(urls, timeout=timeout, **es_settings)
        Bungiesearch._cached_es_instances[cache_key] = es_instance
    search_settings.setdefault('using', es_instance)
    super(Bungiesearch, self).__init__(**search_settings)
    # Instance state.
    self._only = []  # Exact fields to fetch from the database when mapping.
    self.results = []  # Cache for the mapped (or raw) results.
    self._raw_results_only = raw_results
def _clone(self):
    '''
    Clone this search, carrying over the raw-results flag that
    elasticsearch-dsl-py's own _clone does not copy.
    '''
    clone = super(Bungiesearch, self)._clone()
    clone._raw_results_only = self._raw_results_only
    return clone
def get_es_instance(self):
    '''
    Returns the low level elasticsearch instance to perform low level operations.

    :return: the Elasticsearch client this search is bound to (the dsl `_using` attribute).
    '''
    return self._using
def execute_raw(self):
    '''
    Run the query via elasticsearch-dsl-py and store the unmapped
    response on ``self.raw_results`` (no Django model mapping).
    '''
    self.raw_results = super(Bungiesearch, self).execute()
def execute(self, return_results=True):
    '''
    Executes the query and attempts to create model objects from results.

    :param return_results: set to False to run the query for its side effects only.
    :return: the (mapped or raw) results when return_results is True, otherwise None.

    NOTE(review): results are cached only when truthy, so a query whose result
    set is empty will be re-executed on every call -- confirm this is intended.
    '''
    if self.results:
        return self.results if return_results else None
    self.execute_raw()
    if self._raw_results_only:
        # Raw mode: expose the elasticsearch-dsl-py response as-is.
        self.results = self.raw_results
    else:
        self.map_results()
    if return_results:
        return self.results
def map_results(self):
    '''
    Maps raw results and store them.

    Populates ``self.results`` from ``self.raw_results`` via
    :meth:`Bungiesearch.map_raw_results`, preserving result order.
    '''
    self.results = Bungiesearch.map_raw_results(self.raw_results, self)
def __iter__(self):
    '''
    Allows iterating on the response.

    Executes the query (a no-op if results are already cached) and
    iterates over the mapped results.
    '''
    self.execute()
    return iter(self.results)
def __len__(self):
    '''
    Return elasticsearch-dsl-py count.

    NOTE(review): this issues a count query against elasticsearch rather
    than returning len(self.results) -- it may differ from the number of
    fetched results when the query was sliced.
    '''
    return self.count()
def __getitem__(self, key):
    '''
    Overwriting the step in slice. It is used to set the results either as elasticsearch-dsl-py response object, or
    attempt to fetch the Django model instance.
    :warning: Getting an item will execute this search. Any search operation or field setting *must* be done prior to getting an item.
    '''
    if isinstance(key, slice):
        # The slice "step" is repurposed: a truthy step requests raw
        # elasticsearch results instead of mapped Django model instances.
        if key.step is not None:
            self._raw_results_only = key.step
        # A slice spanning exactly one element (stop == start + 1, or [:1])
        # behaves like single-item access and unwraps the result.
        if key.start is not None and key.stop is not None:
            single_item = key.start - key.stop == -1
        elif key.start is None and key.stop == 1:
            single_item = True
        else:
            single_item = False
        # Rebuild the slice without the (repurposed) step before delegating.
        key = slice(key.start, key.stop)
    else:
        single_item = True
    results = super(Bungiesearch, self).__getitem__(key).execute()
    if single_item:
        try:
            return results[0]
        except IndexError:
            # NOTE(review): returns [] rather than raising IndexError when the
            # single item does not exist -- callers must check for an empty list.
            return []
    return results
def hook_alias(self, alias, model_obj=None):
    '''
    Returns the alias function, if it exists and if it can be applied to this model.

    :param alias: name of the search alias, as registered in BUNGIESEARCH["ALIASES"].
    :param model_obj: optional model class used for the applicability check.
    :raise AttributeError: if no alias with that name is registered.
    :raise ValueError: if the alias is restricted to models that do not match this search.
    :return: the prepared alias callable (``alias_for``).
    '''
    try:
        search_alias = self._alias_hooks[alias]
    except KeyError:
        # Raise AttributeError (not KeyError) so attribute lookups going through
        # __getattr__ fail with the exception type callers expect.
        raise AttributeError('Could not find search alias named {}. Is this alias defined in BUNGIESEARCH["ALIASES"]?'.format(alias))
    # Aliases may be restricted to specific models: reject when the requested
    # model (or any of this search's doc_types) is not covered by the alias.
    # Use a generator with any() -- no need to build an intermediate list.
    if search_alias._applicable_models and (
            (model_obj and model_obj not in search_alias._applicable_models) or
            not any(app_model_obj.__name__ in self._doc_type
                    for app_model_obj in search_alias._applicable_models)):
        raise ValueError('Search alias {} is not applicable to model/doc_types {}.'.format(alias, model_obj if model_obj else self._doc_type))
    return search_alias.prepare(self, model_obj).alias_for
def __getattr__(self, alias):
    '''
    Shortcut for search aliases. As explained in the docs (https://docs.python.org/2/reference/datamodel.html#object.__getattr__),
    this is only called as a last resort in case the attribute is not found.

    :raise AttributeError: propagated from hook_alias when no such alias is registered.
    '''
    return self.hook_alias(alias)
|
ChristopherRabotin/bungiesearch | bungiesearch/__init__.py | Bungiesearch.hook_alias | python | def hook_alias(self, alias, model_obj=None):
'''
Returns the alias function, if it exists and if it can be applied to this model.
'''
try:
search_alias = self._alias_hooks[alias]
except KeyError:
raise AttributeError('Could not find search alias named {}. Is this alias defined in BUNGIESEARCH["ALIASES"]?'.format(alias))
else:
if search_alias._applicable_models and \
((model_obj and model_obj not in search_alias._applicable_models) or \
not any([app_model_obj.__name__ in self._doc_type for app_model_obj in search_alias._applicable_models])):
raise ValueError('Search alias {} is not applicable to model/doc_types {}.'.format(alias, model_obj if model_obj else self._doc_type))
return search_alias.prepare(self, model_obj).alias_for | Returns the alias function, if it exists and if it can be applied to this model. | train | https://github.com/ChristopherRabotin/bungiesearch/blob/13768342bc2698b214eb0003c2d113b6e273c30d/bungiesearch/__init__.py#L358-L371 | null | class Bungiesearch(Search):
'''
This object is used to read Django settings and initialize the elasticsearch connection.
'''
DEFAULT_TIMEOUT = 5
BUNGIE = settings.BUNGIESEARCH
# The following code loads each model index_name module (as defined in the settings) and stores
# index_name name to model index_name, and index_name name to model. Settings shouldn't change between
# subsequent calls to Search(), which is why this is static code.
_cached_es_instances = {}
# Let's go through the settings in order to map each defined Model/ModelIndex to the elasticsearch index_name.
_model_to_index, _model_name_to_index, _model_name_to_model_idx = defaultdict(list), defaultdict(list), defaultdict(list)
_index_to_model, _idx_name_to_mdl_to_mdlidx = defaultdict(list), defaultdict(dict)
_model_name_to_default_index, _alias_hooks = {}, {}
_managed_models = []
__loaded_indices__ = False
@classmethod
def __load_settings__(cls):
if cls.__loaded_indices__:
return
cls.__loaded_indices__ = True
# Loading indices.
for index_name, module_str in iteritems(cls.BUNGIE['INDICES']):
index_module = import_module(module_str)
for index_obj in itervalues(index_module.__dict__):
try:
if issubclass(index_obj, ModelIndex) and index_obj != ModelIndex:
index_instance = index_obj()
assoc_model = index_instance.get_model()
cls._index_to_model[index_name].append(assoc_model)
cls._model_name_to_model_idx[assoc_model.__name__].append(index_instance)
cls._idx_name_to_mdl_to_mdlidx[index_name][assoc_model.__name__] = index_instance
if index_instance.is_default:
if assoc_model.__name__ in cls._model_name_to_default_index:
raise AttributeError('ModelIndex {} on index {} is marked as default, but {} was already set as default.'.format(index_instance, index_name, cls._model_name_to_default_index[assoc_model.__name__]))
cls._model_name_to_default_index[assoc_model.__name__] = index_instance
except TypeError:
pass # Oops, just attempted to get subclasses of a non-class.
# Create reverse maps in order to have O(1) access.
for index_name, models in iteritems(cls._index_to_model):
for model in models:
cls._model_to_index[model].append(index_name)
cls._model_name_to_index[model.__name__].append(index_name)
# Loading aliases.
for alias_prefix, module_str in iteritems(cls.BUNGIE.get('ALIASES', {})):
if alias_prefix is None:
alias_prefix = 'bungie'
if alias_prefix != '':
alias_prefix += '_'
alias_module = import_module(module_str)
for alias_obj in itervalues(alias_module.__dict__):
try:
if issubclass(alias_obj, SearchAlias) and alias_obj != SearchAlias:
alias_instance = alias_obj()
cls._alias_hooks[alias_prefix + alias_instance.alias_name] = alias_instance
except TypeError:
pass # Oops, just attempted to get subclasses of a non-class.
@classmethod
def _build_key(cls, urls, timeout, **settings):
    '''
    Build a hashable key identifying an elasticsearch connection, used to
    cache and reuse client instances with identical configuration.

    :param urls: connection URL(s); a single string or an iterable of strings.
    :param timeout: connection timeout, included in the key.
    :param settings: extra low level elasticsearch client settings.
    :return: a hashable tuple ``(urls_tuple, timeout, repr_of_sorted_settings)``.
    '''
    # Order the settings by key and then turn it into a string with
    # repr. There are a lot of edge cases here, but the worst that
    # happens is that the key is different and so you get a new
    # Elasticsearch. We'll probably have to tweak this.
    settings = sorted(settings.items(), key=lambda item: item[0])
    settings = repr([(k, v) for k, v in settings])
    # elasticsearch allows URLs to be a string, so we make sure to
    # account for that when converting whatever it is into a tuple.
    if isinstance(urls, string_types):
        urls = (urls,)
    else:
        urls = tuple(urls)
    # Generate a tuple of all the bits and return that as the key
    # because that's hashable.
    key = (urls, timeout, settings)
    return key
@classmethod
def get_index(cls, model, via_class=False):
    '''
    Returns the index name (as a string) for the given model as a class or a string.
    :param model: model name or model class if via_class set to True.
    :param via_class: set to True if parameter model is a class.
    :raise KeyError: If the provided model does not have any index associated.
    '''
    try:
        return cls._model_to_index[model] if via_class else cls._model_name_to_index[model]
    except KeyError:
        # NOTE(review): both maps are defaultdict(list), so this KeyError can
        # never fire -- unknown models silently get []. Confirm whether the
        # documented KeyError contract is still desired.
        raise KeyError('Could not find any index defined for model {}. Is the model in one of the model index modules of BUNGIESEARCH["INDICES"]?'.format(model))
@classmethod
def get_model_index(cls, model, default=True):
    '''
    Returns the default model index for the given model, or the list of indices if default is False.
    :param model: model name as a string.
    :param default: set to False to return every ModelIndex defined for this model.
    :raise KeyError: If the provided model does not have any index associated.
    '''
    try:
        if default:
            return cls._model_name_to_default_index[model]
        # NOTE(review): _model_name_to_model_idx is a defaultdict(list), so this
        # branch returns [] (never raises) for unknown models -- confirm intent.
        return cls._model_name_to_model_idx[model]
    except KeyError:
        raise KeyError('Could not find any model index defined for model {}.'.format(model))
@classmethod
def get_indices(cls):
    '''
    Returns the list of indices defined in the settings.

    :return: the index names (keys of the index-to-model-index map).
    '''
    return cls._idx_name_to_mdl_to_mdlidx.keys()
@classmethod
def get_models(cls, index, as_class=False):
    '''
    Returns the list of models defined for this index.
    :param index: index name.
    :param as_class: set to True to return the model as a model object instead of as a string.
    :raise KeyError: If no index with that name is defined in the settings.
    '''
    try:
        # NOTE(review): both maps are defaultdicts, so this KeyError can never
        # fire -- unknown indices silently return an empty collection. Confirm
        # whether the documented KeyError contract is still desired.
        return cls._index_to_model[index] if as_class else cls._idx_name_to_mdl_to_mdlidx[index].keys()
    except KeyError:
        raise KeyError('Could not find any index named {}. Is this index defined in BUNGIESEARCH["INDICES"]?'.format(index))
@classmethod
def get_model_indices(cls, index):
    '''
    Returns the list of model indices (i.e. ModelIndex objects) defined for this index.
    :param index: index name.
    :raise KeyError: If no index with that name is defined in the settings.
    '''
    try:
        # NOTE(review): _idx_name_to_mdl_to_mdlidx is a defaultdict(dict), so
        # this KeyError can never fire -- unknown indices return empty values.
        return cls._idx_name_to_mdl_to_mdlidx[index].values()
    except KeyError:
        raise KeyError('Could not find any index named {}. Is this index defined in BUNGIESEARCH["INDICES"]?'.format(index))
@classmethod
def map_raw_results(cls, raw_results, instance=None):
    '''
    Maps raw results to database model objects.
    :param raw_results: list raw results as returned from elasticsearch-dsl-py.
    :param instance: Bungiesearch instance if you want to make use of `.only()` or `optmize_queries` as defined in the ModelIndex.
    :return: list of mapped results in the *same* order as returned by elasticsearch.
    '''
    # Let's iterate over the results and determine the appropriate mapping.
    model_results = defaultdict(list)
    # Initializing the list to the number of returned results. This allows us to restore each item in its position.
    if hasattr(raw_results, 'hits'):
        results = [None] * len(raw_results.hits)
    else:
        results = [None] * len(raw_results)
    found_results = {}
    for pos, result in enumerate(raw_results):
        model_name = result.meta.doc_type
        if model_name not in Bungiesearch._model_name_to_index or result.meta.index not in Bungiesearch._model_name_to_index[model_name]:
            # Unknown model/index combination: keep the raw hit in place.
            logger.warning('Returned object of type {} ({}) is not defined in the settings, or is not associated to the same index as in the settings.'.format(model_name, result))
            results[pos] = result
        else:
            # NOTE(review): this `meta` is never read (the name is rebound in the
            # repositioning loop below); confirm whether the lookup is dead code.
            meta = Bungiesearch.get_model_index(model_name).Meta
            # Group primary keys per "<index>.<model>" so each model is fetched in one query.
            model_results['{}.{}'.format(result.meta.index, model_name)].append(result.meta.id)
            found_results['{1.meta.index}.{0}.{1.meta.id}'.format(model_name, result)] = (pos, result.meta)
    # Now that we have model ids per model name, let's fetch everything at once.
    for ref_name, ids in iteritems(model_results):
        index_name, model_name = ref_name.split('.')
        model_idx = Bungiesearch._idx_name_to_mdl_to_mdlidx[index_name][model_name]
        model_obj = model_idx.get_model()
        items = model_obj.objects.filter(pk__in=ids)
        if instance:
            # Decide which database fields to fetch, based on the instance's .only() setting.
            if instance._only == '__model' or model_idx.optimize_queries:
                desired_fields = model_idx.fields_to_fetch
            elif instance._only == '__fields':
                desired_fields = instance._fields
            else:
                desired_fields = instance._only
            if desired_fields:  # Prevents setting the database fetch to __fields but not having specified any field to elasticsearch.
                items = items.only(
                    *[field.name
                      for field in model_obj._meta.get_fields()
                      # For complete backwards compatibility, you may want to exclude
                      # GenericForeignKey from the results.
                      if field.name in desired_fields and
                      not (field.many_to_one and field.related_model is None)
                      ]
                )
        # Let's reposition each item in the results and set the _searchmeta meta information.
        for item in items:
            pos, meta = found_results['{}.{}.{}'.format(index_name, model_name, item.pk)]
            item._searchmeta = meta
            results[pos] = item
    return results
def __init__(self, urls=None, timeout=None, force_new=False, raw_results=False, **kwargs):
'''
Creates a new ElasticSearch DSL object. Grabs the ElasticSearch connection from the pool
if it has already been initialized. Otherwise, creates a new one.
If no parameters are passed, everything is determined from the Django settings.
:param urls: A list of URLs, or a single string of URL (without leading `http://`), or None to read from settings.
:param idx: A list of indices or a single string representing an index_name name. Is optional. Will be merged with `idx_alias`.
:param idx_alias: A list of index_name aliases or a single string representing an index_name alias, as defined in the settings. Will be merged with `index_name`.
:param timeout: Timeout used in the connection.
:param force_new: Set to `True` to force a new elasticsearch connection. Otherwise will aggressively use any connection with the exact same settings.
:param **kwargs: Additional settings to pass to the low level elasticsearch client and to elasticsearch-sal-py.search.Search.
'''
Bungiesearch.__load_settings__()
urls = urls or Bungiesearch.BUNGIE['URLS']
if not timeout:
timeout = Bungiesearch.BUNGIE.get('TIMEOUT', Bungiesearch.DEFAULT_TIMEOUT)
search_keys = ['using', 'index', 'doc_type', 'extra']
search_settings, es_settings = {}, {}
for k, v in iteritems(kwargs):
if k in search_keys:
search_settings[k] = v
else:
es_settings[k] = v
if not es_settings:
# If there aren't any provided elasticsearch settings, let's see if it's defined in the settings.
es_settings = Bungiesearch.BUNGIE.get('ES_SETTINGS', {})
# Building a caching key to cache the es_instance for later use (and retrieved a previously cached es_instance).
cache_key = Bungiesearch._build_key(urls, timeout, **es_settings)
es_instance = None
if not force_new:
if cache_key in Bungiesearch._cached_es_instances:
es_instance = Bungiesearch._cached_es_instances[cache_key]
if not es_instance:
es_instance = Elasticsearch(urls, timeout=timeout, **es_settings)
Bungiesearch._cached_es_instances[cache_key] = es_instance
if 'using' not in search_settings:
search_settings['using'] = es_instance
super(Bungiesearch, self).__init__(**search_settings)
# Creating instance attributes.
self._only = [] # Stores the exact fields to fetch from the database when mapping.
self.results = [] # Store the mapped and unmapped results.
self._raw_results_only = raw_results
def _clone(self):
'''
Must clone additional fields to those cloned by elasticsearch-dsl-py.
'''
instance = super(Bungiesearch, self)._clone()
instance._raw_results_only = self._raw_results_only
return instance
def get_es_instance(self):
'''
Returns the low level elasticsearch instance to perform low level operations.
'''
return self._using
def execute_raw(self):
self.raw_results = super(Bungiesearch, self).execute()
def execute(self, return_results=True):
'''
Executes the query and attempts to create model objects from results.
'''
if self.results:
return self.results if return_results else None
self.execute_raw()
if self._raw_results_only:
self.results = self.raw_results
else:
self.map_results()
if return_results:
return self.results
def map_results(self):
'''
Maps raw results and store them.
'''
self.results = Bungiesearch.map_raw_results(self.raw_results, self)
def only(self, *fields):
    '''
    Restricts the fields to be fetched when mapping. Set to `__model` to fetch all fields define in the ModelIndex.

    :param fields: database field names, or the single sentinel string '__model'.
    :return: a clone of this search with the restriction applied (the original is untouched).
    '''
    s = self._clone()
    if len(fields) == 1 and fields[0] == '__model':
        # Sentinel: fetch every field declared on the ModelIndex.
        s._only = '__model'
    else:
        s._only = fields
    return s
def __iter__(self):
'''
Allows iterating on the response.
'''
self.execute()
return iter(self.results)
def __len__(self):
'''
Return elasticsearch-dsl-py count.
'''
return self.count()
def __getitem__(self, key):
'''
Overwriting the step in slice. It is used to set the results either as elasticsearch-dsl-py response object, or
attempt to fetch the Django model instance.
:warning: Getting an item will execute this search. Any search operation or field setting *must* be done prior to getting an item.
'''
if isinstance(key, slice):
if key.step is not None:
self._raw_results_only = key.step
if key.start is not None and key.stop is not None:
single_item = key.start - key.stop == -1
elif key.start is None and key.stop == 1:
single_item = True
else:
single_item = False
key = slice(key.start, key.stop)
else:
single_item = False
else:
single_item = True
results = super(Bungiesearch, self).__getitem__(key).execute()
if single_item:
try:
return results[0]
except IndexError:
return []
return results
def __getattr__(self, alias):
'''
Shortcut for search aliases. As explained in the docs (https://docs.python.org/2/reference/datamodel.html#object.__getattr__),
this is only called as a last resort in case the attribute is not found.
'''
return self.hook_alias(alias)
|
ChristopherRabotin/bungiesearch | bungiesearch/managers.py | BungiesearchManager.custom_search | python | def custom_search(self, index, doc_type):
'''
Performs a search on a custom elasticsearch index and mapping. Will not attempt to map result objects.
'''
from bungiesearch import Bungiesearch
return Bungiesearch(raw_results=True).index(index).doc_type(doc_type) | Performs a search on a custom elasticsearch index and mapping. Will not attempt to map result objects. | train | https://github.com/ChristopherRabotin/bungiesearch/blob/13768342bc2698b214eb0003c2d113b6e273c30d/bungiesearch/managers.py#L24-L29 | null | class BungiesearchManager(Manager):
model = None
'''
A Django manager for integrated search into models.
'''
@property
def search(self):
from bungiesearch import Bungiesearch
return Bungiesearch().index(*Bungiesearch.get_index(self.model, via_class=True)).doc_type(self.model.__name__)
def search_index(self, index):
from bungiesearch import Bungiesearch
if index not in Bungiesearch.get_index(self.model, via_class=True):
logger.warning('Model/doctype {} is not present on index {}: search may return no results.'.format(self.model.__name__, index))
return Bungiesearch().index(index).doc_type(self.model.__name__)
def contribute_to_class(self, cls, name):
    '''
    Sets up the signal processor. Since self.model is not available
    in the constructor, we perform this operation here.

    :param cls: the Django model class this manager is being attached to.
    :param name: the attribute name under which the manager is installed.
    '''
    super(BungiesearchManager, self).contribute_to_class(cls, name)
    from . import Bungiesearch
    from .signals import get_signal_processor
    settings = Bungiesearch.BUNGIE
    if 'SIGNALS' in settings:
        # Only wire up save/delete signal handling when signals are configured.
        self.signal_processor = get_signal_processor()
        self.signal_processor.setup(self.model)
def __getattr__(self, alias):
'''
Shortcut for search aliases. As explained in the docs (https://docs.python.org/2/reference/datamodel.html#object.__getattr__),
this is only called as a last resort in case the attribute is not found.
This function will check whether the given model is allowed to use the proposed alias and will raise an attribute error if not.
'''
# Don't treat "private" attrs as possible aliases. This prevents an infinite recursion bug.
# Similarly, if Bungiesearch is installed but not enabled, raise the expected error
if alias[0] == '_' or not dj_settings.BUNGIESEARCH:
raise AttributeError("'{}' object has no attribute '{}'".format(type(self), alias))
return self.search.hook_alias(alias, self.model)
|
ChristopherRabotin/bungiesearch | bungiesearch/managers.py | BungiesearchManager.contribute_to_class | python | def contribute_to_class(self, cls, name):
'''
Sets up the signal processor. Since self.model is not available
in the constructor, we perform this operation here.
'''
super(BungiesearchManager, self).contribute_to_class(cls, name)
from . import Bungiesearch
from .signals import get_signal_processor
settings = Bungiesearch.BUNGIE
if 'SIGNALS' in settings:
self.signal_processor = get_signal_processor()
self.signal_processor.setup(self.model) | Sets up the signal processor. Since self.model is not available
in the constructor, we perform this operation here. | train | https://github.com/ChristopherRabotin/bungiesearch/blob/13768342bc2698b214eb0003c2d113b6e273c30d/bungiesearch/managers.py#L31-L43 | [
"def get_signal_processor():\n signals = Bungiesearch.BUNGIE['SIGNALS']\n if 'SIGNAL_CLASS' in signals:\n signal_path = signals['SIGNAL_CLASS'].split('.')\n signal_module = import_module('.'.join(signal_path[:-1]))\n signal_class = getattr(signal_module, signal_path[-1])\n else:\n signal_class = BungieSignalProcessor\n return signal_class()\n",
"def setup(self, model):\n signals.post_save.connect(self.post_save_connector, sender=model)\n signals.pre_delete.connect(self.pre_delete_connector, sender=model)\n"
] | class BungiesearchManager(Manager):
model = None
'''
A Django manager for integrated search into models.
'''
@property
def search(self):
from bungiesearch import Bungiesearch
return Bungiesearch().index(*Bungiesearch.get_index(self.model, via_class=True)).doc_type(self.model.__name__)
def search_index(self, index):
    '''
    Return a search bound to a specific index for this manager's model.

    :param index: index name to search; a warning is logged if this model
        is not associated with that index (the search may return nothing).
    '''
    from bungiesearch import Bungiesearch
    if index not in Bungiesearch.get_index(self.model, via_class=True):
        logger.warning('Model/doctype {} is not present on index {}: search may return no results.'.format(self.model.__name__, index))
    return Bungiesearch().index(index).doc_type(self.model.__name__)
def custom_search(self, index, doc_type):
    '''
    Performs a search on a custom elasticsearch index and mapping. Will not attempt to map result objects.

    :param index: elasticsearch index name to query.
    :param doc_type: document type within that index.
    :return: a raw-results Bungiesearch bound to the given index and doc type.
    '''
    from bungiesearch import Bungiesearch
    return Bungiesearch(raw_results=True).index(index).doc_type(doc_type)
def __getattr__(self, alias):
    '''
    Shortcut for search aliases. As explained in the docs (https://docs.python.org/2/reference/datamodel.html#object.__getattr__),
    this is only called as a last resort in case the attribute is not found.
    This function will check whether the given model is allowed to use the proposed alias and will raise an attribute error if not.

    :param alias: name of the attribute / search alias being looked up.
    :raise AttributeError: for "private" names, when bungiesearch is disabled,
        or (via hook_alias) when no such alias is registered.
    '''
    # Don't treat "private" attrs as possible aliases. This prevents an infinite recursion bug.
    # Similarly, if Bungiesearch is installed but not enabled, raise the expected error
    if alias[0] == '_' or not dj_settings.BUNGIESEARCH:
        raise AttributeError("'{}' object has no attribute '{}'".format(type(self), alias))
    return self.search.hook_alias(alias, self.model)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.