code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1
value |
|---|---|---|---|---|---|
import logging
from rope.contrib.codeassist import code_assist, sorted_proposals
from pylsp import hookimpl, lsp
log = logging.getLogger(__name__)
@hookimpl
def pylsp_settings():
    """Default configuration for the rope_completion plugin.

    The plugin ships disabled and with lazy (non-eager) completion resolution.
    """
    rope_completion_defaults = {'enabled': False, 'eager': False}
    return {'plugins': {'rope_completion': rope_completion_defaults}}
def _resolve_completion(completion, data):
# pylint: disable=broad-except
try:
doc = data.get_doc()
except Exception as e:
log.debug("Failed to resolve Rope completion: %s", e)
doc = ""
completion['detail'] = '{0} {1}'.format(data.scope or "", data.name)
completion['documentation'] = doc
return completion
@hookimpl
def pylsp_completions(config, workspace, document, position):
    """Provide rope-based completion items at the given document position.

    Returns a list of LSP CompletionItem dicts, or None when there is nothing
    to offer (rope failed, produced nothing, or the cursor directly follows
    the word `import`).
    """
    # pylint: disable=too-many-locals
    settings = config.plugin_settings('rope_completion', document_path=document.path)
    resolve_eagerly = settings.get('eager', False)
    # Rope is a bit rubbish at completing module imports, so we'll return None
    word = document.word_at_position({
        # The -1 should really be trying to look at the previous word, but that might be quite expensive
        # So we only skip import completions when the cursor is one space after `import`
        'line': position['line'], 'character': max(position['character'] - 1, 0),
    })
    if word == 'import':
        return None
    offset = document.offset_at_position(position)
    rope_config = config.settings(document_path=document.path).get('rope', {})
    # Private pylsp helpers: build (and cache) the rope project/resource
    # objects backing this workspace and file.
    rope_project = workspace._rope_project_builder(rope_config)
    document_rope = document._rope_resource(rope_config)
    try:
        definitions = code_assist(rope_project, document.source, offset, document_rope, maxfixes=3)
    except Exception as e:  # pylint: disable=broad-except
        # Rope can raise on unparsable sources; degrade to "no completions".
        log.debug("Failed to run Rope code assist: %s", e)
        return []
    definitions = sorted_proposals(definitions)
    new_definitions = []
    for d in definitions:
        item = {
            'label': d.name,
            'kind': _kind(d),
            'sortText': _sort_text(d),
            'data': {
                'doc_uri': document.uri
            }
        }
        if resolve_eagerly:
            item = _resolve_completion(item, d)
        new_definitions.append(item)
    # most recently retrieved completion items, used for resolution
    document.shared_data['LAST_ROPE_COMPLETIONS'] = {
        # label is the only required property; here it is assumed to be unique
        completion['label']: (completion, data)
        for completion, data in zip(new_definitions, definitions)
    }
    definitions = new_definitions
    return definitions or None
@hookimpl
def pylsp_completion_item_resolve(completion_item, document):
    """Resolve formatted completion for given non-resolved completion.

    Looks the item up in the cache written by pylsp_completions; if found,
    fills in detail/documentation, otherwise returns the item unchanged.
    """
    # Use .get() with a default so a resolve request that arrives before any
    # completion request (i.e. before LAST_ROPE_COMPLETIONS exists in
    # shared_data) returns the item unchanged instead of raising KeyError.
    cached = document.shared_data.get('LAST_ROPE_COMPLETIONS', {})
    shared_data = cached.get(completion_item['label'])
    if shared_data:
        completion, data = shared_data
        return _resolve_completion(completion, data)
    return completion_item
def _sort_text(definition):
""" Ensure builtins appear at the bottom.
Description is of format <type>: <module>.<item>
"""
if definition.name.startswith("_"):
# It's a 'hidden' func, put it next last
return 'z' + definition.name
if definition.scope == 'builtin':
return 'y' + definition.name
# Else put it at the front
return 'a' + definition.name
# Maps rope proposal types to LSP completion item kinds. Built once at module
# import time so _kind() does not rebuild the dict on every completion.
_KIND_MAP = {
    'none': lsp.CompletionItemKind.Value,
    'type': lsp.CompletionItemKind.Class,
    'tuple': lsp.CompletionItemKind.Class,
    'dict': lsp.CompletionItemKind.Class,
    'dictionary': lsp.CompletionItemKind.Class,
    'function': lsp.CompletionItemKind.Function,
    'lambda': lsp.CompletionItemKind.Function,
    'generator': lsp.CompletionItemKind.Function,
    'class': lsp.CompletionItemKind.Class,
    'instance': lsp.CompletionItemKind.Reference,
    'method': lsp.CompletionItemKind.Method,
    'builtin': lsp.CompletionItemKind.Class,
    'builtinfunction': lsp.CompletionItemKind.Function,
    'module': lsp.CompletionItemKind.Module,
    'file': lsp.CompletionItemKind.File,
    'xrange': lsp.CompletionItemKind.Class,
    'slice': lsp.CompletionItemKind.Class,
    'traceback': lsp.CompletionItemKind.Class,
    'frame': lsp.CompletionItemKind.Class,
    'buffer': lsp.CompletionItemKind.Class,
    'dictproxy': lsp.CompletionItemKind.Class,
    'funcdef': lsp.CompletionItemKind.Function,
    'property': lsp.CompletionItemKind.Property,
    'import': lsp.CompletionItemKind.Module,
    'keyword': lsp.CompletionItemKind.Keyword,
    'constant': lsp.CompletionItemKind.Variable,
    'variable': lsp.CompletionItemKind.Variable,
    'value': lsp.CompletionItemKind.Value,
    'param': lsp.CompletionItemKind.Variable,
    'statement': lsp.CompletionItemKind.Keyword,
}


def _kind(d):
    """Return the LSP CompletionItemKind for a rope proposal.

    Returns None when the proposal type is unknown.
    """
    return _KIND_MAP.get(d.type)
from collections import abc
import json
from typing import (
    AbstractSet,
    Any,
    Callable,
    Dict,
    Iterator,
    List,
    Optional,
    Tuple,
    Union,
)
import urllib
import urllib.parse

import aiohttp
import requests
def to_primitive(o: Any) -> Any:
    """If object is an observed object, converts to primitive, otherwise returns it.

    Args:
        o (Any): Any object.

    Returns:
        Any: The primitive equivalent if o is an ObservedList or ObservedDict,
            otherwise o.
    """
    # Single isinstance call with a tuple replaces the chained checks.
    if isinstance(o, (ObservedList, ObservedDict)):
        return o.value
    return o
class DBJSONEncoder(json.JSONEncoder):
    """A JSON encoder that uses to_primitive on passed objects."""

    def default(self, o: Any) -> Any:
        """Convert observed wrappers to their underlying primitive value.

        For any other unserializable object, defer to the base implementation,
        which raises TypeError. (The original code returned the object itself,
        which made json.dumps recurse indefinitely on unserializable input
        instead of reporting a clean error.)
        """
        primitive = to_primitive(o)
        if primitive is o:
            return super().default(o)
        return primitive
def dumps(val: Any) -> str:
    """JSON encode a value in the smallest way possible.

    Also handles ObservedList and ObservedDict by using a custom encoder.

    Args:
        val (Any): The value to be encoded.

    Returns:
        str: The JSON string.
    """
    compact_separators = (",", ":")
    return json.dumps(val, separators=compact_separators, cls=DBJSONEncoder)


# Alias so methods that shadow the name `dumps` can still reach the
# module-level function.
_dumps = dumps
class AsyncDatabase:
    """Async interface for Repl.it Database."""

    __slots__ = ("db_url", "sess")

    def __init__(self, db_url: str) -> None:
        """Initialize database. You shouldn't have to do this manually.

        Args:
            db_url (str): Database url to use.
        """
        self.db_url = db_url
        # One shared session gives connection pooling across all calls.
        self.sess = aiohttp.ClientSession()

    async def __aenter__(self) -> "AsyncDatabase":
        return self

    async def __aexit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
        # BUG FIX: ClientSession.close() is a coroutine and must be awaited;
        # the original fire-and-forget call never actually closed the session.
        await self.sess.close()

    async def get(self, key: str) -> Any:
        """Return the value for key if key is in the database.

        This method will JSON decode the value. To disable this behavior, use
        the `get_raw` method instead.

        Args:
            key (str): The key to retrieve

        Returns:
            Any: The decoded value for key if key is in the database.
        """
        return json.loads(await self.get_raw(key))

    async def get_raw(self, key: str) -> str:
        """Get the raw value of an item from the database.

        Args:
            key (str): The key to retrieve

        Raises:
            KeyError: Key is not set

        Returns:
            str: The value of the key
        """
        async with self.sess.get(
            self.db_url + "/" + urllib.parse.quote(key)
        ) as response:
            # The DB signals a missing key with HTTP 404.
            if response.status == 404:
                raise KeyError(key)
            response.raise_for_status()
            return await response.text()

    async def set(self, key: str, value: Any) -> None:
        """Set a key in the database to the result of JSON encoding value.

        Args:
            key (str): The key to set
            value (Any): The value to set it to. Must be JSON-serializable.
        """
        await self.set_raw(key, _dumps(value))

    async def set_raw(self, key: str, value: str) -> None:
        """Set a key in the database to value.

        Args:
            key (str): The key to set
            value (str): The value to set it to
        """
        await self.set_bulk_raw({key: value})

    async def set_bulk(self, values: Dict[str, Any]) -> None:
        """Set multiple values in the database, JSON encoding them.

        Args:
            values (Dict[str, Any]): A dictionary of values to set.
                Values must be JSON-serializable.
        """
        await self.set_bulk_raw({k: _dumps(v) for k, v in values.items()})

    async def set_bulk_raw(self, values: Dict[str, str]) -> None:
        """Set multiple values in the database.

        Args:
            values (Dict[str, str]): The key-value pairs to set.
        """
        async with self.sess.post(self.db_url, data=values) as response:
            response.raise_for_status()

    async def delete(self, key: str) -> None:
        """Delete a key from the database.

        Args:
            key (str): The key to delete

        Raises:
            KeyError: Key does not exist
        """
        async with self.sess.delete(
            self.db_url + "/" + urllib.parse.quote(key)
        ) as response:
            if response.status == 404:
                raise KeyError(key)
            response.raise_for_status()

    async def list(self, prefix: str) -> Tuple[str, ...]:
        """List keys in the database which start with prefix.

        Args:
            prefix (str): The prefix keys must start with; blank means no
                filtering.

        Returns:
            Tuple[str, ...]: The keys found.
        """
        params = {"prefix": prefix, "encode": "true"}
        async with self.sess.get(self.db_url, params=params) as response:
            response.raise_for_status()
            text = await response.text()
            if not text:
                return tuple()
            # Keys come back URL-encoded, one per line.
            return tuple(urllib.parse.unquote(k) for k in text.split("\n"))

    async def to_dict(self, prefix: str = "") -> Dict[str, Any]:
        """Dump all data in the database into a dictionary.

        Args:
            prefix (str): The prefix the keys must start with,
                blank means anything. Defaults to "".

        Returns:
            Dict[str, Any]: All matching keys mapped to their decoded values.
        """
        # NOTE: one HTTP request per key; large databases will be slow here.
        ret = {}
        keys = await self.list(prefix=prefix)
        for i in keys:
            ret[i] = await self.get(i)
        return ret

    async def keys(self) -> Tuple[str, ...]:
        """Get all keys in the database.

        Returns:
            Tuple[str, ...]: The keys in the database.
        """
        return await self.list("")

    async def values(self) -> Tuple[Any, ...]:
        """Get every value in the database.

        Returns:
            Tuple[Any, ...]: The values in the database.
        """
        data = await self.to_dict()
        return tuple(data.values())

    async def items(self) -> Tuple[Tuple[str, Any], ...]:
        """Convert the database to a dict and return the dict's items.

        Returns:
            Tuple[Tuple[str, Any], ...]: The (key, value) pairs.
        """
        return tuple((await self.to_dict()).items())

    def __repr__(self) -> str:
        """A representation of the database.

        Returns:
            A string representation of the database object.
        """
        return f"<{self.__class__.__name__}(db_url={self.db_url!r})>"
class ObservedList(abc.MutableSequence):
    """A list that calls a function every time it is mutated.

    Attributes:
        value (List): The underlying list.
    """

    __slots__ = ("_on_mutate_handler", "value")

    def __init__(
        self, on_mutate: Callable[[List], None], value: Optional[List] = None
    ) -> None:
        self._on_mutate_handler = on_mutate
        # A fresh list is created per instance; a mutable default argument
        # would be shared across instances.
        if value is None:
            self.value = []
        else:
            self.value = value

    def on_mutate(self) -> None:
        """Calls the mutation handler with the underlying list as an argument."""
        self._on_mutate_handler(self.value)

    def __getitem__(self, i: Union[int, slice]) -> Any:
        return self.value[i]

    def __setitem__(self, i: Union[int, slice], val: Any) -> None:
        self.value[i] = val
        self.on_mutate()

    def __delitem__(self, i: Union[int, slice]) -> None:
        del self.value[i]
        self.on_mutate()

    def __len__(self) -> int:
        return len(self.value)

    def __iter__(self) -> Iterator[Any]:
        return iter(self.value)

    def __imul__(self, rhs: Any) -> "ObservedList":
        self.value *= rhs
        self.on_mutate()
        # BUG FIX: return self (not self.value) so `lst *= n` keeps the name
        # bound to the observed wrapper instead of silently replacing it with
        # the raw, unobserved list.
        return self

    def __eq__(self, rhs: Any) -> bool:
        return self.value == rhs

    def insert(self, i: int, elem: Any) -> None:
        """Inserts a value into the underlying list."""
        self.value.insert(i, elem)
        self.on_mutate()

    def set_value(self, value: List) -> None:
        """Sets the value attribute and triggers the mutation function."""
        self.value = value
        self.on_mutate()

    def __repr__(self) -> str:
        return f"{type(self).__name__}(value={self.value!r})"
class ObservedDict(abc.MutableMapping):
    """A dict that calls a function every time it is mutated.

    Attributes:
        value (Dict): The underlying dict.
    """

    __slots__ = ("_on_mutate_handler", "value")

    def __init__(
        self, on_mutate: Callable[[Dict], None], value: Optional[Dict] = None
    ) -> None:
        self._on_mutate_handler = on_mutate
        # A fresh dict is created per instance; a mutable default argument
        # would be shared across instances.
        if value is None:
            self.value = {}
        else:
            self.value = value

    def on_mutate(self) -> None:
        """Calls the mutation handler with the underlying dict as an argument."""
        self._on_mutate_handler(self.value)

    def __contains__(self, k: Any) -> bool:
        return k in self.value

    def __getitem__(self, k: Any) -> Any:
        return self.value[k]

    def __setitem__(self, k: Any, v: Any) -> None:
        self.value[k] = v
        self.on_mutate()

    def __delitem__(self, k: Any) -> None:
        del self.value[k]
        self.on_mutate()

    def __iter__(self) -> Iterator[Any]:
        return iter(self.value)

    def __len__(self) -> int:
        return len(self.value)

    def __eq__(self, rhs: Any) -> bool:
        return self.value == rhs

    def __imul__(self, rhs: Any) -> "ObservedDict":
        # NOTE: dicts do not support *=, so this raises TypeError in practice;
        # kept for parity with ObservedList. Return self (not self.value) so
        # the observed wrapper would be preserved were it ever to succeed.
        self.value *= rhs
        self.on_mutate()
        return self

    def set_value(self, value: Dict) -> None:
        """Sets the value attribute and triggers the mutation function."""
        self.value = value
        self.on_mutate()

    def __repr__(self) -> str:
        return f"{type(self).__name__}(value={self.value!r})"
# By putting these outside we save some memory
def _get_on_mutate_cb(d: Any) -> Callable[[Any], None]:
def cb(_: Any) -> None:
d.on_mutate()
return cb
def _get_set_cb(db: Any, k: str) -> Callable[[Any], None]:
def cb(val: Any) -> None:
db[k] = val
return cb
def item_to_observed(on_mutate: Callable[[Any], None], item: Any) -> Any:
    """Takes a JSON value and recursively converts it into an Observed value.

    Dicts and lists are wrapped in ObservedDict/ObservedList so that nested
    mutations propagate upward via on_mutate; scalars are returned unchanged.
    """
    if isinstance(item, dict):
        # no-op handler so we don't call on_mutate in the loop below
        observed_dict = ObservedDict((lambda _: None), item)
        cb = _get_on_mutate_cb(observed_dict)
        for k, v in item.items():
            # Each child's mutation callback points at this wrapper, which in
            # turn notifies its own handler (installed just below).
            observed_dict[k] = item_to_observed(cb, v)
        # Install the real handler only after the children are in place so the
        # assignments above don't trigger spurious notifications.
        observed_dict._on_mutate_handler = on_mutate
        return observed_dict
    elif isinstance(item, list):
        # no-op handler so we don't call on_mutate in the loop below
        observed_list = ObservedList((lambda _: None), item)
        cb = _get_on_mutate_cb(observed_list)
        for i, v in enumerate(item):
            observed_list[i] = item_to_observed(cb, v)
        # Real handler installed after children to avoid spurious callbacks.
        observed_list._on_mutate_handler = on_mutate
        return observed_list
    else:
        return item
class Database(abc.MutableMapping):
    """Dictionary-like interface for Repl.it Database.

    This interface will coerce all values to and from JSON. If you
    don't want this, use AsyncDatabase instead.
    """

    __slots__ = ("db_url", "sess")

    def __init__(self, db_url: str) -> None:
        """Initialize database. You shouldn't have to do this manually.

        Args:
            db_url (str): Database url to use.
        """
        self.db_url = db_url
        # One shared session gives connection pooling across all calls.
        self.sess = requests.Session()

    def __getitem__(self, key: str) -> Any:
        """Get the value of an item from the database.

        Will replace the mutable JSON types of dict and list with subclasses
        that enable nested setting. These classes will block to request the DB
        on every mutation, which can have performance implications. To disable
        this, use the `get_raw` method instead.

        This method will JSON decode the value. To disable this behavior, use
        the `get_raw` method instead.

        Args:
            key (str): The key to retrieve

        Returns:
            Any: The value of the key
        """
        raw_val = self.get_raw(key)
        val = json.loads(raw_val)
        # Wrap mutable JSON values so nested mutations write back to the DB.
        return item_to_observed(_get_set_cb(self, key), val)

    # This should be positional-only but flake8 doesn't like that
    def get(self, key: str, default: Any = None) -> Any:
        """Return the value for key if key is in the database, else default.

        See __getitem__ for notes on the observed wrappers and JSON decoding.

        Args:
            key (str): The key to retrieve
            default (Any): The default to return if the key is not in the
                database. Defaults to None.

        Returns:
            Any: The value for key if key is in the database, else default.
        """
        return super().get(key, item_to_observed(_get_set_cb(self, key), default))

    def get_raw(self, key: str) -> str:
        """Look up the given key in the database and return the corresponding value.

        Args:
            key (str): The key to look up

        Raises:
            KeyError: The key is not in the database.

        Returns:
            str: The value of the key in the database.
        """
        r = self.sess.get(self.db_url + "/" + urllib.parse.quote(key))
        # The DB signals a missing key with HTTP 404.
        if r.status_code == 404:
            raise KeyError(key)
        r.raise_for_status()
        return r.text

    def __setitem__(self, key: str, value: Any) -> None:
        """Set a key in the database to the result of JSON encoding value.

        Args:
            key (str): The key to set
            value (Any): The value to set it to. Must be JSON-serializable.
        """
        self.set(key, value)

    def set(self, key: str, value: Any) -> None:
        """Set a key in the database to value, JSON encoding it.

        Args:
            key (str): The key to set
            value (Any): The value to set.
        """
        self.set_raw(key, _dumps(value))

    def set_raw(self, key: str, value: str) -> None:
        """Set a key in the database to value.

        Args:
            key (str): The key to set
            value (str): The value to set.
        """
        self.set_bulk_raw({key: value})

    def set_bulk(self, values: Dict[str, Any]) -> None:
        """Set multiple values in the database, JSON encoding them.

        Args:
            values (Dict[str, Any]): A dictionary of values to set.
                Values must be JSON-serializable.
        """
        self.set_bulk_raw({k: _dumps(v) for k, v in values.items()})

    def set_bulk_raw(self, values: Dict[str, str]) -> None:
        """Set multiple values in the database.

        Args:
            values (Dict[str, str]): The key-value pairs to set.
        """
        r = self.sess.post(self.db_url, data=values)
        r.raise_for_status()

    def __delitem__(self, key: str) -> None:
        """Delete a key from the database.

        Args:
            key (str): The key to delete

        Raises:
            KeyError: Key is not set
        """
        r = self.sess.delete(self.db_url + "/" + urllib.parse.quote(key))
        if r.status_code == 404:
            raise KeyError(key)
        r.raise_for_status()

    def __iter__(self) -> Iterator[str]:
        """Return an iterator for the database."""
        return iter(self.prefix(""))

    def __len__(self) -> int:
        """The number of keys in the database."""
        # NOTE: requires a full key listing per call.
        return len(self.prefix(""))

    def prefix(self, prefix: str) -> Tuple[str, ...]:
        """Return all of the keys in the database that begin with the prefix.

        Args:
            prefix (str): The prefix the keys must start with,
                blank means anything.

        Returns:
            Tuple[str, ...]: The keys found.
        """
        r = self.sess.get(f"{self.db_url}", params={"prefix": prefix, "encode": "true"})
        r.raise_for_status()
        if not r.text:
            return tuple()
        # Keys come back URL-encoded, one per line.
        return tuple(urllib.parse.unquote(k) for k in r.text.split("\n"))

    def keys(self) -> AbstractSet[str]:
        """Returns all of the keys in the database.

        Returns:
            AbstractSet[str]: The keys.
        """
        # Rationale for this method:
        # This is implemented for free from our superclass using iter, but when
        # you db.keys() in the console, you should see the keys immediately.
        # Without this, it will just print an ugly repr that doesn't show the
        # data within. By implementing this method we get pretty output in the
        # console when you type db.keys() in an interactive prompt.
        # TODO: Return a set from prefix since keys are guaranteed unique
        return set(self.prefix(""))

    def dumps(self, val: Any) -> str:
        """JSON encodes a value that can be a special DB object."""
        return _dumps(val)

    def __repr__(self) -> str:
        """A representation of the database.

        Returns:
            A string representation of the database object.
        """
        return f"<{self.__class__.__name__}(db_url={self.db_url!r})>"

    def close(self) -> None:
        """Closes the database client connection."""
        self.sess.close()
import os
import openai
openai.api_key = os.environ["OPENAI_API_KEY"]
class ModelTemplate:
    """Base wrapper around an OpenAI model configuration.

    All keyword arguments given to the constructor are stored as instance
    attributes and later forwarded as request parameters when a completion
    request is built.
    """

    def __init__(self, model_file: str, **kwargs):
        self.model_file = model_file
        # Remaining configuration becomes instance attributes so it can be
        # round-tripped into request parameters by get_filtered_params_only.
        self.__dict__.update(kwargs)

    def get_filtered_params_only(self, exclude_params: list):
        """Return instance attributes as a dict, omitting keys in exclude_params."""
        return {
            key: value
            for key, value in self.__dict__.items()
            if key not in exclude_params
        }

    def construct_completion_request(
        self, added_request_params: dict, exclude_params=("model_file",)
    ):
        """Merge stored model parameters with per-request parameters.

        The default for exclude_params is a tuple rather than a list to avoid
        the shared-mutable-default pitfall; callers may still pass a list.
        Per-request parameters override stored ones on key collision.
        """
        model_params = self.get_filtered_params_only(exclude_params)
        model_params.update(added_request_params)
        return model_params

    def create_completion_request(self, request_params: dict):
        """Build and send a Completion request to the OpenAI API."""
        request_kwargs = self.construct_completion_request(request_params)
        result = openai.Completion.create(**request_kwargs)
        return result

    def create_chat_completion_request(self, request_params: dict):
        """Build and send a ChatCompletion request to the OpenAI API."""
        request_kwargs = self.construct_completion_request(request_params)
        result = openai.ChatCompletion.create(**request_kwargs)
        return result
class DaVinciModel(ModelTemplate):
    """Wrapper for the davinci engine, optionally overridden by a fine-tuned model."""

    def __init__(
        self,
        model_file,
        max_tokens: int,
        temperature: float,
        frequency_penalty: float,
        stop: list,
        **kwargs
    ):
        super(DaVinciModel, self).__init__(
            model_file,
            max_tokens=max_tokens,
            temperature=temperature,
            frequency_penalty=frequency_penalty,
            stop=stop,
            **kwargs
        )
        # No fine-tuned model configured by default; create_completion_request
        # falls back to the base engine in that case.
        self.model = None

    @property
    def engine(self):
        return "davinci"

    def create_completion_request(self, prompt, user_id):
        """Send a completion request, preferring a fine-tuned model if set."""
        engine_name = self.model if self.model else self.engine
        added_params = {"prompt": prompt, "user": user_id, "engine": engine_name}
        return super().create_completion_request(added_params)
class TextDavinciZeroZeroThree(ModelTemplate):
    """Wrapper for the text-davinci-003 model."""

    def __init__(self, model_file, max_tokens: int, **kwargs):
        super(TextDavinciZeroZeroThree, self).__init__(
            model_file, max_tokens=max_tokens, **kwargs
        )

    @property
    def model(self):
        # Fall back to the canonical model name until a caller overrides it.
        return getattr(self, "_model", "text-davinci-003")

    @model.setter
    def model(self, value):
        # BUG FIX: the original setter assigned to self.model, which re-enters
        # this setter and recurses until RecursionError. Store privately.
        # NOTE(review): a set _model lands in __dict__ and would therefore be
        # included by get_filtered_params_only; consider filtering
        # underscore-prefixed keys there if the setter is actually used.
        self._model = value

    def create_completion_request(self, prompt, user_id):
        added_params = {"prompt": prompt, "user": user_id, "model": self.model}
        return super().create_completion_request(added_params)
class GPTThreeFiveTurbo(ModelTemplate):
    """Wrapper for the gpt-3.5-turbo chat model."""

    def __init__(self, model_file, max_tokens: int, temperature: float, **kwargs):
        super(GPTThreeFiveTurbo, self).__init__(
            model_file, max_tokens=max_tokens, temperature=temperature, **kwargs
        )

    @property
    def model(self):
        return "gpt-3.5-turbo"

    def create_completion_request(self, messages: list, user_id):
        """Send a chat completion request built from the given message list."""
        request_extras = {"messages": messages, "user": user_id, "model": self.model}
        return super().create_chat_completion_request(request_extras)
class CurieModel(ModelTemplate):
    """Wrapper for a Curie-family model, whose name is given explicitly."""

    def __init__(self, model_file, max_tokens: int, model: str, **kwargs):
        super(CurieModel, self).__init__(
            model_file, max_tokens=max_tokens, model=model, **kwargs
        )

    def create_completion_request(self, prompt, user_id):
        # self.model was stored as an attribute by ModelTemplate.__init__.
        added_params = {"prompt": prompt, "user": user_id, "model": self.model}
        return super().create_completion_request(added_params)
from __future__ import with_statement
# Python 2 relic: forced new-style classes under Python 2; it has no effect on
# Python 3 and is kept only for historical compatibility.
__metaclass__ = type
# Public API of this module.
__all__ = [
    'parseargs',
]
from optparse import OptionParser
from botlib import version
from botlib.i18n import _
def parseargs():
    """Build the replybot option parser and parse the command line.

    Returns:
        A 4-tuple ``(parser, options, arguments, keywords)`` where ``keywords``
        is a dict built from the trailing key/value argument pairs.
    """
    parser = OptionParser(
        version='The Python Replybot v%s' % version.__version__,
        usage=_("""\
%prog [options] [key val [key val ...]]
Send an automatic reply to a message posted to an email address.
This script sends a reply to a message taken from standard input. The reply
text is fetched from a url specified in a configuration file and cached for a
certain amount of time to reduce network traffic.
The reply text uses $variable expansions as described here:
http://www.python.org/doc/current/lib/node40.html
Substitution variables are taken from the RFC 2822 headers of the original
message (coerced to lower case) and the optional case-sensitive key/value
pairs provided on the command line."""))
    parser.add_option('-C', '--configuration', metavar='FILE',
                      help=_("""\
The configuration file to use, otherwise search for the file in this order:
replybot.cfg in the directory containing the replybot script, replybot.cfg in
a sibling etc directory to the directory where this script lives
(i.e. ../etc/replybot.cfg), the system file /etc/replybot.cfg. If no
configuration file is found and this option is not given, an error occurs.
See the file replybot.cfg.sample in the source distribution for details."""))
    # CONSISTENCY FIX: wrap this help text in _() like every other option so
    # it is included in translation catalogs.
    parser.add_option('-s', '--selector', action='store',
                      default='DEFAULT', metavar='SECTION', help=_("""\
SECTION chooses and override section in the configuration file. Without this,
only the DEFAULT section values will be used."""))
    parser.add_option('-p', '--purge-cache', default=[], metavar='CHOICES',
                      action='append',
                      choices=('notices', 'replies', 'whitelist', 'all'),
                      help=_("""\
This option purges certain information in the replybot's database. You can
have multiple purge options on the command line. After a purge, replybot
exits. Here are the options: `notices' purges the cache of reply messages;
`replies' purges the last reply dates for all recipients; `whitelist' purges
all whitelist flags; `all' combines all the previous purge options."""))
    parser.add_option('-w', '--add-whitelist', default=[], metavar='PATTERN',
                      action='append', help=_("""\
Add a pattern to the whitelist; the pattern can either be an explicit address,
or it can be a regular expression. Put a ^ at the front of PATTERN to
indicate a regular expression. Whitelisted addresses will never get an
autoreply. Multiple -w options can be provided, or use -W to provide a file
of patterns to whitelist. After processing this option, replybot exits."""))
    parser.add_option('-W', '--whitelist-file', action='store',
                      metavar='FILE', default=None, help=_("""\
Add all the patterns in the file to the whitelist. Whitelisted addresses will
never get an autoreply. Patterns in this file must appear one-per line, and
can be in either form accepted by email.Utils.parseaddr(), or prepend the line
with a ^ to indicate a regular expression. After processing this option,
replybot exits."""))
    parser.add_option('-d', '--debug', default=False, action='store_true',
                      help=_("""\
Put replybot in debug mode. Everything works except that autoreply emails are
never actually sent."""))
    parser.add_option('-t', '--testing', default=False, action='store_true',
                      help=_("""\
Put replybot in testing mode. This enables some extra functionality, such as
positive replies being sent to messages with an `X-Ack: Yes' header."""))
    options, arguments = parser.parse_args()
    # Trailing positional arguments are key/value pairs; an odd count means a
    # key is missing its value.
    if len(arguments) % 2:
        parser.error(_('Odd number of key/value pairs'))
    keywords = dict(zip(arguments[::2], arguments[1::2]))
    return parser, options, arguments, keywords
from abc import ABC, abstractmethod
from collections.abc import AsyncGenerator, Awaitable, Callable
from dataclasses import dataclass
from typing import Optional, Type, TypeVar
T = TypeVar("T")
Fetcher = Callable[[str], Awaitable[Optional[str]]]
ICON_FOLDER = "📂"
ICON_PACKAGE = "📦"
ICON_OPTICAL = "📀"
ICON_QCOW = "🐮"
ICON_OTHER = " "
class ContentError(Exception):
    """An error raised when indexed content appears to be invalid.

    Errors of this type are raised when repo-autoindex is able to successfully
    retrieve content and determine a repository type but fails to parse
    repository metadata. For example, a corrupt yum repository may cause this
    error to be raised.
    """
class FetcherError(Exception):
    # Internal-only error used to separate exceptions raised by fetchers from
    # exceptions raised by anything else.
    pass
@dataclass
class GeneratedIndex:
    """A single HTML index page generated by repo-autoindex."""

    content: str
    """The content of this index page (an HTML document)."""

    relative_dir: str = "."
    """The directory of this index page, relative to the root of the indexed
    repository.
    """
@dataclass
class IndexEntry:
    # One row in a generated index listing.
    # href: link target, relative to the index page.
    href: str
    # text: visible link text (typically the file or directory name).
    text: str
    # time/size: optional listing columns; blank when unknown.
    time: str = ""
    size: str = ""
    # padding: extra spacing used to align columns in the rendered listing.
    padding: str = ""
    # icon: one of the ICON_* constants shown next to the entry.
    icon: str = ICON_OTHER
class Repo(ABC):
    """Base class for a detected repository type that can render HTML indexes."""

    def __init__(
        self,
        base_url: str,
        entry_point_content: str,
        fetcher: Fetcher,
    ):
        # base_url: root URL of the repository being indexed.
        # entry_point_content: content of the file used to detect this repo type.
        # fetcher: async callable used to retrieve further repository files.
        self.base_url = base_url
        self.entry_point_content = entry_point_content
        self.fetcher = fetcher

    @abstractmethod
    def render_index(
        self, index_href_suffix: str
    ) -> AsyncGenerator[GeneratedIndex, None]:
        """Yield one GeneratedIndex per directory of the repository."""
        pass  # pragma: no cover

    @classmethod
    @abstractmethod
    async def probe(cls: Type[T], fetcher: Fetcher, url: str) -> Optional[T]:
        """Determine if a specified URL seems to point at a repository of this type.

        If so, returns an initialized Repo of a concrete subtype. If not, returns None.
        """
        pass  # pragma: no cover
import json
from langchain.callbacks import get_openai_callback
from repo_chat import chain_manager
import time
class RetrievalChain:
    """
    A class for chatting with a large language model using document retrieval from a GitHub repo.
    It leverages the langchain library to facilitate the retrieval and chat operations.
    """

    def __init__(self, vectorstore, repo):
        """
        Initialize the class with the given vectorstore
        """
        self.vectorstore = vectorstore
        self.repo = repo
        # One record appended per chain invocation; see log_entry().
        self.chainlogs = []

    def log_entry(self, chain_name, input_data, output_data, exec_time):
        """Log the given method, input_data, output_data and execution time"""
        # NOTE(review): assumes every chain output is a dict with a 'text' key
        # and that input_data is JSON-serializable — confirm for all chains.
        self.chainlogs.append({
            "chain_name": chain_name,
            "input_data": json.dumps(input_data),
            "output_data": output_data['text'],
            "execution_time": exec_time
        })

    def get_chain_inputs(self, query, get_docs=True):
        """
        Retrieve similar documents from vectorstore based on the given query.
        Prepare the chain inputs using these documents.
        """
        chain_inputs = {
            "query": query,
            "repo": self.repo,
        }
        if get_docs:
            # Top-5 nearest chunks; similarity scores are discarded and only
            # the chunk text is passed to the chain.
            docs = self.vectorstore.similarity_search_with_score(query, k=5)
            chain_inputs["similar_documents"] = [doc.page_content for doc, _ in docs]
        return chain_inputs

    def call_chain(self, chain, input_data):
        """Call the given chain and log the execution time"""
        # NOTE(review): assumes the chain object exposes a `name` attribute —
        # verify against chain_manager's chain construction.
        start_time = time.time()
        output = chain(input_data)
        exec_time = time.time() - start_time
        self.log_entry(chain.name, input_data, output, exec_time)
        return output

    def process_query(self, query, context_validator, run_query, context_threshold=60):
        """
        Process the given query using the context_validator and run_query chains.
        If sufficient context is present, return the result of run_query.
        """
        chain_inputs = self.get_chain_inputs(query)
        val_resp = self.call_chain(context_validator, chain_inputs)
        # Presumably the validator answers with a numeric confidence (0-100)
        # as text; int() raises ValueError otherwise — TODO confirm.
        sufficient_context = int(val_resp["text"].lower())
        if sufficient_context > context_threshold:
            return self.call_chain(run_query, chain_inputs)
        else:
            return None

    def iterate_through_queries(
        self, upgrade_query, query, context_validator, run_query, context_threshold
    ):
        """
        For each query retrieved from the upgrade_query response,
        process the query and return the answer if found.
        """
        # Ask the upgrade chain for rewritten queries; no documents are needed
        # for this step (get_docs=False).
        chain_inputs = self.get_chain_inputs(query, get_docs=False)
        upgrade_query_resp = self.call_chain(upgrade_query, chain_inputs)
        # Response is expected to be a numbered, newline-separated list;
        # strip the leading "N. " numbering from each line.
        for q in upgrade_query_resp["text"].split("\n"):
            refined_query = q.lstrip("0123456789. ")
            answer = self.process_query(
                refined_query, context_validator, run_query, context_threshold
            )
            if answer is not None:
                return answer
        return None

    def manage_workflow(self, query, context_validator, run_query):
        """Manage workflow for upgrading the query and processing it"""
        # Start strict and relax the required context confidence by 10 per
        # round until an answer is found or the threshold reaches zero.
        context_threshold = 60
        upgrade_query = chain_manager.get_chain("UPGRADE_QUERY")
        while True:
            answer = self.iterate_through_queries(
                upgrade_query, query, context_validator, run_query, context_threshold
            )
            if answer is not None or context_threshold <= 0:
                break
            context_threshold -= 10
        return answer

    def chat(self, query):
        """
        Begin chat with the QA chain
        Initialize context_validator and run_query chains.
        """
        context_validator = chain_manager.get_chain("CONTEXT_VALIDATOR")
        run_query = chain_manager.get_chain("RUN_QUERY_RAG")
        # The callback context (cb) tracks OpenAI token usage; its totals are
        # currently unused but the context must wrap the chain calls.
        with get_openai_callback() as cb:
            # First try the query as-is; fall back to the query-upgrading
            # workflow if context was insufficient.
            answer = self.process_query(query, context_validator, run_query)
            if answer is not None:
                return answer
            else:
                return self.manage_workflow(query, context_validator, run_query)
class RawChain:
    """Chain for answering queries over raw (non-indexed) repository code."""

    def __init__(self, repo_data):
        """Store the raw repository contents used as context for every query."""
        self.repo_data = repo_data

    def chat(self, query):
        """Run the RAW_CODE chain over the stored repo data for the query."""
        raw_chain = chain_manager.get_chain("RAW_CODE")
        # The callback context tracks OpenAI token usage; its totals are
        # currently unused but the context must wrap the chain call.
        with get_openai_callback() as cb:
            chain_inputs = {
                "query": query,
                "repo_data": self.repo_data,
            }
            output = raw_chain(chain_inputs)
            return output
import concurrent.futures
import pandas as pd
from repo_chat.chat_utils import RetrievalChain
from repo_chat import chain_manager
class CriticChain:
    """Thin wrapper around the CRITIC chain used to grade chat responses."""

    def __init__(self):
        self.chain = chain_manager.get_chain("CRITIC")

    def score(self, query, response, repo):
        """Score a given response to a query"""
        return self.chain({"query": query, "response": response, "repo": repo})
class QueryEvaluator:
    """Runs one query several times through fresh chains and scores each run."""

    def __init__(self, get_vectorstore, query, repo, runs_per_query=5):
        """Store the vectorstore factory, query, repo name and run count."""
        self.get_vectorstore = get_vectorstore
        self.query = query
        self.runs_per_query = runs_per_query
        self.repo = repo
        self.critic = CriticChain()

    def score_response(self, chain, query, query_id):
        """Chat through *chain*, score the reply, and tag logs with *query_id*."""
        response = chain.chat(query)
        critique = self.critic.score(query, response["text"], self.repo)
        response["score"] = critique["text"]
        # Tag the response and every chain log entry with the run id.
        response["query_id"] = query_id
        for entry in chain.chainlogs:
            entry["query_id"] = query_id
        return {"chainlogs": chain.chainlogs, "response": response}

    def evaluate(self):
        """Score ``runs_per_query`` fresh RetrievalChain runs of the query."""
        return [
            self.score_response(
                RetrievalChain(self.get_vectorstore(), self.repo),
                self.query,
                query_id,
            )
            for query_id in range(self.runs_per_query)
        ]
class MultiQueryEvaluator:
    """Evaluates many queries, each repeated several times, in parallel."""

    def __init__(self, get_vectorstore, queries, repo, runs_per_query=5):
        """Store the vectorstore factory, query list, repo and run count."""
        self.get_vectorstore = get_vectorstore
        self.queries = queries
        self.runs_per_query = runs_per_query
        self.repo = repo
        self.log_user_message()

    def log_user_message(self):
        """Print a summary of the evaluation configuration."""
        print("MultiQueryEvaluator initialized with the following configurations:")
        print(f"Number of queries: {len(self.queries)}")
        print(f"Runs per query: {self.runs_per_query}")
        print(f"Total evaluations: {len(self.queries) * self.runs_per_query}")
        print("Parallelized by query using concurrent.futures.ProcessPoolExecutor")

    def run_query(self, query):
        """Run QueryEvaluator for a given query"""
        response = QueryEvaluator(
            self.get_vectorstore, query, self.repo, self.runs_per_query,
        ).evaluate()
        return query, response

    def evaluate(self, max_workers=4):
        """Evaluate multiple queries in parallel"""
        # NOTE(review): ProcessPoolExecutor pickles self (including the
        # get_vectorstore callable) for each worker -- confirm everything
        # passed in is picklable.
        responses = {}
        with concurrent.futures.ProcessPoolExecutor(max_workers=max_workers) as executor:
            for query, response in executor.map(self.run_query, self.queries):
                responses[query] = response
        self.responses = responses

    def flatten_responses(self):
        """
        Convert responses into two dataframes:
            1. chainlogs_df: chainlogs for each query-response pair
            2. response_df: data from each query-response pair
        """
        chainlogs_dfs = []
        response_dfs = []
        for key, data in self.responses.items():
            for obj in data:
                df_chainlogs = pd.json_normalize(obj['chainlogs'])
                df_chainlogs['query'] = key
                chainlogs_dfs.append(df_chainlogs)
                df_response = pd.DataFrame([obj['response']])
                # Flatten the list of similar documents into one CSV string.
                df_response['similar_documents'] = df_response['similar_documents'].apply(lambda x: ','.join(x))
                df_response['query'] = key
                response_dfs.append(df_response)
        chainlogs_df = pd.concat(chainlogs_dfs, ignore_index=True)
        response_df = pd.concat(response_dfs, ignore_index=True)
        return chainlogs_df, response_df
# Prompt templates for the repo_chat chains. Each dict supplies the
# ``input_variables`` and ``template`` arguments for a chain's prompt.

# Rewrites a user query into five refined retrieval queries for a repo.
# NOTE(review): "guidlines" is a typo in the prompt text below; fixing it
# changes the text sent to the model, so it is only flagged here.
UPGRADE_QUERY = {
    "input_variables": ["query", "repo"],
    "template": """In the context of some github repository ({repo}), a user has submitted the following query:
{query}
Based on this query, documents will be retrieved from the repository to provide an answer. However, similarity between the query and the documents is not the only important factor - the retrieved documents must also be relevant and help answer the user's query effectively.
Revise and expand the query to improve the quality and relevance of the documents retrieved. Consider the user's intent, possible context, and the nature of the code repository. If appropriate, feel free to rephrase the query or break down the question into several related, more specific queries. Minimize Tokens. Give me 5 unique revised queries following these guidlines.
"""
}

# Answers a query using retrieved documents (retrieval-augmented generation).
RUN_QUERY_RAG = {
    "input_variables": ["query", "similar_documents", "repo"],
    "template": """You are an expert software engineering assistant. In the context of some github repository ({repo}), a user has submitted the following query:
{query}
The following documents have been retrieved from this repo because they contain information potentially relevant to the query:
{similar_documents}
Given your understanding of the query and the information contained within these documents, provide the most accurate and relevant response possible. You are a very knowledgeable expert on the topic so feel free to infer information that is not explicitly stated in the documents. Be super concise! Your response must be in .md format. Minimize Tokens by using paraphrasing.
"""
}

# Scores (0-100) how sufficient the retrieved documents are for the query.
CONTEXT_VALIDATOR = {
    "input_variables": ["query", "similar_documents", "repo"],
    "template": """You are an expert software engineering assistant. In the context of some github repository ({repo}), a user has submitted the following query:
{query}
The following documents have been retrieved from this repo because they contain information potentially relevant to the query:
{similar_documents}
How sufficient is the provided context for answering the user's query? Please respond with a number 0 (worst - the documents do not provide any information useful for answering the question) to 100 (best - the documents provide all information required for answering the question). Nothing else.
"""
}

# Scores (0-100) the quality of a generated response.
# NOTE(review): "resonse" and "users" are typos in the prompt text below;
# flagged only, since changing them changes the prompt.
CRITIC = {
    "input_variables": ["query", "response", "repo"],
    "template": """In the context of some github repository ({repo}), a user has submitted the following query:
{query}
The following response has been generated:
{response}
Does the resonse answer the users query about some specific code repository? Score this response on a scale from 0 (completely irrelevant or incorrect) to 100 (perfectly helpful based on the users query). Return the numeric score only with no words.
"""
}

# Answers a query given the raw repository contents inlined in the prompt.
RAW_CODE = {
    "input_variables": ["query", "repo_data"],
    "template": """Here is my code repository:
{repo_data}
Regarding the code above:
{query}
"""
}
import argparse
import os
from pathlib import Path
from .code_manager.code_manager import CodeManager
from .openai_service import OpenAIService
from .search_service import SearchService
from .test_generator import TestGenerator
CODE_EMBEDDING_FILE_PATH = str(Path.cwd() / ".repo_gpt" / "code_embeddings.pkl")
def main():
    """Entry point for the repo_gpt CLI.

    Parses the sub-command, wires up the services it needs, and dispatches.
    Returns the ``explain`` output string when that command is used; all
    other commands return None.
    """
    parser = argparse.ArgumentParser(description="Code extractor and searcher")
    subparsers = parser.add_subparsers(dest="command")

    def print_help(*args):
        parser.print_help()

    # Sub-command to run code extraction and processing
    parser_run = subparsers.add_parser(
        "setup", help="Run code extraction and processing"
    )
    parser_run.add_argument(
        "--root_path", type=str, help="Root path of the code", default=str(Path.cwd())
    )
    parser_run.add_argument(
        "--output_path",
        type=str,
        help="Output path for the pickled DataFrame",
        default=CODE_EMBEDDING_FILE_PATH,
    )

    # Sub-command to search in the pickled DataFrame
    parser_search = subparsers.add_parser(
        "search", help="Search in the pickled DataFrame"
    )
    parser_search.add_argument("query", type=str, help="Query string to search for")
    parser_search.add_argument(
        "--pickle_path",
        type=str,
        help="Path of the pickled DataFrame to search in",
        default=CODE_EMBEDDING_FILE_PATH,
    )

    # Sub-command to ask a question to the model
    parser_query = subparsers.add_parser(
        "query", help="Ask a question about the code to the model"
    )
    parser_query.add_argument("question", type=str, help="Question to ask")
    parser_query.add_argument(
        "--pickle_path",
        type=str,
        help="Path of the pickled DataFrame to search in",
        default=CODE_EMBEDDING_FILE_PATH,
    )

    # Sub-command to analyze a file
    analyze_file = subparsers.add_parser("analyze", help="Analyze a file")
    analyze_file.add_argument("file_path", type=str, help="File to analyze")
    analyze_file.add_argument(
        "--pickle_path",
        type=str,
        help="Path of the pickled DataFrame to search in",
        default=CODE_EMBEDDING_FILE_PATH,
    )

    # Sub-command to explain a code snippet
    explain_code = subparsers.add_parser("explain", help="Explain a code snippet")
    explain_code.add_argument(
        "--language", default="", type=str, help="Language of the code"
    )
    explain_code.add_argument("--code", type=str, help="Code you want to explain")

    # Sub-command to generate tests for an existing function
    add_test = subparsers.add_parser("add-test", help="Add tests for existing function")
    add_test.add_argument(
        "function_name", type=str, help="Name of the function you'd like to test"
    )
    add_test.add_argument(
        "--file_name",
        type=str,
        help="Name of the file the function is found in. This is helpful if there are many functions with the same "
        "name. If this isn't specified, I assume the function name is unique and I'll create tests for the first "
        "matching function I find. When a file_name is passed, I will assume the function name is unique in the "
        "file, and write tests for the first function I find with the same name in the file.",
        default="",
    )
    add_test.add_argument(
        "--test_save_file_path",
        type=str,
        help="Filepath to save the generated tests to",
    )
    add_test.add_argument(
        "--testing_package",
        type=str,
        help="Package/library GPT should use to write tests (e.g. pytest, unittest, etc.)",
    )
    add_test.add_argument(
        "--pickle_path",
        type=str,
        help="Path of the pickled DataFrame to search in",
        default=CODE_EMBEDDING_FILE_PATH,
    )

    parser_help = subparsers.add_parser("help", help="Show this help message")
    parser_help.set_defaults(func=print_help)

    args = parser.parse_args()

    # Services. Only the sub-commands listed below define --pickle_path;
    # constructing the SearchService for any other command ("help", or no
    # command at all) previously raised AttributeError on args.pickle_path.
    openai_service = OpenAIService()
    commands_needing_search = {"search", "query", "analyze", "add-test"}
    search_service = (
        SearchService(openai_service, args.pickle_path)
        if args.command in commands_needing_search
        else None
    )

    if args.command == "setup":
        root_path = Path(args.root_path)
        output_path = Path(args.output_path)

        manager = CodeManager(output_path, root_path)
        manager.setup()
    elif args.command == "search":
        # search_service.simple_search(args.query)  # simple search
        search_service.semantic_search(args.query)  # semantic search
    elif args.command == "query":
        search_service.question_answer(args.question)
    elif args.command == "analyze":
        search_service.analyze_file(args.file_path)
    elif args.command == "explain":
        # "explain" needs no embeddings, so it builds its own SearchService.
        search_service = SearchService(openai_service, language=args.language)
        return search_service.explain(args.code)
    elif args.command == "add-test":
        code_manager = CodeManager(args.pickle_path)
        # Look for the function name in the embedding file
        add_tests(
            search_service,
            code_manager,
            args.function_name,
            args.test_save_file_path,
            args.testing_package,
        )
    else:
        parser.print_help()
def add_tests(
    search_service,
    code_manager,
    function_name,
    test_save_file_path,
    testing_package,
):
    """Generate unit tests for *function_name* and append them to a file.

    :param search_service: SearchService used to locate the function.
    :param code_manager: CodeManager used to re-parse the file and refresh
        its embeddings before generating tests.
    :param function_name: Name of the function to generate tests for.
    :param test_save_file_path: File the generated tests are appended to.
        May be None, since the --test_save_file_path flag is optional.
    :param testing_package: Testing framework GPT should use (e.g. pytest).
    """
    # Check the save path isn't a directory. Guard against None first:
    # the CLI flag is optional and os.path.isdir(None) raises TypeError.
    if test_save_file_path is not None and os.path.isdir(test_save_file_path):
        print(
            f"Error: {test_save_file_path} is a directory. Please specify a file path."
        )
        return

    # Find the function via the search service
    function_to_test_df, class_to_test_df = search_service.find_function_match(
        function_name
    )

    if function_to_test_df.empty:
        print(f"Function {function_name} not found.")
        return

    # Re-parse the file containing the function so the embeddings reflect the
    # latest version of the code, then refresh the search index.
    checksum_filepath_dict = {
        function_to_test_df.iloc[0]["file_checksum"]: function_to_test_df.iloc[0][
            "filepath"
        ]
    }
    code_manager.parse_code_and_save_embeddings(checksum_filepath_dict)
    search_service.refresh_df()

    # Find the function again after refreshing the code & embeddings
    function_to_test_df, class_to_test_df = search_service.find_function_match(
        function_name
    )
    if function_to_test_df.empty:
        print(f"Function {function_name} not found.")
        return

    # Ask GPT to generate tests for the (up-to-date) function body.
    test_generator = TestGenerator(
        function_to_test_df.iloc[0]["code"],
        language="python",  # TODO: take language & test framework from a config file
        unit_test_package=testing_package,
        debug=True,
    )
    unit_tests = test_generator.unit_tests_from_function()

    print(f"Writing generated unit_tests to {test_save_file_path}...")
    # Append the generated code to the requested file, if one was given.
    if test_save_file_path is not None:
        with open(test_save_file_path, "a") as f:
            f.write(unit_tests)
if __name__ == "__main__":
    result = main()
    # Only "explain" returns a value; compare with `is not None` (PEP 8),
    # not `!= None`.
    if result is not None:
        print(result)
import os
import json
import subprocess
import re
from contextlib import contextmanager
from pathlib import Path
from typing import Iterator, Optional, Union, NamedTuple, Sequence
from git import Repo, GitCommandError, Commit
# Anything accepted where a filesystem path is expected.
PathType = Union[os.PathLike, str]


class RepoData(NamedTuple):
    """Components of a GitHub blob link."""

    user: str
    repository: str
    path: str
    commit: str
    line: Optional[str]


def parse(link: str) -> RepoData:
    """Given a github link returns the file path, git commit and line the link refers to.

    :param link: A ``https://github.com/<user>/<repo>/blob/<commit>/<path>[#L<line>]`` URL.
    :raises ValueError: if *link* does not match that shape.
    """
    # https://regex101.com/r/jBJ7PI/4
    link_pattern = r"https://github\.com/(?P<user>.+)/(?P<repository>[^/]+)/blob/(?P<commit>[^/]+)/(?P<path>[^#]+)(?:#L(?P<line>\d+))?"
    match = re.match(link_pattern, link)
    if match:
        # The named groups map 1:1 onto the RepoData fields.
        return RepoData(**match.groupdict())
    raise ValueError("Invalid link")
def checkout(commit: str):
    """Checks out the given github pathspec (commit/branch) in the local repository"""
    repo = Repo(".")
    # Already on the requested commit: nothing to do.
    if repo.head.commit == repo.commit(commit):
        return
    if repo.is_dirty():
        # Stash local changes first so the checkout cannot clobber them.
        print("Can't make checkout to linked commit, repository is dirty")
        print("Stashing...")
        repo.git.stash()
    repo.git.checkout(commit)
def clone(repo_link: str):
    """Clones the given repo in parent_dir. Raises FileAlreadyExists if the repo exists"""
    print(f"Repository does not exist, cloning into {Path.cwd()}")
    # Strip slashes so git derives the clone directory name cleanly.
    Repo().git.clone(repo_link.strip("/"))
def open_in_editor(path: PathType, editor: str, line: Optional[str] = None):
    """Open *path* in *editor*, jumping to *line* when the editor supports it.

    Known editors (vim, code, pycharm) get their goto-line syntax; any other
    editor falls back to opening the file without a line number (previously an
    unknown editor combined with a line raised KeyError).
    """
    if line:
        line_commands = {
            "vim": f"{editor} +{line} {path}",
            "code": f"{editor} -g {path}:{line}",
            "pycharm": f"{editor} {path}:{line}",
        }
        command = line_commands.get(editor, f"{editor} {path}")
    else:
        command = f"{editor} {path}"
    subprocess.run(command.split())
@contextmanager
def cd(path: PathType) -> Iterator[None]:
    """Temporarily change the working directory to *path*.

    The previous working directory is restored even if the body raises
    (the original version skipped the restore on an exception).
    """
    old_path = Path.cwd()
    os.chdir(path)
    try:
        yield
    finally:
        # Always restore, so an exception in the body can't leave the
        # process stranded in *path*.
        os.chdir(old_path)
PARENT_DIRS = [Path.home(), Path.home() / "Forks"]
def open_file(repo: PathType, file: str, commit: str, line: Optional[str], editor: str):
    """Check out *commit* inside *repo*, then open *file* at *line* in *editor*."""
    with cd(repo):
        checkout(commit)
        open_in_editor(path=file, line=line, editor=editor)
def open_link(link: str, editor: str, parents: Sequence[Path]):
    """Open the file referenced by a GitHub blob *link* in *editor*.

    Searches *parents* for an existing clone of the repository; if none is
    found, clones the repo into the first parent directory and opens it there.
    """
    data = parse(link)
    # Use the first parent directory that already contains a clone.
    for parent in (parent for parent in parents if (parent / data.repository).exists()):
        open_file(
            repo=parent / data.repository,
            file=data.path,
            commit=data.commit,
            line=data.line,
            editor=editor,
        )
        return
    # No clone found anywhere: clone (link minus the /blob/... suffix) into
    # the first parent, then open the file from inside it.
    with cd(parents[0]):
        clone(link.partition("/blob")[0])
        open_file(
            repo=data.repository,
            commit=data.commit,
            line=data.line,
            file=data.path,
            editor=editor,
        )
def main():
    """CLI entry point: open a GitHub link in a local editor."""
    import argparse

    parser = argparse.ArgumentParser(description="Open github link in editor")
    parser.add_argument(dest="link", type=str, help="The opened link")
    parser.add_argument(
        "--parents",
        type=str,
        nargs="+",
        help="Directories where the repository will be searched. if not found it will be cloned into the first one",
    )
    parser.add_argument(
        "--editor",
        dest="editor",
        default=None,
        help="The editor opened (default: EDITOR)",
    )
    parser.add_argument(
        "--config",
        help="A json file where command line options can be hard-coded, default:~/.repo_link_config.json",
        default="~/.repo_link_config.json",
        dest="config",
    )
    args = parser.parse_args()
    config_path = Path(args.config).expanduser()
    if config_path.exists():
        # NOTE(review): when a config file exists it fully determines editor
        # and parents -- the --editor/--parents CLI flags are ignored. Confirm
        # this precedence is intended.
        with open(config_path) as fp:
            config = json.load(fp)
        config["parents"] = [Path(parent).expanduser() for parent in config["parents"]]
        open_link(link=args.link, **config)
        return
    # No config file: fall back to CLI flags, then the EDITOR environment
    # variable and the default parent directories.
    open_link(
        args.link,
        editor=args.editor or os.environ["EDITOR"],
        parents=[Path(parent) for parent in args.parents or []] or PARENT_DIRS,
    )
# Allow running this module directly as a script.
if __name__ == "__main__":
    main()
import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
class Gaussian(Distribution):
    """ Gaussian distribution class for calculating and
    visualizing a Gaussian distribution.

    Attributes:
        mean (float) representing the mean value of the distribution
        stdev (float) representing the standard deviation of the distribution
        data_list (list of floats) a list of floats extracted from the data file
    """

    def __init__(self, mu=0, sigma=1):
        Distribution.__init__(self, mu, sigma)

    def calculate_mean(self):
        """Function to calculate the mean of the data set.

        Args:
            None

        Returns:
            float: mean of the data set
        """
        self.mean = sum(self.data) / len(self.data)
        return self.mean

    def calculate_stdev(self, sample=True):
        """Function to calculate the standard deviation of the data set.

        Args:
            sample (bool): True for the sample statistic (n - 1 denominator),
                False for the population statistic.

        Returns:
            float: standard deviation of the data set
        """
        n = len(self.data) - 1 if sample else len(self.data)
        mean = self.calculate_mean()
        # Sum of squared deviations divided by n (Bessel-corrected when
        # sample=True), then the square root gives the standard deviation.
        self.stdev = math.sqrt(sum((d - mean) ** 2 for d in self.data) / n)
        return self.stdev

    def plot_histogram(self):
        """Function to output a histogram of the instance variable data using
        matplotlib pyplot library.

        Args:
            None

        Returns:
            None
        """
        plt.hist(self.data)
        plt.title('Histogram of Data')
        plt.xlabel('data')
        plt.ylabel('count')

    def pdf(self, x):
        """Probability density function calculator for the gaussian distribution.

        Args:
            x (float): point for calculating the probability density function

        Returns:
            float: probability density function output
        """
        return (1.0 / (self.stdev * math.sqrt(2 * math.pi))) * math.exp(
            -0.5 * ((x - self.mean) / self.stdev) ** 2
        )

    def plot_histogram_pdf(self, n_spaces=50):
        """Function to plot the normalized histogram of the data and a plot of the
        probability density function along the same range

        Args:
            n_spaces (int): number of data points

        Returns:
            list: x values for the pdf plot
            list: y values for the pdf plot
        """
        min_range = min(self.data)
        max_range = max(self.data)

        # Evaluate the pdf at n_spaces evenly spaced points across the data range.
        interval = (max_range - min_range) / n_spaces
        x = [min_range + interval * i for i in range(n_spaces)]
        y = [self.pdf(value) for value in x]

        # make the plots
        fig, axes = plt.subplots(2, sharex=True)
        fig.subplots_adjust(hspace=.5)
        axes[0].hist(self.data, density=True)
        axes[0].set_title('Normed Histogram of Data')
        axes[0].set_ylabel('Density')
        axes[1].plot(x, y)
        axes[1].set_title('Normal Distribution for \n Sample Mean and Sample Standard Deviation')
        # Bug fix: this previously re-labelled axes[0] instead of axes[1].
        axes[1].set_ylabel('Density')
        plt.show()

        return x, y

    def __add__(self, other):
        """Function to add together two Gaussian distributions:
        means add, and variances (not stdevs) add.

        Args:
            other (Gaussian): Gaussian instance

        Returns:
            Gaussian: Gaussian distribution
        """
        result = Gaussian()
        result.mean = self.mean + other.mean
        result.stdev = math.sqrt(self.stdev ** 2 + other.stdev ** 2)
        return result

    def __repr__(self):
        """Function to output the characteristics of the Gaussian instance

        Args:
            None

        Returns:
            string: characteristics of the Gaussian
        """
        return "mean {}, standard deviation {}".format(self.mean, self.stdev)
from __future__ import annotations
import argparse
import shutil
from pathlib import Path
import nox
# Absolute path to the repository root (the directory containing this noxfile).
DIR = Path(__file__).parent.resolve()

# Sessions run by default when `nox` is invoked with no -s argument.
nox.options.sessions = ["lint", "pylint", "tests"]
@nox.session(reuse_venv=True)
def run(session: nox.Session) -> None:
    """
    Run the program with a few example checks.
    """
    # Editable install of the package (CLI extras) and the test helper package.
    session.install("-e", ".[cli]")
    session.install("-e", "tests/test_utilities")
    session.run("python", "-m", "repo_review", *session.posargs)
@nox.session
def lint(session: nox.Session) -> None:
    """
    Run the linter.
    """
    # pre-commit drives every configured hook over the whole tree.
    session.install("pre-commit")
    session.run("pre-commit", "run", "--all-files", *session.posargs)
@nox.session
def pylint(session: nox.Session) -> None:
    """
    Run PyLint over the ``src`` tree.
    """
    # This needs to be installed into the package environment, and is slower
    # than a pre-commit check
    session.install("-e.[cli]", "pylint")
    session.run("pylint", "src", *session.posargs)
@nox.session
def tests(session: nox.Session) -> None:
    """
    Run the unit and regular tests.
    """
    # Editable install with the test and CLI extras.
    session.install("-e.[test,cli]")
    session.run("pytest", *session.posargs)
@nox.session(reuse_venv=True)
def build(session: nox.Session) -> None:
    """
    Build an SDist and wheel.
    """
    build_p = DIR.joinpath("build")
    # Remove any stale build directory so old artifacts can't leak in.
    if build_p.exists():
        shutil.rmtree(build_p)

    session.install("build")
    session.run("python", "-m", "build")
@nox.session(venv_backend="none")
def serve(session: nox.Session) -> None:
    """
    Serve the webapp.
    """
    # venv_backend="none": runs with the interpreter that invoked nox.
    session.cd("docs")
    session.log("Serving on http://localhost:8080")
    session.run("python3", "-m", "http.server", "8080")
@nox.session(reuse_venv=True)
def docs(session: nox.Session) -> None:
    """
    Build the docs. Pass "--serve" to serve.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--serve", action="store_true", help="Serve after building")
    parser.add_argument(
        "-b", dest="builder", default="html", help="Build target (default: html)"
    )
    args, posargs = parser.parse_known_args(session.posargs)

    # Serving only makes sense for HTML output.
    if args.builder != "html" and args.serve:
        session.error("Must not specify non-HTML builder with --serve")

    extra_installs = ["sphinx-autobuild"] if args.serve else []
    session.install("-e.[docs]", *extra_installs)
    session.chdir("docs")

    # linkcheck is a special one-shot builder with its own invocation.
    if args.builder == "linkcheck":
        session.run(
            "sphinx-build", "-b", "linkcheck", ".", "_build/linkcheck", *posargs
        )
        return

    shared_args = (
        "-n",  # nitpicky mode
        "-T",  # full tracebacks
        f"-b={args.builder}",
        ".",
        f"_build/{args.builder}",
        *posargs,
    )

    # --serve rebuilds on change; otherwise do a one-shot build that reports
    # all errors before exiting (--keep-going).
    if args.serve:
        session.run("sphinx-autobuild", *shared_args)
    else:
        session.run("sphinx-build", "--keep-going", *shared_args)
@nox.session
def build_api_docs(session: nox.Session) -> None:
    """
    Build (regenerate) API docs.
    """
    # Regenerate the docs/api/ stub pages from the package sources.
    session.install("sphinx")
    session.chdir("docs")
    session.run(
        "sphinx-apidoc",
        "-o",
        "api/",
        "--module-first",
        "--no-toc",
        "--force",
        "../src/repo_review",
    )
from __future__ import annotations
import importlib.metadata
from collections.abc import Mapping, Set
from typing import Any, Protocol
from .fixtures import apply_fixtures
# NOTE(review): get_check_description is public but absent from __all__ --
# confirm whether it should be exported as well.
__all__ = ["Check", "collect_checks", "is_allowed", "get_check_url"]


def __dir__() -> list[str]:
    # Limit dir() completion to the declared public API.
    return __all__
class Check(Protocol):
    """
    This is the check Protocol. Since Python doesn't support optional Protocol
    members, the two optional members are required if you want to use this
    Protocol in a type checker. The members can be specified as class
    properties if you want.
    """

    # The property bodies below are docstring-only: Protocol members carry no
    # implementation.
    @property
    def family(self) -> str:
        """
        The family is a string that the checks will be grouped by.
        """

    @property
    def requires(self) -> Set[str]:  # Optional
        """
        Requires is an (optional) set of checks that must pass for this check
        to run. Omitting this is like returning `set()`.
        """

    @property
    def url(self) -> str:  # Optional
        """
        This is an (optional) URL to link to for this check. An empty string is
        identical to omitting this member.
        """

    def check(self) -> bool | None | str:
        """
        This is a check. The docstring is used as the failure message if
        `False` is returned. Returning None is a skip. Returning `True` (or an
        empty string) is a pass. Can be a :func:`classmethod` or
        :func:`staticmethod`. Can take fixtures.
        """
        ...
def collect_checks(fixtures: Mapping[str, Any]) -> dict[str, Check]:
    """
    Produces a list of checks based on installed entry points. You must provide
    the evaluated fixtures so that the check functions have access to the
    fixtures when they are running.

    :param fixtures: Fully evaluated dict of fixtures.
    """
    checks: dict[str, Check] = {}
    for ep in importlib.metadata.entry_points(group="repo_review.checks"):
        # Each entry point is a callable that may itself request fixtures and
        # returns a mapping of check name -> check instance; later entry
        # points override earlier ones on name collisions, as before.
        checks.update(apply_fixtures(fixtures, ep.load()))
    return checks
def is_allowed(select: Set[str], ignore: Set[str], name: str) -> bool:
    """
    Decide whether a check should run. A check is selected when *select* is
    empty, contains ``"*"``, or contains the check's name or its prefix (the
    name with trailing digits stripped); it is then rejected if its name or
    prefix appears in *ignore*.

    :param select: A set of names or prefixes to include. "*" selects all checks.
    :param ignore: A set of names or prefixes to exclude.
    :param name: The check to test.
    :return: True if this check is allowed, False otherwise.
    """
    stem = name.rstrip("0123456789")
    selected = not select or "*" in select or name in select or stem in select
    ignored = name in ignore or stem in ignore
    return selected and not ignored
def get_check_url(name: str, check: Check) -> str:
    """
    Return the check's URL, or an empty string when the check defines none.
    The URL is processed with ``str.format`` (``self`` and ``name`` available).

    :param name: The name of the check (letters and number)
    :param check: The check to process.
    :return: The final URL.

    .. versionadded:: 0.8
    """
    raw_url = getattr(check, "url", "")
    return raw_url.format(self=check, name=name)
def get_check_description(name: str, check: Check) -> str:
    """
    Return the check's docstring, or an empty string when it has none.
    The docstring is processed with ``str.format`` (``self`` and ``name``
    available).

    :param name: The name of the check (letters and number)
    :param check: The check to process.
    :return: The final doc.

    .. versionadded:: 0.8
    """
    doc = check.__doc__ or ""
    return doc.format(self=check, name=name)
from __future__ import annotations
import dataclasses
import io
import json
import typing
from collections.abc import Iterator
from typing import Literal
from ._compat.importlib.resources.abc import Traversable
__all__ = ["GHPath", "EmptyTraversable"]


def __dir__() -> list[str]:
    # Limit dir() completion to the declared public API.
    return __all__
@dataclasses.dataclass(frozen=True, kw_only=True)
class GHPath(Traversable):
    """
    This is a Traversable that can be used to navigate a GitHub repo without
    downloading it.

    :param repo: The repo name, in "org/repo" style.
    :param branch: The branch name. Required, even if using the default branch.
    :param path: A sub-path inside the repo. Defaults to the repo root.
    :param _info: Some internal info stored to keep accesses fast.
    """

    #: The repository name, in `"org/repo"` style.
    repo: str

    #: The branch name. Required, even if using the default branch.
    branch: str

    #: A path inside the repo
    path: str = ""

    # Cached GitHub tree listing; excluded from hashing and repr, and shared
    # unchanged with every path derived via joinpath()/__truediv__.
    _info: list[dict[str, str]] = dataclasses.field(
        hash=False, default_factory=list, repr=False
    )

    @staticmethod
    def open_url(url: str) -> io.StringIO:
        "This method can be overridden for pyodide with pyodide.open_url"
        import urllib.request  # pylint: disable=import-outside-toplevel

        with urllib.request.urlopen(url) as response:
            return io.StringIO(response.read().decode("utf-8"))

    def __post_init__(self) -> None:
        # Fetch the full recursive tree once; derived paths reuse it.
        if not self._info:
            url = f"https://api.github.com/repos/{self.repo}/git/trees/{self.branch}?recursive=1"
            val: io.StringIO = self.open_url(url)
            vals = json.load(val)
            try:
                # object.__setattr__ is required because the dataclass is frozen.
                object.__setattr__(self, "_info", vals["tree"])
            except KeyError:
                print("Failed to find tree. Result:")  # noqa: T201
                print(vals)  # noqa: T201
                raise

    def __str__(self) -> str:
        return f"gh:{self.repo}@{self.branch}:{self.path or '.'}"

    @property
    def name(self) -> str:
        """
        The final element of the path or the repo name.
        """
        return (self.path or self.repo).split("/")[-1]

    @typing.overload  # type: ignore[override]
    def open(self, mode: Literal["r"], encoding: str | None = ...) -> io.StringIO:
        ...

    @typing.overload
    def open(self, mode: Literal["rb"]) -> io.BytesIO:
        ...

    def open(
        self, mode: Literal["r", "rb"] = "r", encoding: str | None = "utf-8"
    ) -> io.IOBase:
        """
        Open the repo. This doesn't support the full collection of options,
        only utf-8 and binary.

        :param mode: The mode, only ``"r"`` or ``"rb"`` supported.
        :param encoding: The encoding, only ``"utf-8"`` or ``None`` supported.
        """
        assert encoding is None or encoding == "utf-8", "Only utf-8 is supported"
        # Every call fetches the raw file contents over HTTP.
        val: io.StringIO = self.open_url(
            f"https://raw.githubusercontent.com/{self.repo}/{self.branch}/{self.path}"
        )
        if "b" in mode:
            return io.BytesIO(val.read().encode("utf-8"))
        return val

    def _with_path(self, path: str) -> GHPath:
        # Derive another path in the same repo, reusing the cached tree.
        return GHPath(
            repo=self.repo, branch=self.branch, path=path.lstrip("/"), _info=self._info
        )

    def joinpath(self, child: str) -> GHPath:
        return self._with_path(f"{self.path}/{child}")

    def __truediv__(self, child: str) -> GHPath:
        return self._with_path(f"{self.path}/{child}")

    def iterdir(self) -> Iterator[GHPath]:
        # NOTE(review): the prefix match below yields every entry whose path
        # starts with self.path -- including nested descendants, not only
        # direct children. Confirm this is intended.
        if self.path:
            yield from (
                self._with_path(d["path"])
                for d in self._info
                if d["path"].startswith(self.path)
            )
        else:
            yield from (
                self._with_path(d["path"]) for d in self._info if "/" not in d["path"]
            )

    def is_dir(self) -> bool:
        return self.path in {d["path"] for d in self._info if d["type"] == "tree"}

    def is_file(self) -> bool:
        return self.path in {d["path"] for d in self._info if d["type"] == "blob"}

    def read_text(self, encoding: str | None = "utf-8") -> str:
        return self.open("r", encoding=encoding).read()

    def read_bytes(self) -> bytes:
        return self.open("rb").read()
@dataclasses.dataclass(frozen=True, kw_only=True)
class EmptyTraversable(Traversable):
    """
    This is a Traversable representing an empty directory or a non-existent
    file.

    :param is_a_dir: True to treat this like an empty dir.
    :param _fake_name: A customisable fake name.
    """

    #: True if this is supposed to be a directory
    is_a_dir: bool = True

    #: Customizable fake name
    _fake_name: str = "not-a-real-path"

    def __str__(self) -> str:
        return self._fake_name

    @property
    def name(self) -> str:
        """
        Return a dummy name.
        """
        return self._fake_name

    @typing.overload  # type: ignore[override]
    def open(self, mode: Literal["r"], encoding: str | None = ...) -> io.StringIO:
        ...

    @typing.overload
    def open(self, mode: Literal["rb"]) -> io.BytesIO:
        ...

    def open(
        self, mode: Literal["r", "rb"] = "r", encoding: str | None = "utf-8"
    ) -> io.IOBase:
        # A non-existent file can never be opened.
        raise FileNotFoundError(self._fake_name)

    def joinpath(self, child: str) -> EmptyTraversable:
        # Any child of an empty/missing node is a missing file, not a dir.
        return self.__class__(is_a_dir=False)

    def __truediv__(self, child: str) -> EmptyTraversable:
        return self.__class__(is_a_dir=False)

    def iterdir(self) -> Iterator[EmptyTraversable]:
        # An empty directory yields nothing.
        yield from ()

    def is_dir(self) -> bool:
        return self.is_a_dir

    def is_file(self) -> bool:
        return False

    def read_text(self, encoding: str | None = "utf-8") -> str:
        raise FileNotFoundError(self._fake_name)

    def read_bytes(self) -> bytes:
        raise FileNotFoundError(self._fake_name)
from __future__ import annotations
import graphlib
import importlib.metadata
import inspect
import typing
from collections.abc import Callable, Mapping, Set
from typing import Any
from ._compat import tomllib
from ._compat.importlib.resources.abc import Traversable
from .ghpath import EmptyTraversable
__all__ = [
    "pyproject",
    "list_all",
    "compute_fixtures",
    "apply_fixtures",
    "collect_fixtures",
]


def __dir__() -> list[str]:
    # Limit dir() completion to the declared public API.
    return __all__
def pyproject(package: Traversable) -> dict[str, Any]:
"""
Fixture: The ``pyproject.toml`` structure from the package. Returned an
empty dict if no pyproject.toml found.
:param package: The package fixture.
:return: The pyproject.toml dict or an empty dict if no file found.
"""
pyproject_path = package.joinpath("pyproject.toml")
if pyproject_path.is_file():
with pyproject_path.open("rb") as f:
return tomllib.load(f)
return {}
def list_all(root: Traversable) -> bool:
    """
    Fixture: Is True when this is trying to produce a list of all checks.

    :param root: The root fixture.
    :return: True only if trying to make a list of all checks/fixtures/families.

    .. versionadded:: 0.8
    """
    # An EmptyTraversable root is the sentinel used when no real repository is
    # being inspected, i.e. when enumerating every available check.
    return isinstance(root, EmptyTraversable)
def compute_fixtures(
    root: Traversable,
    package: Traversable,
    unevaluated_fixtures: Mapping[str, Callable[..., Any]],
) -> dict[str, Any]:
    """
    Given the repo ``root`` Traversable, the ``package`` Traversable, and the dict
    of all fixture callables, compute the dict of fixture results.

    Fixtures may request other fixtures by parameter name; they are evaluated
    in topological order so every dependency is computed before its dependents.

    :param root: The root of the repository
    :param package: The path to the package (``root / subdir``)
    :param unevaluated_fixtures: The unevaluated mapping of fixture names to
        callables.
    :return: The fully evaluated dict of fixtures.
    """
    # "root" and "package" are pre-computed and have no dependencies.
    fixtures: dict[str, Any] = {"root": root, "package": package}
    graph: dict[str, Set[str]] = {"root": set(), "package": set()}
    # Each fixture depends on exactly the fixtures named in its signature.
    graph |= {
        name: inspect.signature(fix).parameters.keys()
        for name, fix in unevaluated_fixtures.items()
    }
    ts = graphlib.TopologicalSorter(graph)
    for fixture_name in ts.static_order():
        if fixture_name in {"package", "root"}:
            continue  # already present in `fixtures`
        func = unevaluated_fixtures[fixture_name]
        # Every dependency is guaranteed computed by the topological order.
        kwargs = {name: fixtures[name] for name in inspect.signature(func).parameters}
        fixtures[fixture_name] = func(**kwargs)
    return fixtures
T = typing.TypeVar("T")


def apply_fixtures(fixtures: Mapping[str, Any], func: Callable[..., T]) -> T:
    """
    Given the pre-computed dict of fixtures and a function, fill in any
    fixtures from that dict that it requests and return the result.

    :param fixtures: Fully evaluated dict of fixtures.
    :param func: Some callable that can take fixtures.
    """
    wanted = inspect.signature(func).parameters
    # Pass only the fixtures the callable actually asks for by name.
    return func(**{name: fixtures[name] for name in fixtures if name in wanted})
def collect_fixtures() -> dict[str, Callable[[Traversable], Any]]:
"""
Produces a dict of fixture callables based on installed entry points. You
should call :func:`compute_fixtures` on the result to get the standard dict of
fixture results that most other functions in repo-review expect.
:return: A dict of unevaluated fixtures.
"""
return {
ep.name: ep.load()
for ep in importlib.metadata.entry_points(group="repo_review.fixtures")
} | /repo_review-0.10.0.tar.gz/repo_review-0.10.0/src/repo_review/fixtures.py | 0.882504 | 0.349061 | fixtures.py | pypi |
from __future__ import annotations
import dataclasses
import graphlib
import textwrap
import typing
from collections.abc import Mapping, Set
from typing import Any, TypeVar
import markdown_it
from ._compat.importlib.resources.abc import Traversable
from .checks import Check, collect_checks, get_check_url, is_allowed
from .families import Family, collect_families
from .fixtures import apply_fixtures, collect_fixtures, compute_fixtures, pyproject
from .ghpath import EmptyTraversable
# Public API of this module; mirrored by __dir__() below.
__all__ = [
    "CollectionReturn",
    "ProcessReturn",
    "Result",
    "ResultDict",
    "as_simple_dict",
    "collect_all",
    "process",
    "md_as_html",
]


def __dir__() -> list[str]:
    """Limit ``dir()``/tab-completion to the declared public API."""
    return __all__
# Single module-level Markdown renderer, reused across calls.
md = markdown_it.MarkdownIt()


def md_as_html(md_text: str) -> str:
    """
    Helper function that converts markdown text to HTML. Strips paragraph tags
    from the result.

    :param md_text: The markdown text to convert.
    """
    result: str = md.render(md_text).strip()
    # A single paragraph renders as "<p>...</p>"; unwrap it for inline use.
    return result.removeprefix("<p>").removesuffix("</p>").strip()
class ResultDict(typing.TypedDict):
    """
    Helper to get the type in the JSON style returns. Basically identical to
    :class:`Result` but in dict form and without the name.
    """

    family: str  #: The family string
    description: str  #: The short description of what the check looks for
    result: bool | None  #: The result, None means skip
    err_msg: str  #: The error message if the result is false, in markdown format
    url: str  #: An optional URL (empty string if missing)
@dataclasses.dataclass(frozen=True, kw_only=True)
class Result:
    """
    This is the returned value from a processed check.
    """

    family: str  #: The family string
    name: str  #: The name of the check
    description: str  #: The short description of what the check looks for
    result: bool | None  #: The result, None means skip
    err_msg: str = ""  #: The error message if the result is false, in markdown format
    url: str = ""  #: An optional URL (empty string if missing)

    def err_as_html(self) -> str:
        """
        Produces HTML from the error message, assuming it is in markdown.
        """
        return md_as_html(self.err_msg)
class ProcessReturn(typing.NamedTuple):
    """
    Return type for :func:`process`.
    """

    families: dict[
        str, Family
    ]  #: A mapping of family strings to :class:`.Family` info dicts
    results: list[Result]  #: The results list
class CollectionReturn(typing.NamedTuple):
    """
    Return type for :func:`collect_all`.

    .. versionadded:: 0.8
    """

    fixtures: dict[str, Any]  #: The computed fixtures, as a :class:`dict`
    checks: dict[str, Check]  #: The checks dict, sorted by :class:`.Family`.
    families: dict[
        str, Family
    ]  #: A mapping of family strings to :class:`.Family` info dicts
class HasFamily(typing.Protocol):
"""
Simple :class:`~typing.Protocol` to see if family property is present.
"""
@property
def family(self) -> str:
...
T = TypeVar("T", bound=HasFamily)
def _sort_by_family(
families: Mapping[str, Family], dict_has_family: Mapping[str, T]
) -> dict[str, T]:
return dict(
sorted(
dict_has_family.items(),
key=lambda x: (families[x[1].family].get("order", 0), x[1].family, x[0]),
)
)
def collect_all(
    root: Traversable = EmptyTraversable(),  # noqa: B008 (frozen dataclass OK)
    subdir: str = "",
) -> CollectionReturn:
    """
    Collect all checks. If ``root`` is not passed, then checks are collected
    with a :class:`~repo_review.ghpath.EmptyTraversable`. Any checks that are
    returned conditionally based on fixture results might not be collected
    unless :func:`~repo_review.fixtures.list_all` is used.

    :param root: If passed, this is the root of the repo (for fixture computation).
    :param subdir: The subdirectory (for fixture computation).
    :return: The collected fixtures, checks, and families. Families is
        guaranteed to include all families and be in order.

    .. versionadded:: 0.8
    """
    package = root.joinpath(subdir) if subdir else root

    # Evaluate every registered fixture once, up front.
    fixtures = compute_fixtures(root, package, collect_fixtures())

    # Gather checks and family metadata (both may consult the fixtures).
    checks = collect_checks(fixtures)
    families = collect_families(fixtures)

    # Family entries are optional; backfill an empty one for every family
    # referenced by a check so later lookups never fail.
    for family_name in {check.family for check in checks.values()} - families.keys():
        families[family_name] = Family()

    return CollectionReturn(fixtures, _sort_by_family(families, checks), families)
def process(
    root: Traversable,
    *,
    select: Set[str] = frozenset(),
    ignore: Set[str] = frozenset(),
    subdir: str = "",
) -> ProcessReturn:
    """
    Process the package and return a dictionary of results.

    :param root: The Traversable to the repository to process.
    :param select: A list of checks to select. All checks selected if empty.
    :param ignore: A list of checks to ignore.
    :param subdir: The path to the package in the subdirectory, if not at the
        root of the repository.
    :return: The families and a list of checks. Families is guaranteed to
        include all families and be in order.
    """
    package = root.joinpath(subdir) if subdir else root
    fixtures, tasks, families = collect_all(root, subdir)
    # Collect our own config
    config = pyproject(package).get("tool", {}).get("repo-review", {})
    # Explicit arguments take precedence over the pyproject.toml configuration.
    select_checks = select if select else set(config.get("select", ()))
    skip_checks = ignore if ignore else set(config.get("ignore", ()))
    # Make a graph of the check's interdependencies
    graph: dict[str, set[str]] = {
        n: getattr(t, "requires", set()) for n, t in tasks.items()
    }
    # Keep track of which checks have been completed.  Values are:
    #   "" -> passed; non-empty str -> failed (error message); None -> skipped.
    completed: dict[str, str | None] = {}
    # Run all the checks in topological order based on their dependencies
    ts = graphlib.TopologicalSorter(graph)
    for name in ts.static_order():
        if all(completed.get(n, "") == "" for n in graph[name]):
            result = apply_fixtures({"name": name, **fixtures}, tasks[name].check)
            if isinstance(result, bool):
                # Boolean results take the failure text from the check docstring.
                completed[name] = (
                    ""
                    if result
                    else (tasks[name].check.__doc__ or "Check failed").format(
                        name=name, self=tasks[name]
                    )
                )
            else:
                # A str is a custom failure message; None is an explicit skip.
                completed[name] = result
        else:
            # A required check did not pass, so this one is skipped.
            completed[name] = None
    # Collect the results
    result_list = []
    for task_name, check in _sort_by_family(families, tasks).items():
        # Map the completion state back to bool | None for the Result.
        result = None if completed[task_name] is None else not completed[task_name]
        doc = check.__doc__ or ""
        err_msg = completed[task_name] or ""
        if not is_allowed(select_checks, skip_checks, task_name):
            continue
        result_list.append(
            Result(
                family=check.family,
                name=task_name,
                description=doc.format(self=check, name=task_name).strip(),
                result=result,
                err_msg=textwrap.dedent(err_msg),
                url=get_check_url(task_name, check),
            )
        )
    return ProcessReturn(families, result_list)
def as_simple_dict(results: list[Result]) -> dict[str, ResultDict]:
    """
    Convert a results list into a simple dict of dicts structure. The name of
    the result turns into the key of the outer dict.

    :param results: The list of results.
    """
    out: dict[str, ResultDict] = {}
    for res in results:
        entry = dataclasses.asdict(res)
        del entry["name"]  # the name becomes the outer key instead
        out[res.name] = typing.cast(ResultDict, entry)
    return out
from __future__ import annotations
import importlib.metadata
import typing
from collections.abc import Mapping
from typing import Any
from .fixtures import apply_fixtures
__all__ = ["Family", "collect_families", "get_family_name"]
def __dir__() -> list[str]:
return __all__
class Family(typing.TypedDict, total=False):
    """
    A typed Dict that is used to customize the display of families in reports.
    Every key is optional (``total=False``).
    """

    #: Optional nice name to display instead of family key. Treated like family
    #: key if missing.
    name: str

    #: Checks are first sorted by this integer order, then alphabetically by
    #: family key. Treated like 0 if missing.
    order: int

    #: An optional description that shows up under the family name.
    description: str
def collect_families(fixtures: Mapping[str, Any]) -> dict[str, Family]:
"""
Produces a dict mapping family keys to :class:`Family` dicts based on
installed entry points. You must provide the evaluated fixtures so that the
family functions have access to the fixtures when they are running, usually
used for descriptions.
:param fixtures: Fully evaluated dict of fixtures.
"""
family_functions = (
ep.load()
for ep in importlib.metadata.entry_points(group="repo_review.families")
)
return {
k: v
for func in family_functions
for k, v in apply_fixtures(fixtures, func).items()
}
def get_family_name(families: Mapping[str, Family], family: str) -> str:
    """
    Returns the "nice" family name if there is one, otherwise the (input)
    family short name.

    :param families: A dict of family short names to :class:`.Family`'s.
    :param family: The short name of a family.
    :return: The nice family name if there is one, otherwise the short name is returned.

    .. versionadded:: 0.8
    """
    info = families.get(family, {})
    return info.get("name", family)
def get_family_description(families: Mapping[str, Family], family: str) -> str:
    """
    Returns the description if there is one, otherwise returns an empty string.

    :param families: A dict of family short names to :class:`.Family`'s.
    :param family: The short name of a family.
    :return: The description if there is one, otherwise an empty string.

    .. versionadded:: 0.9
    """
    return families.get(family, {}).get("description", "")
# Checks
Plugins provide checks; repo-review requires at least one plugin providing checks to operate; there are no built-in checks.
## Writing a check
A check is an object following a specific Protocol:
```python
class Check:
"""
Short description.
"""
family: str
requires: Set[str] = frozenset() # Optional
url: str = "" # Optional
def check(self) -> bool | str | None:
"""
Error message if returns False.
"""
...
```
You need to implement `family`, which is a string indicating which family it is
grouped under, and `check()`, which can take [](./fixtures.md), and returns `True` if
the check passes, or `False` if the check fails. If you want a dynamic error
explanation instead of the `check()` docstring, you can return a non-empty
string from the check instead of `False`. Returning `None` makes a check
"skipped". Docstrings/error messages can access their own object with `{self}`
and check name with `{name}` (these are processed with `.format()`, so escape `{}`
as `{{}}`). The error message is in markdown format.
```{versionchanged} 0.9
The string return value is not processed via `.format`. You can use `self` and
the `name` fixture directly when constructing the return string.
```
If the check named in `requires` does not pass, the check is skipped.
A suggested convention for easily writing checks is as follows:
```python
class General:
family = "general"
class PY001(General):
"Has a pyproject.toml"
@staticmethod
def check(package: Traversable) -> bool:
"""
All projects should have a `pyproject.toml` file to support a modern
build system and support wheel installs properly.
"""
return package.joinpath("pyproject.toml").is_file()
class PyProject:
family = "pyproject"
class PP002(PyProject):
"Has a proper build-system table"
requires = {"PY001"}
url = "https://peps.python.org/pep-0517"
@staticmethod
def check(pyproject: dict[str, Any]) -> bool:
"""
Must have `build-system.requires` *and* `build-system.backend`. Both
should be present in all modern packages.
"""
match pyproject:
case {"build-system": {"requires": list(), "build-backend": str()}}:
return True
case _:
return False
```
Key features:
- The base class allows setting the family once, and gives a quick shortcut for accessing all the checks via `.__subclasses__`.
- The name of the check class itself is the check code.
- The check method is a classmethod since it has no state.
- Likewise, all attributes are set on the class (`family`, `requires`, `url`) since there is no state.
- `requires` is used so that the pyproject checks are skipped if the pyproject file is missing.
## Registering checks
You register checks with a function that returns a dict of checks, with the code
of the check (letters + number) as the key, and check instances as the values.
This function can take [](./fixtures.md), as well, allowing customization of checks
based on repo properties.
Here is the suggested function for the above example:
```python
def repo_review_checks() -> dict[str, General | PyProject]:
general = {p.__name__: p() for p in General.__subclasses__()}
pyproject = {p.__name__: p() for p in PyProject.__subclasses__()}
return general | pyproject
```
You tell repo-review to use this function via an entry-point:
```toml
[project.entry-points."repo_review.checks"]
general_pyproject = "my_plugin_package.my_checks_module:repo_review_checks"
```
The entry-point name doesn't matter.
## Customizable checks
You can customize checks, as well, using this system. Here is an example,
using the (synthetic) case where we want to add a check based on the build-backend,
and we want to require that `tool.<build-backend>` is present, where this
depends on which build-backend we recognized. (Don't actually do this, you don't
have to have a tool section to use the backends shown below!)
```python
import dataclasses
from typing import ClassVar
@dataclasses.dataclass
class PP003(PyProject):
"Has a tool section for the {self.name!r} build backend"
requires: ClassVar[set[str]] = {"PY001"}
url: ClassVar[str] = "https://peps.python.org/pep-0517"
name: str
def check(self, pyproject: dict[str, Any]) -> bool:
"""
Must have a {self.name!r} section.
"""
match pyproject:
case {"tool": {self.name: object()}}:
return True
case _:
return False
def repo_review_checks(pyproject: dict[str, Any]) -> dict[str, PyProject]:
backends = {
"setuptools.build_api": "setuptools",
"scikit_build_core.build": "scikit-build",
}
match pyproject:
case {"build-system": {"build-backend": str(x)}} if x in backends:
return {"PP003": PP003(name=backends[x])}
case _:
return {}
```
### Handling empty generation
If repo-review is listing all checks, a
{class}`repo_review.ghpath.EmptyTraversable` is passed for `root` and
`package`. This will appear to be a directory with no contents. If you have
conditional checks, you should handle this case to support being listed as a
possible check. As a helper for this case, a
{func}`~repo_review.fixtures.list_all` fixture is provided that returns {obj}`True`
only if a list-all operation is being performed. The above can then be written:
```python
def repo_review_checks(
list_all: bool, pyproject: dict[str, Any]
) -> dict[str, PyProject]:
backends = {
"setuptools.build_api": "setuptools",
"scikit_build_core.build": "scikit-build",
}
if list_all:
return {"PP003": PP003(name="<backend>")}
match pyproject:
case {"build-system": {"build-backend": str(x)}} if x in backends:
return {"PP003": PP003(name=backends[x])}
case _:
return {}
```
```{versionadded} 0.8
The {func}`~repo_review.fixtures.list_all` fixture.
```
| /repo_review-0.10.0.tar.gz/repo_review-0.10.0/docs/checks.md | 0.870391 | 0.908293 | checks.md | pypi |
import os
import re
from typing import Set, Sequence
from git import Repo
def all_file_names_which_have_contained_the_lines_in_multiple_files(file_paths: Sequence[str], repo: Repo) -> Set[str]:
    """
    Parses the git log for all lines in multiple files, to determine all the file paths in which these lines
    have existed.

    Useful for tracking renames in a repo.

    :param file_paths: Relative paths to files within repo
    :param repo: Repository containing the files
    :return: Union of every historical name of every given file
    """
    names: Set[str] = set()
    for path in file_paths:
        names |= all_file_names_which_have_contained_the_lines_in_a_file(path, repo)
    return names
def all_file_names_which_have_contained_the_lines_in_a_file(file_path: str, repo: Repo) -> Set[str]:
    """
    Parses the git log for all lines in a file, to determine all the file paths in which these lines
    have existed.

    Useful for tracking renames in a repo.

    :param file_path: Relative path to file within repo
    :param repo: Repository containing the file
    :return: Every historical name of the file, empty if it cannot be tracked
    """
    full_path = os.path.join(repo.working_tree_dir, file_path)
    # Line history can only be followed for regular files, not directories.
    if os.path.isdir(full_path):
        return set()
    try:
        log = full_git_history_for_contents_of_file(full_path, repo)
    except EmptyFileException:
        # An empty file has no lines whose history could be followed.
        return set()
    return get_filenames_from_git_log(log)
def full_git_history_for_contents_of_file(file_path: str, repo: Repo) -> str:
    """
    Runs git log on all of the lines in a file

    :param file_path: Absolute path of the file
    :param repo: Repository containing the file
    :return: Raw ``git log`` output for the full line range of the file
    :raises EmptyFileException: if the file has no lines to track
    """
    num_lines = file_length(file_path)
    if not num_lines:
        raise EmptyFileException('could not track history of lines in an empty file')
    # -L <start>,<end>:<file> limits the log to the history of those lines.
    line_range = f'1,{num_lines}:{file_path}'
    return repo.git.log('--format=oneline', '--compact-summary', '-L', line_range)
def get_filenames_from_git_log(git_log: str) -> Set[str]:
    """
    Extract every file path mentioned in diff headers of a git log.

    :param git_log: Raw ``git log`` output including diff headers
    :return: Unique set of old and new file paths
    """
    # Diff headers look like "--- a/<old path>" followed by "+++ b/<new path>".
    pattern = re.compile(r'--- a\/(.+)\n\+\+\+ b\/(.+)')
    names: Set[str] = set()
    for old_name, new_name in pattern.findall(git_log):
        names.add(old_name)
        names.add(new_name)
    return names
def file_length(file_path: str) -> int:
    """
    Returns the number of lines in a file

    :param file_path: Path of the file to measure
    :return: Line count; 0 for an empty file
    """
    # Stream the file instead of the previous enumerate() counter, which used
    # the ambiguous name `l` and an easy-to-misread i = -1 sentinel.
    with open(file_path) as f:
        return sum(1 for _ in f)
class EmptyFileException(Exception):
    """Raised when line history is requested for a file that has no lines."""
    pass
<p align="center">
<img src="https://raw.githubusercontent.com/mondeja/repo-stream/master/images/repo-stream.png" alt="repo-stream" width="90%">
</p>
Cron-based remote [pre-commit][pre-commit] executions by opening pull requests.
- Do you have a lot of old projects that are using deprecated configuration?
- Do you want to test a small change in a lot of projects at the same time
without adding them one by one?
Those are the reasons behind **repo-stream**.
[![PyPI version][pypi-version-image]][pypi-link]
[![Test][test-image]][test-link]
[![Coverage status][coverage-image]][coverage-link]
## How does it work?
Scans your repositories looking for pre-commit ``repo-stream`` hooks and run
pre-commit using another remote configuration file, which location is specified
in this hook. If this execution edits file contents, opens a pull request
against the repository.
So you can use **repo-stream** to run one-time pre-commit hooks for all your
repositories without have to define them inside the configuration of each one.
## Usage
1. Create a `repo-stream` hook in the pre-commit configuration of your project.
If this is found, **repo-stream** will search a pre-commit configuration file
at `updater` under `config` repository arguments and will run pre-commit using
that configuration against the current repository.
```yaml
- repo: https://github.com/mondeja/repo-stream
rev: v1.3.1
hooks:
- id: repo-stream
args:
- -config=https://github.com/<your-username>/repo-stream-config
- -updater=upstream
```
> You don't need to specify the extension ``.yaml`` in the ``updater``
argument.
2. Create your `repo-stream` configuration files repository, for example at
`https://github.com/<your-username>/repo-stream-config`.
3. Create the pre-commit configuration file, following this example would be
at `upstream.yaml`, for example:
```yaml
repos:
- repo: https://github.com/mondeja/pre-commit-hooks
rev: v1.5.2
hooks:
- id: add-pre-commit-hook
args:
- -repo=https://github.com/mondeja/pre-commit-hooks
- -id=dev-extras-required
- -rev=v1.5.2
```
> For more information about this hook see
[add-a-pre-commit-hook][add-a-pre-commit-hook].
4. Create the cron task using some platform like Github Actions:
```yaml
name: repo-stream update
on:
schedule:
- cron: 0 4 1/7 * *
workflow_dispatch:
jobs:
repo-stream-update:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Set up Python
uses: actions/setup-python@v2
with:
python-version: 3.x
- name: Install repo-stream
run: pip install repo-stream
- name: Run repo-stream update
run: repo-stream <your-username>
env:
GITHUB_TOKEN: ${{ secrets.GH_TOKEN }}
GITHUB_USERNAME: <your-username>
```
- ``GH_TOKEN`` must be a secret configured for the repository with the Github
user token of `<your-username>` user.
- If you want to update other repositories not published under your user, pass
them as parameters of `repo-stream <your-username> <other-username>`.
> Consult `repo-stream --help` for documentation about valid arguments.
### Usage as Github Action
```yaml
name: repo-stream update
on:
schedule:
- cron: 0 4 1/7 * *
workflow_dispatch:
jobs:
repo-stream-update:
runs-on: ubuntu-latest
steps:
- uses: actions/setup-python@v2
with:
python-version: 3.x
- uses: mondeja/repo-stream@v1.3.1
with:
usernames: <your-username> <other-username>
args: --dry-run
env:
GITHUB_TOKEN: ${{ secrets.GH_TOKEN }}
GITHUB_USERNAME: <your-username>
```
## Common workflows
### Add a pre-commit hook
You can use the pre-commit hook
[`mondeja/pre-commit-hooks#add-pre-commit-hook`][add-pre-commit-hook] to add a
new pre-commit hook to the pre-commit configuration of a project.
> Note that this hook only makes sense executed from outside, using a workflow
like **repo-stream**. So **repo-stream** allows you to create pre-commit hooks
for other things than its main workflow mechanism.
For example:
```yaml
# upstream.yaml
repos:
- repo: https://github.com/mondeja/pre-commit-hooks
rev: v1.5.2
hooks:
- id: add-pre-commit-hook
args:
- -repo=https://github.com/mondeja/pre-commit-hooks
- -id=dev-extras-required
- -rev=v1.5.2
```
This would add the hook [`dev-extras-required`][dev-extras-required] to the
pre-commit configuration of your project, if it isn't already defined.
## Current limitations
- Only works with Github repositories.
<br>
<p align="center">
<img src="https://raw.githubusercontent.com/mondeja/repo-stream/master/images/sep1.png" width="82%">
</p>
[pypi-version-image]: https://img.shields.io/pypi/v/repo-stream?label=version&logo=pypi&logoColor=white
[pypi-link]: https://pypi.org/project/repo-stream
[test-image]: https://img.shields.io/github/workflow/status/mondeja/repo-stream/CI?label=tests&logo=github
[test-link]: https://github.com/mondeja/repo-stream/actions?query=workflow%3ACI
[coverage-image]: https://img.shields.io/coveralls/github/mondeja/repo-stream?logo=coveralls
[coverage-link]: https://coveralls.io/github/mondeja/repo-stream
[pre-commit]: https://pre-commit.com
[add-pre-commit-hook]: https://github.com/mondeja/pre-commit-hooks#add-pre-commit-hook
[add-a-pre-commit-hook]: https://github.com/mondeja/repo-stream#add-a-pre-commit-hook
[dev-extras-required]: https://github.com/mondeja/pre-commit-hooks#dev-extras-required
| /repo_stream-1.3.1.tar.gz/repo_stream-1.3.1/README.md | 0.773687 | 0.780662 | README.md | pypi |
import functools
import json
import multiprocessing
import os
import urllib.parse
import urllib.request
def repo_url_to_full_name(url):
    """Convert a repository absolute URL to ``full_name`` format used by Github.

    Parameters
    ----------
    url : str
        URL of the repository.

    Returns
    -------
    url : str
        Full name of the repository accordingly with Github API.
    """
    # Drop "<scheme>:", "" and "<host>" (the first three slash-split pieces).
    parts = url.split("/")
    return "/".join(parts[3:])
def parse_github_pagination(link_header):
    """Discover the latest page in a Github pagination response.

    Parameters
    ----------
    link_header : str
        "Link" header returned by a Github API paginated response.

    Returns
    -------
    int : Number of the latest page, or None if no ``rel="last"`` entry exists.
    """
    for entry in link_header.split(","):
        # Splitting on "," leaves a leading space on every entry after the
        # first; the previous code kept it, producing "<https://..." URLs that
        # only parsed by accident. Strip it before slicing the URL out.
        entry = entry.strip()
        # Each entry looks like '<https://...?page=N>; rel="next"'.
        if entry.split(" ")[-1].split('"')[1] != "last":
            continue
        url = entry.split(";")[0].strip("<>")
        page = urllib.parse.parse_qs(urllib.parse.urlparse(url).query)["page"][0]
        return int(page)
    return None
def _get_user_repos__request(url):
    """Fetch one page of a user's repositories and decode the JSON body."""
    request = urllib.request.Request(url)
    add_github_auth_headers(request)
    response = urllib.request.urlopen(request)
    return json.loads(response.read().decode("utf-8"))
def get_user_repos(username, fork=None, repositories_to_ignore=(), per_page=50):
    """Get all the repositories of a Github user giving certain conditions.

    Parameters
    ----------
    username : str
        Github user whose repositories will be returned.
    fork : bool, optional
        If is ``True``, only forked repositories will be returned, if is
        ``False``, only non forked repositories will be returned and being
        ``None`` both forked and unforked repositories will be returned.
    repositories_to_ignore : list, optional
        Full name of repositories which will not be included in the response.
    per_page : int, optional
        Number of repositories to retrieve in each request to the Github API.

    Returns
    -------
    list : All the full names of the user repositories.
    """
    response = []

    def build_url(page):
        return (
            f"https://api.github.com/users/{username}/repos?per_page={per_page}"
            f"&sort=updated&page={page}&type=owner"
            "&accept=application/vnd.github.v3+json"
        )

    def is_valid_repo(repo_data):
        # Bug fix: the original read the outer loop variable `repo` here
        # instead of the `repo_data` parameter; it only worked by accident
        # because both call sites happened to use the same loop-variable name.
        if fork is not None and repo_data["fork"] is not fork:
            return False
        if repo_data["archived"]:
            return False
        if repo_data["full_name"] in repositories_to_ignore:
            return False
        return True

    req = urllib.request.Request(build_url(1))
    add_github_auth_headers(req)
    req = urllib.request.urlopen(req)

    # The "Link" header is only present when there is more than one page.
    link_header = req.getheader("Link")
    last = 1 if not link_header else parse_github_pagination(link_header)

    repos = json.loads(req.read().decode("utf-8"))
    response.extend(repo["full_name"] for repo in repos if is_valid_repo(repo))

    if last > 1:
        # Fetch the remaining pages in parallel, one worker per page at most.
        num_cores = min(multiprocessing.cpu_count(), last - 1)
        urls = [build_url(page) for page in range(2, last + 1)]
        with multiprocessing.Pool(processes=num_cores) as pool:
            for repos in pool.map(_get_user_repos__request, urls):
                response.extend(
                    repo["full_name"] for repo in repos if is_valid_repo(repo)
                )

    return response
def add_github_auth_headers(req):
    """Add Github authentication headers if they are present in environment variables.

    If the environment variable ``GITHUB_TOKEN`` is defined, then an ``Authorization``
    header is added to a :py:class:`urllib.request.Request` object.

    Parameters
    ----------
    req : urllib.request.Request
        HTTP request for which the authentication headers will be included.
    """
    token = os.environ.get("GITHUB_TOKEN")
    if token is not None:
        req.add_header("Authorization", f"token {token}")
@functools.lru_cache(maxsize=None)
def download_raw_githubusercontent(repo, branch, filename):
    """Download a raw text file content from a Github repository.

    Parameters
    ----------
    repo : str
        Repository full name inside which the file is stored.
    branch : str
        Branch name inside the file is stored.
    filename : str
        Path to the file inside the repository tree, given without the
        ``.yaml`` extension (it is appended automatically).

    Returns
    -------
    str : Downloaded content of the file.
    """
    # Bug fix: the URL previously hard-coded a literal filename and never
    # used the `filename` parameter at all.
    file_url = (
        "https://raw.githubusercontent.com/"
        f"{repo.rstrip('/')}/{branch}/{filename}.yaml"
    )
    return urllib.request.urlopen(file_url).read().decode("utf-8")
def create_github_pr(repo, title, body, head, base):
    """Create a pull request for a Github repository.

    Parameters
    ----------
    repo : str
        Repository for which the pull request will be opened.
    title : str
        Pull request title.
    body : str
        Pull request message body content.
    head : str
        Name of the branch to be merged.
    base : str
        Name of the branch for which the changes will be applied.
    """
    payload = {
        "title": title,
        "body": body,
        "head": head,
        "base": base,
    }
    req = urllib.request.Request(
        f"https://api.github.com/repos/{repo}/pulls",
        data=json.dumps(payload).encode(),
        method="POST",
    )
    add_github_auth_headers(req)
    response = urllib.request.urlopen(req)
    return json.loads(response.read().decode("utf-8"))
def get_github_prs(repo):
    """Get the data for all opened pull requests from a repository.

    Parameters
    ----------
    repo : str
        Repository full name from which the opened pull requests will be returned.
    """
    request = urllib.request.Request(f"https://api.github.com/repos/{repo}/pulls")
    add_github_auth_headers(request)
    response = urllib.request.urlopen(request)
    return json.loads(response.read().decode("utf-8"))
def get_github_prs_number_head_body(repo):
    """Get opened pull requests numbers, head reference name and message body
    content for a repository.

    Parameters
    ----------
    repo : str
        Repository full name from which the opened pull requests will be returned.
    """
    response = []
    for pr in get_github_prs(repo):
        response.append((pr["number"], pr["head"]["ref"], pr["body"]))
    return response
import contextlib
import os
import subprocess
import tempfile
import uuid
def repo_default_branch_name(repo, protocol="https"):
    """Get the default branch name of a remote repository.

    Parameters
    ----------
    repo : str
        Github repository owner and name, in the form ``"<username>/<project>"``.
    protocol: str
        Protocol connecting to Github.

    Returns
    -------
    str : Default branch name of the repository.
    """
    # `git ls-remote --symref <url> HEAD` prints a first line such as
    # "ref: refs/heads/<branch>\tHEAD"; the chained splits below pick the
    # third "/"-separated piece and trim everything after a space or tab.
    return (
        subprocess.check_output(
            [
                "git",
                "ls-remote",
                "--symref",
                f"{protocol}://github.com/{repo}",
                "HEAD",
            ],
            stderr=subprocess.DEVNULL,
        )
        .decode("utf-8")
        .splitlines()[0]
        .split("/")[2]
        .split(" ")[0]
        .split("\t")[0]
    )
@contextlib.contextmanager
def tmp_repo(repo, username=None, token=None, platform="github.com", clone_depth=1):
    """Create a temporal directory where clone a repository and move inside.

    Works as a context manager using ``with`` statement and when exits, comes
    back to the initial working directory.

    Parameters
    ----------
    repo : str
        Repository to clone.
    username : str, optional
        User name embedded in the clone URL for authenticated HTTPS access.
    token : str, optional
        Token embedded in the clone URL; only used when ``username`` is also given.
    platform : str
        Platform provider where the repository is hosted.
    clone_depth : int
        Number of commits to fetch cloning the repository.

    Yields
    ------
    str : Temporal cloned repository directory path (current working directory
        inside context).
    """
    prev_cwd = os.getcwd()
    try:
        with tempfile.TemporaryDirectory() as dirname:
            os.chdir(dirname)
            # Authenticated clone URLs look like https://user:token@github.com/...
            auth_str = f"{username}:{token}@" if (username and token) else ""
            subprocess.check_call(
                [
                    "git",
                    "clone",
                    "--quiet",
                    f"--depth={clone_depth}",
                    f"https://{auth_str}{platform}/{repo}.git",
                ]
            )
            repo_dirpath = os.path.join(dirname, repo.split("/")[1])
            os.chdir(repo_dirpath)
            yield repo_dirpath
    finally:
        # NOTE(review): the temporary directory is cleaned up while it is still
        # the current working directory (the `with` exits before this chdir).
        # That works on POSIX but may fail on Windows — confirm if relevant.
        os.chdir(prev_cwd)
def git_random_checkout(quiet=True, length=8, prefix=""):
    """Create and switch to a new branch with a randomly generated name.

    Parameters
    ----------
    quiet : bool, optional
        When enabled, creates the new branch without printing to STDOUT.
    length : int, optional
        Number of random hexadecimal characters in the branch name.
    prefix : str, optional
        Prepended at the beginning of the new branch name.

    Returns
    -------
    str : New branch name.
    """
    random_suffix = uuid.uuid4().hex[:length]
    branch = f"{prefix}{random_suffix}"
    command = ["git", "checkout", "-b", branch]
    if quiet:
        command.append("--quiet")
    subprocess.check_call(command)
    return branch
def there_are_untracked_changes():
    """Indicate if in the current GIT repository there are files with
    untracked changes.

    NOTE(review): despite the name, ``git diff --shortstat`` reports
    *unstaged* modifications to tracked files; files git does not track at
    all are not detected — confirm the intended semantics with callers.
    """
    return subprocess.check_output(["git", "diff", "--shortstat"]) != b""
def git_add_remote(repo, username, token, remote="origin"):
    """Register an authenticated github.com remote on the current repository."""
    remote_url = f"https://{username}:{token}@github.com/{repo}.git"
    return subprocess.check_call(["git", "remote", "add", remote, remote_url])
def git_set_remote_url(repo, username, token, remote="origin"):
    """Point an existing remote of the current repository at github.com."""
    remote_url = f"https://{username}:{token}@github.com/{repo}.git"
    return subprocess.check_call(["git", "remote", "set-url", remote, remote_url])
def git_add_all_commit(title="repo-stream update", description=""):
    """Run ``git add .`` and ``git commit -m`` commands.

    Parameters
    ----------
    title : str, optional
        Commit title.
    description : str, optional
        Commit description, added as a second ``-m`` paragraph when
        non-empty.

    Returns
    -------
    int : Return code of the ``git commit`` call (0 on success).
    """
    subprocess.check_call(["git", "add", "."])

    commit_args = []
    if title:
        commit_args.extend(["-m", title])
    # Fix: previously an empty description was always appended as a blank
    # '-m ""' paragraph; only add the second paragraph when there is text.
    if description:
        commit_args.extend(["-m", description])
    return subprocess.check_call(["git", "commit", *commit_args])
def git_push(remote, target):
    """Run ``git push <remote> <target>`` command.

    Parameters
    ----------
    remote : str
        Remote name.
    target : str
        Branch to be pushed.
    """
    push_command = ["git", "push", remote, target]
    subprocess.check_call(push_command)
from swaggerpetstore.api_helper import APIHelper
from swaggerpetstore.models.category import Category
from swaggerpetstore.models.tag import Tag
class Pet(object):
    """Implementation of the 'Pet' model.

    A pet listed in the store, deserialized from/serialized to the API's
    JSON representation via the ``_names`` mapping below.

    Attributes:
        id (long|int): TODO: type description here.
        category (Category): TODO: type description here.
        name (string): TODO: type description here.
        photo_urls (list of string): TODO: type description here.
        tags (list of Tag): TODO: type description here.
        status (StatusEnum): pet status in the store

    """

    # Create a mapping from Model property names to API property names
    _names = {
        "name": 'name',
        "photo_urls": 'photoUrls',
        "id": 'id',
        "category": 'category',
        "tags": 'tags',
        "status": 'status'
    }

    # Properties that may be absent from API payloads; absent values are
    # represented by APIHelper.SKIP and never set on the instance.
    _optionals = [
        'id',
        'category',
        'tags',
        'status',
    ]

    def __init__(self,
                 name=None,
                 photo_urls=None,
                 id=APIHelper.SKIP,
                 category=APIHelper.SKIP,
                 tags=APIHelper.SKIP,
                 status=APIHelper.SKIP):
        """Constructor for the Pet class"""
        # Initialize members of the class; optional members are only set
        # when the caller supplied a real value (not APIHelper.SKIP).
        if id is not APIHelper.SKIP:
            self.id = id
        if category is not APIHelper.SKIP:
            self.category = category
        self.name = name
        self.photo_urls = photo_urls
        if tags is not APIHelper.SKIP:
            self.tags = tags
        if status is not APIHelper.SKIP:
            self.status = status

    @classmethod
    def from_dictionary(cls,
                        dictionary):
        """Creates an instance of this model from a dictionary

        Args:
            dictionary (dictionary): A dictionary representation of the object
                as obtained from the deserialization of the server's response. The
                keys MUST match property names in the API description.

        Returns:
            object: An instance of this structure class.

        """
        if dictionary is None:
            return None

        # Extract variables from the dictionary
        # NOTE(review): falsy values (e.g. id == 0, empty name) collapse to
        # None/SKIP because of the truthiness checks below — confirm this
        # matches the API contract for these fields.
        name = dictionary.get("name") if dictionary.get("name") else None
        photo_urls = dictionary.get("photoUrls") if dictionary.get("photoUrls") else None
        id = dictionary.get("id") if dictionary.get("id") else APIHelper.SKIP
        category = Category.from_dictionary(dictionary.get('category')) if 'category' in dictionary.keys() else APIHelper.SKIP
        tags = None
        if dictionary.get('tags') is not None:
            tags = [Tag.from_dictionary(x) for x in dictionary.get('tags')]
        else:
            tags = APIHelper.SKIP
        status = dictionary.get("status") if dictionary.get("status") else APIHelper.SKIP
        # Return an object of this model
        return cls(name,
                   photo_urls,
                   id,
                   category,
                   tags,
                   status)
from swaggerpetstore.api_helper import APIHelper
class ApiResponse(object):
    """Implementation of the 'ApiResponse' model.

    Generic response wrapper carrying an HTTP-style code, a type and a
    human-readable message.

    Attributes:
        code (int): TODO: type description here.
        mtype (string): TODO: type description here.
        message (string): TODO: type description here.

    """

    # Create a mapping from Model property names to API property names
    _names = {
        "code": 'code',
        "mtype": 'type',
        "message": 'message'
    }

    _optionals = [
        'code',
        'mtype',
        'message',
    ]

    def __init__(self,
                 code=APIHelper.SKIP,
                 mtype=APIHelper.SKIP,
                 message=APIHelper.SKIP):
        """Constructor for the ApiResponse class"""
        # Only materialize attributes whose value was actually provided
        # (i.e. is not the APIHelper.SKIP sentinel).
        for attr_name, value in (("code", code),
                                 ("mtype", mtype),
                                 ("message", message)):
            if value is not APIHelper.SKIP:
                setattr(self, attr_name, value)

    @classmethod
    def from_dictionary(cls, dictionary):
        """Creates an instance of this model from a dictionary

        Args:
            dictionary (dictionary): A dictionary representation of the object
                as obtained from the deserialization of the server's response.
                The keys MUST match property names in the API description.

        Returns:
            object: An instance of this structure class.

        """
        if dictionary is None:
            return None

        # Falsy values (None, 0, "") are treated the same as absent keys,
        # exactly as in the original field-by-field extraction.
        api_keys = ('code', 'type', 'message')
        return cls(*(dictionary.get(key) or APIHelper.SKIP for key in api_keys))
from swaggerpetstore.api_helper import APIHelper
class User(object):
    """Implementation of the 'User' model.

    Account information for a pet-store user.

    Attributes:
        id (long|int): TODO: type description here.
        username (string): TODO: type description here.
        first_name (string): TODO: type description here.
        last_name (string): TODO: type description here.
        email (string): TODO: type description here.
        password (string): TODO: type description here.
        phone (string): TODO: type description here.
        user_status (int): User Status

    """

    # Create a mapping from Model property names to API property names
    _names = {
        "id": 'id',
        "username": 'username',
        "first_name": 'firstName',
        "last_name": 'lastName',
        "email": 'email',
        "password": 'password',
        "phone": 'phone',
        "user_status": 'userStatus'
    }

    _optionals = [
        'id',
        'username',
        'first_name',
        'last_name',
        'email',
        'password',
        'phone',
        'user_status',
    ]

    def __init__(self,
                 id=APIHelper.SKIP,
                 username=APIHelper.SKIP,
                 first_name=APIHelper.SKIP,
                 last_name=APIHelper.SKIP,
                 email=APIHelper.SKIP,
                 password=APIHelper.SKIP,
                 phone=APIHelper.SKIP,
                 user_status=APIHelper.SKIP):
        """Constructor for the User class"""
        # Attributes are only set when a real value (not the SKIP sentinel)
        # was supplied, mirroring the optional-property semantics of the API.
        provided = (("id", id),
                    ("username", username),
                    ("first_name", first_name),
                    ("last_name", last_name),
                    ("email", email),
                    ("password", password),
                    ("phone", phone),
                    ("user_status", user_status))
        for attr_name, value in provided:
            if value is not APIHelper.SKIP:
                setattr(self, attr_name, value)

    @classmethod
    def from_dictionary(cls, dictionary):
        """Creates an instance of this model from a dictionary

        Args:
            dictionary (dictionary): A dictionary representation of the object
                as obtained from the deserialization of the server's response.
                The keys MUST match property names in the API description.

        Returns:
            object: An instance of this structure class.

        """
        if dictionary is None:
            return None

        # Falsy values (None, "", 0) map to SKIP, exactly like absent keys.
        api_keys = ('id', 'username', 'firstName', 'lastName',
                    'email', 'password', 'phone', 'userStatus')
        return cls(*(dictionary.get(key) or APIHelper.SKIP for key in api_keys))
from swaggerpetstore.api_helper import APIHelper
class Order(object):
    """Implementation of the 'Order' model.

    A store order for a pet, including shipping date and completion status.

    Attributes:
        id (long|int): TODO: type description here.
        pet_id (long|int): TODO: type description here.
        quantity (int): TODO: type description here.
        ship_date (datetime): TODO: type description here.
        status (Status1Enum): Order Status
        complete (bool): TODO: type description here.

    """

    # Create a mapping from Model property names to API property names
    _names = {
        "id": 'id',
        "pet_id": 'petId',
        "quantity": 'quantity',
        "ship_date": 'shipDate',
        "status": 'status',
        "complete": 'complete'
    }

    # All properties are optional; absent values are APIHelper.SKIP and are
    # never set on the instance.
    _optionals = [
        'id',
        'pet_id',
        'quantity',
        'ship_date',
        'status',
        'complete',
    ]

    def __init__(self,
                 id=APIHelper.SKIP,
                 pet_id=APIHelper.SKIP,
                 quantity=APIHelper.SKIP,
                 ship_date=APIHelper.SKIP,
                 status=APIHelper.SKIP,
                 complete=APIHelper.SKIP):
        """Constructor for the Order class"""
        # Initialize members of the class; only set attributes whose value
        # is not the APIHelper.SKIP sentinel.
        if id is not APIHelper.SKIP:
            self.id = id
        if pet_id is not APIHelper.SKIP:
            self.pet_id = pet_id
        if quantity is not APIHelper.SKIP:
            self.quantity = quantity
        if ship_date is not APIHelper.SKIP:
            # Datetimes are wrapped for RFC 3339 serialization; a falsy
            # ship_date becomes a plain None attribute.
            self.ship_date = APIHelper.RFC3339DateTime(ship_date) if ship_date else None
        if status is not APIHelper.SKIP:
            self.status = status
        if complete is not APIHelper.SKIP:
            self.complete = complete

    @classmethod
    def from_dictionary(cls,
                        dictionary):
        """Creates an instance of this model from a dictionary

        Args:
            dictionary (dictionary): A dictionary representation of the object
                as obtained from the deserialization of the server's response. The
                keys MUST match property names in the API description.

        Returns:
            object: An instance of this structure class.

        """
        if dictionary is None:
            return None

        # Extract variables from the dictionary
        # NOTE(review): most fields use truthiness (so 0/"" collapse to SKIP)
        # while "complete" uses key presence — confirm this asymmetry is
        # intentional (it keeps complete=False distinguishable from absent).
        id = dictionary.get("id") if dictionary.get("id") else APIHelper.SKIP
        pet_id = dictionary.get("petId") if dictionary.get("petId") else APIHelper.SKIP
        quantity = dictionary.get("quantity") if dictionary.get("quantity") else APIHelper.SKIP
        ship_date = APIHelper.RFC3339DateTime.from_value(dictionary.get("shipDate")).datetime if dictionary.get("shipDate") else APIHelper.SKIP
        status = dictionary.get("status") if dictionary.get("status") else APIHelper.SKIP
        complete = dictionary.get("complete") if "complete" in dictionary.keys() else APIHelper.SKIP
        # Return an object of this model
        return cls(id,
                   pet_id,
                   quantity,
                   ship_date,
                   status,
                   complete)
import os
import shutil
import tempfile
from pathlib import Path
from typing import Optional
import typer
from ruamel.yaml import YAML
from repo_tools.common import utils
circle_app = typer.Typer()
TOP_LEVEL_KEYS = ["version", "description", "display", "orbs", "parameters"]
@circle_app.command()
def explode(
    path: Optional[str] = typer.Argument(".", help="path to the circle ci config"),
    out: Optional[str] = typer.Option("./.circleci/config/", help="output directory"),
):
    """Split a single CircleCI config file into one file per top-level section.

    Top-level metadata keys (version, orbs, ...) are written to
    ``@config.yml``; every other top-level section becomes a directory with
    one YAML file per sub-entry. The tree is assembled in a temp dir and then
    copied to ``out``.
    """
    # Generalized: accept both common YAML extensions, not only ".yml".
    if not path.endswith((".yml", ".yaml")):
        utils.exit_cli("path must be a yml file", 1)

    path_to_file = os.path.abspath(os.path.join(os.getcwd(), path))
    output_full_path = os.path.abspath(os.path.join(os.getcwd(), out))

    # Round-trip loader that preserves quoting and uses CircleCI-style indents.
    yaml = YAML()
    yaml.indent(mapping=2, sequence=4, offset=2)
    yaml.preserve_quotes = True

    with tempfile.TemporaryDirectory() as temp_dir:
        file_tree = generate_file_tree(yaml, path_to_file, output_dir=temp_dir)
        _create_file_tree(yaml, file_tree)
        # copytree raises if output_full_path already exists.
        shutil.copytree(temp_dir, output_full_path)
def generate_file_tree(yaml: YAML, path: str, output_dir: str):
    """Map output file paths to the YAML content each file should hold.

    Top-level keys listed in TOP_LEVEL_KEYS are grouped into a single
    "@config.yml". Every other top-level section becomes a directory:
    mapping sub-entries get their own "<name>.yml" file, scalar sub-entries
    are written as "@<name>.yml" with a single-key mapping.
    """
    document = yaml.load(Path(path))
    tree = {}

    # Collect the shared metadata keys into one header file.
    header = {key: value for key, value in document.items() if key in TOP_LEVEL_KEYS}
    tree[os.path.join(output_dir, "@config.yml")] = header

    for section, entries in document.items():
        if section in TOP_LEVEL_KEYS:
            continue
        section_dir = os.path.join(output_dir, section)
        for name, content in entries.items():
            if isinstance(content, dict):
                tree[os.path.join(section_dir, f"{name}.yml")] = content.items()
            else:
                tree[os.path.join(section_dir, f"@{name}.yml")] = {name: content}
    return tree
def _create_file_tree(yaml, files_to_be_created):
for path, content in files_to_be_created.items():
os.makedirs(path.rsplit("/", 1)[0], exist_ok=True)
with open(path, "w+") as file:
yaml.dump(dict(content), file) | /repo_tools-0.2.0-py3-none-any.whl/repo_tools/commands/circle.py | 0.530966 | 0.212968 | circle.py | pypi |
import json
import os
from typing import Optional
import typer
import yaml
from tabulate import tabulate
from repo_tools.common import utils
from repo_tools.common.structs import OutputFormat
config_github_app = typer.Typer()
@config_github_app.command(
    help="Detects the github oauth_token from 'gh' tool (if exists)"
)
def detect(path: str = typer.Argument(""), overwrite: bool = typer.Option(False)):
    """Import the GitHub oauth token from the 'gh' CLI host configuration."""
    # Default to gh's standard hosts file unless an explicit path was given.
    config_location = path or os.path.join(os.path.expanduser("~/.config/gh/hosts.yml"))

    with open(config_location) as f:
        hosts = yaml.load(f, Loader=yaml.FullLoader)

    token = hosts.get("github.com", {}).get("oauth_token", "")
    if not token:
        utils.exit_cli(
            "Failed to detect github oauth token, use 'config gh setup' instead.",
            status_code=1,
        )
    utils.save_config({"github": {"oauth_token": token}}, overwrite)
@config_github_app.command(help="Setup multi to interact with github")
def setup(
    token: Optional[str] = typer.Option(
        None, prompt="Please input your GitHub API token"
    ),
    org: Optional[str] = typer.Option(
        None, prompt="Please input your GitHub organization/username"
    ),
    overwrite: bool = typer.Option(False),
):
    """Store the GitHub API token and organization in the tool's config.

    Both values are prompted for interactively when not given on the
    command line; ``--overwrite`` replaces an existing configuration.
    """
    utils.save_config({"github": {"oauth_token": token, "org": org}}, overwrite)
@config_github_app.command(help="Show the current github configuration")
def show(
    fmt: OutputFormat = typer.Option(
        "table", "--format", "-fmt", help="show output as table or json"
    ),
):
    """Print the stored github configuration as a table or as JSON.

    Exits via ``utils.exit_cli`` when no github configuration exists yet.
    """
    github_config_values = utils.get_config_value("github")
    if not github_config_values:
        utils.exit_cli(
            "Please use 'config gh detect' or 'config gh setup' to set github config.",
            status_code=1,
        )
    github_config = [[key, value] for key, value in github_config_values.items()]
    # Fix: the original two independent "if" checks left "out" unbound for
    # any format other than table/json (NameError); table is now the default
    # rendering for every non-json format.
    if fmt == OutputFormat.json:
        out = json.dumps(
            github_config_values,
            indent=4,
            sort_keys=True,
        )
    else:
        out = tabulate(github_config, headers=["Key", "Value"], colalign=("right",))
    typer.echo(out)
from typing import Dict, Any
# Package metadata consumed by the (ruamel-style) packaging tooling.
_package_data: Dict[str, Any] = dict(
    full_package_name='repo',
    version_info=(0, 3, 0),
    __version__='0.3.0',
    version_timestamp='2022-10-12 09:07:41',
    author='Anthon van der Neut',
    author_email='a.van.der.neut@ruamel.eu',
    description='repo supports complex repository handling',
    keywords='pypi statistics',
    entry_points='repo=repo.__main__:main',
    # entry_points=None,
    license='Copyright Ruamel bvba 2007-2022',
    since=2022,
    # status='α|β|stable', # the package status on PyPI
    # data_files="",
    # universal=True, # py2 + py3
    # install_requires=['ruamel.std.pathlib', ],
    tox=dict(env='3',), # *->all p->pypy
    python_requires='>=3',
) # NOQA
# Re-exported convenience aliases kept in sync with _package_data.
version_info = _package_data['version_info']
__version__ = _package_data['__version__']
# Declarative command-line interface description consumed by cligen; the
# string content is data, not documentation, and must not be edited lightly.
_cligen_data = """\
# all tags start with an uppercase char and can often be shortened to three and/or one
# characters. If a tag has multiple uppercase letter, only using the uppercase letters is a
# valid shortening
# Tags used:
# !Commandlineinterface, !Cli,
# !Option, !Opt, !O
# - !Option [all, !Action store_true, !Help build sdist and wheels for all platforms]
# !PreSubparserOption, !PSO
# !Alias for a subparser
# - !DefaultSubparser # make this (one) subparser default
# !Help, !H
# !HelpWidth 40 # width of the left side column width option details
# !Argument, !Arg
# - !Arg [files, !Nargs '*', !H files to process]
# !Module # make subparser function calls imported from module
# !Instance # module.Class: assume subparser method calls on instance of Class imported from module
# !Main # function to call/class to instantiate, no subparsers
# !Action # either one of the actions in cligen subdir _action (by stem of the file) or e.g. "store_action"
# !Nargs, !N
# provide a number, '?', '*', or +. Special code is inserted to allow for defaults when +
# !Config YAML/INI/PON read defaults from config file
# !AddDefaults ' (default: %(default)s)'
# !Prolog (sub-)parser prolog/description text (for multiline use | ), used as subparser !Help if not set
# !Epilog (sub-)parser epilog text (for multiline use | )
# !NQS used on arguments, makes sure the scalar is non-quoted e.g for instance/method/function
# call arguments, when cligen knows about what argument a keyword takes, this is not needed
!Cli 0:
- !Instance repo.repo.Repo
- !AddDefaults ' (default: %(default)s)'
- !Option [verbose, v, !Help increase verbosity level, !Action count]
# - !Option [distbase, !Help 'base directory for all distribution files']
# - !Config YAML
# - !PSO [config, !Help configuration file location']
- show:
- !DefaultSubparser
- !Help execute show related commands
# - !Option [all, !Action store_true, !Help build sdist and wheels for all platforms]
# - !Option [linux, !Action store_true, !Help build linux wheels using manylinux]
# - !Arg [args, !Nargs: '*', !H you have to do this]
# - !Prolog 'Prolog for the parser'
# - !Epilog 'Epilog for the parser'
"""
[](https://circleci.com/gh/SIMEXP/Repo2Data) [](https://badge.fury.io/py/repo2data) [](https://www.python.org/downloads/release/python-360/) 
# Repo2Data
Repo2Data is a **python3** package that automatically fetches data from a remote server, and decompresses it if needed. Supported web data sources are [amazon s3](https://docs.aws.amazon.com/AmazonS3/latest/dev/Welcome.html), [datalad](https://www.datalad.org/), [osf](https://osf.io/), [Google drive](https://www.google.com/intl/en_ca/drive/), raw http(s) or specific python lib datasets (`sklearn.datasets.load`, `nilearn.datasets.fetch` etc...).
## Input
A `data_requirement.json` configuration file explaining what should be read, where should you store the data, and a project name (name of the folder where file will be downloaded).
```
{ "src": "https://github.com/SIMEXP/Repo2Data/archive/master.zip",
"dst": "./data",
"projectName": "repo2data_out"}
```
`src` is where you configure the upstream location for your data.
`dst` specifies where (which folder) the data should be downloaded.
`projectName` is the name of the directory where the data will be saved, such that you can access it at `{dst}/{projectName}`
## Output
The content of the server inside the specified folder.
## Execution
The tool can be executed through `bash` or imported as a python API.
#### Bash
If `data_requirement.json` is inside current directory, you can call the following on the command line:
```
repo2data
```
#### Python API
After defining the `data_requirement.json` and importing the module, first instanciate the `Repo2Data` object with:
```
from repo2data.repo2data import Repo2Data
# define data requirement path
data_req_path = os.path.join("data_requirement.json")
# download data
repo2data = Repo2Data(data_req_path)
```
You can then fetch the data with the `install` method, which returns a list to the output directory(ies) where the data was downloaded:
```
data_path = repo2data.install()
```
## Examples of data_requirement.json
###### archive file
Repo2Data will use `wget` if it detects a http link.
If this file is an archive, it will automatically be decompressed using [patool](https://github.com/wummel/patool). Please ensure that the right tool to unarchive the data is installed (ex: `/usr/bin/tar` for `.tar.gz`).
```
{ "src": "https://github.com/SIMEXP/Repo2Data/archive/master.tar.gz",
"dst": "./data",
"projectName": "repo2data_wget"}
```
###### Google Drive
It can also download a file from [Google Drive](https://www.google.com/intl/en_ca/drive/) using [gdown](https://github.com/wkentaro/gdown).
You will need to make sure that your file is available **publicly**, and get the project ID (a chain of 33 characters that you can find in the URL).
Then you can construct the url with this ID:
`https://drive.google.com/uc?id=${PROJECT_ID}`
For example:
```
{ "src": "https://drive.google.com/uc?id=1_zeJqQP8umrTk-evSAt3wCLxAkTKo0lC",
"dst": "./data",
"projectName": "repo2data_gdrive"}
```
###### library data-package
You will need to put the script to import and download the data in the `src` field, the lib should be installed on the host machine.
Any lib function that fetches data needs a parameter telling it where the output directory is. To avoid duplication of the destination parameter, please replace the output-dir parameter in the function call with `_dst`.
For example write `tf.keras.datasets.mnist.load_data(path=_dst)` instead of `tf.keras.datasets.mnist.load_data(path=/path/to/your/data)`.
Repo2Data will then automatically replace `_dst` by the one provided in the `dst` field.
```
{ "src": "import tensroflow as tf; tf.keras.datasets.mnist.load_data(path=_dst)",
"dst": "./data",
"projectName": "repo2data_lib"}
```
###### datalad
The `src` should be point to a `.git` link if using `datalad`, `Repo2Data` will then just call `datalad get`.
```
{ "src": "https://github.com/OpenNeuroDatasets/ds000005.git",
"dst": "./data",
"projectName": "repo2data_datalad"}
```
###### s3
To download an amazon s3 link, `Repo2Data` uses `aws s3 sync --no-sign-request` command. So you should provide the `s3://` bucket link of the data:
```
{ "src": "s3://openneuro.org/ds000005",
"dst": "./data",
"projectName": "repo2data_s3"}
```
###### osf
`Repo2Data` uses [osfclient](https://github.com/osfclient/osfclient) `osf -p PROJECT_ID clone` command. You will need to give the link to the **public** project containing your data `https://osf.io/.../`:
```
{ "src": "https://osf.io/fuqsk/",
"dst": "./data",
"projectName": "repo2data_osf"}
```
If you need to download a single file, or a list of files, you can do this using the `remote_filepath` field which runs `osf -p PROJECT_ID fetch -f file`. For example to download two files (https://osf.io/aevrb/ and https://osf.io/bvuh6/), use a relative path to the root of the project:
```
{ "src": "https://osf.io/fuqsk/",
"remote_filepath": ["hello.txt", "test-subfolder/hello-from-subfolder.txt"],
"dst": "./data",
"projectName": "repo2data_osf_multiple"}
```
###### zenodo
The public data repository [zenodo](https://zenodo.org/) is also supported using [zenodo_get](https://gitlab.com/dvolgyes/zenodo_get). Make sure your project is public and it has a DOI with the form `10.5281/zenodo.XXXXXXX`:
```
{ "src": "10.5281/zenodo.6482995",
"dst": "./data",
"projectName": "repo2data_zenodo"}
```
###### multiple data
If you need to download many data at once, you can create a list of json. For example, to download different files from a repo :
```
{
"authors": {
"src": "https://github.com/tensorflow/tensorflow/blob/master/AUTHORS",
"dst": "./data",
"projectName": "repo2data_multiple1"
},
"license": {
"src": "https://github.com/tensorflow/tensorflow/blob/master/LICENSE",
"dst": "./data",
"projectName": "repo2data_multiple2"
}
}
```
## Install
### Docker (recommended)
This is the recommended way of using `Repo2Data`, because it encapsulates all the dependencies inside the container. It also features `scikit-learn` and `nilearn` to pull data from.
Clone this repo and build the docker image yourself :
```
git clone https://github.com/SIMEXP/Repo2Data
sudo docker build --tag repo2data ./Repo2Data/
```
### pip
To install `Datalad` you will need the latest version of [git-annex](https://git-annex.branchable.com/install/), please use the [package from neuro-debian](https://git-annex.branchable.com/install/) :
```
wget -O- http://neuro.debian.net/lists/stretch.us-nh.full | sudo tee /etc/apt/sources.list.d/neurodebian.sources.list
sudo apt-key adv --recv-keys --keyserver hkp://ipv4.pool.sks-keyservers.net:80 0xA5D32F012649A5A9
```
If you have troubles to download the key, please look at this [issue](https://github.com/jacobalberty/unifi-docker/issues/64).
You can now install with `pip`:
```
python3 -m pip install repo2data
```
## Usage
After creating the `data_requirement.json`, just use `repo2data` without any option:
```
repo2data
```
### requirement in another directory
If the `data_requirement.json` is in another directory, use the `-r` option:
```
repo2data -r /PATH/TO/data_requirement.json
```
### github repo url as input
Given a valid https github repository with a `data_requirement.json` at `HEAD` branch (under a `binder` directory or at its root), you can do:
```
repo2data -r GITHUB_REPO
```
An example of a valid `GITHUB_REPO` is: https://github.com/ltetrel/repo2data-caching-s3
### Trigger re-fetch
When you re-run Repo2Data with the same destination, it will automatically look for an existing `data_requirement.json` file in the downloaded folder.
If the configured `data_requirement.json` is the same (i.e. the [JSON dict](https://www.w3schools.com/python/python_json.asp) has the same fields) then it will not re-download.
To force the re-fetch (update existing files, add new files but keep the old files), you can add a new field or update an existing one in the `data_requirement.json`.
For example replace:
```
{ "src": "https://github.com/SIMEXP/Repo2Data/archive/master.zip",
"dst": "./data",
"projectName": "repo2data_out"}
```
by
```
{ "src": "https://github.com/SIMEXP/Repo2Data/archive/master.zip",
"dst": "./data",
"projectName": "repo2data_out",
"version": "1.1"}
```
This is especially useful when the provenance link always stays the same (osf, google drive...).
### make `dst` field optionnal
##### using `dataLayout` field
In the case you have a fixed known layout for the data folder within a github repository, the `dst` field is not needed anymore.
To define what kind of layout you want, you can use the `dataLayout` field.
For now we just support the [neurolibre layout](https://docs.neurolibre.org/en/latest/SUBMISSION_STRUCTURE.html#preprint-repository-structure):
```
{ "src": "https://github.com/SIMEXP/Repo2Data/archive/master.zip",
"dataLayout": "neurolibre"}
```
If you need another data layout (like [YODA](https://f1000research.com/posters/7-1965) or [cookiecutter-data-science](https://drivendata.github.io/cookiecutter-data-science/)) you can create a feature request.
##### for administrator
You can disable the field `dst` by using the option
`repo2data --server`
In this case `Repo2Data` will put the data inside the folder `./data` from where it is run. This is useful if you want to have full control over the destination (you are a server admin and don't want your users to control the destination).
### Docker
You will need to create a folder on your machine (containing a `data_requirement.json`) that the Docker container will access so `Repo2Data` can pull the data inside it, after you can use:
```
sudo docker run -v /PATH/TO/FOLDER:/data repo2data
```
(the container will run with `--server` enabled, so all the data in the container will be at `/data`)
A requirement from a github repo is also supported (so you don't need any `data_requirement.json` inside your host folder):
```
sudo docker run -v /PATH/TO/FOLDER:/data repo2data -r GITHUB_REPO
```
`Docker` mounts the host (your machine) folder into the container folder as `-v host_folder:container_folder`, so don't override `:/data`.
| /repo2data-2.7.0.tar.gz/repo2data-2.7.0/README.md | 0.47098 | 0.975716 | README.md | pypi |
import repobee_plug as plug
from .canvas_api.api import CanvasAPI
from .canvas_api.assignment import Assignment
from .common_options import CANVAS_API_KEY_OPTION
from .common_options import CANVAS_API_BASE_URL_OPTION
from .common_options import CANVAS_COURSE_ID_OPTION
from .common_options import CANVAS_ASSIGNMENT_ID_OPTION
from .logging import inform, warn
# Canvas "submission_types" entry that must be enabled on the assignment.
UPLOAD_SUBMISSION = "online_upload"
# Comment posted once per submission to initialize it for group work.
DEFAULT_PREPARATION_MESSAGE = "This assignment is managed by repobee-canvas."
# CLI/config option carrying the preparation message; configurable in
# repobee's config file, defaulting to DEFAULT_PREPARATION_MESSAGE.
CANVAS_START_ASSIGNMENT_MESSAGE_OPTION = plug.cli.option(
    help = "Message posted to a submission to indicate start of assignment",
    required = False,
    configurable = True,
    default = DEFAULT_PREPARATION_MESSAGE
)
def check(requirement, success : str, failure : str) -> bool:
    """Evaluate *requirement* and report the outcome.

    Shows *success* (prefixed with a checked box) and returns True when the
    requirement holds; otherwise shows *failure* (empty box) and returns
    False.
    """
    passed = bool(requirement())
    marker = "☒ " if passed else "☐ "
    inform(marker + (success if passed else failure))
    return passed
class PrepareCanvasAssignment(plug.Plugin, plug.cli.Command):
    """ The PrepareCanvasAssignment class is a RepoBee plugin to check the
    configuration of an assignment: Is it configured correctly for use with the
    Canvas plugin? In particular, does the assignment have both URL and file
    upload submission types enabled.

    Usage:

        repobee -p canvas prepare-assignment \
            --canvas-assignment-id N \
            [--canvas-start-assignment-message MSG]

    Checks if assignment with ID N is configured correctly and allows file
    uploads. Furthermore, to enable group assignments and be transparent to
    students that this assignment is being managed by repobee-canvas, an
    initial message is send. The message is only send once, and only if all
    checks pass.

    You can configure your own message, or supply it via a command-line
    argument. By default, the message is "This assignment is managed by
    repobee-canvas.".

    Hack: You can use this command to send messages to students in Canvas
    submission: as long as the new message is different from any already in
    the submission, it will get posted if checks pass.
    """
    __settings__ = plug.cli.command_settings(
        action = "prepare-assignment",
        help = ("Check configuration of the Canvas assignment and "
                "prepare the assignment for group work."),
        description = (
            "Check the configuration of the supplied Canvas "
            "assignment for compatibility with the Canvas plugin "
            "and prepare it for group assignments."
        ),
    )
    # Configurable options shared with the other repobee-canvas commands.
    canvas_api_key = CANVAS_API_KEY_OPTION
    canvas_base_url = CANVAS_API_BASE_URL_OPTION
    canvas_course_id = CANVAS_COURSE_ID_OPTION
    canvas_assignment_id = CANVAS_ASSIGNMENT_ID_OPTION
    canvas_start_assignment_message = CANVAS_START_ASSIGNMENT_MESSAGE_OPTION
    def command(self):
        # Entry point invoked by RepoBee when the command runs.
        CanvasAPI().setup(self.canvas_base_url, self.canvas_api_key)
        assignment = Assignment.load(self.canvas_course_id, self.canvas_assignment_id)
        # Each entry prints a checkbox line and yields True/False.
        requirements = [
            check(
                lambda: UPLOAD_SUBMISSION in assignment.submission_types,
                "File upload submission enabled",
                "File upload submission disabled"
            ),
        ]
        if all(requirements):
            # Prepare for group assignments by adding a comment. In Canvas,
            # submissions are linked to a single student until the first
            # comment or submission.
            for submission in assignment.submissions():
                comments = [sc.comment for sc in submission.comments()]
                # Post the preparation message at most once per submission.
                if self.canvas_start_assignment_message not in comments:
                    submission.add_comment(self.canvas_start_assignment_message)
            inform(("Assignment configuration is OKAY. "
                    "All Canvas submissions have been initialized."))
        else:
            warn((
                "Assignment configuration is NOT okay. "
                "Please fix the above issues and run this command again."
            ))
"""Wrapper for a Canvas assignment API object."""
from datetime import datetime
from .api import CanvasAPI, OVERRIDES
from .canvas_object import CanvasObject
from .course import Course
from .submission import Submission
from .assignment_override import AssignmentOverride
class Assignment (CanvasObject):
    """Canvas assignment.

    See https://canvas.instructure.com/doc/api/assignments.html
    """
    @staticmethod
    def load(course_id : int, assignment_id : int) -> "Assignment":
        """
        Load a Canvas assignment object.

        :param int course_id: The course id the assignment is part of
        :param int assignment_id: The id of the assignment to load
        """
        return Assignment(CanvasAPI().assignment(course_id, assignment_id))
    def overrides(self):
        """Get overrides for this assignment (lazily fetched and cached)."""
        if self.has_overrides and not self._overrides:
            if OVERRIDES not in self._data:
                # Cache the raw override data so repeated calls do not hit
                # the Canvas API again.
                self._data[OVERRIDES] = CanvasAPI().overrides(self.course_id, self.id)
            self._overrides = [AssignmentOverride(o) for o in self._data[OVERRIDES]]
        return self._overrides
    def course(self):
        """The course this assignment is part of (lazily loaded)."""
        if not self._course:
            self._course = Course.load(self.course_id)
        return self._course
    def is_group_assignment(self):
        """Is this assignment a group assignment?"""
        return self.group_category_id is not None
    def get_submission(self, students):
        """Get the submission made by exactly the given students.

        Parameters:
        - students: A list of student login ids to get the submission for.

        Raises:
            ValueError: If no submission matches exactly this set of students.
        """
        students_set = set(students)  # hoisted: loop-invariant
        for submission in self.submissions(students = students):
            submission_students_set = {s.login_id for s in submission.students()}
            if students_set == submission_students_set:
                return submission
        raise ValueError(f"""No submission found for '{', '.join(students)}'.""")
    def submissions(self,
            skip_test_student = True,
            sections  = None,  # list of section names to look for submissions;
                               # None or empty means all sections
            due_dates = None,  # list of due dates to look for submissions;
                               # None or empty means all due dates
            students  = None,  # only get info about these students
            filters   = None,  # list of filters, each of type Submission -> bool
            reset     = False, # reload data from the server
            ):
        """A list of submissions associated with this assignment.

        Group submissions are deduplicated: at most one submission per group
        is returned. Note: for a group assignment, before any student or
        teacher has submitted anything, no group submissions show up.
        """
        # Fix: the previous signature used mutable default arguments ([]),
        # which are shared between calls in Python.
        sections = sections or []
        due_dates = due_dates or []
        students = students or []
        filters = filters or []
        if not self._submissions or reset:
            if sections:
                # Only get the submissions in the listed sections
                submissions_data = []
                for section_id in [section.id for section in self.course().sections(sections)]:
                    submissions_data += CanvasAPI().submissions_per_section(section_id, self.id)
            else:
                # Otherwise get all the submissions
                submissions_data = CanvasAPI().submissions_per_course(self.course_id, self.id)
            # Convert submission data from JSON to Submission objects
            submissions = [Submission(s) for s in submissions_data]
            # Filter out the test student
            if skip_test_student:
                submissions = [s for s in submissions if not s.submitter().is_test_student()]
            self._submissions = submissions
        # Build the list of predicates every returned submission must pass.
        all_filters = []
        if skip_test_student:
            all_filters.append(lambda s: not s.submitter().is_test_student())
        if students:
            all_filters.append(lambda s: s.submitter().login_id in students)
        if filters:
            all_filters.extend(filters)
        if due_dates:
            # Fix: the due-date filtering used to be applied to a local list
            # that was immediately overwritten (and that could even be unbound
            # when the cached submissions were reused), so `due_dates` had no
            # effect. It is now applied as a regular filter. The leftover
            # debug print has also been removed.
            to_date = lambda d: datetime.strptime(d, "%Y-%m-%dT%H:%M:%S%z").date()
            overrides = [o for o in self.overrides() if to_date(o.due_at) in due_dates]
            override_students = []
            for override in overrides:
                override_students += override.students()
            all_filters.append(
                lambda s: any(u in override_students for u in s.students())
            )
        include = lambda s: all(f(s) for f in all_filters)
        # Only interested in one submission per group.
        groups = set()
        result = []
        for submission in self._submissions:
            if submission.is_group_submission():
                if submission.group().id not in groups and include(submission):
                    groups.add(submission.group().id)
                    result.append(submission)
            elif include(submission):
                result.append(submission)
        return result
"""Wrapper for a Canvas submission API object."""
from typing import List
from .api import CanvasAPI, GROUP, ID, COURSE, SUBMISSION_COMMENTS
from .canvas_object import CanvasObject
from .comment import Comment
from .course import Course
from .group import Group
from .peer_review import PeerReview
from .user import User
class Submission (CanvasObject):
    """Canvas submission.

    See https://canvas.instructure.com/doc/api/submissions.html
    """
    @staticmethod
    def load(course_id : int, assignment_id : int, user_id : int):
        """
        Load a Canvas submission object.

        Args:
        - course_id: The course id
        - assignment_id: The assignment id
        - user_id: The user id
        """
        return Submission(CanvasAPI().submission(course_id, assignment_id, user_id))
    def submitter(self) -> User:
        """Submitter (lazily loaded and cached on the instance)."""
        if not self._submitter:
            self._submitter = User.load(self.user_id)
        return self._submitter
    def is_group_submission(self) -> bool:
        """Return True if this submission is a group submission; False
        otherwise.

        Note. A submission is only recognized as a groups submission after
        some student (or teacher) in that group submitted something. This
        can also be a comment, though.
        """
        return self._data[GROUP][ID] is not None
    def group(self) -> Group:
        """This submission's group, if any."""
        if not self._group and self.is_group_submission():
            self._group = Group.load(self._data[GROUP][ID])
        return self._group
    def course(self) -> Course:
        """The course this submission belongs to."""
        if not self._course:
            self._course = Course(self._data[COURSE])
        return self._course
    def comments(self) -> List[Comment]:
        """The comments made on this submission (empty list if none)."""
        submission_comments = self._data[SUBMISSION_COMMENTS]
        if submission_comments:
            return [Comment(c) for c in submission_comments]
        return []
    def students(self) -> List[User]:
        """A list of students that worked on this submission."""
        if self.is_group_submission():
            return self.group().members()
        return [self.submitter()]
    def has_peer_reviews(self) -> bool:
        """Return True if this submission does have peer reviews; False
        otherwise."""
        return len(self.peer_reviews()) > 0
    def pending_peer_reviews(self) -> List[PeerReview]:
        """Return the peer reviews for this submission that are still
        pending (not yet completed).

        Note: the return annotation previously claimed ``bool``; the method
        has always returned a list.
        """
        return [p for p in self.peer_reviews() if p.pending]
    def finished_peer_reviews(self) -> List[PeerReview]:
        """Return the peer reviews for this submission that have been
        completed.

        Note: the return annotation previously claimed ``bool``; the method
        has always returned a list.
        """
        return [p for p in self.peer_reviews() if p.finished]
    def peer_reviews_finished(self) -> bool:
        """Return True if all peer reviews for this submission have been
        completed; False otherwise."""
        return len(self.pending_peer_reviews()) == 0
    def peer_reviews(self) -> List[PeerReview]:
        """Return a list of peer reviews for this submission, if any.

        Lazily fetched from the Canvas API and cached on the instance.
        """
        if not self._peer_reviews:
            peer_reviews = CanvasAPI().peer_reviews(self.course().id, self.assignment_id, self.id)
            self._peer_reviews = [PeerReview(p) for p in peer_reviews]
        return self._peer_reviews
    def add_comment(self, msg : str, file_path : str = None):
        """Add a new comment to this submission, optionally attaching the
        file at ``file_path``."""
        return CanvasAPI().add_comment_to_submission(
            self.course().id,
            self.assignment_id,
            self.user_id,
            msg,
            file_path
        )
    def submit_url(self, url : str, msg : str = None, submitted_at = None):
        """Submit an URL."""
        return CanvasAPI().submit_url(
            self.course().id,
            self.assignment_id,
            self.user_id,
            url,
            msg,
            submitted_at)
    def submit_text(self, text : str, msg : str = None, submitted_at = None):
        """Submit a text."""
        return CanvasAPI().submit_text(
            self.course().id,
            self.assignment_id,
            self.user_id,
            text,
            msg,
            submitted_at)
    def submit_file(self, file_path : str, msg :str = None, submitted_at = None):
        """Submit a file."""
        return CanvasAPI().submit_file(
            self.course().id,
            self.assignment_id,
            self.user_id,
            file_path,
            msg,
            submitted_at) | /repobee-canvas-0.0.1.tar.gz/repobee-canvas-0.0.1/repobee_canvas/canvas_api/submission.py | 0.914753 | 0.284329 | submission.py | pypi |
import collections
import re
import repobee_plug as plug
class GradeSpec(
    collections.namedtuple("GradeSpec", "priority symbol regex".split())
):
    """A GradeSpec is a grade specification triple containing a symbol for
    representing the grade in a grade sheet, a priority for determining which
    grade to pick if multiple are found, and a regex to match against issue
    titles to find grading issues.
    """
    @classmethod
    def from_format(cls, format_str: str):
        r"""Build a GradeSpec tuple from a format string. The format string should
        be on the following form:

            ``<PRIORITY>:<SYMBOL>:<REGEX>``

        The expression must match the regex (\d+):([A-Za-z\d]+):(.+)

        <PRIORITY> is a positive integer value specifying how important the
        grade is. If multiple grading issues are found in the same repository,
        the one with the lowest priority is reported.
        <SYMBOL> is one or more characters specifying how the grade is
        represented in the CSV grade sheet. Only characters matching the regex
        [A-Za-z0-9] are accepted.
        <REGEX> is any valid regex to match against issue titles.

        For example, the format string "1:P:[Pp]ass" specifies a grade
        spec with symbol P, priority 1 (the lowest possible priority) and will
        match the titles "Pass" and "pass".

        Args:
            format_str: A grade spec format string as defined above.
        Returns:
            A GradeSpec.
        Raises:
            plug.PlugError: If the format string does not match the pattern.
        """
        pattern = r"(\d+):([A-Za-z\d]+):(.+)"
        match = re.match(pattern, format_str)
        if not match:
            raise plug.PlugError(
                "invalid format string: {}".format(format_str)
            )
        priority_str, symbol, regex = match.groups()
        priority = int(priority_str)
        return super().__new__(
            cls, symbol=symbol, priority=priority, regex=regex
        )
    def __lt__(self, o):
        """Order grade specs by priority only (lower number sorts first)."""
        return self.priority < o.priority | /repobee-csvgrades-0.2.1.tar.gz/repobee-csvgrades-0.2.1/repobee_csvgrades/_containers.py | 0.771843 | 0.611556 | _containers.py | pypi |
import argparse
import pathlib
import itertools
import daiquiri
import repobee_plug as plug
from repobee_csvgrades import (
_file,
_grades,
_marker,
_containers,
_exception,
)
PLUGIN_NAME = "csvgrades"
LOGGER = daiquiri.getLogger(__file__)
# CLI category for this plugin's command: `repobee grades record`.
grades_category = plug.cli.category(
    "grades",
    action_names=["record"],
    help="collect grading of students",
    # Fix: typo "insade" -> "inside" in the user-facing description.
    description="Used to gather all student grades and save them inside a "
    "CSV file.",
)
def callback(args: argparse.Namespace) -> None:
    """Record grades from grading issues into the CSV grades file.

    Reads the hook results produced by ``repobee issues list``, checks that
    all students are present in the grades file, records any new grades and
    writes both an edit message and the updated grades file.

    Args:
        args: The parsed command line arguments.
    """
    hook_results_mapping = _file.read_results_file(args.hook_results_file)
    if "list-issues" not in hook_results_mapping:
        raise _exception.FileError(
            "can't locate list-issues metainfo in hook results"
        )
    if not args.allow_other_states:
        # Grading issues may be closed; anything but the ALL state risks
        # missing them.
        issues_state = plug.IssueState(
            hook_results_mapping["list-issues"][0].data["state"]
        )
        if issues_state != plug.IssueState.ALL:
            raise _exception.FileError(
                "`repobee issues list` was not run with the --all flag. This may "
                "cause grading issues to be missed. Re-run `issues list` with the "
                "--all flag, or run this command with --allow-other-states to "
                "record grades anyway."
            )
    grade_specs = [
        _containers.GradeSpec.from_format(spec) for spec in args.grade_specs
    ]
    grades = _grades.Grades(args.grades_file, args.assignments, grade_specs)
    all_members = itertools.chain.from_iterable(
        team.members for team in args.students
    )
    grades.check_users(all_members)
    new_grades = _marker.mark_grades(
        grades,
        hook_results_mapping,
        args.students,
        args.assignments,
        args.teachers,
        grade_specs,
    )
    if not new_grades:
        LOGGER.warning("No new grades reported")
        return
    _file.write_edit_msg(
        sorted(new_grades.items()),
        args.assignments,
        pathlib.Path(args.edit_msg_file),
    )
    _file.write_grades_file(args.grades_file, grades)
class CSVGradeCommand(plug.Plugin, plug.cli.Command):
    """RepoBee extension command that records grades from grading issues
    into a CSV grades file. The actual work is delegated to ``callback``.
    """
    def command(self):
        """Run the grades record command with the parsed CLI arguments."""
        callback(self.args)
    __settings__ = plug.cli.command_settings(
        help="record grades from issues into a CSV file",
        description="Record grades from issues into a CSV file. Grade "
        "specifications on the form <PRIORITY>:<SYMBOL>:<REGEX> "
        "specify which issues are grading issues (by matching the title "
        "against the spec regex), and the corresponding symbol is written "
        "into the grades CSV file. If multiple grading issues are found "
        "in the same repo, the one with the lowest priority is recorded. "
        "A grade in the CSV file can only be overwritten by a grade with "
        "lower priority. Only grading issues opened by teachers "
        "specified by the ``--teachers`` option are recorded. Read more "
        "at https://github.com/slarse/repobee-csvgrades",
        action=grades_category.record,
        base_parsers=[
            plug.BaseParser.ASSIGNMENTS,
            plug.BaseParser.STUDENTS,
        ],
    )
    # If not set, `issues list` must have been run with --all (see callback).
    allow_other_states = plug.cli.flag(
        help="Allow other `issues list` states than 'all'. If this flag is "
        "not specified, the 'issues list' command must have been run "
        "with the '--all' flag.",
        default=False,
    )
    teachers = plug.cli.option(
        short_name="-t",
        help=(
            "One or more space-separated usernames of teachers/TAs "
            "that are authorized to open grading issues. If a "
            "grading issue is found by a user not in this list, "
            "a warning is logged and the grade is not recorded."
        ),
        argparse_kwargs={"nargs": "+"},
        configurable=True,
        required=True,
    )
    grade_specs = plug.cli.option(
        short_name="--gs",
        help="One or more grade specifications on the form "
        "<PRIORITY>:<SYMBOL>:<REGEX>. Example: 1:P:[Pp]ass",
        argparse_kwargs={"nargs": "+"},
        configurable=True,
        required=True,
    )
    edit_msg_file = plug.cli.option(
        short_name="--ef",
        help="filepath specifying where to put the edit message.",
        converter=pathlib.Path,
        configurable=True,
        required=True,
    )
    grades_file = plug.cli.option(
        short_name="--gf",
        help="path to the csv file with student grades",
        converter=pathlib.Path,
        configurable=True,
        required=True,
    )
    hook_results_file = plug.cli.option(
        short_name="--hf",
        help="path to an existing hook results file",
        converter=pathlib.Path,
        configurable=True,
        required=True,
    )
    # NOTE(review): the two parsers below appear to be unused in this module;
    # they look like legacy config-file parsing helpers -- confirm before
    # removing.
    @staticmethod
    def _parse_teachers(config_parser):
        # Teachers are stored as a comma-separated list in the config file.
        return [
            name.strip()
            for name in config_parser.get(
                PLUGIN_NAME, "teachers", fallback=""
            ).split(",")
        ]
    @staticmethod
    def _parse_grade_specs(config_parser):
        # Any config key ending in "gradespec" is treated as a grade spec.
        if not config_parser.has_section(PLUGIN_NAME):
            return []
        sec = config_parser[PLUGIN_NAME]
        return [
            value for key, value in sec.items() if key.endswith("gradespec")
        ] | /repobee-csvgrades-0.2.1.tar.gz/repobee-csvgrades-0.2.1/repobee_csvgrades/csvgrades.py | 0.566019 | 0.166641 | csvgrades.py | pypi |
import pathlib
import sys
from typing import List, Iterable
from repobee_csvgrades import _file
from repobee_csvgrades import _containers
from repobee_csvgrades import _exception
class Grades:
    """Abstraction of the grades file.

    Wraps the parsed contents of the CSV grades file and provides
    ``(username, master_repo_name)``-indexed access to individual grade
    cells.
    """
    def __init__(
        self,
        grades_file: pathlib.Path,
        master_repo_names: List[str],
        grade_specs: List[_containers.GradeSpec],
    ):
        """
        Args:
            grades_file: Path to the CSV file with student grades.
            master_repo_names: Names of master repos (grade columns).
            grade_specs: The grade specs considered valid grade symbols.
        """
        self._headers, self._contents = _file.read_grades_file(grades_file)
        self._symbol_to_spec = {spec.symbol: spec for spec in grade_specs}
        # The empty symbol means "no grade reported yet"; give it the lowest
        # possible importance so that any real grade may overwrite it.
        self._symbol_to_spec[""] = _containers.GradeSpec(
            symbol="", priority=sys.maxsize, regex=""
        )
        self._usr_to_row, self._repo_to_col = extract_row_and_col_mappings(
            self._headers, self._contents, master_repo_names
        )
        # Fix: take a copy of the rows. The previous code aliased
        # self._contents directly, so every grade written via __setitem__
        # also showed up in the "original" snapshot, making it useless for
        # detecting changes.
        self._original_contents = [list(row) for row in self._contents]
    def __getitem__(self, key):
        """Return the grade for ``key = (username, master_repo_name)``."""
        usr, repo = key
        return self._contents[self._usr_to_row[usr]][self._repo_to_col[repo]]
    def __setitem__(self, key, value):
        """Set the grade for ``key = (username, master_repo_name)``."""
        usr, repo = key
        self._contents[self._usr_to_row[usr]][self._repo_to_col[repo]] = value
    def set(self, usr, repo, value) -> "_containers.GradeSpec":
        """Set the grade for (usr, repo) to the given grade spec.

        A grade may only overwrite a grade with a higher (i.e. less
        important) priority value.

        Returns:
            The GradeSpec of the grade that was overwritten. (Fix: the
            return annotation previously claimed ``str``.)
        Raises:
            _exception.FileError: If the current cell holds an unknown
                grade symbol.
            _exception.GradingError: If the new grade is more important
                than allowed to overwrite the current one.
        """
        old = self[usr, repo]
        try:
            old_spec = self._symbol_to_spec[old]
        except KeyError as exc:
            raise _exception.FileError(
                "grades file contains unknown grade symbol {}".format(old)
            ) from exc
        if old_spec.priority < value.priority:
            raise _exception.GradingError("try to set higher priority grade")
        self[usr, repo] = value.symbol
        return old_spec
    def check_users(self, usernames: Iterable[str]) -> None:
        """Raise if any of the given users is missing from the grades file.

        Fix: the return annotation previously claimed ``bool``, but the
        method never returns a value; it only raises on missing users.

        Raises:
            _exception.FileError: If any username is not in the grades file.
        """
        missing_users = set(usernames) - set(self._usr_to_row.keys())
        if missing_users:
            raise _exception.FileError(
                "student(s) {} missing from the grades file".format(
                    ", ".join(sorted(missing_users))
                )
            )
    @property
    def csv(self):
        """The grades (headers included) as right-justified CSV rows."""
        output_contents = [self._headers, *self._contents]
        column_widths = largest_cells(output_contents)
        return [
            [cell.rjust(column_widths[i]) for i, cell in enumerate(row)]
            for row in output_contents
        ]
def extract_row_and_col_mappings(
    grades_headers, grades_file_contents, master_repo_names
):
    """Extract mappings from username -> row_nr and master_repo_name ->
    col_nr.

    Args:
        grades_headers: The header row of the grades file.
        grades_file_contents: The data rows of the grades file.
        master_repo_names: Master repo names to locate among the headers.
    Returns:
        A tuple ``(username_to_row_nr, master_repo_to_col_nr)``.
    """
    repo_columns = {}
    for repo_name in master_repo_names:
        repo_columns[repo_name] = grades_headers.index(repo_name)
    username_col = grades_headers.index("username")
    user_rows = {}
    for row_nr, row in enumerate(grades_file_contents):
        user_rows[row[username_col]] = row_nr
    return user_rows, repo_columns
def largest_cells(rows):
    """Return a list with the widths of the largest cell of each column."""
    return [max(len(cell) for cell in column) for column in zip(*rows)]
import collections
import math
import os
import pathlib
import re
import subprocess
import sys
from typing import List, Optional

from colored import bg, style

from repobee_junit4 import _java
# Background colors used for the test result header lines.
SUCCESS_COLOR = bg("dark_green")
FAILURE_COLOR = bg("yellow")
# Defaults for truncating verbose failure output (see _truncate_lines).
DEFAULT_LINE_LENGTH_LIMIT = 150
DEFAULT_MAX_LINES = 10
class TestResult(
    collections.namedtuple(
        "TestResult",
        "fqn success num_failed num_passed test_failures timeout".split(),
    )
):
    """An immutable class for storing test results. Outside callers should use
    the static build methods :py:meth:`TestResult.build` or
    :py:meth:`TestResult.timed_out` to create instances.

    Attributes:
        fqn (str): The fully qualified name of the test class.
        success (bool): True if the test exited with a 0 exit status.
        num_failed (int): The amount of test cases that failed.
        num_passed (int): The amount of test cases that passed.
        test_failures (List[str]): A list of test failure messages.
        timeout (Optional[int]): The amount of seconds after which the test
            class timed out, or None if it did not time out.
    """
    @staticmethod
    def build(
        test_class: pathlib.Path, proc: subprocess.CompletedProcess
    ) -> "TestResult":
        """Build a TestResult.

        Args:
            test_class: Path to the test class.
            proc: A completed process of running the test class.
        Returns:
            A TestResult instance representing the test run.
        """
        # JUnit writes its summary to stdout; decode using the platform
        # default encoding and parse counts/failures out of it.
        stdout = proc.stdout.decode(encoding=sys.getdefaultencoding())
        return TestResult(
            fqn=_java.fqn_from_file(test_class),
            success=proc.returncode == 0,
            num_failed=_get_num_failed(stdout),
            num_passed=_get_num_passed(stdout),
            test_failures=_parse_failed_tests(stdout),
            timeout=None,
        )
    @staticmethod
    def timed_out(test_class: pathlib.Path, timeout: int):
        """Create a TestResult instance from a test that timed out.

        Args:
            test_class: Path to the test class.
            timeout: Amount of seconds after which the test class timed out.
        Returns:
            A TestResult instance representing the timed out test run.
        """
        return TestResult(
            fqn=_java.fqn_from_file(test_class),
            success=False,
            num_failed=0,
            num_passed=0,
            test_failures=list(),
            timeout=timeout,
        )
    def pretty_result(self, verbose: bool) -> str:
        """Format this test as a pretty-printed message.

        The header is always included; individual failure messages are
        appended only when the run failed and ``verbose`` is set.
        """
        title_color = SUCCESS_COLOR if self.success else FAILURE_COLOR
        msg = test_result_header(
            self.fqn,
            self.num_passed + self.num_failed,
            self.num_passed,
            title_color,
            self.timeout,
        )
        if not self.success and verbose:
            msg += os.linesep + os.linesep.join(self.test_failures)
        return msg
def _get_num_failed(test_output: str) -> int:
"""Get the amount of failed tests from the error output of JUnit4."""
match = re.search(r"Failures: (\d+)", test_output)
return int(match.group(1)) if match else 0
def _get_num_tests(test_output: str) -> int:
"""Get the total amount of tests. Only use this if there were test
failures!
"""
match = re.search(r"Tests run: (\d+)", test_output)
return int(match.group(1)) if match else 0
def _get_num_passed(test_output: str) -> int:
"""Get the amount of passed tests from the output of JUnit4."""
match = re.search(r"OK \((\d+) tests\)", test_output)
if not match: # there were failures
return _get_num_tests(test_output) - _get_num_failed(test_output)
return int(match.group(1))
def _parse_failed_tests(test_output: str) -> str:
"""Return a list of test failure descriptions, excluding stack traces."""
return re.findall(
r"^\d\) .*(?:\n(?!\s+at).*)*", test_output, flags=re.MULTILINE
)
def test_result_header(
    test_class_name: str,
    num_tests: int,
    num_passed: int,
    title_color: bg,
    timeout: Optional[int] = None,
) -> str:
    """Return the header line for a test result.

    Args:
        test_class_name: Name of the test class.
        num_tests: Total amount of executed test cases.
        num_passed: Amount of test cases that passed.
        title_color: Background color for the test class name.
        timeout: If given, the header reports a timeout instead of counts.
    Returns:
        A single formatted header line.
    """
    if timeout is not None:
        test_results = "Timed out after {} seconds".format(math.ceil(timeout))
    else:
        test_results = "Passed {}/{} tests".format(num_passed, num_tests)
    return "{}{}{}: {}".format(
        title_color, test_class_name, style.RESET, test_results
    )
def format_results(test_results, compile_failed, verbose, very_verbose):
    """Format test results and compile errors into a single report string.

    Args:
        test_results: TestResult instances for test classes that compiled.
        compile_failed: Results for classes that failed to compile (each
            is expected to have a ``msg`` attribute).
        verbose: Include failure details, truncated per _truncate_lines.
        very_verbose: Include full, untruncated failure output.
    Returns:
        The formatted report, prefixed with a pass/fail summary when any
        tests were executed.
    """
    def format_compile_error(res):
        # very_verbose: everything; verbose: truncated; default: first line.
        msg = "{}Compile error:{} {}".format(bg("red"), style.RESET, res.msg)
        if very_verbose:
            return msg
        elif verbose:
            return _truncate_lines(msg)
        else:
            return msg.split("\n")[0]
    def format_test_result(res):
        msg = res.pretty_result(verbose or very_verbose)
        if very_verbose:
            return msg
        elif verbose:
            # Keep all lines but cap their individual length.
            return _truncate_lines(msg, max_lines=sys.maxsize)
        else:
            return msg.split("\n")[0]
    compile_error_messages = list(map(format_compile_error, compile_failed))
    test_messages = list(map(format_test_result, test_results))
    msg = os.linesep.join(compile_error_messages + test_messages)
    if test_messages:
        # Prepend an aggregate pass/fail summary over all executed tests.
        num_passed = sum([res.num_passed for res in test_results])
        num_failed = sum([res.num_failed for res in test_results])
        total = num_passed + num_failed
        msg = (
            "Test summary: Passed {}/{} of all executed tests{}".format(
                num_passed, total, os.linesep
            )
            + msg
        )
    return msg
def _truncate_lines(
    string: str,
    max_len: int = DEFAULT_LINE_LENGTH_LIMIT,
    max_lines: int = DEFAULT_MAX_LINES,
):
    """Truncate lines to max_len characters and cap the amount of lines.

    Overlong lines keep their head and tail with a ``#[...]#`` marker in
    between; when there are more than ``max_lines`` lines, the excess is
    replaced by a single marker line.
    """
    trunc_msg = " #[...]# "
    if len(trunc_msg) >= max_len:
        raise ValueError(
            "max_len must be greater than {}".format(len(trunc_msg))
        )
    effective_len = max_len - len(trunc_msg)
    head_len = tail_len = effective_len // 2

    def shorten(line):
        if len(line) <= max_len:
            return line
        return line[:head_len] + trunc_msg + line[-tail_len:]

    lines = [shorten(line) for line in string.split(os.linesep)]
    if len(lines) > max_lines:
        lines = lines[: max_lines - 1] + [trunc_msg]
    return os.linesep.join(lines)
import pathlib
import re
import os
import sys
import subprocess
import collections
from typing import Iterable, Tuple, Union, List
import repobee_plug as plug
from repobee_plug import Status
from repobee_junit4 import SECTION
from repobee_junit4 import _exception
def is_abstract_class(class_: pathlib.Path) -> bool:
    """Check if the file is an abstract class.

    Args:
        class_: Path to a Java class file.
    Returns:
        True if the class is abstract.
    """
    assert class_.name.endswith(".java")
    class_name = class_.name[:-5]  # strip the ".java" suffix
    pattern = r"^\s*?(public\s+)?abstract\s+class\s+{}".format(class_name)
    source = class_.read_text(encoding=sys.getdefaultencoding())
    return re.search(pattern, source, flags=re.MULTILINE) is not None
def generate_classpath(*paths: pathlib.Path, classpath: str = "") -> str:
    """Return a classpath including all of the paths provided prepended to the
    classpath. Always appends the current working directory to the end.

    Args:
        paths: One or more paths to add to the classpath.
        classpath: An initial classpath to append to.
    Returns:
        a formated classpath to be used with ``java`` and ``javac``
    """
    result = classpath
    for path in paths:
        result = "{}:{}".format(path, result)
    return result + ":."
def fqn_from_file(java_filepath: pathlib.Path) -> str:
    """Extract the expected fully qualified class name for the given java file.

    Args:
        java_filepath: Path to a .java file.
    Returns:
        The fully qualified name, derived from the file's package statement
        and its filename.
    Raises:
        ValueError: If the path does not point to a .java file.
    """
    if not java_filepath.suffix == ".java":
        raise ValueError("{} not a path to a .java file".format(java_filepath))
    package = extract_package(java_filepath)
    simple_name = java_filepath.name[: -len(java_filepath.suffix)]
    return fqn(package, simple_name)
def extract_package(class_: pathlib.Path) -> str:
    """Return the package name of the class. An empty string denotes the
    default package.

    Args:
        class_: Path to a .java file. The package statement, if any, must be
            on the first line.
    Returns:
        The package name, or the empty string for the default package.
    """
    assert class_.name.endswith(".java")
    # yes, $ is a valid character for a Java identifier ...
    ident = r"[\w$][\w\d_$]*"
    # Fix: the package separator dot was previously unescaped (matched any
    # character), so malformed statements such as "package a;b;" were
    # accepted and returned bogus package names like "a;b".
    regex = r"^\s*?package\s+({ident}(\.{ident})*);".format(ident=ident)
    with class_.open(encoding=sys.getdefaultencoding(), mode="r") as file:
        # package statement must be on the first line
        first_line = file.readline()
        matches = re.search(regex, first_line)
        if matches:
            return matches.group(1)
    return ""
def fqn(package_name: str, class_name: str) -> str:
    """Return the fully qualified name (Java style) of the class.

    Args:
        package_name: Name of the package. The default package should be an
            empty string.
        class_name: Canonical name of the class.
    Returns:
        The fully qualified name of the class.
    """
    if not package_name:
        return class_name
    return "{}.{}".format(package_name, class_name)
def properly_packaged(path: pathlib.Path, package: str) -> bool:
    """Check if the path ends in a directory structure that corresponds to the
    package.

    Args:
        path: Path to a Java file.
        package: The name of a Java package.
    Returns:
        True iff the directory structure corresponds to the package name.
    """
    expected_suffix = package.replace(".", os.path.sep)
    return str(path).endswith(expected_suffix)
def extract_package_root(class_: pathlib.Path, package: str) -> pathlib.Path:
    """Return the package root, given that class_ is the path to a .java file.
    If the package is the default package (empty string), simply return a copy
    of class_.

    Raise if the directory structure doesn't correspond to the package
    statement.

    Args:
        class_: Path to a .java file.
        package: The package name extracted from the file.
    Returns:
        The directory that is the root of the package hierarchy.
    """
    _check_directory_corresponds_to_package(class_.parent, package)
    root = class_.parent
    if package:
        # Climb one directory per package component: parents[0] is the
        # immediate parent, so parents[len(components)] is the package root.
        root = class_.parents[len(package.split("."))]
    return root
def javac(
    java_files: Iterable[Union[str, pathlib.Path]], classpath: str
) -> Tuple[str, str]:
    """Run ``javac`` on all of the specified files, assuming that they are
    all ``.java`` files.

    Args:
        java_files: paths to ``.java`` files.
        classpath: The classpath to set.
    Returns:
        (status, msg), where status is e.g. :py:const:`Status.ERROR` and
        the message describes the outcome in plain text.
    """
    command = ["javac", "-cp", classpath, *[str(path) for path in java_files]]
    # Capture both streams; javac reports compile errors on stderr.
    proc = subprocess.run(
        command, stdout=subprocess.PIPE, stderr=subprocess.PIPE
    )
    if proc.returncode != 0:
        # Non-zero exit means compilation failed; surface the compiler output.
        status = Status.ERROR
        msg = proc.stderr.decode(sys.getdefaultencoding())
    else:
        msg = "all files compiled successfully"
        status = Status.SUCCESS
    return status, msg
def pairwise_compile(
    test_classes: List[pathlib.Path],
    java_files: List[pathlib.Path],
    classpath: str,
) -> Tuple[List[plug.Result], List[plug.Result]]:
    """Compile test classes with their associated production classes.

    For each test class:

        1. Find the associated production class among the ``java_files``
        2. Compile the test class together with all of the .java files in
        the associated production class' directory.

    Args:
        test_classes: A list of paths to test classes.
        java_files: A list of paths to java files from the student repo.
        classpath: A base classpath to use.
    Returns:
        A tuple of lists of Results on the form ``(succeeded, failed)``
    """
    failed = []
    succeeded = []
    # only use concrete test classes; abstract ones cannot be run by JUnit
    concrete_test_classes = filter(
        lambda t: not is_abstract_class(t), test_classes
    )
    for test_class in concrete_test_classes:
        status, msg, prod_class_path = _pairwise_compile(
            test_class, classpath, java_files
        )
        if status != Status.SUCCESS:
            failed.append(plug.Result(SECTION, status, msg))
        else:
            # Successful compiles are kept as (test, production) path pairs
            # so the tests can later be run against the right classes.
            succeeded.append((test_class, prod_class_path))
    return succeeded, failed
def get_student_test_classes(
    path: pathlib.Path, reference_test_classes: List[pathlib.Path]
) -> List[pathlib.Path]:
    """Return paths to all files that match the test classes in the
    provided list. Raises if there is more than one or no matches for any
    of the files.

    Matching is done by filename only; package consistency is verified by
    ``_check_exact_matches``.

    Args:
        path: Path to the repository worktree.
        reference_test_classes: A list of paths to reference test classes.
            These are assumed to be unique.
    Returns:
        A list of paths to test classes corresponding to the ones in the input
        list, but in the student repository.
    Raises:
        _exception.JavaError: On duplicate, missing, or mis-packaged test
            classes in the student repo.
    """
    filenames = {f.name for f in reference_test_classes}
    matches = [file for file in path.rglob("*") if file.name in filenames]
    _check_exact_matches(reference_test_classes, matches)
    return matches
def _check_exact_matches(
    reference_test_classes: List[pathlib.Path],
    student_test_classes: List[pathlib.Path],
) -> None:
    """Check that for every path in reference_test_classes, there is a path in
    student_test_classes with the same filename and the same package.

    Args:
        reference_test_classes: The expected (reference) test classes.
        student_test_classes: The matching files found in the student repo.
    Raises:
        _exception.JavaError: If any test class is duplicated, missing, or
            declared in the wrong package.
    """
    def by_fqn(path):
        pkg = extract_package(path)
        return fqn(pkg, path.name)
    # Duplicates must be rejected first: a duplicated filename would make the
    # pairwise comparison below ambiguous.
    duplicates = _extract_duplicates(student_test_classes)
    if duplicates:
        raise _exception.JavaError(
            "Duplicates of the following test classes found in student repo: "
            + ", ".join(duplicates)
        )
    if len(student_test_classes) < len(reference_test_classes):
        reference_filenames = {f.name for f in reference_test_classes}
        student_filenames = {f.name for f in student_test_classes}
        raise _exception.JavaError(
            "Missing the following test classes in student repo: "
            + ", ".join(reference_filenames - student_filenames)
        )
    package_mismatch = []
    # NOTE(review): pairing relies on both lists containing the same
    # filenames (guaranteed by the duplicate/missing checks above) so that
    # sorting by fqn aligns reference and student classes -- confirm this
    # holds when packages differ between the two sides.
    for ref, match in zip(
        sorted(reference_test_classes, key=by_fqn),
        sorted(student_test_classes, key=by_fqn),
    ):
        expected_package = extract_package(ref)
        actual_package = extract_package(match)
        if actual_package != expected_package:
            package_mismatch.append((ref, expected_package, actual_package))
    if package_mismatch:
        errors = ", ".join(
            "Student's {} expected to have package {}, but had {}".format(
                ref.name, expected, actual
            )
            for ref, expected, actual in package_mismatch
        )
        raise _exception.JavaError("Package statement mismatch: " + errors)
def _pairwise_compile(test_class, classpath, java_files):
    """Compile the given test class together with its production class
    counterpoint (if it can be found). Return a tuple of
    ``(status, msg, prod_class_path)``, where ``prod_class_path`` is None
    when no unique production class was found.
    """
    package = extract_package(test_class)
    potential_prod_classes = _get_matching_prod_classes(
        test_class, package, java_files
    )
    if len(potential_prod_classes) != 1:
        # Exactly one production class must match; zero or several is an
        # error that is reported without attempting compilation.
        status = Status.ERROR
        msg = (
            "no production class found for "
            if not potential_prod_classes
            else "multiple production classes found for "
        ) + fqn(package, test_class.name)
        prod_class_path = None
    else:
        prod_class_path = potential_prod_classes[0]
        # Compile all non-test .java files next to the production class
        # together with all test classes next to the test class.
        adjacent_java_files = [
            file
            for file in prod_class_path.parent.glob("*.java")
            if not file.name.endswith("Test.java")
        ] + list(test_class.parent.glob("*Test.java"))
        status, msg = javac(
            [*adjacent_java_files], generate_classpath(classpath=classpath)
        )
    return status, msg, prod_class_path
def _extract_duplicates(files: List[pathlib.Path]) -> List[pathlib.Path]:
counts = collections.Counter([f.name for f in files])
return [path for path, count in counts.items() if count > 1]
def _get_matching_prod_classes(test_class, package, java_files):
    """Find all production classes among the Java files that math the test
    classes name and the package.

    Args:
        test_class: Path to a ``*Test.java`` file.
        package: The package the test class is declared in.
        java_files: Candidate .java files from the student repo.
    Returns:
        A list of candidate production class paths (ideally exactly one).
    """
    # FooTest.java is expected to test Foo.java in the same package.
    prod_class_name = test_class.name.replace("Test.java", ".java")
    return [
        file
        for file in java_files
        if file.name == prod_class_name and extract_package(file) == package
    ]
def _check_directory_corresponds_to_package(path: pathlib.Path, package: str):
"""Check that the path ends in a directory structure that corresponds
to the package prefix.
"""
required_dir_structure = package.replace(".", os.path.sep)
if not str(path).endswith(required_dir_structure):
msg = (
"Directory structure does not conform to package statement. Dir:"
" '{}' Package: '{}'".format(path, package)
)
raise ValueError(msg) | /repobee-junit4-1.2.2.tar.gz/repobee-junit4-1.2.2/repobee_junit4/_java.py | 0.787278 | 0.383612 | _java.py | pypi |
import os
import pathlib
import re
from typing import Tuple, List
import daiquiri
import repobee_plug as plug
from repobee_junit4 import _java
from repobee_junit4 import _junit4_runner
from repobee_junit4 import _exception
from repobee_junit4 import _output
from repobee_junit4 import SECTION
from repobee_junit4._generate_rtd import GenerateRTD # noqa: F401
# Module-level logger; note that __file__ (not __name__) is used as the name.
LOGGER = daiquiri.getLogger(__file__)
# A (test_class_path, prod_class_path) pair produced by pairwise compilation.
ResultPair = Tuple[pathlib.Path, pathlib.Path]
# Default per-test-class timeout in seconds (see the junit4_timeout option).
DEFAULT_TIMEOUT = 10
# Snapshot of the CLASSPATH environment variable; empty string when unset.
CLASSPATH = os.getenv("CLASSPATH") or ""
class JUnit4Hooks(plug.Plugin, plug.cli.CommandExtension):
    """Plugin that runs JUnit4 reference tests on cloned student repos.

    Attaches to the ``repos clone`` command and adds CLI options for
    locating the reference tests and the junit4/hamcrest jar files.
    """

    __settings__ = plug.cli.command_extension_settings(
        actions=[plug.cli.CoreCommand.repos.clone]
    )

    junit4_reference_tests_dir = plug.cli.option(
        help="path to a directory with reference tests",
        required=True,
        configurable=True,
    )
    junit4_ignore_tests = plug.cli.option(
        help="names of test classes to ignore", argparse_kwargs=dict(nargs="+")
    )
    # The jar path options are only required when the corresponding jar
    # cannot be found on the CLASSPATH environment variable.
    junit4_hamcrest_path = plug.cli.option(
        help="absolute path to the hamcrest library",
        # BUG FIX: this previously tested JUNIT4_JAR_PATTERN, which made the
        # hamcrest path required/optional based on the *junit4* jar.
        required=not re.search(_junit4_runner.HAMCREST_JAR_PATTERN, CLASSPATH),
        configurable=True,
    )
    junit4_junit_path = plug.cli.option(
        help="absolute path to the junit4 library",
        required=not re.search(_junit4_runner.JUNIT4_JAR_PATTERN, CLASSPATH),
        configurable=True,
    )
    junit4_disable_security = plug.cli.flag(
        help="disable the default security policy "
        "(student code can do whatever)"
    )
    verbosity = plug.cli.mutually_exclusive_group(
        junit4_verbose=plug.cli.flag(
            help="display more information about test failures"
        ),
        junit4_very_verbose=plug.cli.flag(
            help="display the full failure output, without truncating"
        ),
    )
    junit4_run_student_tests = plug.cli.flag(
        help="run test classes found in the student repos instead that match "
        "test classes from the reference tests directory"
    )
    junit4_timeout = plug.cli.option(
        help="maximum amount of seconds a test class is allowed to run "
        "before timing out",
        configurable=True,
        default=DEFAULT_TIMEOUT,
        converter=int,
    )

    def post_clone(
        self, repo: plug.StudentRepo, api: plug.PlatformAPI
    ) -> plug.Result:
        """Look for production classes in the student repo corresponding to
        test classes in the reference tests directory.

        Assumes that all test classes end in ``Test.java`` and that there is
        a directory with the same name as the master repo in the reference
        tests directory.

        Args:
            repo: A student repo.
            api: An instance of the platform API.
        Returns:
            a plug.Result specifying the outcome.
        """
        self._check_jars_exist()
        if not pathlib.Path(self.junit4_reference_tests_dir).is_dir():
            raise plug.PlugError(
                "{} is not a directory".format(self.junit4_reference_tests_dir)
            )
        assert self.args.assignments
        assert self.junit4_reference_tests_dir
        try:
            if not repo.path.exists():
                return plug.Result(
                    SECTION,
                    plug.Status.ERROR,
                    "student repo {!s} does not exist".format(repo.path),
                )
            compile_succeeded, compile_failed = self._compile_all(repo)
            test_results = self._run_tests(compile_succeeded)
            has_failures = compile_failed or any(
                map(lambda r: not r.success, test_results)
            )
            msg = _output.format_results(
                test_results,
                compile_failed,
                self.junit4_verbose,
                self.junit4_very_verbose,
            )
            # Compilation errors trump test failures in the reported status.
            status = (
                plug.Status.ERROR
                if compile_failed
                else (
                    plug.Status.WARNING
                    if has_failures
                    else plug.Status.SUCCESS
                )
            )
            return plug.Result(SECTION, status, msg)
        except _exception.ActError as exc:
            # ActError carries a prepared hook result; report it as-is.
            return exc.hook_result
        except Exception as exc:
            plug.log.exception("critical")
            return plug.Result(SECTION, plug.Status.ERROR, str(exc))

    def _compile_all(
        self, repo: plug.StudentRepo
    ) -> Tuple[List[ResultPair], List[plug.Result]]:
        """Attempt to compile all java files in the repo.

        Returns:
            a tuple of lists ``(succeeded, failed)``, where ``succeeded``
            are tuples on the form ``(test_class, prod_class)`` paths.
        """
        java_files = [
            file for file in repo.path.rglob("*.java") if file.is_file()
        ]
        assignment_name = self._extract_assignment_name(repo.name)
        reference_test_classes = self._find_test_classes(assignment_name)
        test_classes = (
            _java.get_student_test_classes(repo.path, reference_test_classes)
            if self.junit4_run_student_tests
            else reference_test_classes
        )
        compile_succeeded, compile_failed = _java.pairwise_compile(
            test_classes, java_files, classpath=self._generate_classpath()
        )
        return compile_succeeded, compile_failed

    def _extract_assignment_name(self, repo_name: str) -> str:
        """Return the single configured assignment name that ``repo_name``
        ends with.

        Raises:
            _exception.ActError: If zero or multiple assignment names match.
        """
        matches = list(filter(repo_name.endswith, self.args.assignments))
        if len(matches) == 1:
            return matches[0]
        else:
            msg = (
                "no assignment name matching the student repo"
                if not matches
                else "multiple matching master repo names: {}".format(
                    ", ".join(matches)
                )
            )
            res = plug.Result(SECTION, plug.Status.ERROR, msg)
            raise _exception.ActError(res)

    def _find_test_classes(self, assignment_name) -> List[pathlib.Path]:
        """Find all test classes (files ending in ``Test.java``) in directory
        at <reference_tests_dir>/<assignment_name>.

        Args:
            assignment_name: Name of an assignment.
        Returns:
            a list of test classes from the corresponding reference test
            directory.
        Raises:
            _exception.ActError: If the reference test directory is missing
                or contains no test classes.
        """
        test_dir = (
            pathlib.Path(self.junit4_reference_tests_dir) / assignment_name
        )
        if not test_dir.is_dir():
            res = plug.Result(
                SECTION,
                plug.Status.ERROR,
                "no reference test directory for {} in {}".format(
                    assignment_name, self.junit4_reference_tests_dir
                ),
            )
            raise _exception.ActError(res)
        test_classes = [
            file
            for file in test_dir.rglob("*.java")
            if file.name.endswith("Test.java")
            and file.name not in (self.junit4_ignore_tests or [])
        ]
        if not test_classes:
            res = plug.Result(
                SECTION,
                plug.Status.WARNING,
                "no files ending in `Test.java` found in {!s}".format(
                    test_dir
                ),
            )
            raise _exception.ActError(res)
        return test_classes

    def _run_tests(
        self, test_prod_class_pairs: ResultPair
    ) -> _output.TestResult:
        """Run tests and return the results.

        Args:
            test_prod_class_pairs: A list of tuples on the form
                ``(test_class_path, prod_class_path)``
        Returns:
            A TestResult for each test class run.
        """
        results = []
        classpath = self._generate_classpath()
        # The security policy file only lives for the duration of the runs.
        with _junit4_runner.security_policy(
            classpath, active=not self.junit4_disable_security
        ) as security_policy:
            for test_class, prod_class in test_prod_class_pairs:
                test_result = _junit4_runner.run_test_class(
                    test_class,
                    prod_class,
                    classpath=classpath,
                    security_policy=security_policy,
                    timeout=self.junit4_timeout,
                )
                results.append(test_result)
            return results

    def _generate_classpath(self, *paths: pathlib.Path) -> str:
        """
        Args:
            paths: One or more paths to add to the classpath.
        Returns:
            a formatted classpath to be used with ``java`` and ``javac``
        """
        # BUG FIX: the warning previously read "variable.This" due to a
        # missing space between the concatenated string parts.
        warn = (
            "`{}` is not configured and not on the CLASSPATH variable. "
            "This will probably crash."
        )
        if not (
            self.junit4_hamcrest_path
            or re.search(_junit4_runner.HAMCREST_JAR_PATTERN, CLASSPATH)
        ):
            LOGGER.warning(warn.format("hamcrest"))
        if not (
            self.junit4_junit_path
            or re.search(_junit4_runner.JUNIT4_JAR_PATTERN, CLASSPATH)
        ):
            LOGGER.warning(warn.format("junit4"))
        paths = list(paths)
        if self.junit4_hamcrest_path:
            paths.append(self.junit4_hamcrest_path)
        if self.junit4_junit_path:
            paths.append(self.junit4_junit_path)
        return _java.generate_classpath(*paths, classpath=CLASSPATH)

    def _check_jars_exist(self):
        """Check that the specified jar files actually exist."""
        junit_path = self.junit4_junit_path or _parse_from_classpath(
            _junit4_runner.JUNIT4_JAR_PATTERN, CLASSPATH
        )
        hamcrest_path = self.junit4_hamcrest_path or _parse_from_classpath(
            _junit4_runner.HAMCREST_JAR_PATTERN, CLASSPATH
        )
        for raw_path in (junit_path, hamcrest_path):
            if not pathlib.Path(raw_path).is_file():
                raise plug.PlugError(
                    "{} is not a file, please check the filepath you "
                    "specified".format(raw_path)
                )
def _parse_from_classpath(pattern: str, classpath: str) -> pathlib.Path:
matches = re.search(pattern, classpath)
if not matches:
raise plug.PlugError(
f"expected to find match for '{pattern}' on the CLASSPATH variable"
)
return matches.group(0) if matches else None | /repobee-junit4-1.2.2.tar.gz/repobee-junit4-1.2.2/repobee_junit4/junit4.py | 0.753285 | 0.186762 | junit4.py | pypi |
import pathlib
import tempfile
import re
import sys
import subprocess
import os
import contextlib
from typing import Optional
import daiquiri
from repobee_junit4 import _java
from repobee_junit4 import _output
# Module-level logger; note that __file__ (not __name__) is used as the name.
LOGGER = daiquiri.getLogger(__file__)
HAMCREST_JAR_PATTERN = rf"([^{os.pathsep}]*hamcrest-core-1.3.jar)"
JUNIT4_JAR_PATTERN = rf"([^{os.pathsep}]*junit-4\.\d+\.(\d+\.)?jar)"
_DEFAULT_SECURITY_POLICY_TEMPLATE = """grant {{
}};
grant codeBase "file:{junit4_jar_path}" {{
permission java.lang.RuntimePermission "accessDeclaredMembers";
}};
"""
@contextlib.contextmanager
def security_policy(classpath: str, active: bool):
    """Yield the path to the default security policy file if ``active``,
    else yield None. The policy file is deleted once the context is
    exited.

    TODO: Make it possible to use a custom security policy here.
    """
    if active:
        with tempfile.NamedTemporaryFile() as policy_file:
            encoded_policy = _generate_default_security_policy(
                classpath
            ).encode(encoding=sys.getdefaultencoding())
            policy_file.write(encoded_policy)
            # Flush so the JVM sees the full policy when it reads the file.
            policy_file.flush()
            yield pathlib.Path(policy_file.name)
    else:
        LOGGER.warning(
            "Security policy disabled, student code running without "
            "restrictions"
        )
        yield
def _generate_default_security_policy(classpath: str) -> str:
    """Generate the default security policy from the classpath. JUnit4 jar must
    be on the classpath.

    Raises:
        ValueError: If no junit4 jar can be found on the classpath.
    """
    match = re.search(JUNIT4_JAR_PATTERN, classpath)
    if match is None:
        raise ValueError("junit4 jar not on the classpath")
    return _DEFAULT_SECURITY_POLICY_TEMPLATE.format(
        junit4_jar_path=match.group(0)
    )
def _extract_conforming_package(test_class, prod_class):
    """Extract a package name from the test and production class.

    Raises:
        ValueError: If the test class and production class have different
            package statements.
    """
    test_package = _java.extract_package(test_class)
    prod_package = _java.extract_package(prod_class)
    if test_package == prod_package:
        return test_package
    raise ValueError(
        "Test class {} in package {}, but class {} in package {}".format(
            test_class.name, test_package, prod_class.name, prod_package
        )
    )
def run_test_class(
    test_class: pathlib.Path,
    prod_class: pathlib.Path,
    classpath: str,
    timeout: int,
    security_policy: Optional[pathlib.Path] = None,
) -> _output.TestResult:
    """Run a single test class on a single production class.

    Args:
        test_class: Path to a Java test class.
        prod_class: Path to a Java production class.
        classpath: A classpath to use in the tests.
        timeout: Maximum amount of time the test class is allowed to run, in
            seconds.
        security_policy: A JVM security policy to apply during test execution.
    Returns:
        Test results.
    """
    # Both classes must declare the same package; their package roots are
    # added to the classpath so `java` can resolve the fully qualified names.
    package = _extract_conforming_package(test_class, prod_class)
    prod_class_dir = _java.extract_package_root(prod_class, package)
    test_class_dir = _java.extract_package_root(test_class, package)
    test_class_name = _java.fqn_from_file(test_class)
    classpath = _java.generate_classpath(
        test_class_dir, prod_class_dir, classpath=classpath
    )
    command = ["java", "-enableassertions"]
    if security_policy:
        # The double '==' makes the given policy the only policy in effect,
        # replacing (not extending) the default JVM policy.
        command += [
            "-Djava.security.manager",
            "-Djava.security.policy=={!s}".format(security_policy),
        ]
    command += [
        "-cp",
        classpath,
        "org.junit.runner.JUnitCore",
        test_class_name,
    ]
    try:
        proc = subprocess.run(
            command,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            timeout=timeout,
        )
        return _output.TestResult.build(test_class=test_class, proc=proc)
    except subprocess.TimeoutExpired as exc:
        # A timed-out run is reported as a result rather than re-raised.
        return _output.TestResult.timed_out(
            test_class=test_class, timeout=exc.timeout
        )
import collections
import enum
import argparse
import pluggy
import typing
from typing import Mapping, Any, List, Optional, Callable, Iterable
from repobee_plug import exception
# Markers used to declare hook specifications and implementations for pluggy;
# the package name scopes the hooks to this project.
hookspec = pluggy.HookspecMarker(__package__)
hookimpl = pluggy.HookimplMarker(__package__)
class Status(enum.Enum):
    """Status codes for reporting the outcome of a hook execution."""

    SUCCESS = "success"
    WARNING = "warning"
    ERROR = "error"
class HookResult(
    collections.namedtuple("HookResult", ("hook", "status", "msg", "data"))
):
    """Container for storing results from hooks.

    An immutable namedtuple of (hook, status, msg, data); ``data`` is
    optional and defaults to None.
    """

    def __new__(
        cls,
        hook: str,
        status: Status,
        msg: str,
        data: Mapping[Any, Any] = None,
    ):
        """
        Args:
            hook: Name of the hook.
            status: Status of the hook execution.
            msg: A hook message.
            data: Any additional data that the hook would like to report. This
                is primarily for storing hook results as JSON.
        """
        return super().__new__(cls, hook, status, msg, data)
class ExtensionParser(argparse.ArgumentParser):
    """An ArgumentParser specialized for RepoBee extension commands.

    Automatic help is disabled so that the surrounding CLI can supply its
    own ``-h`` handling without option clashes.
    """

    def __init__(self):
        super().__init__(add_help=False)
class BaseParser(enum.Enum):
    """Enumeration of the base parsers an extension command can request."""

    BASE = "base"
    STUDENTS = "students"
    REPO_NAMES = "repo-names"
    MASTER_ORG = "master-org"
class ExtensionCommand(
    collections.namedtuple(
        "ExtensionCommand",
        (
            "parser",
            "name",
            "help",
            "description",
            "callback",
            "requires_api",
            "requires_base_parsers",
        ),
    )
):
    """Class defining an extension command for the RepoBee CLI."""

    def __new__(
        cls,
        parser: ExtensionParser,
        name: str,
        help: str,
        description: str,
        callback: Callable[[argparse.Namespace, "apimeta.API"], None],
        requires_api: bool = False,
        requires_base_parsers: Optional[Iterable[BaseParser]] = None,
    ):
        """
        Args:
            parser: The parser to use for the CLI.
            name: Name of the command.
            help: Text that will be displayed when running ``repobee -h``
            description: Text that will be displayed when calling the ``-h``
                option for this specific command. Should be elaborate in
                describing the usage of the command.
            callback: A callback function that is called if this command is
                used on the CLI. It is passed the parsed namespace and the
                platform API, and is expected to return nothing.
            requires_api: If True, the base arguments required for the platform
                API are added as options to the extension command, and the
                platform API is then passed to the callback function. It is
                then important not to have clashing option names. If False, the
                base arguments are not added to the CLI, and None is passed in
                place of the API.
            requires_base_parsers: An optional iterable of
                :py:class:`BaseParser` members naming the base parsers this
                command needs.
        Raises:
            exception.ExtensionCommandError: If ``parser`` is not an
                ExtensionParser, or ``callback`` is not callable.
        """
        # Validate parser first, then callback, so error messages are
        # deterministic when both are wrong.
        if not isinstance(parser, ExtensionParser):
            raise exception.ExtensionCommandError(
                "parser must be a {.__name__}".format(ExtensionParser)
            )
        if not callable(callback):
            raise exception.ExtensionCommandError(
                "callback must be a callable"
            )
        return super().__new__(
            cls,
            parser,
            name,
            help,
            description,
            callback,
            requires_api,
            requires_base_parsers,
        )

    # NOTE(review): defining __eq__ without __hash__ makes instances
    # unhashable (Python sets __hash__ to None) — confirm this is intended.
    def __eq__(self, other):
        """Two ExtensionCommands are equal if they compare equal in all
        respects except for the parser, as argparse.ArgumentParser instances do
        not implement __eq__.
        """
        # Drop the first field (the parser) from both sides before comparing.
        _, *rest = self
        _, *other_rest = other
        return rest == other_rest
# Pairing produced by peer review allocation: the members of review_team are
# assigned to review reviewed_team's repo.
ReviewAllocation = collections.namedtuple(
    "ReviewAllocation", "review_team reviewed_team"
)
# A single peer review: the repo it concerns and whether it has been done.
Review = collections.namedtuple("Review", ["repo", "done"])
from typing import Union, Optional, Iterable, List, Mapping, Callable, Tuple
from repobee_plug import apimeta
from repobee_plug import containers
from repobee_plug.containers import hookspec
class PeerReviewHook:
    """Hook functions related to allocating peer reviews."""

    @hookspec(firstresult=True)
    def generate_review_allocations(
        self, teams: List[apimeta.Team], num_reviews: int
    ) -> List[containers.ReviewAllocation]:
        """Generate :py:class:`~repobee_plug.containers.ReviewAllocation`
        tuples from the provided teams, given that this concerns reviews for a
        single master repo.

        The provided teams of students should be treated as units. That is to
        say, if there are multiple members in a team, they should always be
        assigned to the same review team. The best way to merge two teams
        ``team_a`` and ``team_b`` into one review team is to simply do:

        .. code-block:: python

            team_c = apimeta.Team(members=team_a.members + team_b.members)

        This can be scaled to however many teams you would like to merge. As a
        practical example, if teams ``team_a`` and ``team_b`` are to review
        ``team_c``, then the following
        :py:class:`~repobee_plug.containers.ReviewAllocation` tuple, here
        called ``allocation``, should be contained in the returned list.

        .. code-block:: python

            review_team = apimeta.Team(members=team_a.members + team_b.members)
            allocation = containers.ReviewAllocation(
                review_team=review_team,
                reviewed_team=team_c,
            )

        .. note::

            Respecting the ``num_reviews`` argument is optional: only do it if
            it makes sense. It's good practice to issue a warning if
            num_reviews is ignored, however.

        Args:
            teams: A list of :py:class:`~repobee_plug.apimeta.Team` tuples.
            num_reviews: Amount of reviews each student should perform (and
                consequently amount of reviewers per repo)
        Returns:
            A list of :py:class:`~repobee_plug.containers.ReviewAllocation` tuples.
        """
class APIHook:
    """Hooks related to platform APIs."""

    @hookspec(firstresult=True)
    def get_api_class(self):
        """Return an API platform class. Must be a subclass of apimeta.API.

        Returns:
            An apimeta.API subclass (the class itself, not an instance).
        """

    @hookspec(firstresult=True)
    def api_init_requires(self) -> Tuple[str]:
        """Return which of the arguments to apimeta.APISpec.__init__ that the
        given API requires. For example, the GitHubAPI requires all, but the
        GitLabAPI does not require ``user``.

        Returns:
            Names of the required arguments.
        """
import pathlib
import argparse
import configparser
from typing import Union, Optional
from repobee_plug.apimeta import API
from repobee_plug.containers import hookspec
from repobee_plug.containers import HookResult
from repobee_plug.containers import ExtensionCommand
from repobee_plug.tasks import Task
class CloneHook:
    """Hook functions related to cloning repos."""

    @hookspec
    def clone_task(self) -> Task:
        """Create a task to run on a copy of a cloned student repo. This hook
        replaces the old ``act_on_cloned_repo`` hook, which will be removed in
        RepoBee v3.0.0.

        Implementations of this hook should return a :py:class:`~Task`, which
        defines a callback that is called after all student repos have been
        cloned. See the definition of :py:class:`~Task` for details.

        Returns:
            A :py:class:`~Task` instance defining a RepoBee task.
        """

    @hookspec
    def act_on_cloned_repo(
        self, path: Union[str, pathlib.Path], api
    ) -> Optional[HookResult]:
        """Do something with a cloned repo.

        Deprecated: superseded by ``clone_task`` and scheduled for removal in
        RepoBee v3.0.0 (see the ``clone_task`` docstring).

        Args:
            path: Path to the repo.
            api: An instance of :py:class:`repobee.github_api.GitHubAPI`.
        Returns:
            optionally returns a HookResult namedtuple for reporting the
            outcome of the hook. May also return None, in which case no
            reporting will be performed for the hook.
        """

    @hookspec
    def clone_parser_hook(self, clone_parser: argparse.ArgumentParser) -> None:
        """Do something with the clone repos subparser before it is used to
        parse CLI options. The typical task is to add options to it.

        Args:
            clone_parser: The ``clone`` subparser.
        """

    @hookspec
    def parse_args(self, args: argparse.Namespace) -> None:
        """Get the raw args from the parser. Only called for the clone parser.
        The typical task is to fetch any values from options added in
        :py:func:`clone_parser_hook`.

        Args:
            args: The full namespace returned by
                :py:func:`argparse.ArgumentParser.parse_args`
        """

    @hookspec
    def config_hook(self, config_parser: configparser.ConfigParser) -> None:
        """Hook into the config file parsing.

        Args:
            config_parser: the config parser after config has been read.
        """
class SetupHook:
    """Hook functions related to setting up student repos."""

    @hookspec
    def setup_task(self) -> Task:
        """Create a task to run on a copy of the master repo before it is
        pushed out to student repositories. This can for example be pre-flight
        checks of the master repo, or something else entirely.

        Implementations of this hook should return a :py:class:`~Task`, which
        defines a callback that is called after the master repo has been safely
        copied, but before that copy is pushed out to student repositories.
        Note that any changes to the repository must be committed to actually
        show up in the student repositories.

        .. note::

            Structural changes to the master repo are not currently supported.
            Changes to the repository during the callback will not be reflected
            in the generated repositories. Support for preprocessing (such that
            changes do take effect) is a potential future feature.
        """
class ExtensionCommandHook:
    """Hooks related to extension commands."""

    @hookspec
    def create_extension_command(self) -> ExtensionCommand:
        """Create an extension command to add to the RepoBee CLI. The command will
        be added as one of the top-level subcommands of RepoBee. It should return
        an :py:class:`~repobee_plug.containers.ExtensionCommand`.

        .. code-block:: python

            def command(args: argparse.Namespace, api: apimeta.API)

        The ``command`` function will be called if the extension command is used
        on the command line.

        Note that the :py:class:`~repobee_plug.containers.RepoBeeExtensionParser` class
        is just a thin wrapper around :py:class:`argparse.ArgumentParser`, and can
        be used in an identical manner. The following is an example definition
        of this hook that adds a subcommand called ``example-command``, that can
        be called with ``repobee example-command``.

        .. code-block:: python

            def callback(args: argparse.Namespace, api: apimeta.API) -> None:
                LOGGER.info("callback called with: {}, {}".format(args, api))

            @plug.repobee_hook
            def create_extension_command():
                parser = plug.RepoBeeExtensionParser()
                parser.add_argument("-b", "--bb", help="A useless argument")
                return plug.ExtensionCommand(
                    parser=parser,
                    name="example-command",
                    help="An example command",
                    description="Description of an example command",
                    callback=callback,
                )

        .. important::

            The ``-tb|--traceback`` argument is always added to the parser.
            Make sure not to add any conflicting arguments.

        .. important::

            If you need to use the api, you set ``requires_api=True`` in the
            ``ExtensionCommand``. This will automatically add the options that
            the API requires to the CLI options of the subcommand, and
            initialize the api and pass it in.

        Returns:
            A :py:class:`~repobee_plug.containers.ExtensionCommand`.
        """
import inspect
import enum
import collections
import itertools
import datetime
from typing import List, Iterable, Optional, Generator, Tuple, Mapping
from repobee_plug import exception
# Maximum Team/Repository name length allowed by GitHub; enforced by
# _check_name_length below.
MAX_NAME_LENGTH = 100
class APIObject:
    """Base wrapper class for platform API objects."""

    def __getattribute__(self, name: str):
        """Return attribute ``name``, but treat an ``implementation``
        attribute whose value is None as uninitialized.

        Only API objects returned by a platform API (a class deriving from
        :py:class:`API`) can have a meaningful ``implementation`` value, so
        reading a None implementation is always a caller error and raises
        AttributeError instead of silently yielding None. All other
        attribute access behaves as usual.
        """
        value = object.__getattribute__(self, name)
        if name == "implementation" and value is None:
            raise AttributeError(
                "invalid access to 'implementation': not initialized"
            )
        return value
class TeamPermission(enum.Enum):
    """Enum specifying team permissions on creating teams. On GitHub, for
    example, this can be e.g. `push` or `pull`.
    """

    PUSH = "push"
    PULL = "pull"
class IssueState(enum.Enum):
    """Enum specifying a possible issue state."""

    OPEN = "open"
    CLOSED = "closed"
    ALL = "all"
def _check_name_length(name):
    """Check that a Team/Repository name does not exceed the maximum GitHub
    allows (100 characters)

    Raises:
        ValueError: If the name is too long.
    """
    if len(name) <= MAX_NAME_LENGTH:
        return
    raise ValueError(
        "generated Team/Repository name is too long, was {} chars, "
        "max is {} chars".format(len(name), MAX_NAME_LENGTH)
    )
class Repo(
    APIObject,
    collections.namedtuple(
        "Repo",
        ["name", "description", "private", "team_id", "url", "implementation"],
    ),
):
    """Wrapper class for a Repo API object.

    The name is validated against the platform's maximum length on
    construction.
    """

    def __new__(
        cls, name, description, private, team_id=None, url=None, implementation=None
    ):
        _check_name_length(name)
        return super().__new__(
            cls, name, description, private, team_id, url, implementation
        )
class Team(
    APIObject,
    collections.namedtuple("Team", "name members id implementation".split()),
):
    """Wrapper class for a Team API object.

    If no name is supplied, one is generated by joining the sorted member
    names with '-'.
    """

    # BUG FIX: the underlying namedtuple was previously (mistakenly) created
    # with the typename "Repo", which leaked into reprs and pickling.
    def __new__(cls, members, name=None, id=None, implementation=None):
        if not name:
            name = "-".join(sorted(members))
        _check_name_length(name)
        return super().__new__(cls, name, members, id, implementation)

    def __str__(self):
        return self.name
class Issue(
    APIObject,
    collections.namedtuple(
        "Issue", "title body number created_at author implementation".split()
    ),
):
    """Wrapper class for an Issue API object."""

    def __new__(
        cls, title, body, number=None, created_at=None, author=None, implementation=None
    ):
        return super().__new__(
            cls,
            title,
            body,
            number=number,
            created_at=created_at,
            author=author,
            implementation=implementation,
        )

    def to_dict(self):
        """Return a dictionary representation of this namedtuple, without
        the ``implementation`` field.
        """
        return {
            key: value
            for key, value in self._asdict().items()
            if key != "implementation"
        }

    @staticmethod
    def from_dict(asdict: dict) -> "Issue":
        """Take a dictionary produced by Issue.to_dict and reconstruct the
        corresponding instance. The ``implementation`` field is lost in a
        to_dict -> from_dict roundtrip.
        """
        return Issue(**asdict)
class APISpec:
    """Wrapper class for API method stubs.

    .. important::

        This class should not be inherited from directly, it serves only to
        document the behavior of a platform API. Classes that implement this
        behavior should inherit from :py:class:`API`.
    """

    def __init__(self, base_url, token, org_name, user):
        _not_implemented()

    def ensure_teams_and_members(
        self, teams: Iterable[Team], permission: TeamPermission = TeamPermission.PUSH
    ) -> List[Team]:
        """Ensure that the teams exist, and that their members are added to the
        teams.

        Teams that do not exist are created, teams that already exist are
        fetched. Members that are not in their teams are added, members that do
        not exist or are already in their teams are skipped.

        Args:
            teams: A list of teams specifying student groups.
            permission: The permission these teams (or members of them) should
                be given in regards to associated repositories.
        Returns:
            A list of Team API objects of the teams provided to the function,
            both those that were created and those that already existed.
        """
        _not_implemented()

    def get_teams(self) -> List[Team]:
        """Get all teams related to the target organization.

        Returns:
            A list of Team API object.
        """
        _not_implemented()

    def create_repos(self, repos: Iterable[Repo]) -> List[str]:
        """Create repos in the target organization according the those specced
        by the ``repos`` argument. Repos that already exist are skipped.

        Args:
            repos: Repos to be created.
        Returns:
            A list of urls to the repos specified by the ``repos`` argument,
            both those that were created and those that already existed.
        """
        _not_implemented()

    def get_repo_urls(
        self,
        master_repo_names: Iterable[str],
        org_name: Optional[str] = None,
        teams: Optional[List[Team]] = None,
    ) -> List[str]:
        """Get repo urls for all specified repo names in the organization. As
        checking if every single repo actually exists takes a long time with a
        typical REST API, this function does not in general guarantee that the
        urls returned actually correspond to existing repos.

        If the ``org_name`` argument is supplied, urls are computed relative to
        that organization. If it is not supplied, the target organization is
        used.

        If the `teams` argument is supplied, student repo urls are
        computed instead of master repo urls.

        Args:
            master_repo_names: A list of master repository names.
            org_name: Organization in which repos are expected. Defaults to the
                target organization of the API instance.
            teams: A list of teams specifying student groups. Defaults to None.
        Returns:
            a list of urls corresponding to the repo names.
        """
        _not_implemented()

    def extract_repo_name(self, repo_url: str) -> str:
        """Extract a repo name from the provided url.

        Args:
            repo_url: A URL to a repository.
        Returns:
            The name of the repository corresponding to the url.
        """
        _not_implemented()

    def get_issues(
        self,
        repo_names: Iterable[str],
        state: IssueState = IssueState.OPEN,
        title_regex: str = "",
    ) -> Generator[Tuple[str, Generator[Issue, None, None]], None, None]:
        """Get all issues for the repos in repo_names an return a generator
        that yields (repo_name, issue generator) tuples. Will by default only
        get open issues.

        Args:
            repo_names: An iterable of repo names.
            state: Specifies the state of the issue.
            title_regex: If specified, only issues matching this regex are
                returned. Defaults to the empty string (which matches anything).
        Returns:
            A generator that yields (repo_name, issue_generator) tuples.
        """
        _not_implemented()

    def open_issue(self, title: str, body: str, repo_names: Iterable[str]) -> None:
        """Open the specified issue in all repos with the given names, in the
        target organization.

        Args:
            title: Title of the issue.
            body: Body of the issue.
            repo_names: Names of repos to open the issue in.
        """
        _not_implemented()

    def close_issue(self, title_regex: str, repo_names: Iterable[str]) -> None:
        """Close any issues in the given repos in the target organization,
        whose titles match the title_regex.

        Args:
            title_regex: A regex to match against issue titles.
            repo_names: Names of repositories to close issues in.
        """
        _not_implemented()

    def add_repos_to_review_teams(
        self, team_to_repos: Mapping[str, Iterable[str]], issue: Optional[Issue] = None
    ) -> None:
        """Add repos to review teams. For each repo, an issue is opened, and
        every user in the review team is assigned to it. If no issue is
        specified, sensible defaults for title and body are used.

        Args:
            team_to_repos: A mapping from a team name to an iterable of repo
                names.
            issue: An optional Issue tuple to override the default issue.
        """
        _not_implemented()

    def get_review_progress(
        self, review_team_names: Iterable[str], teams: Iterable[Team], title_regex: str
    ) -> Mapping[str, List]:
        """Get the peer review progress for the specified review teams and
        student teams by checking which review team members have opened issues
        in their assigned repos. Only issues matching the title regex will be
        considered peer review issues. If a reviewer has opened an issue in the
        assigned repo with a title matching the regex, the review will be
        considered done.

        Note that reviews only count if the student is in the review team for
        that repo. Review teams must only have one associated repo, or the repo
        is skipped.

        Args:
            review_team_names: Names of review teams.
            teams: Team API objects specifying student groups.
            title_regex: If an issue title matches this regex, the issue is
                considered a potential peer review issue.
        Returns:
            a mapping (reviewer -> assigned_repos), where reviewer is a str and
            assigned_repos is a :py:class:`_repobee.tuples.Review`.
        """
        _not_implemented()

    def delete_teams(self, team_names: Iterable[str]) -> None:
        """Delete all teams in the target organizatoin that exactly match one
        of the provided ``team_names``. Skip any team name for which no match
        is found.

        Args:
            team_names: A list of team names for teams to be deleted.
        """
        _not_implemented()

    @staticmethod
    def verify_settings(
        user: str,
        org_name: str,
        base_url: str,
        token: str,
        master_org_name: Optional[str] = None,
    ):
        """Verify the following (to the extent that is possible and makes sense
        for the specific platform):

        1. Base url is correct
        2. The token has sufficient access privileges
        3. Target organization (specified by ``org_name``) exists
            - If master_org_name is supplied, this is also checked to
              exist.
        4. User is owner in organization (verify by getting organization
           member list and checking roles)
            - If master_org_name is supplied, user is also checked to be an
              owner of it.

        Should raise an appropriate subclass of
        :py:class:`_repobee.exception.APIError` when a problem is encountered.

        Args:
            user: The username to try to fetch.
            org_name: Name of the target organization.
            base_url: A base url to a github API.
            token: A secure OAUTH2 token.
            master_org_name: Name of the master organization.
        Returns:
            True if the connection is well formed.
        Raises:
            :py:class:`_repobee.exception.APIError`
        """
        _not_implemented()
def _not_implemented():
raise NotImplementedError(
"The chosen API does not currently support this functionality"
)
def methods(attrdict):
    """Return all public methods and __init__ for some class."""

    def _is_api_relevant(attr_name, attr_value):
        # A method counts if it is callable and either public or __init__.
        return callable(attr_value) and (
            not attr_name.startswith("_") or attr_name == "__init__"
        )

    return {
        attr_name: attr_value
        for attr_name, attr_value in attrdict.items()
        if _is_api_relevant(attr_name, attr_value)
    }
def parameters(function):
    """Extract parameter names and default arguments from a function.

    Returns:
        A list of (name, default) tuples in declaration order; ``default``
        is :py:data:`inspect.Parameter.empty` when none is declared.
    """
    signature = inspect.signature(function)
    return [
        (param.name, param.default)
        for param in signature.parameters.values()
    ]
def check_init_params(reference_params, compare_params):
    """Check that the compare __init__'s parameters are a subset of the
    reference class's version.

    Raises:
        exception.APIImplementationError: If compare has parameters the
            reference does not.
    """
    unexpected = set(compare_params).difference(reference_params)
    if unexpected:
        raise exception.APIImplementationError(
            "unexpected arguments to __init__: {}".format(unexpected)
        )
def check_parameters(reference, compare):
    """Check if the parameters match, one by one. Stop at the first diff and
    raise an exception for that parameter.

    An exception is made for __init__, for which the compare may be a subset
    of the reference in no particular order.

    Raises:
        exception.APIImplementationError: On the first mismatching parameter.
    """
    reference_params = parameters(reference)
    compare_params = parameters(compare)

    # __init__ gets looser subset semantics.
    if reference.__name__ == "__init__":
        check_init_params(reference_params, compare_params)
        return

    # zip_longest pads with None so a missing/extra parameter also mismatches.
    for expected, actual in itertools.zip_longest(
        reference_params, compare_params
    ):
        if expected != actual:
            raise exception.APIImplementationError(
                "{}: expected parameter '{}', found '{}'".format(
                    reference.__name__, expected, actual
                )
            )
class APIMeta(type):
    """Metaclass for an API implementation. All public methods must be a
    specified api method, but all api methods do not need to be implemented.
    """

    def __new__(mcs, name, bases, attrdict):
        # The set of allowed public names is exactly APISpec's public API.
        api_methods = methods(APISpec.__dict__)
        implemented_methods = methods(attrdict)
        # Reject any public method that is not part of the specification.
        non_api_methods = set(implemented_methods.keys()) - set(api_methods.keys())
        if non_api_methods:
            raise exception.APIImplementationError(
                "non-API methods may not be public: {}".format(non_api_methods)
            )
        # For each implemented API method, verify its signature against the spec.
        for method_name, method in api_methods.items():
            if method_name in implemented_methods:
                check_parameters(method, implemented_methods[method_name])
        return super().__new__(mcs, name, bases, attrdict)
class API(APISpec, metaclass=APIMeta):
"""API base class that all API implementations should inherit from. This
class functions similarly to an abstract base class, but with a few key
distinctions that affect the inheriting class.
1. Public methods *must* override one of the public methods of
:py:class:`APISpec`. If an inheriting class defines any other public
method, an :py:class:`~_repobee.exception.APIError` is raised when the
class is defined.
2. All public methods in :py:class:`APISpec` have a default implementation
that simply raise a :py:class:`NotImplementedError`. There is no
requirement to implement any of them.
""" | /repobee_plug-0.12.0a1-py3-none-any.whl/repobee_plug/apimeta.py | 0.871748 | 0.216012 | apimeta.py | pypi |
from typing import Dict, List
from repobee_plug import exception
from repobee_plug import corehooks
from repobee_plug import exthooks
from repobee_plug import containers
# Mapping of hook method name -> hook function, collected from every hook
# specification class. Used to validate that plugin classes only define
# public methods whose names match a known hook.
_HOOK_METHODS = {
    key: value
    for key, value in [
        *exthooks.CloneHook.__dict__.items(),
        *corehooks.PeerReviewHook.__dict__.items(),
        *corehooks.APIHook.__dict__.items(),
        *exthooks.ExtensionCommandHook.__dict__.items(),
    ]
    if callable(value) and not key.startswith("_")
}
class _PluginMeta(type):
    """Metaclass used for converting methods with appropriate names into
    hook methods.

    Also ensures that all public methods have the name of a hook method.
    Checking signatures is handled by pluggy on registration.
    """

    def __new__(cls, name, bases, attrdict):
        """Check that all public methods have hook names, convert to hook
        methods and return a new instance of the class. If there are any
        public methods that have non-hook names,
        :py:function:`repobee_plug.exception.HookNameError` is raised.

        Checking signatures is delegated to ``pluggy`` during registration of
        the hook.
        """
        public_methods = cls._extract_public_methods(attrdict)
        cls._check_names(public_methods)
        # Replace each public method in place with its hookimpl-wrapped form.
        for method_name, method in public_methods.items():
            attrdict[method_name] = containers.hookimpl(method)
        return super().__new__(cls, name, bases, attrdict)

    @staticmethod
    def _check_names(methods):
        # Any public method whose name is not a known hook is an error.
        unknown_names = set(methods.keys()) - set(_HOOK_METHODS.keys())
        if unknown_names:
            raise exception.HookNameError(
                "public method(s) with non-hook name: {}".format(
                    ", ".join(unknown_names)
                )
            )

    @staticmethod
    def _extract_public_methods(attrdict):
        public = {}
        for key, value in attrdict.items():
            if callable(value) and not key.startswith("_"):
                public[key] = value
        return public
class Plugin(metaclass=_PluginMeta):
"""Base class for plugin classes. For plugin classes to be picked up by
``repobee``, they must inherit from this class.
Public methods must be hook methods, i.e. implement the specification of
one of the hooks defined in :py:mod:`~repobee_plug.corehooks.PeerReviewHook`
or :py:mod:`~repobee_plug.exthooks.CloneHook`. If there are any other
public methods, an error is raised on class creation. As long as the method
has the correct name, it will be recognized as a hook method.
The signature of the method is not checked until the hook is registered by
the :py:const:`repobee_plug.manager` (an instance of
:py:class:`pluggy.manager.PluginManager`). Therefore, when testing a plugin,
it is a good idea to include a test where it is registered with the manager
to ensure that it has the correct signatures.
Private methods (i.e. methods prefixed with ``_``) carry no such
restrictions.
""" | /repobee_plug-0.12.0a1-py3-none-any.whl/repobee_plug/pluginmeta.py | 0.88531 | 0.174656 | pluginmeta.py | pypi |


[](https://codecov.io/gh/repobee/repobee)
[](http://repobee.readthedocs.io/en/stable/)
[](https://badge.fury.io/py/repobee)


[](LICENSE)
[](https://github.com/ambv/black)
## Overview
RepoBee is a command line tool that allows teachers and teaching assistants to
work with large amounts of student Git repositories on the GitHub, GitLab and
Gitea platforms (cloud and self-hosted). The archetypical use case is to
automate creation of student repositories based on template repositories, that
can contain for example instructions and skeleton code. Given any number of
template repositories, creating a copy for each student or group is
[just one command away](https://docs.repobee.org/en/stable/repos.html#set-up-student-repositories-the-setup-action).
RepoBee also has functionality for
[updating student repos](https://docs.repobee.org/en/stable/repos.html#updating-student-repositories-the-update-action),
[batch cloning of student repos](https://docs.repobee.org/en/stable/repos.html#cloning-repos-in-bulk-the-clone-action),
[opening, closing and listing issues](https://docs.repobee.org/en/stable/issues.html),
[no-blind](https://docs.repobee.org/en/stable/peer.html) and
[double-blind](https://docs.repobee.org/en/stable/peer.html#double-blind-peer-review)
peer review, and much more!
In addition, RepoBee features a powerful
[plugin system](https://docs.repobee.org/en/stable/plugins.html) that allows
users to either use existing plugins, or
[write their own](https://docs.repobee.org/en/stable/repobee_plug/index.html).
Plugins can do a wide range of things, including making RepoBee compatible with
multiple hosting platforms (GitHub, GitLab, Gitea), providing compatibility
with repositories managed by GitHub Classroom, or running JUnit4 test classes
on cloned student repositories.
Still not quite sure what RepoBee actually does? The demo video below briefly
explains some of the most important concepts, and showcases how RepoBee can be
used to setup and clone student repositories, as well as how to write a simple
plugin.
https://user-images.githubusercontent.com/14223379/121573132-2d725380-ca25-11eb-8aa0-8f50ac3f28f0.mp4
> Short video demonstration of using RepoBee and writing a simple plugin. [For a higher-quality version of this demo, click this link!](https://repobee.org/media/repobee-demo.mp4)
### Feature highlights
RepoBee has a lot going for it. Here are some of the things we are most proud
of:
* Compatible with GitHub, GitLab and Gitea: No platform lock-in!
* Support for group assignments (multiple students per repository)
* No-blind and double-blind peer review, directly on the hosting platform
* Generate, clone and update student repositories based on templates
* Open, close and list issues
* Extend and customize RepoBee with the
[plugin system](https://repobee.readthedocs.io/en/stable/plugins.html)
* Very little configuration required on the Git service platform side
- The only requirement is to have an Organization/Group with private repository
capabilities!
* No local configuration required
- Although [setting a few defaults](https://docs.repobee.org/en/stable/getting_started.html#configure-repobee-for-the-target-organization-the-config-category)
is highly recommended
For a full list of RepoBee's built-in (i.e. non-plugin) features, see the
[user guide](https://docs.repobee.org/en/stable/userguide.html) and
[CLI reference](https://docs.repobee.org/en/stable/cli.html).
### Getting started
First, either [install RepoBee](#install) or grab the [Docker image](#docker).
Then, start exploring the [Docs](https://repobee.readthedocs.io/en/stable/),
where you (among other things) will find the [user
guide](https://repobee.readthedocs.io/en/stable/userguide.html). It covers use
of RepoBee's various commands by way of practical example, and should set you
on the right path with little effort.
We also provide a [Gitea test instance](https://gitea.repobee.org) for playing
around with RepoBee in an environment where messing up has no
consequence. See the
[RepoBee Gitea docs](https://docs.repobee.org/en/stable/gitea.html) for details
on how to use RepoBee with Gitea.
### Install
We provide an install script that sets up an isolated and correctly configured
environment for RepoBee, granted that you have [Python 3.7+ and Git
installed](https://docs.repobee.org/en/stable/install.html#requirements). The script
supports macOS, Linux and Windows Subsystem for Linux (WSL). You can execute it
directly using `curl`, with either `bash` or `zsh`.
> **IMPORTANT:** If you use any other shell than bash or zsh, you must still
> execute the install script with one of them.
```bash
$ bash <(curl -s https://repobee.org/install.sh)
```
```bash
$ zsh <(curl -s https://repobee.org/install.sh)
```
For additional details, please see [the install
docs](https://docs.repobee.org/en/stable/install.html).
### Docker
We offer a fully featured
[Docker image](https://docs.repobee.org/en/stable/docker.html#docker-usage)
that can be used instead of installing RepoBee.
## Versioning
As of December 17th 2018, RepoBee's CLI is a stable release and adheres to
[Semantic Versioning 2.0.0](https://semver.org/spec/v2.0.0.html). The internals
of RepoBee _do not_ adhere to this versioning, so using RepoBee as a library
is not recommended.
The plugin system is mostly stable as of RepoBee 3.0, but there is a slight
risk of breakage due to unforeseen problems. **If you develop a plugin, please
get in touch so that it can be taken into consideration if breaking changes are
introduced to the plugin system**.
## License
This software is licensed under the MIT License. See the [LICENSE](LICENSE)
file for specifics.
## Citing RepoBee in an academic context
If you want to reference RepoBee in a paper, please cite the following paper:
> Simon Larsén and Richard Glassey. 2019. RepoBee: Developing Tool Support for
> Courses using Git/GitHub. In Proceedings of the 2019 ACM Conference on
> Innovation and Technology in Computer Science Education (ITiCSE '19). ACM,
> New York, NY, USA, 534-540. DOI: https://doi.org/10.1145/3304221.3319784
| /repobee-3.8.1.tar.gz/repobee-3.8.1/README.md | 0.501709 | 0.865565 | README.md | pypi |
import os
import enum
import pathlib
import shutil
import sys
import tempfile
from typing import Iterable, Generator, Union
import repobee_plug as plug
__all__ = ["DirectoryLayout"]
def _flat_repo_path(
    base: pathlib.Path, repo: plug.StudentRepo
) -> pathlib.Path:
    """Compute the on-disk path for a repo placed directly under ``base``."""
    return base.joinpath(repo.name)
def _by_team_repo_path(
    base: pathlib.Path, repo: plug.StudentRepo
) -> pathlib.Path:
    """Compute the on-disk path for a repo nested under its team's directory."""
    return base.joinpath(repo.team.name, repo.name)
class DirectoryLayout(enum.Enum):
    """Layouts for arranging repositories on disk."""

    FLAT = "flat"
    BY_TEAM = "by-team"

    def __init__(self, label: str):
        # Enum __init__ runs once per member; ``label`` is the member's value.
        self.label = label
        # Bind the path-computing strategy for this layout to the member.
        self.get_repo_path = {
            "flat": _flat_repo_path,
            "by-team": _by_team_repo_path,
        }[label]

    def __str__(self):
        return str(self.label)
def find_files_by_extension(
    root: Union[str, pathlib.Path], *extensions: str
) -> Generator[pathlib.Path, None, None]:
    """Find all files with the given file extensions, starting from root.

    Args:
        root: The directory to start searching.
        extensions: One or more file extensions to look for, including the
            leading dot (e.g. ``".py"``).
    Returns:
        a generator that yields a Path objects to the files.
    Raises:
        ValueError: If no extensions are provided.
    """
    # Validate eagerly. Previously the check lived inside the generator
    # body, so calling with zero extensions returned a generator without
    # error and the ValueError only surfaced on the first iteration.
    if not extensions:
        raise ValueError("must provide at least one extension")

    def _iter_matches() -> Generator[pathlib.Path, None, None]:
        for cwd, _, files in os.walk(root):
            for file in files:
                # splitext keeps the leading dot, matching the documented
                # extension format.
                if os.path.splitext(file)[1] in extensions:
                    yield pathlib.Path(cwd) / file

    return _iter_matches()
def _ends_with_ext(
path: Union[str, pathlib.Path], extensions: Iterable[str]
) -> bool:
_, ext = os.path.splitext(str(path))
return ext in extensions
def atomic_write(content: str, dst: pathlib.Path) -> None:
    """Write the given contents to the destination "atomically". Achieved by
    writing in a temporary directory and then moving the file to the
    destination.

    Args:
        content: The content to write to the new file.
        dst: Path to the file.
    """
    with tempfile.TemporaryDirectory() as tmpdir:
        with tempfile.NamedTemporaryFile(
            # delete=False: the file must survive the context so it can be
            # moved into place after it is flushed and closed.
            delete=False,
            dir=tmpdir,
            mode="w",
            # Explicit encoding, consistent with read_issue_from_file which
            # reads with sys.getdefaultencoding().
            encoding=sys.getdefaultencoding(),
        ) as file:
            file.write(content)
        # Move after the file is closed so the destination never sees a
        # partially written file.
        shutil.move(file.name, str(dst))
def read_issue_from_file(issue_path: str) -> plug.Issue:
"""Attempt to read an issue from a textfile. The first line of the file
is interpreted as the issue's title.
Args:
issue_path: Local path to textfile with an issue.
"""
if not os.path.isfile(issue_path):
raise ValueError(f"{issue_path} is not a file")
with open(issue_path, "r", encoding=sys.getdefaultencoding()) as file:
return plug.Issue(file.readline().strip(), file.read()) | /repobee-3.8.1.tar.gz/repobee-3.8.1/src/_repobee/fileutil.py | 0.716516 | 0.156846 | fileutil.py | pypi |
import time
import functools
from typing import Mapping, Optional
import requests
import repobee_plug as plug
MODIFY_REQUEST_METHOD_NAMES = ("post", "put", "patch", "delete")
ALL_REQUEST_METHOD_NAMES = (
"get",
"options",
"head",
"post",
"put",
"patch",
"delete",
)
_ORIGINAL_REQUESTS_METHODS = {
method_name: getattr(requests, method_name)
for method_name in ALL_REQUEST_METHOD_NAMES + ("request",)
}
DEFAULT_INTERNET_CONNECTION_CHECK_URL = "https://repobee.org"
def rate_limit_modify_requests(
    base_url: str, rate_limit_in_seconds: float
) -> None:
    """Apply a rate limit to all modifying requests (put, patch, delete, post)
    going to the given base URL.

    This is currently necessary at least for GitHub due to the newly introduced
    secondary rate limits, see
    https://docs.github.com/en/rest/guides/best-practices-for-integrators#dealing-with-secondary-rate-limits.

    Args:
        base_url: Base URL on which to rate limit modify requests.
        rate_limit_in_seconds: Minimum amount of seconds between each modify
            request.
    """
    plug.log.debug(
        f"Rate limiting modify requests to {1 / rate_limit_in_seconds} "
        "requests per second"
    )
    last_modify_time = 0.0
    original_request_method = requests.request

    def request(method, url, *args, **kwargs):
        nonlocal last_modify_time
        if (
            url.casefold().startswith(base_url.casefold())
            and method.lower() in MODIFY_REQUEST_METHOD_NAMES
        ):
            # Sleep just long enough to honor the minimum spacing between
            # modifying requests.
            seconds_since_last_modify = time.time() - last_modify_time
            if seconds_since_last_modify < rate_limit_in_seconds:
                time.sleep(rate_limit_in_seconds - seconds_since_last_modify)
            last_modify_time = time.time()
        # BUG FIX: the response must be returned to the caller; previously
        # the result of the underlying request was silently dropped, so
        # every wrapped call returned None.
        return original_request_method(method, url, *args, **kwargs)

    requests.request = request
    requests.put = functools.partial(request, "put")
    requests.patch = functools.partial(request, "patch")
    requests.delete = functools.partial(request, "delete")
    requests.post = functools.partial(request, "post")
def install_retry_after_handler() -> None:
    """Install a handler that interposes itself into HTTP requests and honors the
    Retry-After header by sleeping for the desired amount of time.
    """
    plug.log.debug("Installing Retry-After handler")
    # Capture the unwrapped function before monkey-patching below.
    original_request_method = requests.request

    def request_with_retry_after_handling(method, url, *args, **kwargs):
        response = original_request_method(method, url, *args, **kwargs)
        # HTTP header names are case-insensitive, so look up accordingly.
        retry_after_raw = _get_value_case_insensitive(
            "retry-after", response.headers
        )
        if not retry_after_raw:
            return response
        plug.log.warning(
            f"Rate limited on request to {url}, retrying after {retry_after_raw}s"
        )
        # NOTE(review): assumes Retry-After carries a delay in seconds; the
        # header may also be an HTTP-date, which float() would reject --
        # confirm against the platforms RepoBee talks to.
        retry_after = float(retry_after_raw)
        time.sleep(retry_after)
        # Retry recursively until the server stops sending Retry-After.
        return request_with_retry_after_handling(method, url, *args, **kwargs)

    # Wrap every request-issuing function in the requests module, plus the
    # generic requests.request entry point.
    for method_name in ALL_REQUEST_METHOD_NAMES:
        retry_aware_method = functools.partial(
            request_with_retry_after_handling, method_name
        )
        setattr(requests, method_name, retry_aware_method)
    requests.request = request_with_retry_after_handling
def _get_value_case_insensitive(
search_key: str, mapping: Mapping[str, str]
) -> Optional[str]:
normalized_mapping = {
key.casefold(): value for key, value in mapping.items()
}
return normalized_mapping.get(search_key.casefold())
def remove_rate_limits() -> None:
    """Restore the original ``requests`` functions, undoing any previously
    installed rate limits or retry handlers.
    """
    plug.log.debug("Removing rate limits")
    for method_name in _ORIGINAL_REQUESTS_METHODS:
        setattr(
            requests, method_name, _ORIGINAL_REQUESTS_METHODS[method_name]
        )
def is_internet_connection_available(
test_url=DEFAULT_INTERNET_CONNECTION_CHECK_URL,
) -> bool:
"""Test if an internet connection is available.
Args:
test_url: A URL to try to GET.
"""
try:
return requests.get(test_url) is not None
except requests.exceptions.ConnectionError:
return False | /repobee-3.8.1.tar.gz/repobee-3.8.1/src/_repobee/http.py | 0.837952 | 0.182389 | http.py | pypi |
import argparse
import dataclasses
from typing import Optional
from _repobee import constants
__all__ = [
"RepobeeParser",
"OrderedFormatter",
"BaseParsers",
"add_debug_args",
"CATEGORY_DEST",
"ACTION_DEST",
]
CATEGORY_DEST = "category"
ACTION_DEST = "action"
@dataclasses.dataclass
class BaseParsers:
    """Bundle of the shared base parsers that are used as parents when
    composing CLI command parsers.
    """

    base_parser: argparse.ArgumentParser
    student_parser: argparse.ArgumentParser
    template_org_parser: argparse.ArgumentParser
    repo_name_parser: argparse.ArgumentParser
    repo_discovery_parser: argparse.ArgumentParser
class RepobeeParser(argparse.ArgumentParser):
    """A thin wrapper around :py:class:`argparse.ArgumentParser`. The primary
    functionality of this class is to group the core CLI arguments into
    argument groups such that the CLI doesn't get too cluttered.
    """

    def __init__(self, *args, is_core_command: bool = False, **kwargs):
        # Grouping is only applied to RepoBee's own core commands; other
        # parsers get plain argparse behavior in add_argument.
        self._is_core_command = is_core_command
        super().__init__(*args, **kwargs)
        self._platform_args_grp = self.add_argument_group(
            title="platform arguments",
            description="Arguments related to the platform "
            "(e.g. GitHub or GitLab)",
        )
        self._debug_args_grp = self.add_argument_group(title="debug arguments")
        self._alpha_args_grp = self.add_argument_group(
            title="alpha arguments",
            description="Arguments that are currently being trialed in alpha, "
            "and may change without further notice",
        )

    def add_argument(self, *args, **kwargs):
        """Add an argument to this parser, placing it in an appropriate
        argument group.
        """
        if not self._is_core_command:
            return super().add_argument(*args, **kwargs)
        # Known flags are routed to their dedicated groups; anything else
        # falls through to the default argparse groups.
        platform_args = {
            "--token",
            "--org-name",
            "--template-org-name",
            "--user",
            "--base-url",
        }
        debug_args = {"--traceback", "--quiet"}
        alpha_args = {"--hook-results-file", "--double-blind-key"}
        for arg in args:
            if arg in platform_args:
                return self._platform_args_grp.add_argument(*args, **kwargs)
            elif arg in debug_args:
                return self._debug_args_grp.add_argument(*args, **kwargs)
            elif arg in alpha_args:
                return self._alpha_args_grp.add_argument(*args, **kwargs)
        return super().add_argument(*args, **kwargs)

    def add_argument_group(  # type: ignore
        self, title: Optional[str] = None, description: Optional[str] = None
    ) -> argparse._ArgumentGroup:
        """Create a new argument group if the title does not exist, or return
        an existing one if it does.
        """
        # Reusing groups by title prevents duplicate sections in --help
        # output when parent parsers contribute the same groups.
        for grp in self._action_groups:
            if grp.title == title:
                if description is not None:
                    grp.description = description
                return grp
        return super().add_argument_group(title, description)
class OrderedFormatter(argparse.HelpFormatter):
    """A formatter class for putting out the help section in a proper order.
    All of the arguments that are configurable in the configuration file
    should appear at the bottom (in arbitrary, but always the same, order).
    Any other arguments should appear in the order they are added.

    The internals of the formatter classes are technically not public,
    so this class is "unsafe" when it comes to new versions of Python. It may
    have to be disabled for future versions, but it works for 3.6, 3.7 and 3.8
    at the time of writing. If this turns troublesome, it may be time to
    switch to some other CLI library.
    """

    def add_arguments(self, actions):
        """Order actions by the name of the long argument, and then add them
        as arguments.

        The order is the following:

        [ NON-CONFIGURABLE | CONFIGURABLE | DEBUG ]

        Non-configurable arguments added without modification, which by
        default is the order they are added to the parser. Configurable
        arguments are added in the order defined by
        :py:const:`constants.ORDERED_CONFIGURABLE_ARGS`. Finally, debug
        commands (such as ``--traceback``) are added in arbitrary (but
        consistent) order.
        """
        # Configurable args get fixed positions; --traceback is pinned last.
        args_order = tuple(
            "--" + name.replace("_", "-")
            for name in constants.ORDERED_CONFIGURABLE_ARGS
        ) + ("--traceback",)

        def key(action):
            # Options without a long form keep their insertion order (-1
            # sorts before all configurable args; sorted() is stable).
            if len(action.option_strings) < 2:
                return -1
            long_arg = action.option_strings[1]
            if long_arg in args_order:
                return args_order.index(long_arg)
            return -1

        actions = sorted(actions, key=key)
        super().add_arguments(actions)
def add_debug_args(parser: argparse.ArgumentParser) -> None:
"""Add RepoBee's standard debug arguments to this parser.
Args:
parser: A parser to add arguments to.
"""
parser.add_argument(
"--tb",
"--traceback",
help="show the full traceback of critical exceptions",
action="store_true",
dest="traceback",
)
parser.add_argument(
"-q",
"--quiet",
help="silence output (stacks up to 3 times: x1=only warnings "
"and errors, x2=only errors, x3=complete and utter silence)",
action="count",
default=0,
) | /repobee-3.8.1.tar.gz/repobee-3.8.1/src/_repobee/cli/argparse_ext.py | 0.871966 | 0.167559 | argparse_ext.py | pypi |
import argparse
from typing import List, Tuple
import repobee_plug as plug
from repobee_plug.cli import categorization
from _repobee.cli import argparse_ext
def add_plugin_parsers(
    subparsers: argparse._SubParsersAction,
    base_parsers: argparse_ext.BaseParsers,
    parsers_mapping: dict,
    config: plug.Config,
):
    """Add parsers defined by plugins."""
    registered_plugins = plug.manager.get_plugins()

    # First attach full commands, creating their parsers.
    for cmd in (
        p for p in registered_plugins if isinstance(p, plug.cli.Command)
    ):
        _attach_command(cmd, base_parsers, subparsers, parsers_mapping, config)

    # Then let command extensions add options to already-created parsers.
    for cmd_ext in (
        p
        for p in registered_plugins
        if isinstance(p, plug.cli.CommandExtension)
    ):
        for action in cmd_ext.__settings__.actions:
            cmd_ext.attach_options(
                config=config, parser=parsers_mapping[action]
            )
def _attach_command(
    cmd: plug.cli.Command,
    base_parsers: argparse_ext.BaseParsers,
    subparsers: argparse._SubParsersAction,
    parsers_mapping: dict,
    config: plug.Config,
) -> None:
    """Create and register the parser for a single plugin command.

    Mutates ``parsers_mapping`` (adding category and action parsers) and
    replaces ``cmd.__settings__`` with a copy carrying the resolved
    category and action.
    """
    category, action, is_category_action = _resolve_category_and_action(cmd)
    parents = _compose_parent_parsers(cmd, base_parsers)
    # Lazily create the category parser the first time this category is seen;
    # category-actions attach directly to the top level instead.
    if category and category not in parsers_mapping and not is_category_action:
        parsers_mapping[category] = _create_category_parser(
            category, subparsers
        )
    assert action not in parsers_mapping, f"{action} already exists"
    settings = cmd.__settings__
    ext_parser = _create_action_parser(
        cmd=cmd,
        action=action,
        is_category_action=is_category_action,
        parsers_mapping=parsers_mapping,
        subparsers=subparsers,
        parents=parents,
    )
    cmd.attach_options(config=config, parser=ext_parser)
    # Write the resolved category/action back into the (immutable) settings
    # by rebuilding it from its own fields.
    settings_dict = settings._asdict()
    settings_dict.update(dict(action=action, category=category))
    cmd.__settings__ = settings.__class__(**settings_dict)
    parsers_mapping[action] = ext_parser
def _resolve_category_and_action(
    cmd: plug.cli.Command,
) -> Tuple[categorization.Category, categorization.Action, bool]:
    """Work out which category and action a plugin command belongs to.

    Returns:
        A tuple ``(category, action, is_category_action)`` where
        ``is_category_action`` is True when the command declared no
        category at all, in which case it acts as its own single-action
        category.
    """
    settings = cmd.__settings__
    # An Action object carries its own category; otherwise fall back to the
    # explicitly configured category (which may be None).
    category = (
        settings.action.category
        if isinstance(settings.action, categorization.Action)
        else settings.category
    )
    # Default the action name to the command class name, kebab-cased.
    action = settings.action or cmd.__class__.__name__.lower().replace(
        "_", "-"
    )
    if isinstance(action, str):
        is_category_action = False
        if not category:
            # No category declared: the action doubles as its own category.
            is_category_action = True
            category = plug.cli.category(name=action, action_names=[action])
        return (
            category,
            (
                # Prefer the Action registered on the category, if any.
                category[action]
                if category and action in category
                else categorization.Action(name=action, category=category)
            ),
            is_category_action,
        )
    else:
        # settings.action was already a full Action object.
        return category, action, False
def _compose_parent_parsers(
    cmd: plug.cli.Command, bases: argparse_ext.BaseParsers
):
    """Collect the base parsers a command requested as parents (always
    including the base parser when the command requires platform API access).
    """
    requested = cmd.__settings__.base_parsers or []
    bp = plug.BaseParser
    parents = []

    if cmd.__requires_api__() or bp.BASE in requested:
        parents.append(bases.base_parser)
    if bp.STUDENTS in requested:
        parents.append(bases.student_parser)
    if bp.TEMPLATE_ORG in requested:
        parents.append(bases.template_org_parser)
    # REPO_DISCOVERY subsumes ASSIGNMENTS, so only one of the two is added.
    if bp.REPO_DISCOVERY in requested:
        parents.append(bases.repo_discovery_parser)
    elif bp.ASSIGNMENTS in requested:
        parents.append(bases.repo_name_parser)
    return parents
def _create_category_parser(
    category: categorization.Category, subparsers: argparse._SubParsersAction
) -> argparse._SubParsersAction:
    """Create a parser for a command category and return its (required)
    action subparsers collection.
    """
    category_cmd = subparsers.add_parser(
        name=category.name,
        help=category.help,
        description=category.description,
    )
    action_subparsers = category_cmd.add_subparsers(
        dest=argparse_ext.ACTION_DEST
    )
    # An action must always be specified after the category.
    action_subparsers.required = True
    return action_subparsers
def _create_action_parser(
    cmd: plug.cli.Command,
    action: categorization.Action,
    is_category_action: bool,
    parsers_mapping: dict,
    subparsers: argparse._SubParsersAction,
    parents: List[argparse.ArgumentParser],
):
    """Create the argparse parser for a single action.

    The parser is attached to its category's subparsers when one exists in
    ``parsers_mapping``, otherwise directly to the top-level subparsers.
    """
    settings = cmd.__settings__
    ext_parser = (
        parsers_mapping.get(action.category) or subparsers
    ).add_parser(
        action.name,
        help=settings.help,
        description=settings.description,
        parents=parents,
        formatter_class=argparse_ext.OrderedFormatter,
    )
    try:
        argparse_ext.add_debug_args(ext_parser)
    except argparse.ArgumentError:
        # A parent parser may already define the debug args; that's fine.
        pass
    _add_metainfo_args(
        ext_parser=ext_parser,
        action=action,
        cmd=cmd,
        is_category_action=is_category_action,
    )
    return ext_parser
def _add_metainfo_args(
ext_parser: argparse.ArgumentParser,
action: categorization.Action,
cmd: plug.cli.Command,
is_category_action: bool,
) -> None:
try:
# this will fail if we are adding arguments to an existing command
ext_parser.add_argument(
"--repobee-action",
action="store_const",
help=argparse.SUPPRESS,
const=action.name,
default=action.name,
dest="action",
)
# This is a little bit of a dirty trick. It allows us to easily
# find the associated extension command when parsing the arguments.
ext_parser.add_argument(
"--repobee-extension-command",
action="store_const",
help=argparse.SUPPRESS,
const=cmd,
default=cmd,
dest="_extension_command",
)
except argparse.ArgumentError:
pass
if is_category_action:
# category is not specified, so it's a category-action
ext_parser.add_argument(
"--repobee-category",
action="store_const",
help=argparse.SUPPRESS,
const=action.category,
default=action.category,
dest="category",
) | /repobee-3.8.1.tar.gz/repobee-3.8.1/src/_repobee/cli/pluginparsers.py | 0.69285 | 0.16378 | pluginparsers.py | pypi |
import asyncio
import dataclasses
import enum
import pathlib
import shutil
import subprocess
from typing import List, Iterable, Tuple
import repobee_plug as plug
from _repobee import exception, urlutil
from _repobee.git._local import git_init, stash_changes
from _repobee.git._util import batch_execution, warn_local_repos, is_git_repo
@dataclasses.dataclass(frozen=True)
class CloneSpec:
    """Specification of a single repository clone/pull operation."""

    dest: pathlib.Path  # local directory to clone/pull into
    repo_url: str  # URL to pull from (may contain auth credentials)
    branch: str = ""  # branch to pull; empty means the remote default
    metadata: dict = dataclasses.field(default_factory=dict)  # caller extras
async def _clone_async(clone_spec: CloneSpec):
    """Clone git repositories asynchronously.

    Args:
        clone_spec: A clone specification.
    Raises:
        exception.CloneFailedError: If the pull fails for any reason other
            than the remote repository being empty.
    """
    rc, stderr = await pull_clone_async(clone_spec)
    # Pulling from an empty remote (no HEAD) is tolerated: the local repo
    # directory is simply left initialized and empty.
    empty_repo_error = b"fatal: couldn't find remote ref HEAD"
    if rc != 0 and empty_repo_error.lower() not in stderr.lower():
        raise exception.CloneFailedError(
            f"Failed to clone {clone_spec.repo_url}",
            returncode=rc,
            stderr=stderr,
            clone_spec=clone_spec,
        )
    else:
        plug.log.info(f"Cloned into {clone_spec.repo_url}")
async def pull_clone_async(clone_spec: CloneSpec):
    """Simulate a clone with a pull to avoid writing remotes (that could
    include secure tokens) to disk.

    Args:
        clone_spec: Specification of what to pull and where.
    Returns:
        A tuple ``(returncode, stderr)`` from the ``git pull`` subprocess.
    """
    ensure_repo_dir_exists(clone_spec)
    # Build e.g. ["git", "pull", "<url>"] or [..., "<url>", "<branch>"];
    # strip() drops the trailing space when no branch is given.
    pull_command = (
        f"git pull {clone_spec.repo_url} "
        f"{clone_spec.branch or ''}".strip().split()
    )
    proc = await asyncio.create_subprocess_exec(
        *pull_command,
        cwd=str(clone_spec.dest),
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    _, stderr = await proc.communicate()
    return proc.returncode, stderr
def ensure_repo_dir_exists(clone_spec: CloneSpec) -> None:
    """Checks if a dir for the repo url exists, and if it does not, creates
    it. Also initializes (or reinitializes, if it already exists) as a git
    repo.
    """
    destination = clone_spec.dest
    if not destination.exists():
        destination.mkdir(parents=True)
    if not is_git_repo(str(destination)):
        git_init(destination)
def clone(clone_specs: Iterable[CloneSpec]) -> List[CloneSpec]:
    """Clone all repos asynchronously.

    Args:
        clone_specs: Clone specifications for repos to clone.
    Returns:
        Specs for which the cloning failed.
    """
    failed_specs = []
    for outcome in batch_execution(_clone_async, clone_specs):
        if isinstance(outcome, exception.CloneFailedError):
            failed_specs.append(outcome.clone_spec)
    return failed_specs
def update_local_repos(
    local: List[plug.StudentRepo], api: plug.PlatformAPI
) -> None:
    """Pull the latest changes into already-existing local student repos.

    Assumes ``local`` is non-empty and that every repo lives two directory
    levels below the same base directory (asserted below).
    """
    expected_basedir = local[0].path.parent.parent
    assert all(
        map(lambda repo: repo.path.parent.parent == expected_basedir, local)
    )
    # Stash uncommitted changes first so the pull cannot conflict with them.
    stash_changes(local)
    specs = [
        CloneSpec(repo_url=api.insert_auth(repo.url), dest=repo.path)
        for repo in local
    ]
    # TODO figure out what to do when a local update fails
    clone(specs)
class CloneStatus(enum.Enum):
    """Outcome of attempting to clone a single student repo."""

    CLONED = enum.auto()  # repo was newly cloned to its local path
    EXISTED = enum.auto()  # a local copy already existed
    FAILED = enum.auto()  # the clone attempt failed
def clone_student_repos(
    repos: List[plug.StudentRepo],
    clone_dir: pathlib.Path,
    update_local: bool,
    api: plug.PlatformAPI,
) -> Iterable[Tuple[CloneStatus, plug.StudentRepo]]:
    """Clone student repos into a scratch directory and copy them to their
    final paths, optionally updating repos that already exist locally.

    Args:
        repos: Student repos to clone; each must have a local ``path`` set.
        clone_dir: Scratch directory the actual cloning happens in.
        update_local: If True, pull updates into already-existing local
            repos; if False, such repos are only warned about.
        api: The platform API, used to insert auth into repo URLs.
    Returns:
        ``(status, repo)`` pairs for every input repo.
    """
    assert all(map(lambda r: r.path is not None, repos))
    local = [repo for repo in repos if repo.path.exists()]
    if local and update_local:
        update_local_repos(local, api)
    elif local and not update_local:
        warn_local_repos(local)
    non_local = [repo for repo in repos if not repo.path.exists()]
    plug.log.info(f"Cloning into {non_local}")
    # Clone into a hashed subdirectory of clone_dir (avoids name collisions),
    # then copy successful clones to their real destinations below.
    non_local_specs = [
        CloneSpec(
            dest=clone_dir / plug.fileutils.hash_path(repo.path),
            repo_url=api.insert_auth(repo.url),
            metadata=dict(repo=repo),
        )
        for repo in non_local
    ]
    failed_specs = clone(non_local_specs)
    failed_repos = {spec.metadata["repo"] for spec in failed_specs}
    success_repos = {repo for repo in non_local if repo not in failed_repos}
    for repo in success_repos:
        shutil.copytree(
            src=clone_dir / plug.fileutils.hash_path(repo.path), dst=repo.path
        )
    return (
        [(CloneStatus.EXISTED, repo) for repo in local]
        + [(CloneStatus.CLONED, repo) for repo in success_repos]
        + [(CloneStatus.FAILED, repo) for repo in failed_repos]
    )
def clone_single(repo_url: str, branch: str = "", cwd: str = "."):
    """Clone a git repository with ``git clone``.

    This should only be used for temporary cloning, as any secure tokens in the
    repo URL are stored in the repository.

    Args:
        repo_url: HTTPS url to repository on the form
            https://<host>/<owner>/<repo>.
        branch: The branch to clone.
        cwd: Working directory. Defaults to the current directory.

    Raises:
        exception.CloneFailedError: If the clone command exits non-zero.
    """
    # FIX: the branch must be passed with --branch; a bare positional
    # argument after the URL is interpreted by git as the target directory.
    command = [*"git clone --single-branch".split(), repo_url] + (
        ["--branch", branch] if branch else []
    )
    process = subprocess.run(command, cwd=cwd, capture_output=True)
    if process.returncode != 0:
        raise exception.CloneFailedError(
            "Failed to clone",
            process.returncode,
            process.stderr,
            CloneSpec(
                repo_url=repo_url,
                dest=pathlib.Path(cwd) / urlutil.extract_repo_name(repo_url),
                branch=branch,
            ),
        )
import asyncio
import os
import pathlib
import sys
from typing import Callable, Awaitable, Iterable, Any, Sequence, List, Union
import more_itertools
import repobee_plug as plug
from _repobee import exception
def batch_execution(
    batch_func: Callable[..., Awaitable],
    arg_list: Iterable[Any],
    *batch_func_args,
    **batch_func_kwargs,
) -> Sequence[Exception]:
    """Run ``batch_func`` over ``arg_list`` in concurrent batches.

    Arguments are consumed in fixed-size chunks (see
    :py:func:`batch_execution_async`) until ``arg_list`` is exhausted. The
    extra positional and keyword arguments are passed on each call.

    Args:
        batch_func: A coroutine function whose first argument is a single
            element of ``arg_list``.
        arg_list: Arguments to map over.
        batch_func_args: Additional positional arguments to ``batch_func``.
        batch_func_kwargs: Additional keyword arguments to ``batch_func``.
    Returns:
        A list of the exceptions raised by the individual tasks.
    """
    loop = _get_event_loop()
    return loop.run_until_complete(
        batch_execution_async(
            batch_func, arg_list, *batch_func_args, **batch_func_kwargs
        )
    )
async def batch_execution_async(
    batch_func: Callable[..., Awaitable],
    arg_list: Iterable[Any],
    *batch_func_args,
    **batch_func_kwargs,
) -> Sequence[Exception]:
    """Async counterpart of :py:func:`batch_execution`.

    Schedules ``batch_func`` for up to 20 arguments at a time and awaits
    each batch to completion (with a tqdm progress bar) before starting the
    next one.

    Returns:
        The :py:class:`~_repobee.exception.GitError` instances raised by
        the tasks; any other exception type propagates.
    """
    # Imported lazily so importing this module does not pull in tqdm's
    # asyncio extras unless a batch is actually executed.
    import tqdm.asyncio  # type: ignore
    exceptions = []
    loop = _get_event_loop()
    concurrent_tasks = 20
    for batch, args_chunk in enumerate(
        more_itertools.ichunked(arg_list, concurrent_tasks), start=1
    ):
        tasks = [
            loop.create_task(
                batch_func(arg, *batch_func_args, **batch_func_kwargs)
            )
            for arg in args_chunk
        ]
        # as_completed yields tasks in completion order, driving the
        # progress bar forward as each one finishes.
        for coro in tqdm.asyncio.tqdm_asyncio.as_completed(
            tasks, desc=f"Progress batch {batch}", file=sys.stdout
        ):
            try:
                await coro
            except exception.GitError as exc:
                # Collect git failures; they are logged and returned below.
                exceptions.append(exc)
    for e in exceptions:
        plug.log.error(str(e))
    return exceptions
def warn_local_repos(local: List[plug.StudentRepo]):
    """Log a warning listing local repos that will be skipped."""
    repo_ids = (f"{repo.team.name}/{repo.name}" for repo in local)
    plug.log.warning(
        f"Found local repos, skipping: {', '.join(repo_ids)}"
    )
def is_git_repo(path: Union[str, pathlib.Path]) -> bool:
"""Check if a directory has a .git subdirectory.
Args:
path: Path to a local directory.
Returns:
True if there is a .git subdirectory in the given directory.
"""
return os.path.isdir(path) and ".git" in os.listdir(path)
def _get_event_loop() -> asyncio.AbstractEventLoop:
if sys.version_info[:2] < (3, 10):
return asyncio.get_event_loop()
try:
return asyncio.get_running_loop()
except RuntimeError:
return asyncio.new_event_loop() | /repobee-3.8.1.tar.gz/repobee-3.8.1/src/_repobee/git/_util.py | 0.655997 | 0.287693 | _util.py | pypi |
import asyncio
import dataclasses
import os
import pathlib
import subprocess
import sys
from typing import Iterable, List, Tuple
import repobee_plug as plug
from _repobee import exception
from _repobee.git._util import batch_execution
@dataclasses.dataclass(frozen=True)
class PushSpec:
    """Specification of one push: which local repo to push, to which remote
    URL, and on which branch.
    """

    local_path: pathlib.Path  # path of the local repository to push from
    repo_url: str  # remote URL to push to (may contain auth tokens)
    branch: str  # branch to push
    metadata: dict = dataclasses.field(default_factory=dict)  # free-form extras
    def __iter__(self):
        """Iter implementation just to make this dataclass unpackable."""
        # Note: metadata is deliberately excluded from unpacking.
        return iter((self.local_path, self.repo_url, self.branch))
async def _push_async(pt: PushSpec):
    """Asynchronous call to git push, pushing directly to the repo_url and branch.

    Args:
        pt: A :py:class:`PushSpec` describing what to push and where.

    Raises:
        exception.PushFailedError: If ``git push`` exits non-zero.
    """
    command = ["git", "push", pt.repo_url, pt.branch]
    proc = await asyncio.create_subprocess_exec(
        *command,
        cwd=os.path.abspath(pt.local_path),
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    _, stderr = await proc.communicate()
    if proc.returncode != 0:
        raise exception.PushFailedError(
            f"Failed to push to {pt.repo_url}",
            proc.returncode or -sys.maxsize,  # defensive: never pass falsy
            stderr,
            pt.repo_url,
        )
    elif b"Everything up-to-date" in stderr:
        # git reports "Everything up-to-date" on stderr, not stdout.
        plug.log.info(f"{pt.repo_url} is up-to-date")
    else:
        plug.log.info(f"Pushed files to {pt.repo_url} {pt.branch}")
def push(
    push_tuples: Iterable[PushSpec], tries: int = 3
) -> Tuple[List[PushSpec], List[PushSpec]]:
    """Push to all repos defined in push_tuples asynchronously, retrying
    failed pushes up to ``tries`` times in total (i.e. the initial push plus
    ``tries - 1`` retries).

    Args:
        push_tuples: Push specs defining local and remote repos.
        tries: Amount of times to try to push (including initial push).
    Returns:
        A tuple of lists of push tuples on the form (successful, failures).
    """
    if tries < 1:
        raise ValueError("tries must be larger than 0")
    push_tuples = list(push_tuples)
    # Everything is considered "failed" until a push attempt succeeds.
    failed_pts = list(push_tuples)
    for attempt in range(1, tries + 1):
        plug.log.info(f"Pushing, attempt {attempt}/{tries}")
        failed_urls = set(push_no_retry(failed_pts))
        failed_pts = [pt for pt in failed_pts if pt.repo_url in failed_urls]
        if not failed_pts:
            break
        plug.log.warning(f"{len(failed_pts)} pushes failed ...")
    successful_pts = [pt for pt in push_tuples if pt not in failed_pts]
    return successful_pts, failed_pts
def push_no_retry(push_tuples: Iterable[PushSpec]) -> List[str]:
    """Push once and only once to each repo, asynchronously.

    Args:
        push_tuples: Push specs defining local and remote repos.
    Returns:
        urls to which pushes failed with exception.PushFailedError. Other
        errors are only logged.
    """
    failed_urls = []
    for exc in batch_execution(_push_async, push_tuples):
        if isinstance(exc, exception.PushFailedError):
            failed_urls.append(exc.url)
    return failed_urls
import dataclasses
import itertools
import collections
import re
import tempfile
import pathlib
import shutil
import sys
import json
import functools
from typing import Iterable, Optional, Dict, List, Tuple, Set, Union, Callable
import git # type: ignore
import repobee_plug as plug
import _repobee.command.teams
import _repobee.git
import _repobee.ext.gitea
import _repobee.hash
import _repobee.exception
from _repobee import formatters
from _repobee import featflags
from _repobee.command import progresswrappers
# Issue opened in reviewed repos when the caller provides no custom issue.
DEFAULT_REVIEW_ISSUE = plug.Issue(
    title="Peer review",
    body="You have been assigned to peer review this repo.",
)
# Branch that anonymized review copies are committed to and pushed on.
_DEFAULT_BRANCH = "master"
def assign_peer_reviews(
    assignment_names: Iterable[str],
    teams: Iterable[plug.StudentTeam],
    num_reviews: int,
    issue: Optional[plug.Issue],
    double_blind_key: Optional[str],
    api: plug.PlatformAPI,
) -> None:
    """Assign peer reviewers among the students to each student repo. Each
    student is assigned to review num_reviews repos, and consequently, each
    repo gets reviewed by num_reviews reviewers.

    In practice, each student repo has a review team generated (called
    <student-repo-name>-review), to which num_reviews _other_ students are
    assigned. The team itself is given pull-access to the student repo, so
    that reviewers can view code and open issues, but cannot modify the
    contents of the repo.

    Args:
        assignment_names: Names of assignments.
        teams: Team objects specifying student groups.
        num_reviews: Amount of reviews each student should perform
            (consequently, the amount of reviews of each repo)
        issue: An issue with review instructions to be opened in the considered
            repos.
        double_blind_key: If provided, use key to make double-blind review
            allocation.
        api: An implementation of :py:class:`repobee_plug.PlatformAPI` used to
            interface with the platform (e.g. GitHub or GitLab) instance.
    """
    issue = issue or DEFAULT_REVIEW_ISSUE
    # With a double-blind key this yields anonymized copies of the repos.
    fetched_repo_by_name = _fetch_repos_for_review(
        assignment_names, teams, double_blind_key, api
    )
    assigned_reviews = []
    for assignment_name in assignment_names:
        plug.echo(f"Allocating reviews for {assignment_name}")
        create_review_team_name = functools.partial(
            _review_team_name, assignment=assignment_name, key=double_blind_key
        )
        # Closure over assignment_name; consumed within this iteration only,
        # so the usual late-binding pitfall does not apply.
        def get_repo_by_team_name(team_name: str) -> plug.Repo:
            repo_name = plug.generate_repo_name(team_name, assignment_name)
            return fetched_repo_by_name[repo_name]
        review_teams_with_repos = _create_review_teams(
            teams,
            num_reviews,
            get_repo_by_team_name,
            create_review_team_name,
            api,
        )
        for review_team_with_repo in review_teams_with_repos:
            _assign_review(
                review_team_with_repo.review_team,
                review_team_with_repo.repo_to_review,
                issue,
                api,
            )
            assigned_reviews.append(review_team_with_repo)
    if featflags.is_feature_enabled(
        featflags.FeatureFlag.REPOBEE_4_REVIEW_COMMANDS
    ):
        # RepoBee 4 preview: persist the allocations so `reviews check` and
        # `reviews end` can consume them later.
        output = dict(
            allocations=[a.to_dict() for a in assigned_reviews],
            num_reviews=num_reviews,
        )
        pathlib.Path("review_allocations.json").write_text(
            json.dumps(output, indent=4),
            encoding=sys.getdefaultencoding(),
        )
@dataclasses.dataclass(frozen=True)
class _ReviewTeamWithRepo:
    """Pairs a review team with the repository it is assigned to review."""

    review_team: plug.Team
    repo_to_review: plug.Repo
    def to_dict(self) -> dict:
        """Return a JSON-serializable representation of this allocation."""
        reviewed_repo = {
            "name": self.repo_to_review.name,
            "url": self.repo_to_review.url,
        }
        review_team = {
            "name": self.review_team.name,
            "members": self.review_team.members,
        }
        return {"reviewed_repo": reviewed_repo, "review_team": review_team}
def _create_review_teams(
    teams: Iterable[plug.StudentTeam],
    num_reviews: int,
    get_repo_by_team_name: Callable[[str], plug.Repo],
    create_review_team_name: Callable[[str], str],
    api: plug.PlatformAPI,
) -> Iterable[_ReviewTeamWithRepo]:
    """Lazily create review team/repo pairs for one assignment.

    Allocations come from the ``generate_review_allocations`` plugin hook.
    This is a generator: review teams are created on the platform (behind a
    progress bar) as the caller advances the iteration.
    """
    allocations = plug.manager.hook.generate_review_allocations(
        teams=teams, num_reviews=num_reviews
    )
    review_team_specifications = [
        plug.StudentTeam(
            members=allocation.review_team.members,
            name=create_review_team_name(allocation.reviewed_team),
        )
        for allocation in allocations
    ]
    review_teams_progress = _create_review_teams_with_progress_bar(
        review_team_specifications, api
    )
    # The specs were built in allocation order, so zipping the created teams
    # with the allocations pairs each review team with its reviewed team.
    for review_team, allocation in zip(review_teams_progress, allocations):
        repo_to_review = get_repo_by_team_name(allocation.reviewed_team.name)
        # write() prints through the progress bar without corrupting it.
        review_teams_progress.write(  # type: ignore
            f"Assigning {' and '.join(review_team.members)} "
            f"to review {repo_to_review.name}"
        )
        yield _ReviewTeamWithRepo(review_team, repo_to_review)
def _create_review_teams_with_progress_bar(
    review_team_specs: List[plug.StudentTeam], api: plug.PlatformAPI
) -> Iterable[plug.Team]:
    """Create the review teams on the platform, wrapped in a progress bar."""
    created_teams = _repobee.command.teams.create_teams(
        review_team_specs, plug.TeamPermission.PULL, api
    )
    progress = plug.cli.io.progress_bar(
        created_teams,
        desc="Creating review teams",
        total=len(review_team_specs),
    )
    return progress
def _assign_review(
    review_team: plug.Team,
    reviewed_repo: plug.Repo,
    issue: plug.Issue,
    api: plug.PlatformAPI,
) -> None:
    """Grant the review team read access to the repo and open the review issue."""
    api.assign_repo(review_team, reviewed_repo, plug.TeamPermission.PULL)
    # It's not possible to assign users with read-access in Gitea
    # FIXME redesign so Gitea does not require special handling
    if isinstance(api, _repobee.ext.gitea.GiteaAPI):
        assignees = None
    else:
        assignees = review_team.members
    api.create_issue(
        issue.title,
        issue.body,
        reviewed_repo,
        assignees=assignees,
    )
def _fetch_repos_for_review(
    assignment_names: Iterable[str],
    teams: Iterable[plug.StudentTeam],
    double_blind_key: Optional[str],
    api: plug.PlatformAPI,
) -> Dict[str, plug.Repo]:
    """Fetch all student repos eligible for review, keyed by repo name.

    With a double-blind key, anonymized platform copies are created and
    returned instead of the original repos.
    """
    fetched_teams = progresswrappers.get_teams(
        teams, api, desc="Fetching teams and repos"
    )
    team_repo_tuples = [
        (team, list(api.get_team_repos(team))) for team in fetched_teams
    ]
    expected_repo_names = set(
        plug.generate_repo_names(teams, assignment_names)
    )
    # Always resolve the plain repos first; this also raises if any expected
    # repo is missing.
    result = _fetch_repos(team_repo_tuples, expected_repo_names)
    if double_blind_key:
        result = _anonymize_fetched_repos(
            team_repo_tuples, expected_repo_names, double_blind_key, api
        )
    return result
def _only_expected_repos(
    repos: List[plug.Repo], expected_repo_names: Set[str]
) -> List[plug.Repo]:
    """Filter out repos whose names are not among the expected ones."""
    return list(filter(lambda repo: repo.name in expected_repo_names, repos))
def _anonymize_fetched_repos(
    team_repo_tuples: List[Tuple[plug.Team, List[plug.Repo]]],
    expected_repo_names: Set[str],
    key: str,
    api: plug.PlatformAPI,
) -> Dict[str, plug.Repo]:
    """Create anonymized copies of the expected repos, keyed by original name."""
    plug.log.info(f"Creating anonymous repos with key: {key}")
    filtered_tuples = [
        (team, _only_expected_repos(repos, expected_repo_names))
        for team, repos in team_repo_tuples
    ]
    return _create_anonymized_repos(filtered_tuples, key, api)
def _create_anonymized_repos(
    team_repo_tuples: List[Tuple[plug.Team, List[plug.Repo]]],
    key: str,
    api: plug.PlatformAPI,
) -> Dict[str, plug.Repo]:
    """Create anonymous copies of the given repositories, push them to the
    platform and return a mapping from repo name to platform repo.
    """
    with tempfile.TemporaryDirectory() as tmp_clone_dir, tempfile.TemporaryDirectory() as tmp_workdir:  # noqa
        workdir = pathlib.Path(tmp_workdir)
        clone_dir = pathlib.Path(tmp_clone_dir)
        student_repos = _clone_to_student_repos(
            team_repo_tuples, workdir, clone_dir, api
        )
        progress = plug.cli.io.progress_bar(
            student_repos, desc="Creating anonymized repos"
        )
        repo_mapping = {}
        anonymized_repos = []
        for student_repo in progress:
            anon_student, anon_platform = _create_anonymized_repo(
                student_repo, key, api
            )
            anonymized_repos.append(anon_student)
            repo_mapping[student_repo.name] = anon_platform
        # Push while the temp dirs (and thus the local clones) still exist.
        _push_to_platform(anonymized_repos, api)
        return repo_mapping
def _fetch_repos(
    team_repo_tuples: List[Tuple[plug.Team, List[plug.Repo]]],
    expected_repo_names: Set[str],
) -> Dict[str, plug.Repo]:
    """Map repo names to fetched repos, raising if any expected repo is absent."""
    all_repos = itertools.chain.from_iterable(
        repos for _, repos in team_repo_tuples
    )
    repo_name_to_fetched_repo = {repo.name: repo for repo in all_repos}
    missing = expected_repo_names - repo_name_to_fetched_repo.keys()
    if missing:
        raise plug.NotFoundError(f"Can't find repos: {', '.join(missing)}")
    return repo_name_to_fetched_repo
def _create_anonymized_repo(
    student_repo: plug.StudentRepo, key: str, api: plug.PlatformAPI
) -> Tuple[plug.StudentRepo, plug.Repo]:
    """Create the platform-side anonymized copy of one student repo.

    The repo and team names are replaced by keyed hashes and the local
    clone's commit history is squashed. Returns the anonymized StudentRepo
    (sharing the local path of the original) and the created platform repo.
    """
    anon_repo_name = _hash_if_key(student_repo.name, key=key)
    anon_review_team_name = _hash_if_key(student_repo.team.name, key=key)
    # The fingerprint lets the anonymous copy be tied back to its
    # team/repo pair without revealing either directly.
    fingerprint = _anonymous_repo_fingerprint(
        anon_review_team_name, anon_repo_name
    )
    platform_repo = api.create_repo(
        name=anon_repo_name,
        description=f"Review copy. Fingerprint: {fingerprint}",
        private=True,
    )
    _anonymize_commit_history(student_repo.path)
    return (
        plug.StudentRepo(
            name=anon_repo_name,
            team=student_repo.team,
            url=student_repo.url.replace(student_repo.name, anon_repo_name),
            _path=student_repo.path,
        ),
        platform_repo,
    )
def _anonymize_commit_history(repo_path: pathlib.Path) -> None:
    """Replace the repo's entire git history with a single fresh commit.

    Removes identifying information (authors, messages, timestamps of
    individual commits) from the repository.

    Args:
        repo_path: Path to the local repository to rewrite.
    """
    shutil.rmtree(repo_path / ".git")
    repo = git.Repo.init(repo_path)
    repo.git.add(".", "--force")
    repo.git.commit("-m", "Add project")
    # FIX: use -B (create-or-reset) so this works regardless of what
    # `git init` names the initial branch (e.g. `main` on newer git),
    # instead of requiring `master` to already exist.
    repo.git.checkout("-B", _DEFAULT_BRANCH)
def _anonymous_repo_fingerprint(team_name: str, repo_name: str) -> str:
    """Compute a fingerprint identifying an anonymized repo copy."""
    combined = team_name + repo_name
    return _repobee.hash.hash(combined)
def _clone_to_student_repos(
    team_repo_tuples: List[Tuple[plug.Team, List[plug.Repo]]],
    workdir: pathlib.Path,
    clone_dir: pathlib.Path,
    api: plug.PlatformAPI,
) -> List[plug.StudentRepo]:
    """Clone the given repos with working trees under ``workdir``, returning
    the corresponding StudentRepo objects.
    """
    student_repos = []
    for team, repos in team_repo_tuples:
        for repo in repos:
            student_repos.append(
                plug.StudentRepo(
                    name=repo.name,
                    team=plug.StudentTeam(
                        name=team.name, members=list(team.members)
                    ),
                    url=repo.url,
                    _path=workdir / team.name / repo.name,
                )
            )
    # Force evaluation of the returned iterable so cloning definitely runs.
    list(
        _repobee.git.clone_student_repos(
            student_repos, clone_dir, update_local=False, api=api
        )
    )
    return student_repos
def _push_to_platform(
    student_repos: List[plug.StudentRepo], api: plug.PlatformAPI
) -> None:
    """Push each local student repo to its (auth-inserted) remote URL."""
    specs = []
    for repo in student_repos:
        authed_url = api.insert_auth(repo.url)
        specs.append(
            _repobee.git.PushSpec(repo.path, authed_url, _DEFAULT_BRANCH)
        )
    _repobee.git.push(specs)
def _hash_if_key(s: str, key: Optional[str], max_hash_size: int = 20) -> str:
    """Hash the string with the key, if provided. Otherwise, return the input
    string.
    """
    if not key:
        return s
    return _repobee.hash.keyed_hash(s, key, max_hash_size)
def end_reviews(
    assignment_names: Iterable[str],
    students: Iterable[plug.StudentTeam],
    double_blind_key: Optional[str],
    api: plug.PlatformAPI,
) -> None:
    """Clean up review allocations.

    If normal no-blind review has been performed (i.e. ``double_blind_key`` is
    ``None``), then only review teams are deleted. If ``double_blind_key`` is
    provided, both review teams and anonymous repo copies are deleted.

    Args:
        assignment_names: Names of assignments.
        students: An iterable of student teams.
        double_blind_key: If not None, double-blind review is assumed and the
            key is used to compute hashed review team names.
        api: An implementation of :py:class:`repobee_plug.PlatformAPI` used to
            interface with the platform (e.g. GitHub or GitLab) instance.
    """
    review_team_names = [
        _review_team_name(student, assignment_name, double_blind_key)
        for student, assignment_name in itertools.product(
            students, assignment_names
        )
    ]
    teams = progresswrappers.get_teams(
        review_team_names, api, desc="Deleting review teams"
    )
    for team in teams:
        api.delete_team(team)
        plug.log.info(f"Deleted team {team.name}")
    progresswrappers.end_progress(teams)
    if double_blind_key:
        # Double-blind reviews also created anonymous repo copies.
        _delete_anonymous_repos(
            assignment_names, students, double_blind_key, api
        )
def end_reviews_repobee_4(
    allocations_file: pathlib.Path, api: plug.PlatformAPI
) -> None:
    """Preview version of RepoBee 4's version of :py:func:`end_reviews`.

    Args:
        allocations_file: Path to the JSON allocations file written when
            reviews were assigned.
        api: Platform API used to delete the review teams.
    """
    review_teams_with_repos = json.loads(
        allocations_file.read_text(sys.getdefaultencoding())
    )["allocations"]
    review_team_names = {
        allocation["review_team"]["name"]
        for allocation in review_teams_with_repos
    }
    for team in progresswrappers.get_teams(review_team_names, api):
        api.delete_team(team)
def _delete_anonymous_repos(
    assignment_names: Iterable[str],
    student_teams: Iterable[plug.StudentTeam],
    double_blind_key: str,
    api: plug.PlatformAPI,
):
    """Delete any anonymous repos created for these students and
    assignments.
    """
    anon_repo_names = [
        _hash_if_key(
            plug.generate_repo_name(team, assignment),
            key=double_blind_key,
        )
        for team, assignment in itertools.product(
            student_teams, assignment_names
        )
    ]
    repos = api.get_repos(api.get_repo_urls(anon_repo_names))
    progress = plug.cli.io.progress_bar(
        repos,
        desc="Deleting anonymous repo copies",
        total=len(anon_repo_names),
    )
    for repo in progress:
        api.delete_repo(repo)
    progresswrappers.end_progress(progress)
def check_peer_review_progress(
    assignment_names: Iterable[str],
    teams: Iterable[plug.Team],
    title_regex: str,
    num_reviews: int,
    double_blind_key: Optional[str],
    api: plug.PlatformAPI,
) -> None:
    """Check which teams have opened peer review issues in their allotted
    review repos

    Args:
        assignment_names: Names of assignments.
        teams: An iterable of student teams.
        title_regex: A regex to match against issue titles.
        num_reviews: Amount of reviews each student is expected to have made.
        double_blind_key: If not None, review team and repo names are assumed
            to be keyed hashes computed with this key.
        api: An implementation of :py:class:`repobee_plug.PlatformAPI` used to
            interface with the platform (e.g. GitHub or GitLab) instance.
    """
    teams = list(teams)
    reviews = collections.defaultdict(list)
    review_team_names = [
        _review_team_name(student_team, assignment_name, double_blind_key)
        for student_team in teams
        for assignment_name in assignment_names
    ]
    # Maps (possibly hashed) repo names back to their readable originals so
    # results can be presented with real names.
    rainbow_table = {
        _hash_if_key(repo_name, key=double_blind_key): repo_name
        for repo_name in plug.generate_repo_names(teams, assignment_names)
    }
    review_teams = progresswrappers.get_teams(
        review_team_names, api, desc="Processing review teams"
    )
    for review_team in review_teams:
        repos = list(api.get_team_repos(review_team))
        if len(repos) != 1:
            # Each review team should map to exactly one reviewed repo.
            plug.log.warning(
                f"Expected {review_team.name} to have 1 associated "
                f"repo, found {len(repos)}. "
                f"Skipping..."
            )
            continue
        reviewed_repo = repos[0]
        expected_reviewers = set(review_team.members)
        reviewing_teams = _extract_reviewing_teams(teams, expected_reviewers)
        # A review counts as done if any matching issue was authored by a
        # member of the reviewing team.
        review_issue_authors = {
            issue.author
            for issue in api.get_repo_issues(reviewed_repo)
            if re.match(title_regex, issue.title)
        }
        for team in reviewing_teams:
            reviews[str(team)].append(
                plug.Review(
                    repo=rainbow_table[reviewed_repo.name],
                    done=any(
                        map(review_issue_authors.__contains__, team.members)
                    ),
                )
            )
    plug.echo(
        formatters.format_peer_review_progress_output(
            reviews, [team.name for team in teams], num_reviews
        )
    )
def check_reviews_repobee_4(
    allocations_file: pathlib.Path, title_regex: str, api: plug.PlatformAPI
) -> None:
    """Preview version of the `reviews check` command for RepoBee 4."""
    data = json.loads(allocations_file.read_text(sys.getdefaultencoding()))
    num_reviews = int(data["num_reviews"])
    expected_reviewers = {}
    for allocation in data["allocations"]:
        repo_url = allocation["reviewed_repo"]["url"]
        expected_reviewers[repo_url] = allocation["review_team"]["members"]
    reviews = collections.defaultdict(list)
    reviewed_repos = progresswrappers.get_repos(expected_reviewers.keys(), api)
    for reviewed_repo in reviewed_repos:
        # Authors of issues whose titles match the review issue regex.
        review_issue_authors = {
            issue.author
            for issue in api.get_repo_issues(reviewed_repo)
            if re.match(title_regex, issue.title)
        }
        for reviewer in expected_reviewers[reviewed_repo.url]:
            review = plug.Review(
                repo=reviewed_repo.name,
                done=reviewer in review_issue_authors,
            )
            reviews[reviewer].append(review)
    all_reviewers = list(
        itertools.chain.from_iterable(expected_reviewers.values())
    )
    plug.echo(
        formatters.format_peer_review_progress_output(
            reviews, all_reviewers, num_reviews
        )
    )
def _review_team_name(
    team: Union[str, plug.Team, plug.StudentTeam],
    assignment: str,
    key: Optional[str],
) -> str:
    """Compute a review team name, hashed when a double-blind key is given."""
    return (
        _hash_if_key(str(team), key)
        if key
        else plug.generate_review_team_name(team, assignment)
    )
def _extract_reviewing_teams(teams, reviewers):
review_teams = []
for team in teams:
if any(map(team.members.__contains__, reviewers)):
review_teams.append(team)
return review_teams | /repobee-3.8.1.tar.gz/repobee-3.8.1/src/_repobee/command/peer.py | 0.61115 | 0.174199 | peer.py | pypi |
from typing import Optional
import repobee_plug as plug
# Name of the platform team through which teachers are granted read access.
TEACHERS_TEAM_NAME = "repobee-teachers"
# User-facing plugin description; embedded newlines are collapsed to spaces
# so the text flows naturally in help output.
# FIX: corrected the "taget" -> "target" typo in this user-facing text.
PLUGIN_DESCRIPTION = """Manager plugin for adding and removing
teachers/teaching assistants from the target organization. Teachers are granted
read access to all repositories in the organization. This plugin should not be
used with GitLab due to performance issues. (NOTE: This plugin is not stable
yet and may change without notice)""".replace(
    "\n", " "
)
_ADD_TEACHERS_DESCRIPTION = f"""
Add teachers/teaching assistants to the `{TEACHERS_TEAM_NAME}` team. This team
is in turn granted read access to all repositories in the organization. The
point of this is to allow a course responsible to allow teaching assistants to
access student repositories without being able to manipulate them. To revoke
read access, simply manually remove users from `{TEACHERS_TEAM_NAME}`.
""".replace(
    "\n", " "
)
class AddTeachers(plug.Plugin, plug.cli.Command):
    """Plugin command that adds teachers to the organization-wide teachers
    team and grants that team read access to all repositories.
    """

    __settings__ = plug.cli.command_settings(
        category=plug.cli.CoreCommand.teams,
        action="add-teachers",
        help="add teachers/teaching assistants to the organization, with read "
        "access to all repositories",
        description=_ADD_TEACHERS_DESCRIPTION,
    )
    # Usernames to add; nargs="+" requires at least one.
    teachers = plug.cli.option(
        help="one or more teachers to add", argparse_kwargs=dict(nargs="+")
    )
    def command(self, api: plug.PlatformAPI) -> Optional[plug.Result]:
        """Add the given teachers to the teachers team and give the team
        read access to every repository in the organization.
        """
        teachers_team = _get_or_create_team(TEACHERS_TEAM_NAME, api)
        existing_members = teachers_team.members
        # Only assign users that are not already members of the team.
        new_members = list(set(self.teachers) - set(existing_members))
        api.assign_members(
            teachers_team, new_members, permission=plug.TeamPermission.PULL
        )
        for repo in plug.cli.io.progress_bar(
            api.get_repos(), desc="Granting read access to repos"
        ):
            api.assign_repo(
                repo=repo,
                team=teachers_team,
                permission=plug.TeamPermission.PULL,
            )
        msg = (
            f"Added {', '.join(new_members)} to the '{TEACHERS_TEAM_NAME}' "
            "team"
        )
        return plug.Result(
            name="add-teachers", status=plug.Status.SUCCESS, msg=msg
        )
    def post_setup(self, repo: plug.StudentRepo, api: plug.PlatformAPI):
        """Add a created student repo to the teachers team."""
        platform_repo = next(iter(api.get_repos([repo.url])))
        teachers_team = _get_or_create_team(TEACHERS_TEAM_NAME, api)
        api.assign_repo(
            team=teachers_team,
            repo=platform_repo,
            permission=plug.TeamPermission.PULL,
        )
        return plug.Result(
            name="tamanager",
            status=plug.Status.SUCCESS,
            msg=f"Added to the {TEACHERS_TEAM_NAME} team",
        )
def _get_or_create_team(team_name: str, api: plug.PlatformAPI) -> plug.Team:
    """Fetch the team with the given name, creating it if it does not exist.

    Args:
        team_name: Name of the team to fetch or create.
        api: Platform API to use.
    Returns:
        The existing or newly created team.
    """
    matches = api.get_teams(team_names=[team_name])
    try:
        return next(iter(matches))
    except StopIteration:
        # FIX: create the team that was actually asked for; previously the
        # team_name parameter was ignored in favor of the hard-coded
        # TEACHERS_TEAM_NAME constant.
        return api.create_team(
            team_name, permission=plug.TeamPermission.PULL
        )
import configparser
import pathlib
import os
from typing import Any, Optional, List
from typing_extensions import Protocol
from repobee_plug import exceptions
__all__ = ["Config", "ConfigSection"]
class ConfigSection(Protocol):
    """Protocol defining how a section of the config behaves."""

    def __getitem__(self, key: str) -> Any:
        """Return the value stored under ``key``."""
        ...
    def __setitem__(self, key: str, value: Any) -> None:
        """Store ``value`` under ``key``."""
        ...
    def __contains__(self, key: str) -> bool:
        """Return True if ``key`` has a value in this section."""
        ...
class Config:
    """Object representing RepoBee's config.
    This class defines read-only inheritance. This means that when you read a
    value from the config, for example with :py:meth:`get`, it will do a
    recursive lookup in parent configs.
    Writing to a config object, e.g. ``config[section][option] = value`` does
    *not* respect inheritance, and unconditionally writes to *this* config, and
    not any of its parents. Similarly, writing to disk with :py:meth:`store`
    only writes to the most local config, and not to any of the parent configs.
    .. important::
        Changes to the config are only persisted if the :py:meth:`Config.store`
        method is called.
    .. warning::
        The behavior of this class is currently not stable. Any minor release
        of RepoBee might bring breaking changes.
    """
    # Section that always exists and holds RepoBee's own settings.
    CORE_SECTION_NAME = "repobee"
    # Option in the core section pointing at a parent config file.
    PARENT_CONFIG_KEY = "parent_config"
    def __init__(self, config_path: pathlib.Path):
        """
        Args:
            config_path: Path to the backing config file; it does not need
                to exist yet.
        """
        super().__init__()
        self._config_path = config_path
        self._config_parser = configparser.ConfigParser()
        self._parent: Optional[Config] = None
        self.create_section(self.CORE_SECTION_NAME)
        # NOTE(review): no parent is attached before refresh() runs, so this
        # check is trivially satisfied here; cycles are detected by the
        # `parent` setter. A cycle expressed purely via parent_config values
        # in the files themselves would recurse through refresh() ->
        # Config(...) -- TODO confirm that case is guarded elsewhere.
        self._check_for_cycle(paths=[])
        self.refresh()
    def refresh(self) -> None:
        """Refresh the parser by reading from the config file. Does nothing if
        the config file does not exist.
        """
        if self._config_path.exists():
            self._config_parser.read(self._config_path)
            raw_parent_path = self.get(
                self.CORE_SECTION_NAME, self.PARENT_CONFIG_KEY
            )
            if raw_parent_path:
                parent_path = self._resolve_absolute_parent_path(
                    raw_parent_path
                )
                # Constructing the parent recursively loads its own parents.
                self._parent = Config(parent_path)
    def _resolve_absolute_parent_path(
        self, raw_parent_path: str
    ) -> pathlib.Path:
        # A relative parent path is interpreted relative to this config
        # file's directory, not the current working directory.
        parent_path = pathlib.Path(raw_parent_path)
        return (
            parent_path
            if parent_path.is_absolute()
            else (self.path.parent / parent_path).resolve(strict=False)
        )
    def store(self) -> None:
        """Write the current state of the config to the config file. If the
        directory does not exist, it is created.
        """
        if not self._config_path.exists():
            # 0o700: config may contain secrets, so restrict to the owner.
            os.makedirs(self._config_path.parent, mode=0o700, exist_ok=True)
        with open(self._config_path, encoding="utf8", mode="w") as f:
            self._config_parser.write(f)
    def create_section(self, section_name: str) -> None:
        """Add a section to the config.
        Args:
            section_name: Name of the section.
        """
        return self._config_parser.add_section(section_name)
    def get(
        self, section_name: str, key: str, fallback: Optional[Any] = None
    ) -> Optional[Any]:
        """Get a value from the given section.
        Args:
            section_name: Name of the section.
            key: Key to get the value for.
            fallback: An optional fallback value to use if the section or key
                do not exist.
        Returns:
            The value for the section and key, or the fallback value if neither
            exist.
        """
        # The parent's get() is used as the fallback, which yields a
        # recursive lookup through the whole parent chain.
        return self._config_parser.get(
            section_name,
            key,
            fallback=self.parent.get(section_name, key, fallback)
            if self.parent
            else fallback,
        )
    @property
    def path(self) -> pathlib.Path:
        """Path to the config file."""
        return self._config_path
    @property
    def parent(self) -> Optional["Config"]:
        """Returns the parent config if defined, otherwise None."""
        return self._parent
    @parent.setter
    def parent(self, value: "Config") -> None:
        # Attaching a parent also records it in the core section so that it
        # survives a store()/reload roundtrip.
        self._parent = value
        self[self.CORE_SECTION_NAME][self.PARENT_CONFIG_KEY] = str(value.path)
        self._check_for_cycle([])
    def __getitem__(self, section_key: str) -> ConfigSection:
        # Returns a view; reads through it respect parent inheritance,
        # writes target only this config.
        return _ParentAwareConfigSection(self, section_key)
    def __contains__(self, section_name: str) -> bool:
        return section_name in self._config_parser
    def _check_for_cycle(self, paths: List[pathlib.Path]) -> None:
        """Check if there's a cycle in the inheritance."""
        if self.path in paths:
            cycle = " -> ".join(map(str, paths + [self.path]))
            raise exceptions.PlugError(
                f"Cyclic inheritance detected in config: {cycle}"
            )
        elif self.parent is not None:
            self.parent._check_for_cycle(paths + [self.path])
class _ParentAwareConfigSection:
    """A section of the config that respects sections from parent configs."""

    def __init__(self, config: Config, section_key: str):
        self._config = config
        self._section_key = section_key
    def __getitem__(self, key: str):
        value = self._config.get(self._section_key, key)
        if value is not None:
            return value
        raise KeyError(key)
    def __setitem__(self, key: str, value: Any):
        # Writes always target the most local config (no inheritance).
        self._config._config_parser.set(self._section_key, key, value)
    def __contains__(self, key: str) -> bool:
        return self._config.get(self._section_key, key) is not None
import dataclasses
import inspect
import enum
import itertools
from typing import List, Iterable, Optional, Any
from repobee_plug import exceptions
class APIObject:
"""Base wrapper class for platform API objects."""
def __getattribute__(self, name: str):
"""If the sought attr is 'implementation', and that attribute is None,
an AttributeError should be raise. This is because there should never
be a case where the caller tries to access a None implementation: if
it's None the caller should now without checking, as only API objects
returned by platform API (i.e. a class deriving from :py:class:`API`)
can have a reasonable value for the implementation attribute.
In all other cases, proceed as usual in getting the attribute. This
includes the case when ``name == "implementation"``, and the APIObject
does not have that attribute.
"""
attr = object.__getattribute__(self, name)
if attr is None and name == "implementation":
raise AttributeError(
"invalid access to 'implementation': not initialized"
)
return attr
class TeamPermission(enum.Enum):
"""Enum specifying team permissions on creating teams. On GitHub, for
example, this can be e.g. `push` or `pull`.
"""
PUSH = "push"
PULL = "pull"
class IssueState(enum.Enum):
"""Enum specifying a possible issue state."""
OPEN = "open"
CLOSED = "closed"
ALL = "all"
@dataclasses.dataclass(frozen=True)
class Team(APIObject):
"""Wrapper class for a Team API object."""
members: Iterable[str] = dataclasses.field(compare=False)
name: str = dataclasses.field(compare=True)
id: Any = dataclasses.field(compare=False)
implementation: Any = dataclasses.field(compare=False, repr=False)
def __post_init__(self):
object.__setattr__(self, "members", [m.lower() for m in self.members])
def __str__(self):
return self.name
def __lt__(self, o):
return isinstance(o, Team) and self.name < o.name
def __eq__(self, o):
return isinstance(o, Team) and self.name == o.name
@dataclasses.dataclass
class Issue(APIObject):
    """Wrapper class for an Issue API object."""

    title: str
    body: str
    number: Optional[int] = None
    created_at: Optional[str] = None
    author: Optional[str] = None
    state: Optional[IssueState] = None
    # Platform-specific backing object; excluded from repr and comparison.
    implementation: Optional[Any] = dataclasses.field(
        compare=False, repr=False, default=None
    )

    def __post_init__(self):
        # Normalize the author name to lowercase when one is given.
        object.__setattr__(
            self,
            "author",
            self.author.lower() if self.author is not None else None,
        )

    def to_dict(self):
        """Return a dictionary representation of this namedtuple, without
        the ``implementation`` field.
        """
        # NOTE(review): ``state`` is also omitted here, so it too is lost
        # in a to_dict -> from_dict roundtrip — confirm this is intentional.
        asdict = {
            "title": self.title,
            "body": self.body,
            "number": self.number,
            "created_at": self.created_at,
            "author": self.author,
        }
        return asdict

    @staticmethod
    def from_dict(asdict: dict) -> "Issue":
        """Take a dictionary produced by Issue.to_dict and reconstruct the
        corresponding instance. The ``implementation`` field is lost in a
        to_dict -> from_dict roundtrip.
        """
        return Issue(**asdict)
@dataclasses.dataclass
class Repo(APIObject):
    """Wrapper class for a Repo API object."""

    name: str
    description: str
    # True when the repository is private (not publicly visible).
    private: bool
    url: str
    # Platform-specific backing object; excluded from repr and comparison.
    implementation: Any = dataclasses.field(compare=False, repr=False)
class _APISpec:
    """Wrapper class for API method stubs.

    .. important::

        This class should not be inherited from directly, it serves only to
        document the behavior of a platform API. Classes that implement this
        behavior should inherit from :py:class:`PlatformAPI`.
    """

    def __init__(self, base_url, token, org_name, user):
        _not_implemented()

    def create_team(
        self,
        name: str,
        members: Optional[List[str]] = None,
        permission: TeamPermission = TeamPermission.PUSH,
    ) -> Team:
        """Create a team on the platform.

        Args:
            name: Name of the team.
            members: A list of usernames to assign as members to this team.
                Usernames that don't exist are ignored.
            permission: The permission the team should have in regards to
                repository access.
        Returns:
            The created team.
        Raises:
            :py:class:`exceptions.PlatformError`: If something goes wrong in
                communicating with the platform, in particular if the team
                already exists.
        """
        _not_implemented()

    def delete_team(self, team: Team) -> None:
        """Delete the provided team.

        Args:
            team: The team to delete.
        Raises:
            :py:class:`exceptions.PlatformError`: If something goes wrong in
                communicating with the platform.
        """
        _not_implemented()

    def get_teams(
        self, team_names: Optional[Iterable[str]] = None
    ) -> Iterable[Team]:
        """Get teams from the platform.

        Args:
            team_names: Team names to filter by. Names that do not exist on
                the platform are ignored. If ``team_names=None``, all teams
                are fetched.
        Returns:
            Teams matching the filters.
        Raises:
            :py:class:`exceptions.PlatformError`: If something goes wrong in
                communicating with the platform.
        """
        _not_implemented()

    def assign_repo(
        self, team: Team, repo: Repo, permission: TeamPermission
    ) -> None:
        """Assign a repository to a team, granting any members of the team
        permission to access the repository according to the specified
        permission.

        Args:
            team: The team to assign the repository to.
            repo: The repository to assign to the team.
            permission: The permission granted to the team's members with
                respect to accessing the repository.
        Raises:
            :py:class:`exceptions.PlatformError`: If something goes wrong in
                communicating with the platform.
        """
        _not_implemented()

    def assign_members(
        self,
        team: Team,
        members: Iterable[str],
        permission: TeamPermission = TeamPermission.PUSH,
    ) -> None:
        """Assign members to a team.

        Args:
            team: A team to assign members to.
            members: A list of usernames to assign as members to the team.
                Usernames that don't exist are ignored.
            permission: The permission to add users with.
        Raises:
            :py:class:`exceptions.PlatformError`: If something goes wrong in
                communicating with the platform.
        """
        _not_implemented()

    def create_repo(
        self,
        name: str,
        description: str,
        private: bool,
        team: Optional[Team] = None,
    ) -> Repo:
        """Create a repository.

        If the repository already exists, it is fetched instead of created.
        This somewhat unintuitive behavior is to speed up repository creation,
        as first checking if the repository exists can be a bit inconvenient
        and/or inefficient depending on the platform.

        Args:
            name: Name of the repository.
            description: Description of the repository.
            private: Visibility of the repository.
            team: The team the repository belongs to.
        Returns:
            The created (or fetched) repository.
        Raises:
            :py:class:`exceptions.PlatformError`: If something goes wrong in
                communicating with the platform.
        """
        _not_implemented()

    def delete_repo(self, repo: Repo) -> None:
        """Delete a repository.

        Args:
            repo: The repository to delete.
        Raises:
            :py:class:`exceptions.PlatformError`: If something goes wrong in
                communicating with the platform.
        """
        _not_implemented()

    def get_repos(
        self, repo_urls: Optional[List[str]] = None
    ) -> Iterable[Repo]:
        """Get repositories from the platform.

        Args:
            repo_urls: Repository URLs to filter the results by. URLs that do
                not exist on the platform are ignored. If ``repo_urls=None``,
                all repos are fetched.
        Returns:
            Repositories matching the filters.
        Raises:
            :py:class:`exceptions.PlatformError`: If something goes wrong in
                communicating with the platform.
        """
        _not_implemented()

    def get_repo(self, repo_name: str, team_name: Optional[str]) -> Repo:
        """Get a single repository.

        Args:
            repo_name: Name of the repository to fetch.
            team_name: Name of the team that owns the repository. If ``None``,
                the repository is assumed to belong to the target
                organization.
        Returns:
            The fetched repository.
        Raises:
            :py:class:`exceptions.PlatformError`: If something goes wrong in
                communicating with the platform, in particular if the repo
                or team does not exist.
        """
        _not_implemented()

    def insert_auth(self, url: str) -> str:
        """Insert authorization token into the provided URL.

        Args:
            url: A URL to the platform.
        Returns:
            The same url, but with authorization credentials inserted.
        Raises:
            :py:class:`exceptions.InvalidURL`: If the provided URL does not
                point to anything on the platform.
        """
        _not_implemented()

    def create_issue(
        self,
        title: str,
        body: str,
        repo: Repo,
        assignees: Optional[Iterable[str]] = None,
    ) -> Issue:
        """Create an issue in the provided repository.

        Args:
            title: Title of the issue.
            body: Body of the issue.
            repo: The repository in which to open the issue.
            assignees: Usernames to assign to the issue.
        Returns:
            The created issue.
        Raises:
            :py:class:`exceptions.PlatformError`: If something goes wrong in
                communicating with the platform.
        """
        _not_implemented()

    def close_issue(self, issue: Issue) -> None:
        """Close the provided issue.

        Args:
            issue: The issue to close.
        Raises:
            :py:class:`exceptions.PlatformError`: If something goes wrong in
                communicating with the platform.
        """
        _not_implemented()

    def get_team_repos(self, team: Team) -> Iterable[Repo]:
        """Get all repos related to a team.

        Args:
            team: The team to fetch repos from.
        Returns:
            The repos related to the provided team.
        Raises:
            :py:class:`exceptions.PlatformError`: If something goes wrong in
                communicating with the platform.
        """
        _not_implemented()

    def get_repo_issues(self, repo: Repo) -> Iterable[Issue]:
        """Get all issues related to a repo.

        Args:
            repo: The repo to fetch issues from.
        Returns:
            The issues related to the provided repo.
        Raises:
            :py:class:`exceptions.PlatformError`: If something goes wrong in
                communicating with the platform.
        """
        _not_implemented()

    def get_repo_urls(
        self,
        assignment_names: Iterable[str],
        org_name: Optional[str] = None,
        team_names: Optional[List[str]] = None,
        insert_auth: bool = False,
    ) -> List[str]:
        """Get repo urls for all specified repo names in the organization. As
        checking if every single repo actually exists takes a long time with a
        typical REST API, this function does not in general guarantee that the
        urls returned actually correspond to existing repos.

        If the ``org_name`` argument is supplied, urls are computed relative
        to that organization. If it is not supplied, the target organization
        is used.

        If the ``team_names`` argument is supplied, student repo urls are
        computed instead of master repo urls.

        Args:
            assignment_names: A list of master repository names.
            org_name: Organization in which repos are expected. Defaults to
                the target organization of the API instance.
            team_names: A list of team names specifying student groups.
            insert_auth: Whether to also insert authorization credentials
                into the urls.
        Returns:
            A list of urls corresponding to the repo names.
        """
        _not_implemented()

    def extract_repo_name(self, repo_url: str) -> str:
        """Extract a repo name from the provided url.

        Args:
            repo_url: A URL to a repository.
        Returns:
            The name of the repository corresponding to the url.
        """
        _not_implemented()

    def for_organization(self, org_name: str) -> "PlatformAPI":
        """Create a copy of this API instance, targeting the given
        organization.

        Args:
            org_name: Name of the organization to target.
        Returns:
            A copy of the receiver API, but targeting the provided
            organization.
        """
        _not_implemented()

    @staticmethod
    def verify_settings(
        user: str,
        org_name: str,
        base_url: str,
        token: str,
        template_org_name: Optional[str] = None,
    ):
        """Verify the following (to the extent that is possible and makes
        sense for the specific platform):

        0. An internet connection is available
        1. Base url is correct
        2. The token has sufficient access privileges
        3. Target organization (specified by ``org_name``) exists
            - If template_org_name is supplied, this is also checked to
              exist.
        4. User is owner in organization (verify by getting organization
           member list and checking roles)
            - If template_org_name is supplied, user is also checked to be
              an owner of it.

        Should raise an appropriate subclass of
        :py:class:`~repobee_plug.PlatformError` when a problem is
        encountered.

        Args:
            user: The username to try to fetch.
            org_name: Name of the target organization.
            base_url: A base url to a github API.
            token: A secure OAUTH2 token.
            template_org_name: Name of the template (master) organization.
        Returns:
            True if the connection is well formed.
        Raises:
            :py:class:`~repobee_plug.PlatformError`
        """
        _not_implemented()
def _not_implemented():
    """Raise a NotImplementedError signalling that the chosen platform API
    does not support the invoked functionality.
    """
    raise NotImplementedError(
        "The chosen API does not currently support this functionality"
    )
def methods(attrdict):
    """Return all public methods and __init__ for some class.

    Args:
        attrdict: A class ``__dict__``-like mapping of attribute names to
            values.
    Returns:
        A dict mapping each qualifying name to its callable.
    """

    def _is_api_method(attr_name, attr_value):
        # Private helpers and dunders other than __init__ are excluded.
        return callable(attr_value) and (
            not attr_name.startswith("_") or attr_name == "__init__"
        )

    return {
        attr_name: attr_value
        for attr_name, attr_value in attrdict.items()
        if _is_api_method(attr_name, attr_value)
    }
def parameters(function):
    """Extract parameter names and default arguments from a function.

    Args:
        function: Any callable that :py:func:`inspect.signature` accepts.
    Returns:
        A list of ``(name, default)`` tuples in declaration order; a
        parameter without a default gets ``inspect.Parameter.empty``.
    """
    signature = inspect.signature(function)
    extracted = []
    for parameter in signature.parameters.values():
        extracted.append((parameter.name, parameter.default))
    return extracted
def check_init_params(reference_params, compare_params):
    """Check that the compare __init__'s parameters are a subset of the
    reference class's version.

    Args:
        reference_params: ``(name, default)`` tuples of the reference
            ``__init__``.
        compare_params: ``(name, default)`` tuples of the ``__init__``
            under inspection.
    Raises:
        exceptions.APIImplementationError: If the compared ``__init__``
            accepts any parameter the reference does not.
    """
    # Tuples are compared whole, so a differing default value also counts
    # as an unexpected argument.
    extra = set(compare_params).difference(reference_params)
    if extra:
        raise exceptions.APIImplementationError(
            f"unexpected arguments to __init__: {extra}"
        )
def check_parameters(reference, compare):
    """Check if the parameters match, one by one. Stop at the first diff and
    raise an exception for that parameter.

    An exception is made for __init__, for which the compare may be a subset
    of the reference in no particular order.

    Args:
        reference: The spec function whose signature is authoritative.
        compare: The implementation function to validate.
    Raises:
        exceptions.APIImplementationError: On the first mismatching
            parameter.
    """
    ref_params = parameters(reference)
    cmp_params = parameters(compare)

    if reference.__name__ == "__init__":
        # __init__ only needs to be a subset, order does not matter.
        check_init_params(ref_params, cmp_params)
        return

    # zip_longest pads with None so a missing/extra parameter also triggers
    # a mismatch.
    for ref, cmp in itertools.zip_longest(ref_params, cmp_params):
        if ref != cmp:
            raise exceptions.APIImplementationError(
                f"{reference.__name__}: expected parameter '{ref}', found '{cmp}'"
            )
class _APIMeta(type):
    """Metaclass for an API implementation. All public methods must be a
    specified api method, but all api methods do not need to be implemented.
    """

    def __new__(cls, name, bases, attrdict):
        """Validate the class body against :py:class:`_APISpec` before
        creating the class.

        Raises:
            exceptions.APIImplementationError: If the class defines a public
                method that is not part of the API spec, or implements a
                spec method with an incompatible signature.
        """
        api_methods = methods(_APISpec.__dict__)
        implemented_methods = methods(attrdict)
        # Public methods that do not correspond to any spec method.
        non_api_methods = set(implemented_methods.keys()) - set(
            api_methods.keys()
        )
        if non_api_methods:
            raise exceptions.APIImplementationError(
                f"non-API methods may not be public: {non_api_methods}"
            )
        # Only the methods that are actually implemented get their
        # signatures checked.
        for method_name, method in api_methods.items():
            if method_name in implemented_methods:
                check_parameters(method, implemented_methods[method_name])
        return super().__new__(cls, name, bases, attrdict)
class PlatformAPI(_APISpec, metaclass=_APIMeta):
    """API base class that all API implementations should inherit from. This
    class functions similarly to an abstract base class, but with a few key
    distinctions that affect the inheriting class.

    1. Public methods *must* override one of the public methods of
       :py:class:`_APISpec`. If an inheriting class defines any other public
       method, an :py:class:`~repobee_plug.PlatformError` is raised when the
       class is defined.
    2. All public methods in :py:class:`_APISpec` have a default
       implementation that simply raise a :py:class:`NotImplementedError`.
       There is no requirement to implement any of them.
    """
import collections
from typing import Optional, Mapping, Callable, Any, TypeVar
from repobee_plug import exceptions
AnyFunction = Callable[..., Any]
T = TypeVar("T")
Deprecation = collections.namedtuple(
"Deprecation", ["replacement", "remove_by_version"]
)
Deprecation.__doc__ = """
Args:
replacement (str): The functionality that replaces the deprecated
functionality.
remove_by_version (str): A version number on the form
``MAJOR.MINOR.PATCH`` by which the deprecated functionality will be
removed.
"""
def deprecate(
    remove_by_version: str, replacement: Optional[str] = None
) -> Callable[[T], T]:
    """Return a decorator that marks a hook function as deprecated.
    Currently this is only used for deprecation of hook functions, but it
    may be expanded to deprecate other things in the future.

    Args:
        remove_by_version: A string that should contain a version number.
        replacement: An optional string with the name of the replacing
            function.
    Returns:
        A decorator that registers the deprecation and returns the function
        unchanged.
    """
    deprecation = Deprecation(
        replacement=replacement, remove_by_version=remove_by_version
    )

    def _mark_deprecated(func):
        # Only hook functions carry the repobee_plug_spec attribute.
        if "repobee_plug_spec" not in dir(func):
            raise exceptions.PlugError(
                "can't deprecate non-hook function", func=func
            )
        _Deprecations().deprecate_hook(func.__name__, deprecation)
        return func

    return _mark_deprecated
def deprecated_hooks() -> Mapping[str, Deprecation]:
    """Report all currently registered hook deprecations.

    Returns:
        A mapping of hook names to :py:class:`~containers.Deprecation`
        tuples.
    """
    # Copy the registry so callers cannot mutate the singleton's state.
    registry = _Deprecations()
    return dict(registry.deprecated_hooks)
class _Deprecations:
    """Class for keeping track of deprecated functionality. This class is a
    singleton accessed through its constructor: every call to
    ``_Deprecations()`` returns the same instance, and only the very first
    call actually creates one.
    """

    _instance = None

    # Maps hook names to their Deprecation tuples.
    deprecated_hooks: dict

    def __new__(cls):
        if cls._instance is None:
            instance = super().__new__(cls)
            instance.deprecated_hooks = {}
            cls._instance = instance
        return cls._instance

    def deprecate_hook(self, hook_name: str, deprecation: Deprecation) -> None:
        """Deprecate a hook function with the given name.

        Args:
            hook_name: Name of the hook to deprecate.
            deprecation: A Deprecation tuple.
        """
        self.deprecated_hooks[hook_name] = deprecation
class PlugError(Exception):
    """Base class for all repobee_plug exceptions."""

    def __init__(self, *args, **kwargs):
        """Instantiate a PlugError.

        Args:
            args: Positional arguments, passed straight through to
                :py:class:`Exception`. Typically you should only pass an
                error message here.
            kwargs: Keyword arguments describing what went wrong, kept for
                later introspection via :py:attr:`kwargs`. For example, if
                the argument ``a`` caused the error, pass ``a=a``.
        """
        super().__init__(*args)
        self._kwargs = kwargs

    @property
    def kwargs(self):
        # Hand out a copy so callers cannot mutate the stored arguments.
        return dict(self._kwargs)

    def __str__(self):
        base_message = super().__str__()
        if not self._kwargs:
            return base_message
        kwarg_description = ", ".join(
            f"{key}={value}" for key, value in self._kwargs.items()
        )
        return f"{base_message}. Passed arguments: {kwarg_description}"
class HookNameError(PlugError):
    """Raised when a public method in a class that inherits from
    :py:class:`~repobee_plug.Plugin` does not have a hook name.
    """
class InternetConnectionUnavailable(PlugError):
    """Raised when an internet connection cannot be established."""

    def __init__(self, msg="could not establish an Internet connection"):
        # The default message suits the common case; callers may override it.
        super().__init__(msg)
class APIImplementationError(PlugError):
    """Raised when an API is defined incorrectly."""
class PlatformError(PlugError):
    """An exception raised when the API responds with an error code."""

    def __init__(self, msg="", status=None):
        """
        Args:
            msg: Human-readable error message.
            status: The response's status code, if any (presumably an HTTP
                status code — confirm with callers).
        """
        super().__init__(msg)
        self.status = status
# Specialization of PlatformError for missing resources.
class NotFoundError(PlatformError):
    """An exception raised when a platform API fails to find a resource."""
class ServiceNotFoundError(PlatformError):
    """Raised if the base url can't be located."""
class BadCredentials(PlatformError):
    """Raised when credentials are rejected."""
class UnexpectedException(PlatformError):
    """An exception raised when an API request raises an unexpected
    exception.
    """
class InvalidURL(PlatformError):
    """Error to raise if a URL is provided to the platform API, but it is
    not a valid URL for the platform.
    """
class FileError(PlugError):
    """Raised if something goes wrong with reading from or writing to a
    file.
    """
import abc
from typing import Tuple, Set, List, Mapping, Optional, Iterable, Union
from repobee_plug._immutable import ImmutableMixin
class Category(ImmutableMixin, abc.ABC):
    """Class describing a command category for RepoBee's CLI. The purpose of
    this class is to make it easy to programmatically access the different
    commands in RepoBee.

    A full command in RepoBee typically takes the following form:

    .. code-block:: bash

        $ repobee <category> <action> [options ...]

    For example, the command ``repobee issues list`` has category ``issues``
    and action ``list``. Actions are unique only within their category.
    """

    # NOTE: 'help' intentionally mirrors the argparse parameter name, even
    # though it shadows the builtin.
    help: str = ""
    description: str = ""
    name: str
    actions: Tuple["Action"]
    action_names: Set[str]
    _action_table: Mapping[str, "Action"]

    def __init__(
        self,
        name: Optional[str] = None,
        action_names: Optional[Set[str]] = None,
        help: Optional[str] = None,
        description: Optional[str] = None,
    ):
        # determine the name of this category based on the runtime type of
        # the inheriting class
        name = name or self.__class__.__name__.lower().strip("_")
        # determine the action names based on type annotations in the
        # inheriting class
        action_names = (action_names or set()) | {
            name
            for name, tpe in self.__annotations__.items()
            if isinstance(tpe, type) and issubclass(tpe, Action)
        }

        # ImmutableMixin forbids plain assignment, hence object.__setattr__.
        object.__setattr__(self, "help", help or self.help)
        object.__setattr__(
            self, "description", description or self.description
        )
        object.__setattr__(self, "name", name)
        object.__setattr__(self, "action_names", set(action_names))
        # This is just to reserve the name 'actions'
        object.__setattr__(self, "actions", None)

        # Guard against action names clashing with attributes that already
        # exist on the instance.
        for key in self.__dict__:
            if key in action_names:
                raise ValueError(f"Illegal action name: {key}")

        # Each action is exposed both as an attribute (underscored name) and
        # in the 'actions' tuple (hyphenated CLI name).
        actions = []
        for action_name in action_names:
            action = Action(action_name.replace("_", "-"), self)
            object.__setattr__(self, action_name.replace("-", "_"), action)
            actions.append(action)

        object.__setattr__(self, "actions", tuple(actions))
        object.__setattr__(self, "_action_table", {a.name: a for a in actions})

    def get(self, key: str) -> Optional["Action"]:
        # Dict-like access that returns None for unknown action names.
        return self._action_table.get(key)

    def __getitem__(self, key: str) -> "Action":
        return self._action_table[key]

    def __iter__(self) -> Iterable["Action"]:
        return iter(self.actions)

    def __len__(self):
        return len(self.actions)

    def __repr__(self):
        return f"Category(name={self.name}, actions={self.action_names})"

    def __str__(self):
        return self.name

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.name == other.name

    def __hash__(self):
        return hash(repr(self))

    def __getattr__(self, key):
        """We implement getattr such that linters won't complain about
        dynamically added members.
        """
        return object.__getattribute__(self, key)
class Action(ImmutableMixin):
    """Class describing a RepoBee CLI action.

    Attributes:
        name: Name of this action.
        category: The category this action belongs to.
    """

    name: str
    category: Category

    def __init__(self, name: str, category: Category):
        # object.__setattr__ bypasses the immutability enforced by
        # ImmutableMixin.
        object.__setattr__(self, "name", name)
        object.__setattr__(self, "category", category)

    def __repr__(self):
        return f"<Action(name={self.name},category={self.category})>"

    def __str__(self):
        return f"{self.category.name} {self.name}"

    def __eq__(self, other):
        return (
            isinstance(other, self.__class__)
            and self.name == other.name
            and self.category == other.category
        )

    def __hash__(self):
        # Hash on the "category-name action-name" string so equal actions
        # hash equally.
        return hash(str(self))

    def as_name_dict(self) -> Mapping[str, str]:
        """This is a convenience method for testing that returns a dictionary
        on the following form:

        .. code-block:: python

            {"category": self.category.name, "action": self.name}

        Returns:
            A dictionary with the name of this action and its category.
        """
        return {"category": self.category.name, "action": self.name}

    def as_name_tuple(self) -> Tuple[str, str]:
        """This is a convenience method for testing that returns a tuple
        on the following form:

        .. code-block:: python

            (self.category.name, self.name)

        Returns:
            A tuple with the name of this action and its category.
        """
        return (self.category.name, self.name)

    def astuple(self) -> Tuple["Category", "Action"]:
        """Same as :py:meth:`Action.as_name_tuple`, but with the proper
        :py:class:`Category` and :py:class:`Action` objects instead of
        strings.

        Returns:
            A tuple with the category and action.
        """
        return (self.category, self)

    def asdict(self) -> Mapping[str, Union["Category", "Action"]]:
        """Same as :py:meth:`Action.as_name_dict`, but with the proper
        :py:class:`Category` and :py:class:`Action` objects instead of
        strings.

        Returns:
            A dictionary with the category and action.
        """
        return {"category": self.category, "action": self}
def category(
    name: str, action_names: List[str], help: str = "", description: str = ""
) -> "Category":
    """Create a category for CLI actions.

    Args:
        name: Name of the category.
        action_names: The actions of this category.
        help: Short help text shown when listing categories.
        description: Longer description shown in the category's own help
            section.
    Returns:
        A CLI category.
    """
    # Category expects a set of action names; duplicates collapse here.
    unique_action_names = set(action_names)
    return Category(
        name=name,
        action_names=unique_action_names,
        help=help,
        description=description,
    )
import collections
from typing import Optional, Union, List
from repobee_plug.cli.categorization import Category, Action
from repobee_plug.cli.base import BaseParser
# Settings wrapper for a plugin command; constructed via
# :py:func:`command_settings`.
CommandSettings = collections.namedtuple(
    "CommandSettings",
    [
        "action",
        "category",
        "help",
        "description",
        "base_parsers",
        "config_section_name",
    ],
)

# Settings wrapper for a command extension; constructed via
# :py:func:`command_extension_settings`.
CommandExtensionSettings = collections.namedtuple(
    "CommandExtensionSettings", ["actions", "config_section_name"]
)
def command_settings(
    action: Optional[Union[str, Action]] = None,
    category: Optional[Category] = None,
    help: str = "",
    description: str = "",
    base_parsers: Optional[List[BaseParser]] = None,
    config_section_name: Optional[str] = None,
) -> CommandSettings:
    """Create a settings object for a :py:class:`Command`.

    Example usage:

    .. code-block:: python
        :caption: ext.py

        import repobee_plug as plug

        class Ext(plug.Plugin, plug.cli.Command):
            __settings__ = plug.cli.command_settings(
                action_name="hello",
                category=plug.cli.CoreCommand.config,
            )

            def command(self):
                print("Hello, world!")

    This can then be called with:

    .. code-block:: bash

        $ repobee -p ext.py config hello
        Hello, world!

    Args:
        action: The name of this command, or a :py:class:`Action` object
            that defines both category and action for the command. Defaults
            to the name of the plugin class.
        category: The category to place this command in. If not specified,
            then the command will be top-level (i.e. uncategorized). If
            ``action`` is an :py:class:`Action` (as opposed to a ``str``),
            then this argument is not allowed.
        help: A help section for the command. This appears when listing the
            help section of the command's category.
        description: A help section for the command. This appears when
            listing the help section for the command itself.
        base_parsers: A list of base parsers to add to the command.
        config_section_name: The name of the configuration section the
            command should look for configurable options in. Defaults to
            the name of the plugin the command is defined in.
    Returns:
        A settings object used internally by RepoBee.
    """
    resolved_category = category
    if isinstance(action, Action):
        # An Action already carries its category, so an explicit category
        # would be ambiguous.
        if resolved_category:
            raise TypeError(
                "argument 'category' not allowed when argument 'action' is an "
                "Action object"
            )
        resolved_category = action.category

    return CommandSettings(
        action=action,
        category=resolved_category,
        help=help,
        description=description,
        base_parsers=base_parsers,
        config_section_name=config_section_name,
    )
def command_extension_settings(
    actions: List["Action"], config_section_name: Optional[str] = None
) -> CommandExtensionSettings:
    """Settings for a :py:class:`CommandExtension`.

    Args:
        actions: A list of actions to extend.
        config_section_name: Name of the configuration section that the
            command extension will fetch configuration values from.
            Defaults to the name of the plugin in which the extension is
            defined.
    Returns:
        A wrapper object for settings.
    """
    # An extension that extends no actions would never be invoked.
    if actions:
        return CommandExtensionSettings(
            actions=actions, config_section_name=config_section_name
        )
    raise ValueError(
        f"argument 'actions' must be a non-empty list: {actions}"
    )
import enum
import itertools
from typing import Iterator
from repobee_plug.cli.categorization import Category, Action
from repobee_plug._immutable import ImmutableMixin
class _CoreCommand(ImmutableMixin):
    """The core CLI specification for RepoBee. Specifies the categories and
    their actions.
    """

    def iter_actions(self) -> Iterator[Action]:
        """Iterate over all command actions."""
        return iter(self)

    def __call__(self, key):
        # Allow lookup of a category by name, e.g. ``core_command("repos")``.
        category_map = {c.name: c for c in self._categories}
        if key not in category_map:
            raise ValueError(f"No such category: '{key}'")
        return category_map[key]

    def __iter__(self) -> Iterator[Action]:
        # Flatten the actions of every category into a single iterator.
        return itertools.chain.from_iterable(map(iter, self._categories))

    def __len__(self):
        return sum(map(len, self._categories))

    @property
    def _categories(self):
        # Collect all Category instances assigned as class attributes below.
        return [
            attr
            for attr in self.__class__.__dict__.values()
            if isinstance(attr, Category)
        ]

    # Category declares its actions through the Action-typed annotations in
    # each subclass body.
    class _Repos(Category):
        setup: Action
        update: Action
        clone: Action
        migrate: Action

    class _Issues(Category):
        open: Action
        close: Action
        list: Action

    class _Config(Category):
        show: Action
        verify: Action

    class _Reviews(Category):
        assign: Action
        check: Action
        end: Action

    class _Teams(Category):
        create: Action

    # Singleton category instances exposed as attributes.
    repos = _Repos()
    issues = _Issues()
    config = _Config()
    reviews = _Reviews()
    teams = _Teams()
class BaseParser(enum.Enum):
    """Enumeration of base parsers that an extension command can request to
    have added to it.

    Attributes:
        BASE: Represents the base parser, which includes the ``--user``,
            ``--org-name``, ``--base-url`` and ``--token`` arguments.
        STUDENTS: Represents the students parser, which includes the
            ``--students`` and ``--students-file`` arguments.
        ASSIGNMENTS: Represents the assignments parser, which includes the
            ``--assignments`` argument.
        REPO_DISCOVERY: Represents the repo discovery parser, which adds
            both the ``--assignments`` and the ``--discover-repos``
            arguments.
        TEMPLATE_ORG: Represents the master organization parser, which
            includes the ``--master-org`` argument.
    """

    BASE = "base"
    STUDENTS = "students"
    ASSIGNMENTS = "assignments"
    REPO_DISCOVERY = "repo-discovery"
    TEMPLATE_ORG = "template-org"
import argparse
import inspect
from repobee_plug.cli import settings
class CommandExtension:
    """Mixin class for use with the Plugin class. Marks the extending class
    as a command extension, that adds options to an existing command.
    """

    # Parsed command line arguments, set by the framework at runtime.
    args: argparse.Namespace
    # Extension settings; see :py:func:`command_extension_settings`.
    __settings__: settings.CommandExtensionSettings

    def __getattr__(self, key):
        """We implement getattr such that linters won't complain about
        dynamically added members.
        """
        return object.__getattribute__(self, key)
class Command:
    """Mixin class for use with the Plugin class. Explicitly marks a class as
    a plugin command.

    A plugin command must have a command function defined in the class on the
    following form:

    .. code-block:: python

        def command(self) -> Optional[plug.Result]:
            pass

    Note that the type hints are not required, so the callback can be defined
    like this instead:

    .. code-block:: python

        def command(self):
            pass

    Example usage:

    .. code-block:: python
        :caption: command.py

        import repobee_plug as plug

        class Greeting(plug.Plugin, plug.cli.Command):

            name = plug.cli.option(
                short_name="-n", help="your name", required=True
            )
            age = plug.cli.option(
                converter=int, help="your age", default=30
            )

            def command(self):
                print(f"Hello, my name is {self.name} and I am {self.age}")

    Note that the file is called ``command.py``. We can run this command with
    RepoBee like so:

    .. code-block:: bash

        $ repobee -p command.py greeting -n Alice
        Hello, my name is Alice and I am 30

    If your command requires the platform api, simply add an argument called
    ``api`` to the ``command`` function.

    .. code-block:: python
        :caption: Command function that requires the platform API

        def command(self, api: plug.PlatformAPI):
            pass
    """

    # Parsed command line arguments, set by the framework before ``command``
    # is invoked.
    args: argparse.Namespace
    # Command settings; see :py:func:`command_settings`.
    __settings__: settings.CommandSettings

    def __getattr__(self, key):
        """We implement getattr such that linters won't complain about
        dynamically added members.
        """
        return object.__getattribute__(self, key)

    def __requires_api__(self) -> bool:
        """Returns ``True`` if this command requires the platform API."""
        # The API is injected only when the command function declares an
        # ``api`` parameter.
        return "api" in inspect.signature(self.command).parameters
import logging
import os
import tempfile
from repoman.depot import Depot
from repoman.depot_operations import DepotOperations
from repoman.roster import Roster, RosterError
logger = logging.getLogger(__name__)
class CloneProvisionError(Exception):
    """Raised when a repo cannot be provisioned."""
    # The redundant ``pass`` after the docstring was removed; a docstring
    # alone is a valid class body.
class DepotManager(object):
    """
    Acts as a public facing API for working with managed clones.

    :param main_workspace: directory where all the workspaces will be
        created.
    :type main_workspace: string
    :param repo_kind: Repository type
    :type repo_kind: string
    :param main_source: FIXME
    :type main_source: string
    """

    # Name of the main repo cache.
    cache_name = 'main_cache'
    # Name of the file storing the roster.
    squadron_roster_name = 'squadron_roster.db'
    # Prefix for the clones used by the workers.
    workspaces_prefix = 'workspace'
def __init__(self,
             main_workspace="~/.repo",
             repo_kind='hg',
             main_source=None):
    """Prepare the main workspace directory, the main cache depot and the
    squadron roster.

    :param main_workspace: directory where all the workspaces will be
        created.
    :param repo_kind: repository type (e.g. 'hg').
    :param main_source: source used when initializing the main cache.
    :raises CloneProvisionError: if the environment cannot be prepared.
    """
    self.dvcs = DepotOperations.get_depot_operations(repo_kind)
    try:
        self.main_work_path = os.path.expanduser(main_workspace)
        logger.debug('Main workspace: %s' % self.main_work_path)
        self.main_cache_path = os.path.join(self.main_work_path,
                                            DepotManager.cache_name)
        self.squadron_roster_path = os.path.join(
            self.main_work_path, DepotManager.squadron_roster_name)
        # Create the environment.
        if not os.path.isdir(self.main_work_path):
            os.makedirs(self.main_work_path)
        # Create main cache.
        if not self.dvcs.is_a_depot(self.main_cache_path):
            self.main_cache = self.dvcs.init_depot(self.main_cache_path,
                                                   source=main_source)
        else:
            self.main_cache = Depot(self.main_cache_path, None, self.dvcs)
        self.roster = Roster(self.squadron_roster_path)
    # 'except Exception as e' replaces the Python-2-only 'Exception, e'
    # form; the 'as' syntax works on Python 2.6+ and Python 3 alike.
    except Exception as e:
        raise CloneProvisionError(e)
def _provision_new_clone(self):
    """Create a fresh clone directory under the main workspace and
    initialize a depot in it, backed by the main cache.

    :returns: the newly initialized depot.
    :raises CloneProvisionError: when the clone cannot be created.
    """
    try:
        # Create a new safe directory for the clone.
        clone_directory = tempfile.mkdtemp(
            prefix=DepotManager.workspaces_prefix,
            dir=self.main_work_path)
        # Create repo (Using the cache)
        result = self.dvcs.init_depot(
            clone_directory, parent=self.main_cache)
    except Exception:
        logger.exception("Error provisioning new clone")
        raise CloneProvisionError("Error provisioning new clone")
    return result
def give_me_depot(self, task_guid, task_name,
requirements=None, default_source=None):
"""
Reserves or prepares a new repository workspace.
:param task_guid: Identifier of the task reserving the clone.
:param task_name: Name of the task for information purposes
:param requirements: requirements to pull
:param default_source: default clone source
:returns: a free repo.
:rtype: :py:class:`~repoman.depot.Depot`
:raises RepoProvisionError: When a new repo cannot be provisioned.
"""
assert task_guid, "Error getting clone, task_guid is mandatory"
assert task_name, "Error getting clone, task_name is mandatory"
try:
roster_entry = self.roster.reserve_clone(task_guid, task_name)
logger.debug('roster: %s' % roster_entry)
clone = self.dvcs.get_depot_from_path(
roster_entry.path, parent=self.main_cache)
except RosterError:
logger.debug('no roster entry found, cloning')
# Create a new clone in the squadron if none are free
clone = self._provision_new_clone()
self.roster.add(clone.path, task_guid, task_name)
if default_source is not None:
clone.set_source(default_source)
if requirements is not None:
# Request the refresh to comply with the requirements.
clone.request_refresh(requirements)
return clone
def give_me_depot_from_path(self, path):
"""
Gets a repository from the current path without checking its state, no
matter if it's FREE or INUSE
:param path: depot path to get
:type path: string
"""
if self.dvcs.is_a_depot(path):
return self.dvcs.get_depot_from_path(path, parent=self.main_cache)
raise CloneProvisionError(
"Error getting clone from path %s, it doesn't exist" % path)
def free_depot(self, depot, task_guid):
"""
Frees a repository for new uses.
:param clone: a RepoWorkspace to be freed from use.
:param task_guid: Identifier of the task reserving the clone.
:raises RepoFreeError: When a repo cannot be freed.
"""
self.dvcs.clear_depot(depot.path)
self.roster.free_clone(
self.get_not_available_clone(depot.path), task_guid)
@staticmethod
def _get_first_matching_clone(clone_list, path):
for clone in clone_list:
if clone.path == path:
return clone
return None
def get_available_clone(self, path):
"""
:returns: a clone with the available clone specified by path
:rtype: RepoWorkspace
"""
clone_list = self.roster.get_available()
return self._get_first_matching_clone(clone_list, path)
def get_not_available_clone(self, path):
"""
:returns: a clone with the not available clone specified by path
:rtype: RepoWorkspace
"""
clone_list = self.roster.get_not_available()
return self._get_first_matching_clone(clone_list, path) | /repoman-scm-0.7.1.tar.gz/repoman-scm-0.7.1/repoman/depot_manager.py | 0.583441 | 0.173884 | depot_manager.py | pypi |
import logging
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
from repoman.utils.introspection.dynamic_class_loading import \
get_class_from_name
logger = logging.getLogger('MultiRepoIndexer')
class MultiRepoIndexer(object):
    """
    MultiRepoIndexer is a repo indexer that searches in multiple sources
    ordered by priority. If one doesn't find the data, it will search
    in the next one.

    You can register as many repository indexers as you want.

    :param repository_name_matrix: is a dict that represents each repository
                                   name for the registered indexers, the dict
                                   key is the repository id
    :type repository_name_matrix: dict
    :param indexers_list: Every item in the list is a tuple
                          that contains: priority, indexer path, indexer id
                          and another tuple with the auth credentials
    :type indexers_list: list of indexers
    """

    # priority -> indexer instance, kept sorted by ascending priority.
    _indexers = None
    # repository id -> repository name.
    _repo_name_matrix = None

    def __init__(self, repository_name_matrix, indexers_list=None):
        self._indexers = OrderedDict()
        self._repo_name_matrix = repository_name_matrix
        if indexers_list:
            for (priority, indexer, id, auth) in indexers_list:
                self.register_indexer(priority, indexer, id, auth)

    def register_indexer(self, priority, indexer, id, auth):
        """
        This method registers a new indexer

        :param priority: indexer priority
        :type priority: int
        :param indexer: module plus class name to instantiate,
                        having this form: module1.module2.ClassName
        :type indexer: string
        :param id: repository
        :type id: string
        :param auth: tuple that contains 3 elements: url, user
                     and password
        :type auth: tuple
        """
        indexer_clazz = get_class_from_name(indexer)
        if id in self._repo_name_matrix:
            repo_name = self._repo_name_matrix[id]
            self._indexers[priority] = indexer_clazz(
                repo_name, auth[0], auth[1], auth[2])
        self._indexers = self._sort_by_priority(self._indexers)

    def _sort_by_priority(self, indexers):
        """Return a new OrderedDict with keys in ascending numeric order.

        ``sorted`` is used instead of ``keys().sort()``: on Python 3
        ``dict.keys()`` returns a view, which has no ``sort`` method.
        """
        sorted_indexers = OrderedDict()
        for key in sorted(indexers, key=int):
            sorted_indexers[key] = indexers[key]
        return sorted_indexers

    def _call_indexers(self, func, *args):
        """Invoke ``func`` on each indexer in priority order; the first
        truthy result wins. Failing indexers are logged and skipped.

        :raises RepoIndexerError: if no indexer produced a result.
        """
        # .items() instead of the Python-2-only .iteritems().
        for priority, indexer in self._indexers.items():
            try:
                logger.info("Calling %s from %s",
                            func, indexer.__class__.__name__)
                method = getattr(indexer, func)
                result = method(*args)
                if result:
                    return result
            except Exception:
                logger.info("Error in indexer %s executing %s",
                            indexer.__class__.__name__, func)
        raise RepoIndexerError("Not found in any indexer")

    def get_branches(self, limit=None):
        """
        Returns the branches in the repository, it looks for them in all the
        registered repositories by priority

        :param limit:
        :type limit: int
        :returns: list of strings with the name of the branches
        """
        return self._call_indexers('get_branches', limit)

    def get_branch_log(self, branch_name, limit=None):
        """
        Returns the log for the given branch and limit

        :param branch_name:
        :type branch_name: string
        :param limit:
        :type limit: int
        :returns: list of tuples with the log information
        """
        return self._call_indexers('get_branch_log', branch_name, limit)
class RepoIndexerError(Exception):
    """Raised when no registered indexer can satisfy a request."""
from repoman.depot import Depot
class DepotOperations(object):
    """Abstract interface for SCM-specific depot operations.

    Concrete implementations live in ``repoman.<kind>.depot_operations``
    and are loaded dynamically via :meth:`get_depot_operations`.
    """

    # Identifier of the SCM kind; set by concrete subclasses.
    KIND = None

    @classmethod
    def get_depot_operations(cls, repo_kind):
        """Load and instantiate the DepotOperations implementation for
        ``repo_kind`` (e.g. ``'hg'`` -> ``repoman.hg.depot_operations``).

        :param repo_kind: name of the SCM kind.
        :returns: an instance of the concrete DepotOperations subclass.
        :raises NotImplementedError: if no implementation exists for the
            given kind.
        """
        try:
            # __import__ in python < 2.7 works not very well
            # TODO: migrate to python 3.0 and change this
            mod = __import__("repoman.%s.depot_operations" % (repo_kind,),
                             fromlist=['DepotOperations'])
            ConcreteDepotOperations = getattr(mod, 'DepotOperations')
            return ConcreteDepotOperations()
        except (ImportError, AttributeError):
            # Narrowed from a bare ``except`` so unrelated errors (e.g.
            # KeyboardInterrupt or a syntax error inside the implementation
            # module) propagate instead of being masked.
            raise NotImplementedError(
                "No depot operations available for kind %r" % (repo_kind,))

    def check_changeset_availability(self, path, changesets):
        """ Check for changesets are already in the specified depot path.

        Always request all changesets from all sources. This means
        that the changesets will always be missing.

        :param path: Path to the depot.
        :param changesets: List of strings specifying the changesets.
        :returns: List of changesets missing
        """
        raise NotImplementedError

    def grab_changesets(self, path, url, changesets):
        """
        Copies changesets from the remote url to the specified path.

        :param path: target depot for the changesets.
        :param url: depot to copy the changesets from.
        :param changesets: List of changesets ids.
        :returns: True.
        """
        raise NotImplementedError

    def init_depot(self, path, parent=None, source=None):
        """
        Initializes a new depot

        :param path: path to the main depot
        :param parent: optional parent depot.
        :param source: optional remote source to seed from.
        :returns: Depot class corresponding to the path. False otherwise.
        """
        raise NotImplementedError

    def is_a_depot(self, path):
        """
        Check if the given path corresponds to a depot.

        :param path: path to the supposed depot
        :returns: True if a depot. False otherwise.
        """
        raise NotImplementedError

    def get_depot_from_path(self, path, parent=None):
        """
        Factory method that creates Depots from a given path

        :param path: Path of the depot
        :param parent: optional parent depot.
        :returns: Depot class corresponding to the path.
        """
        # Clean up any stale SCM locks before handing the depot out.
        self._locks_cleanup(path)
        return Depot(path, parent, self)

    def _locks_cleanup(self, path):
        """
        Make sure that a clone has no unreleased locks because of some failed
        process.

        Implementation is not mandatory, but recommended in SCMs with locking
        mechanisms.

        :param path: Path of the depot
        """
        pass

    def clear_depot(self, path, parent=None):
        """
        Clear a depot just in case a previous usage left it dirty.
        This should also reset configuration

        :param path: Path of the depot
        :param parent:
        """
        raise NotImplementedError

    def set_source(self, path, source):
        """
        Set the default remote source.

        :param path: Path of the depot
        :param source: Remote URI of the source repo
        """
        raise NotImplementedError
import datetime
from hglib import util, client, templates, error, init, open, clone
# Ignore pyflakes warnings for unused imports: these names are re-exported
# so callers can use this module as a drop-in replacement for hglib.
assert error
assert init
assert clone
assert open

# Keep a reference to the original client class; client.hgclient is
# monkeypatched at the bottom of this module to be HgClientExtensions.
original_hgclient = client.hgclient

# Monkeypatch changeset template to add parents field, needed for our
# implementation of revision and parserevs. Fields are NUL-separated and
# must stay in sync with the 8-field grouping done in _parserevs.
templates.changeset = (
    '{rev}\\0'
    '{node}\\0'
    '{tags}\\0'
    '{branch}\\0'
    '{author}\\0'
    '{desc}\\0'
    '{date}\\0'
    '{parents}\\0'
)
class revision(client.revision):
    """Changeset tuple extended with a ``parents`` field (index 7),
    populated via the patched ``templates.changeset`` above.
    """

    def __new__(cls, rev, node, tags, branch, author, desc, date, parents):
        fields = (rev, node, tags, branch, author, desc, date, parents)
        return tuple.__new__(cls, fields)

    @property
    def parents(self):
        # The extra field appended by our patched changeset template.
        return self[7]
class HgClientExtensions(original_hgclient):
    """hglib client extended with strip/unbundle/churn/purge support and a
    parents-aware revision parser.
    """

    def strip(self, changeset):
        """ Inherited method
        :func:`~repoman.repository.Repository.strip`

        :param changeset: revision to strip from the repository.
        """
        # The strip command lives in the (bundled) mq extension.
        command_config = ["--config", "extensions.mq="]
        command = ["strip", "-r", changeset]
        self.rawcommand(command_config + command)

    @staticmethod
    def _parserevs(splitted):
        ''' splitted is a list of fields according to our rev.style, where
        each 8 fields compose one revision (the 8th being the parents field
        added by the patched changeset template). '''
        revs = []
        for rev in util.grouper(8, splitted):
            # truncate the timezone and convert to a local datetime
            posixtime = float(rev[6].split('.', 1)[0])
            dt = datetime.datetime.fromtimestamp(posixtime)
            # BUGFIX: previously ``list(rev[:7])`` + ``insert(6, dt)`` put
            # the raw date string into the parents slot and dropped rev[7].
            revision_fields = list(rev[:6]) + [dt, rev[7]]
            revs.append(revision(*revision_fields))
        return revs

    def unbundle(self, file, update=False, ssh=None, remotecmd=None,
                 insecure=False):
        """
        Apply one or more compressed changegroup files generated by the bundle
        command.

        Returns True on success, False if an update has unresolved files.

        file - source file name
        update - update to new branch head if changesets were unbundled
        ssh - specify ssh command to use
        remotecmd - specify hg command to run on the remote side
        insecure - do not verify server certificate (ignoring web.cacerts
        config)
        """
        args = util.cmdbuilder(
            'unbundle', file,
            u=update, e=ssh, remotecmd=remotecmd, insecure=insecure)
        eh = util.reterrorhandler(args)
        self.rawcommand(args, eh=eh)
        return bool(eh)

    def churn(self, revrange=None, date=None, template=None, dateformat=None,
              files=None, changesets=False,
              sort=None, include=None, exclude=None):
        """
        histogram of changes to the repository

        This command will display a histogram representing the number of
        changed lines or revisions, grouped according to the given template.
        The default template will group changes by author. The --dateformat
        option may be used to group the results by date instead.

        Statistics are based on the number of changed lines, or alternatively
        the number of matching revisions if the --changesets option is
        specified.

        Examples:

        # display count of changed lines for every committer
        hg churn -t '{author|email}'

        # display daily activity graph
        hg churn -f '%H' -s -c

        # display activity of developers by month
        hg churn -f '%Y-%m' -s -c

        # display count of lines changed in every year
        hg churn -f '%Y' -s

        It is possible to map alternate email addresses to a main address by
        providing a file using the following format:

        <alias email> = <actual email>

        Such a file may be specified with the --aliases option, otherwise a
        .hgchurn file will be looked for in the working directory root.

        revrange count rate for the specified revision or range
        date count rate for revisions matching date spec
        template TEMPLATE to group changesets (default: {author|email})
        dateformat FORMAT strftime-compatible format for grouping by date
        changesets count rate by number of changesets
        sort sort by key (default: sort by count)
        include include names matching the given patterns
        exclude exclude names matching the given patterns
        """
        # ``files=None`` replaces a mutable default argument; behavior for
        # callers that omitted the argument is unchanged.
        files = files or []
        args = util.cmdbuilder('churn',
                               r=revrange, c=changesets, t=template,
                               f=dateformat, s=sort,
                               d=date, I=include, X=exclude, *files)
        args.extend(['--config', 'extensions.hgext.churn='])
        return self.rawcommand(args)

    def purge(self, dirs=None, all=False, include=None, exclude=None, p=False,
              abortonerr=False):
        """
        aliases: clean

        removes files not tracked by Mercurial

        Delete files not known to Mercurial. This is useful to test local and
        uncommitted changes in an otherwise-clean source tree.

        This means that purge will delete:

        - Unknown files: files marked with "?" by "hg status"
        - Empty directories: in fact Mercurial ignores directories unless they
          contain files under source control management

        But it will leave untouched:

        - Modified and unmodified tracked files
        - Ignored files (unless --all is specified)
        - New files added to the repository (with "hg add")

        If directories are given on the command line, only files in these
        directories are considered.

        Be careful with purge, as you could irreversibly delete some files you
        forgot to add to the repository. If you only want to print the list of
        files that this program would delete, use the --print option.

        Return True on success

        all - purge ignored files too
        include - include names matching the given patterns
        exclude - exclude names matching the given patterns
        abortonerror - abort if an error occurs
        p - print filenames instead of deleting them
        """
        if dirs is None:
            # BUGFIX: the old ``[dirs]`` wrapping turned the default None
            # into ``[None]``, which was then passed to cmdbuilder.
            dirs = []
        elif not isinstance(dirs, list):
            dirs = [dirs]
        args = util.cmdbuilder(
            'purge', all=all, I=include, X=exclude, p=p, a=abortonerr, *dirs)
        args.extend(['--config', 'extensions.hgext.purge='])
        eh = util.reterrorhandler(args)
        self.rawcommand(args, eh=eh)
        return bool(eh)


# Install the extended client as the default hglib client class.
client.hgclient = HgClientExtensions
# `repomate-junit4`, a JUnit 4.12 test runner plugin for `repomate`
[](https://travis-ci.com/slarse/repomate-junit4)
[](https://codecov.io/gh/slarse/repomate-junit4)
[](https://badge.fury.io/py/repomate-junit4)


[](LICENSE)
This is a plugin for [repomate](https://github.com/slarse/repomate) that runs
JUnit4 test classes on production classes in a cloned student repo.
## Requirements
`repomate-junit4` has a few non-Python dependencies.
1. `java` must be available from the command line.
2. `javac` must be available from the command line.
- In other words, install a `JDK` version that is compatible with the files
you intend to test!
3. `junit-4.12.jar` must be available on the `CLASSPATH` variable, or configured
(see [Added CLI arguments](#added-cli-arguments) and
[Configuration file](#configuration-file)).
4. `hamcrest-core-1.3.jar` must be available on the `CLASSPATH` variable or
configured in order to make use of `hamcrest` matchers.
The `hamcrest` and `junit` jars are available from Maven Central:
```bash
wget http://central.maven.org/maven2/junit/junit/4.12/junit-4.12.jar
wget http://central.maven.org/maven2/org/hamcrest/hamcrest-core/1.3/hamcrest-core-1.3.jar
```
## Install
The recommended way to install `repomate-junit4` is with `pip`.
```bash
python3 -m pip install --user repomate-junit4
```
The plugin itself does not actually require `repomate`, but it is fairly
useless without. If `repomate` and `repomate-junit4` are both installed in the
same environment, then `repomate` should just find `repomate-junit4`.
For `repomate` to actually use `repomate-junit4`, it must be configured
in the `repomate` configuration file. Refer to the
[`repomate` docs](https://repomate.readthedocs.io/en/latest/configuration.html)
for information on the configuration file and its expected location, and the
[Configuration file](#configuration-file) section here for info on what you
need to add to it.
## Usage
### Terminology and conventions
This is terminology added in excess to that which is defined in the [`repomate`
docs](https://repomate.readthedocs.io/en/latest/fundamentals.html#terminology).
For brevity, some conventions expected by `repomate-junit4` are baked into
these definitions.
* _Production class:_ A Java file/class written in the student repo.
* _Test file_: A file ending in `Test.java` which contains a test class for
some production class. If the students are supposed to write a file called
`LinkedList.java`, the corresponding test class must be called
`LinkedListTest.java`.
* _Test dir_: A directory named after a master repo, containing tests for
student repos based on that master repo. Should contain test files
as defined above.
* _Reference tests directory (RTD)_: A local directory containing subdirectories
with reference tests. Each subdirectory should be a test dir as defined above.
### Security aspects
There are some inconvenient security implications to running untrusted code on
your own computer. `repomate-junit4` tries to limit what a student's code can
do by running with a very strict JVM
[Security Policy](https://docs.oracle.com/javase/7/docs/technotes/guides/security/PolicyFiles.html).
This is enforced by the Java
[SecurityManager](https://docs.oracle.com/javase/8/docs/api/java/lang/SecurityManager.html).
The policy used looks like this:
```
// empty grant to strip all permissions from all codebases
grant {
};
// the `junit-4.12.jar` needs this permission for introspection
grant codeBase "file:{junit4_jar_path}" {
    permission java.lang.RuntimePermission "accessDeclaredMembers";
};
```
This policy disallows student code from doing most illicit things, such as
accessing files outside of the codebases's directory, or accessing the network.
The `{junit4_jar_path}` is dynamically resolved during runtime, and will lend
the actual `junit-4.12.jar` archive that is used to run the test classes
sufficient permissions to do so.
This policy seems to work well for introductory courses in Java, but there may
be snags because of how restrictive it is. If you find that some permission
should definitely be added, please
[open an issue about it](https://github.com/slarse/repomate-junit4/issues/new).
There are plans to add the ability to specify a custom security policy, but
currently, your only choice is to either use this default policy or disable it
with `--disable-security`.
> **Important:** The security policy relies on the correctness of the Java
> SecurityManager. It is probably not bulletproof, so if you have strict
> security requirements, you should probably only run this plugin inside of a
> properly secured environment (for example, a virtual machine).
### Added CLI arguments
`repomate-junit4` adds four new CLI arguments to the `repomate clone` command.
* `-rtd|--reference-tests-dir`
- Path to the RTD.
- **Required** unless specified in the configuration file.
* `-junit|--junit-path`
- Path to the `junit-4.12.jar` library.
- **Required** unless specified on the `CLASSPATH` variable, or in the
configuration file.
* `-ham|--hamcrest-path`
- Path to the `hamcrest-core-1.3.jar` library.
- **Required** unless specified on the `CLASSPATH` variable, or in the
configuration file.
* `-i|--ignore-tests`
- A whitespace separated list of test files (e.g. `LinkedListTest.java`) to
ignore. This is useful for example if there are abstract test classes in
the test dir.
* `--disable-security`
- Disable the security policy.
* `-v|--verbose`
- Display more verbose information (currently only concerns test failures).
- Long lines are truncated.
* `-vv|--very-verbose`
- Same as `-v`, but without truncation.
### Configuration file
First and foremost, `junit4` must be added to the `plugins` option under the
`[DEFAULTS]` section in order to activate the plugin,
[see details here](https://repomate.readthedocs.io/en/latest/plugins.html#using-existing-plugins).
The `--hamcrest-path`, `--junit-path` and `--reference-tests-dir` arguments can
be configured in the standard
[`repomate` configuration file](https://repomate.readthedocs.io/en/latest/configuration.html)
by adding the `[junit4]` section heading. Example:
```bash
[DEFAULTS]
plugins = junit4
[junit4]
reference_tests_dir = /absolute/path/to/rtd
junit_path = /absolute/path/to/junit-4.12.jar
hamcrest_path = /absolute/path/to/hamcrest-core-1.3.jar
```
> **Important:** All of the paths in the config must be absolute for
> `repomate-junit4` to behave as expected.
| /repomate-junit4-0.5.0.tar.gz/repomate-junit4-0.5.0/README.md | 0.646237 | 0.927462 | README.md | pypi |
import pathlib
import re
import os
import sys
import subprocess
from typing import Iterable, Tuple, Union, List
import repomate_plug as plug
from repomate_plug import Status
from repomate_junit4 import SECTION
def is_abstract_class(class_: pathlib.Path) -> bool:
    """Check if the file contains an abstract class declaration.

    Args:
        class_: Path to a Java class file.
    Returns:
        True if the class is abstract.
    """
    assert class_.name.endswith(".java")
    # re.escape guards against regex metacharacters in the class name:
    # ``$`` is a legal character in Java identifiers, but unescaped it
    # would be interpreted as an end-of-line anchor.
    class_name = re.escape(class_.name[:-5])
    regex = r"^\s*?(public\s+)?abstract\s+class\s+{}".format(class_name)
    match = re.search(
        regex,
        class_.read_text(encoding=sys.getdefaultencoding()),
        flags=re.MULTILINE,
    )
    return match is not None
def generate_classpath(*paths: pathlib.Path, classpath: str = "") -> str:
    """Return a classpath that includes all of the provided paths, always
    appending the current working directory last.

    Args:
        paths: One or more paths to add to the classpath.
        classpath: An initial classpath to append to.
    Returns:
        a formatted classpath to be used with ``java`` and ``javac``
    """
    segments = [classpath] + [str(path) for path in paths] + ["."]
    return ":".join(segments)
def extract_package(class_: pathlib.Path) -> str:
    """Return the package name of the class. An empty string
    denotes the default package.
    """
    assert class_.name.endswith(".java")
    # yes, $ is a valid character for a Java identifier ...
    ident = r"[\w$][\w\d_$]*"
    # The separator dot is escaped: the original pattern used a bare ``.``,
    # which matched ANY character between identifiers.
    regex = r"^\s*?package\s+({ident}(\.{ident})*);".format(ident=ident)
    with class_.open(encoding=sys.getdefaultencoding(), mode="r") as file:
        # package statement must be on the first line
        first_line = file.readline()
        matches = re.search(regex, first_line)
        if matches:
            return matches.group(1)
    return ""
def fqn(package_name: str, class_name: str) -> str:
    """Return the fully qualified name (Java style) of the class.

    Args:
        package_name: Name of the package. The default package should be an
            empty string.
        class_name: Canonical name of the class.
    Returns:
        The fully qualified name of the class.
    """
    if not package_name:
        return class_name
    return "{}.{}".format(package_name, class_name)
def properly_packaged(path: pathlib.Path, package: str) -> bool:
    """Check if the path ends in a directory structure that corresponds to
    the package name.

    Args:
        path: Path to a Java file.
        package: The name of a Java package.
    Returns:
        True iff the directory structure corresponds to the package name.
    """
    expected_suffix = package.replace(".", os.path.sep)
    return str(path).endswith(expected_suffix)
def extract_package_root(class_: pathlib.Path, package: str) -> pathlib.Path:
    """Return the package root, given that class_ is the path to a .java file.

    For the default package (empty string) the containing directory is
    returned. Raises if the directory structure doesn't correspond to the
    package statement.
    """
    _check_directory_corresponds_to_package(class_.parent, package)
    if not package:
        return class_.parent
    # Walk one level up per package component to reach the package root.
    depth = len(package.split("."))
    return class_.parents[depth]
def javac(
    java_files: Iterable[Union[str, pathlib.Path]], classpath: str
) -> Tuple[str, str]:
    """Run ``javac`` on all of the specified files, assuming that they are
    all ``.java`` files.

    Args:
        java_files: paths to ``.java`` files.
        classpath: The classpath to set.
    Returns:
        (status, msg), where status is e.g. :py:const:`Status.ERROR` and
        the message describes the outcome in plain text.
    """
    command = ["javac", "-cp", classpath] + [str(path) for path in java_files]
    proc = subprocess.run(
        command, stdout=subprocess.PIPE, stderr=subprocess.PIPE
    )
    if proc.returncode == 0:
        return Status.SUCCESS, "all files compiled successfully"
    # Compiler diagnostics arrive on stderr.
    return Status.ERROR, proc.stderr.decode(sys.getdefaultencoding())
def pairwise_compile(
    test_classes: List[pathlib.Path],
    java_files: List[pathlib.Path],
    classpath: str,
) -> Tuple[List[plug.HookResult], List[plug.HookResult]]:
    """Compile test classes with their associated production classes.

    For each test class:

        1. Find the associated production class among the ``java_files``
        2. Compile the test class together with all of the .java files in
           the associated production class' directory.

    Args:
        test_classes: A list of paths to test classes.
        java_files: A list of paths to java files from the student repo.
        classpath: A base classpath to use.
    Returns:
        A tuple of lists of HookResults on the form ``(succeeded, failed)``
    """
    succeeded = []
    failed = []
    for test_class in test_classes:
        # Abstract test classes cannot be instantiated by the runner.
        if is_abstract_class(test_class):
            continue
        status, msg, prod_class_path = _pairwise_compile(
            test_class, classpath, java_files
        )
        if status == Status.SUCCESS:
            succeeded.append((test_class, prod_class_path))
        else:
            failed.append(plug.HookResult(SECTION, status, msg))
    return succeeded, failed
def _pairwise_compile(test_class, classpath, java_files):
    """Compile the given test class together with its production class
    counterpart (if it can be found). Return a tuple of
    ``(status, msg, prod_class_path)``.
    """
    package = extract_package(test_class)
    candidates = _get_matching_prod_classes(test_class, package, java_files)
    if len(candidates) == 1:
        prod_class_path = candidates[0]
        # Compile every non-test .java file next to the production class,
        # plus every test class in the test class' own directory.
        adjacent_java_files = [
            file
            for file in prod_class_path.parent.glob("*.java")
            if not file.name.endswith("Test.java")
        ] + list(test_class.parent.glob("*Test.java"))
        status, msg = javac(
            [*adjacent_java_files], generate_classpath(classpath=classpath)
        )
    else:
        prod_class_path = None
        status = Status.ERROR
        reason = (
            "no production class found for "
            if not candidates
            else "multiple production classes found for "
        )
        msg = reason + fqn(package, test_class.name)
    return status, msg, prod_class_path
def _get_matching_prod_classes(test_class, package, java_files):
    """Find all production classes among the Java files that match the test
    class' name and package.
    """
    wanted_name = test_class.name.replace("Test.java", ".java")
    matches = []
    for file in java_files:
        if file.name == wanted_name and extract_package(file) == package:
            matches.append(file)
    return matches
def _check_directory_corresponds_to_package(path: pathlib.Path, package: str):
"""Check that the path ends in a directory structure that corresponds
to the package prefix.
"""
required_dir_structure = package.replace(".", os.path.sep)
if not str(path).endswith(required_dir_structure):
msg = (
"Directory structure does not conform to package statement. Dir:"
" '{}' Package: '{}'".format(path, package)
)
raise ValueError(msg) | /repomate-junit4-0.5.0.tar.gz/repomate-junit4-0.5.0/repomate_junit4/_java.py | 0.777596 | 0.360236 | _java.py | pypi |
import itertools
import os
import argparse
import configparser
import pathlib
from typing import Union, Iterable, Tuple, List
import daiquiri
from colored import bg, style
import repomate_plug as plug
from repomate_plug import Status
from repomate_junit4 import _java
from repomate_junit4 import _junit4_runner
from repomate_junit4 import SECTION
# Module-level logger for this plugin.
LOGGER = daiquiri.getLogger(__file__)

# (test_class_path, production_class_path) pair produced by pairwise
# compilation.
ResultPair = Tuple[pathlib.Path, pathlib.Path]

# NOTE(review): presumably the truncation width (in characters) for
# --verbose output; the consuming code is not visible here — confirm
# against the formatting helpers.
DEFAULT_LINE_LIMIT = 150
class _ActException(Exception):
    """Raise if something goes wrong in act_on_clone_repo."""

    def __init__(self, hook_result):
        # The HookResult that the hook should report to the caller
        # instead of crashing.
        self.hook_result = hook_result
class JUnit4Hooks(plug.Plugin):
    def __init__(self):
        """Initialize plugin state with empty/disabled defaults.

        Real values are filled in later by :py:meth:`config_hook`
        (configuration file) and :py:meth:`parse_args` (CLI arguments).
        """
        self._master_repo_names = []
        self._reference_tests_dir = ""
        self._ignore_tests = []
        self._hamcrest_path = ""
        self._junit_path = ""
        # Seed the classpath from the environment, if the variable is set.
        self._classpath = os.getenv("CLASSPATH") or ""
        self._verbose = False
        self._very_verbose = False
        self._disable_security = False
def act_on_cloned_repo(
self, path: Union[str, pathlib.Path]
) -> plug.HookResult:
"""Look for production classes in the student repo corresponding to
test classes in the reference tests directory.
Assumes that all test classes end in ``Test.java`` and that there is
a directory with the same name as the master repo in the reference
tests directory.
Args:
path: Path to the student repo.
Returns:
a plug.HookResult specifying the outcome.
"""
assert self._master_repo_names
assert self._reference_tests_dir
try:
path = pathlib.Path(path)
if not path.exists():
return plug.HookResult(
SECTION,
Status.ERROR,
"student repo {!s} does not exist".format(path),
)
compile_succeeded, compile_failed = self._compile_all(path)
tests_succeeded, tests_failed = self._run_tests(compile_succeeded)
msg = self._format_results(
itertools.chain(tests_succeeded, tests_failed, compile_failed)
)
status = (
Status.ERROR
if tests_failed or compile_failed
else Status.SUCCESS
)
return plug.HookResult(SECTION, status, msg)
except _ActException as exc:
return exc.hook_result
except Exception as exc:
raise _ActException(
plug.HookResult(SECTION, Status.ERROR, str(exc))
)
def parse_args(self, args: argparse.Namespace) -> None:
"""Get command line arguments.
Args:
args: The full namespace returned by
:py:func:`argparse.ArgumentParser.parse_args`
"""
self._master_repo_names = args.master_repo_names
self._reference_tests_dir = (
args.reference_tests_dir
if args.reference_tests_dir
else self._reference_tests_dir
)
self._ignore_tests = (
args.ignore_tests if args.ignore_tests else self._ignore_tests
)
self._hamcrest_path = (
args.hamcrest_path if args.hamcrest_path else self._hamcrest_path
)
self._junit_path = (
args.junit_path if args.junit_path else self._junit_path
)
self._verbose = args.verbose
self._very_verbose = args.very_verbose
self._disable_security = (
args.disable_security
if args.disable_security
else self._disable_security
)
    def clone_parser_hook(
        self, clone_parser: argparse.ArgumentParser
    ) -> None:
        """Add the JUnit4-specific arguments (reference tests dir, jar
        paths, ignore list, security and verbosity flags) to the ``clone``
        subparser.

        Args:
            clone_parser: The ``clone`` subparser.
        """
        clone_parser.add_argument(
            "-rtd",
            "--reference-tests-dir",
            help="Path to a directory with reference tests.",
            type=str,
            # Only required when not already provided via the config file.
            required=not self._reference_tests_dir,
        )
        clone_parser.add_argument(
            "-i",
            "--ignore-tests",
            help="Names of test classes to ignore.",
            type=str,
            nargs="+",
        )
        clone_parser.add_argument(
            "-ham",
            "--hamcrest-path",
            help="Absolute path to the `{}` library.".format(
                _junit4_runner.HAMCREST_JAR
            ),
            type=str,
            # required if not picked up in config_hook nor on classpath
            required=not self._hamcrest_path
            and _junit4_runner.HAMCREST_JAR not in self._classpath,
        )
        clone_parser.add_argument(
            "-junit",
            "--junit-path",
            help="Absolute path to the `{}` library.".format(
                _junit4_runner.JUNIT_JAR
            ),
            type=str,
            # required if not picked up in config_hook nor on classpath
            required=not self._junit_path
            and _junit4_runner.JUNIT_JAR not in self._classpath,
        )
        clone_parser.add_argument(
            "--disable-security",
            help=(
                "Disable the default security policy (student code can do "
                "whatever)."
            ),
            action="store_true",
        )
        # -v and -vv are mutually exclusive verbosity levels.
        verbosity = clone_parser.add_mutually_exclusive_group()
        verbosity.add_argument(
            "-v",
            "--verbose",
            help="Display more information about test failures.",
            action="store_true",
        )
        verbosity.add_argument(
            "-vv",
            "--very-verbose",
            help="Display the full failure output, without truncating.",
            action="store_true",
        )
def config_hook(self, config_parser: configparser.ConfigParser) -> None:
"""Look for hamcrest and junit paths in the config, and get the classpath.
Args:
config: the config parser after config has been read.
"""
self._hamcrest_path = config_parser.get(
SECTION, "hamcrest_path", fallback=self._hamcrest_path
)
self._junit_path = config_parser.get(
SECTION, "junit_path", fallback=self._junit_path
)
self._reference_tests_dir = config_parser.get(
SECTION, "reference_tests_dir", fallback=self._reference_tests_dir
)
def _compile_all(
    self, path: pathlib.Path
) -> Tuple[List[ResultPair], List[plug.HookResult]]:
    """Attempt to compile all java files in the repo at ``path``.

    Returns:
        a tuple of lists ``(succeeded, failed)``, where ``succeeded``
        are tuples on the form ``(test_class, prod_class)`` paths.
    """
    sources = list(path.rglob("*.java"))
    reference_tests = self._find_test_classes(
        self._extract_master_repo_name(path)
    )
    return _java.pairwise_compile(
        reference_tests, sources, classpath=self._generate_classpath()
    )
def _extract_master_repo_name(self, path: pathlib.Path) -> str:
    """Extract the master repo name from the student repo at ``path``.

    For this to work, the corresponding master repo name must be in
    ``self._master_repo_names``.

    Args:
        path: path to the student repo
    Returns:
        the name of the associated master repository
    Raises:
        _ActException: if zero or several master repo names match.
    """
    candidates = [
        name for name in self._master_repo_names if path.name.endswith(name)
    ]
    if len(candidates) == 1:
        return candidates[0]
    if candidates:
        msg = "multiple matching master repo names: {}".format(
            ", ".join(candidates)
        )
    else:
        msg = "no master repo name matching the student repo"
    raise _ActException(plug.HookResult(SECTION, Status.ERROR, msg))
def _find_test_classes(self, master_name) -> List[pathlib.Path]:
    """Find all test classes (files ending in ``Test.java``) in the
    directory at ``<reference_tests_dir>/<master_name>``.

    Args:
        master_name: Name of a master repo.
    Returns:
        a list of test classes from the corresponding reference test
        directory.
    Raises:
        _ActException: if the reference test directory is missing or
            contains no test classes.
    """
    test_dir = pathlib.Path(self._reference_tests_dir) / master_name
    if not test_dir.is_dir():
        raise _ActException(
            plug.HookResult(
                SECTION,
                Status.ERROR,
                "no reference test directory for {} in {}".format(
                    master_name, self._reference_tests_dir
                ),
            )
        )
    found = []
    for candidate in test_dir.rglob("*.java"):
        if (
            candidate.name.endswith("Test.java")
            and candidate.name not in self._ignore_tests
        ):
            found.append(candidate)
    if not found:
        raise _ActException(
            plug.HookResult(
                SECTION,
                Status.WARNING,
                "no files ending in `Test.java` found in {!s}".format(
                    test_dir
                ),
            )
        )
    return found
def _format_results(self, hook_results: Iterable[plug.HookResult]):
    """Format a list of plug.HookResult tuples as a nice string.

    Each result is rendered as one line with a status badge whose
    background color reflects the status; messages are truncated when
    running in (single) verbose mode.

    Args:
        hook_results: A list of plug.HookResult tuples.
    Returns:
        a formatted string
    """
    status_bg = {
        Status.ERROR: bg("red"),
        Status.WARNING: bg("yellow"),
        Status.SUCCESS: bg("dark_green"),
    }
    rendered = []
    for _, status, msg in hook_results:
        body = _truncate_lines(msg) if self._verbose else msg
        rendered.append(
            "{}{}:{} {}".format(status_bg[status], status, style.RESET, body)
        )
    return os.linesep.join(rendered)
def _run_tests(
    self, test_prod_class_pairs: ResultPair
) -> Tuple[List[plug.HookResult], List[plug.HookResult]]:
    """Run tests and return the results.

    Args:
        test_prod_class_pairs: A list of tuples on the form
            ``(test_class_path, prod_class_path)``
    Returns:
        A tuple of lists ``(succeeded, failed)`` containing HookResult
        tuples.
    """
    succeeded = []
    failed = []
    classpath = self._generate_classpath()
    # The security policy file only exists for the duration of this block;
    # it is None when security is disabled.
    with _junit4_runner.security_policy(
        classpath, active=not self._disable_security
    ) as security_policy:
        for test_class, prod_class in test_prod_class_pairs:
            status, msg = _junit4_runner.run_test_class(
                test_class,
                prod_class,
                classpath=classpath,
                verbose=self._verbose or self._very_verbose,
                security_policy=security_policy,
            )
            if status != Status.SUCCESS:
                failed.append(plug.HookResult(SECTION, status, msg))
            else:
                succeeded.append(plug.HookResult(SECTION, status, msg))
    return succeeded, failed
def _generate_classpath(self, *paths: pathlib.Path) -> str:
    """Build the classpath used when invoking ``java``/``javac``.

    Args:
        paths: One or more paths to add to the classpath.
    Returns:
        a formatted classpath to be used with ``java`` and ``javac``
    """
    # BUGFIX: the two sentences used to be glued together ("variable.This").
    warn = (
        "`{}` is not configured and not on the CLASSPATH variable. "
        "This will probably crash."
    )
    if not (
        self._hamcrest_path
        or _junit4_runner.HAMCREST_JAR in self._classpath
    ):
        LOGGER.warning(warn.format(_junit4_runner.HAMCREST_JAR))
    if not (
        self._junit_path or _junit4_runner.JUNIT_JAR in self._classpath
    ):
        LOGGER.warning(warn.format(_junit4_runner.JUNIT_JAR))
    # Collect into a fresh list instead of rebinding the *paths tuple.
    all_paths = list(paths)
    if self._hamcrest_path:
        all_paths.append(self._hamcrest_path)
    if self._junit_path:
        all_paths.append(self._junit_path)
    return _java.generate_classpath(*all_paths, classpath=self._classpath)
def _truncate_lines(string: str, max_len: int = DEFAULT_LINE_LIMIT):
    """Truncate each line of ``string`` to at most ``max_len`` characters.

    Overlong lines keep their head and tail, with a ``" #[...]# "`` marker
    spliced in between.

    Args:
        string: A possibly multi-line string.
        max_len: Maximum length of each line in the result.
    Returns:
        the string, with every line truncated to at most ``max_len`` chars.
    Raises:
        ValueError: If ``max_len`` is not larger than the marker itself.
    """
    trunc_msg = " #[...]# "
    if max_len <= len(trunc_msg):
        raise ValueError(
            "max_len must be greater than {}".format(len(trunc_msg))
        )
    effective_len = max_len - len(trunc_msg)
    head_len = effective_len // 2
    # BUGFIX: give the tail any leftover character so the full budget is
    # used (the old code dropped one char whenever effective_len was odd).
    tail_len = effective_len - head_len

    def truncate(line):
        if len(line) > max_len:
            return line[:head_len] + trunc_msg + line[-tail_len:]
        return line

    return os.linesep.join(
        [truncate(line) for line in string.split(os.linesep)]
    )
import contextlib
import os
import pathlib
import re
import subprocess
import sys
import tempfile
from typing import List, Optional, Tuple

import daiquiri

from repomate_plug import Status

from repomate_junit4 import _java
LOGGER = daiquiri.getLogger(__file__)
HAMCREST_JAR = "hamcrest-core-1.3.jar"
JUNIT_JAR = "junit-4.12.jar"
_DEFAULT_SECURITY_POLICY_TEMPLATE = """grant {{
}};
grant codeBase "file:{junit4_jar_path}" {{
permission java.lang.RuntimePermission "accessDeclaredMembers";
}};
"""
@contextlib.contextmanager
def security_policy(classpath: str, active: bool):
    """Yield the path to the default security policy file if ``active``,
    else yield None. The policy file is deleted once the context is
    exited.

    TODO: Make it possible to use a custom security policy here.

    Args:
        classpath: Classpath that must contain the JUnit4 jar; it is used
            to locate the jar the generated policy grants permissions to.
        active: If False, no policy is generated and None is yielded.
    """
    if not active:
        LOGGER.warning(
            "Security policy disabled, student code running without "
            "restrictions"
        )
        # Bare yield -> callers receive None and pass it straight through.
        yield
        return
    with tempfile.NamedTemporaryFile() as security_policy_file:
        policy = _generate_default_security_policy(classpath)
        security_policy_file.write(
            policy.encode(encoding=sys.getdefaultencoding())
        )
        # Flush so the spawned java process sees the full file on disk.
        security_policy_file.flush()
        yield pathlib.Path(security_policy_file.name)
def _generate_default_security_policy(classpath: str) -> str:
    """Generate the default security policy from the classpath.

    ``junit-4.12.jar`` must be on the classpath.

    Raises:
        ValueError: If the JUnit4 jar cannot be found on the classpath.
    """
    # Match a whole classpath entry (no path separators) ending in the jar.
    pattern = "[^{sep}]*{junit_jar}".format(
        sep=os.pathsep, junit_jar=JUNIT_JAR.replace(".", r"\.")
    )
    match = re.search(pattern, classpath)
    if match is None:
        raise ValueError("{} not on the classpath".format(JUNIT_JAR))
    return _DEFAULT_SECURITY_POLICY_TEMPLATE.format(
        junit4_jar_path=match.group(0)
    )
def get_num_failed(test_output: bytes) -> int:
    """Get the amount of failed tests from the error output of JUnit4.

    Args:
        test_output: Raw output (bytes) from a JUnit4 run.
    Returns:
        the number of failed tests reported in the output.
    Raises:
        ValueError: If the output contains no ``Failures: N`` report.
    """
    decoded = test_output.decode(encoding=sys.getdefaultencoding())
    match = re.search(r"Failures: (\d+)", decoded)
    if match is None:
        # Previously crashed with an opaque AttributeError on .group();
        # raise a descriptive error instead (resolves the old TODO).
        raise ValueError(
            "could not parse number of failed tests from JUnit4 output"
        )
    return int(match.group(1))
def parse_failed_tests(test_output: bytes) -> List[str]:
    """Return a list of test failure descriptions, excluding stack traces.

    Args:
        test_output: Raw output (bytes) from a JUnit4 run.
    Returns:
        a list of strings, one per failed test: the numbered failure header
        plus any message lines, but no indented ``at ...`` trace lines.
    """
    decoded = test_output.decode(encoding=sys.getdefaultencoding())
    # Match "N) testName(Class)" plus following lines that do not start
    # with whitespace + "at" (i.e. keep messages, drop stack frames).
    # BUGFIX: the return annotation used to claim ``str``; findall
    # returns a list of strings, as the docstring always said.
    return re.findall(
        r"^\d\) .*(?:\n(?!\s+at).*)*", decoded, flags=re.MULTILINE
    )
def _extract_conforming_package(test_class, prod_class):
    """Extract a package name shared by the test and production class.

    Raises:
        ValueError: If the test class and production class have different
            package statements.
    """
    test_pkg = _java.extract_package(test_class)
    prod_pkg = _java.extract_package(prod_class)
    if test_pkg == prod_pkg:
        return test_pkg
    raise ValueError(
        "Test class {} in package {}, but class {} in package {}".format(
            test_class.name, test_pkg, prod_class.name, prod_pkg
        )
    )
def run_test_class(
    test_class: pathlib.Path,
    prod_class: pathlib.Path,
    classpath: str,
    verbose: bool = False,
    security_policy: Optional[pathlib.Path] = None,
) -> Tuple[str, str]:
    """Run a single test class on a single production class.

    Args:
        test_class: Path to a Java test class.
        prod_class: Path to a Java production class.
        classpath: A classpath to use in the tests.
        verbose: Whether to output more failure information.
        security_policy: Optional path to a Java security policy file that
            the test JVM should run under.
    Returns:
        a tuple ``(status, msg)`` describing the outcome of the test run.
    """
    package = _extract_conforming_package(test_class, prod_class)
    prod_class_dir = _java.extract_package_root(prod_class, package)
    test_class_dir = _java.extract_package_root(test_class, package)
    test_class_name = test_class.name[
        : -len(test_class.suffix)
    ]  # remove .java
    test_class_name = _java.fqn(package, test_class_name)
    classpath = _java.generate_classpath(
        test_class_dir, prod_class_dir, classpath=classpath
    )
    command = ["java"]
    if security_policy:
        command += [
            "-Djava.security.manager",
            # The double '==' makes this policy file replace (not extend)
            # the default policy, per Java policy file semantics.
            "-Djava.security.policy=={!s}".format(security_policy),
        ]
    command += [
        "-cp",
        classpath,
        "org.junit.runner.JUnitCore",
        test_class_name,
    ]
    proc = subprocess.run(
        command, stdout=subprocess.PIPE, stderr=subprocess.PIPE
    )
    return _extract_results(proc, test_class_name, verbose)
def _extract_results(
    proc: subprocess.CompletedProcess, test_class_name: str, verbose: bool
) -> Tuple[str, str]:
    """Extract and format results from a completed test run.

    Returns:
        a ``(status, message)`` tuple; failure messages include the parsed
        failure descriptions when ``verbose`` is True.
    """
    if proc.returncode == 0:
        return Status.SUCCESS, "Test class {} passed!".format(test_class_name)
    msg = "Test class {} failed {} tests".format(
        test_class_name, get_num_failed(proc.stdout)
    )
    if verbose:
        msg += os.linesep + os.linesep.join(parse_failed_tests(proc.stdout))
    return Status.ERROR, msg
from typing import Dict, List
from repomate_plug import exception
from repomate_plug import corehooks
from repomate_plug import exthooks
from repomate_plug import util
# Hook-method specifications harvested from the two hook classes: maps each
# hook method name to its specification callable. Used by _PluginMeta to
# validate that Plugin subclasses only define public methods with hook names.
_HOOK_METHODS = {
    key: value
    for key, value in [
        *exthooks.CloneHook.__dict__.items(),
        *corehooks.PeerReviewHook.__dict__.items()
    ] if callable(value) and not key.startswith('_')
}
class _PluginMeta(type):
    """Metaclass used for converting methods with appropriate names into
    hook methods.

    Also ensures that all public methods have the name of a hook method.
    Checking signatures is handled by pluggy on registration.
    """

    def __new__(cls, name, bases, attrdict):
        """Check that all public methods have hook names, convert to hook
        methods and return a new instance of the class. If there are any
        public methods that have non-hook names,
        :py:function:`repomate_plug.exception.HookNameError` is raised.

        Checking signatures is delegated to ``pluggy`` during registration of
        the hook.
        """
        methods = cls._extract_public_methods(attrdict)
        cls._check_names(methods)
        # Wrap each public method with the pluggy hookimpl marker so pluggy
        # recognizes it as a hook implementation on registration.
        hooked_methods = {
            name: util.hookimpl(method)
            for name, method in methods.items()
        }
        attrdict.update(hooked_methods)
        return super().__new__(cls, name, bases, attrdict)

    @staticmethod
    def _check_names(methods):
        # Any public method whose name is not a known hook is almost
        # certainly a typo or a misunderstanding of the plugin API.
        hook_names = set(_HOOK_METHODS.keys())
        method_names = set(methods.keys())
        if not method_names.issubset(hook_names):
            raise exception.HookNameError(
                "public method(s) with non-hook name: {}".format(
                    ", ".join(method_names - hook_names)))

    @staticmethod
    def _extract_public_methods(attrdict):
        # "Public method" == any callable attribute not prefixed with '_'.
        return {
            key: value
            for key, value in attrdict.items()
            if callable(value) and not key.startswith('_')
        }
class Plugin(metaclass=_PluginMeta):
    """Base class for plugin classes. For plugin classes to be picked up by
    ``repomate``, they must inherit from this class.

    Public methods must be hook methods, i.e. implement the specification of
    one of the hooks defined in :py:mod:`~repomate_plug.corehooks.PeerReviewHook`
    or :py:mod:`~repomate_plug.exthooks.CloneHook`. If there are any other
    public methods, an error is raised on class creation. As long as the method
    has the correct name, it will be recognized as a hook method.

    The signature of the method is not checked until the hook is registered by
    the :py:const:`repomate_plug.manager` (an instance of
    :py:class:`pluggy.manager.PluginManager`). Therefore, when testing a plugin,
    it is a good idea to include a test where it is registered with the manager
    to ensure that it has the correct signatures.

    Private methods (i.e. methods prefixed with ``_``) carry no such
    restrictions.
    """
# Repomate
[](https://travis-ci.com/slarse/repomate)
[](https://codecov.io/gh/slarse/repomate)
[](http://repomate.readthedocs.io/en/latest/)
[](https://badge.fury.io/py/repomate)


[](LICENSE)
[](https://github.com/ambv/black)
## Overview
Repomate is a CLI tool for administering large numbers of GitHub
repositories, geared towards teachers and GitHub Enterprise. Repomate is
currently being used for the introductory courses in computer science at
[KTH Royal Technical Institute of Technology](https://www.kth.se/en/eecs). The
courses have roughly 200 students and several thousands of repositories,
allowing us to test Repomate at quite a large scale.
### Install
Repomate is on PyPi, so `python3 -m pip install repomate` should do the trick. See the
[install instructions](https://repomate.readthedocs.io/en/latest/install.html)
for more elaborate instructions.
### Getting started
The best way to get started with Repomate is to head over to the
[Docs](https://repomate.readthedocs.io/en/latest/), where you (among other
things) will find the
[user guide](https://repomate.readthedocs.io/en/latest/userguide.html).
It covers the use of Repomate's various commands by way of practical example,
and should set you on the right path with little effort.
## Why Repomate?
Repomate was developed at KTH Royal Technical Institute of Technology to help
teachers and teaching assistants administrate GitHub repositories. It is
inspired by the old
[`teachers_pet` tool](https://github.com/education/teachers_pet), with added
features and a user experience more to our liking. Features range from creating
student repositories based on master (template) repos, to opening and closing
issues in bulk, to assigning peer reviews and cloning repos in bulk. Some parts
of Repomate can be customized using a simple but powerful [plugin
system](https://github.com/slarse/repomate-plug). For example, the
[`repomate-junit4` plugin](https://github.com/slarse/repomate-junit4) allows for
automatically running `JUnit4` test classes on production code in student repos.
Below is the output from running `repomate -h`, giving brief descriptions of
each of the main features:
```
$ repomate -h
usage: repomate [-h] [-v]
{show-config,setup,update,migrate,clone,open-issues,
close-issues,list-issues,assign-reviews,
purge-review-teams,check-reviews,verify-settings}
...
A CLI tool for administering large amounts of git repositories on GitHub
instances. See the full documentation at https://repomate.readthedocs.io
positional arguments:
{show-config,setup,update,migrate,clone,open-issues,
close-issues,list-issues,assign-reviews,
purge-review-teams,check-reviews,verify-settings}
show-config Show the configuration file
setup Setup student repos.
update Update existing student repos.
migrate Migrate master repositories into the target
organization.
clone Clone student repos.
open-issues Open issues in student repos.
close-issues Close issues in student repos.
list-issues List issues in student repos.
assign-reviews Randomly assign students to peer review each others'
repos.
purge-review-teams Remove all review teams associated with the specified
students and master repos.
check-reviews Fetch all peer review teams for the specified student
repos, and check which assigned reviews have been done
(i.e. which issues have been opened).
verify-settings Verify your settings, such as the base url and the
OAUTH token.
optional arguments:
-h, --help show this help message and exit
-v, --version Display version info
```
## Roadmap
As of December 17th 2018, Repomate's CLI is a stable release and adheres to
[Semantic Versioning 2.0.0](https://semver.org/spec/v2.0.0.html). The internals
of Repomate _do not_ adhere to this versioning, so using Repomate as a library
is not recommended.
The plugin system is still to be considered in the beta phase, as it has seen
much less live action use than the rest of the CLI. Features are highly
unlikely to be cut, but hooks may be modified as new use-cases arise.
## License
This software is licensed under the MIT License. See the [LICENSE](LICENSE)
file for specifics.
| /repomate-1.1.1.tar.gz/repomate-1.1.1/README.md | 0.450118 | 0.956227 | README.md | pypi |
from datetime import datetime
from typing import Dict, NamedTuple, Optional, Tuple
from repono import AsyncStorageHandlerBase, FileItem, StorageHandlerBase
class DummyFile(NamedTuple):
    """In-memory record of a stored file, used by the dummy handlers."""

    contents: bytes  # raw file contents
    atime: datetime  # last access time
    ctime: datetime  # creation time
    mtime: datetime  # last modification time
class DummyHandler(StorageHandlerBase):
    """
    Dummy synchronous storage handler for testing.

    Files are kept in an in-memory dict keyed by url path, so tests can
    assert on what was saved and deleted without touching a real backend.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Store files where the key is the url path and the value is
        # a named tuple containing the contents of the file, the access
        # time, the creation time, and the time of last modification.
        self.files: Dict[str, DummyFile] = {}
        self.last_save: Optional[FileItem] = None
        self.last_save_contents: bytes = b""
        self.last_delete: Optional[FileItem] = None
        self.validated = False

    def _validate(self) -> None:
        # Nothing real to validate; just record that validation ran.
        self.validated = True

    def get_file_key(self, item: FileItem) -> FileItem:
        # Drop the data so the key only identifies the file location.
        return item.copy(data=None)

    def _exists(self, item: FileItem) -> bool:
        """
        Indicate if the given file exists within the given folder.
        """
        return item.url_path in self.files

    def assert_exists(self, filename: str, path: Tuple[str, ...]) -> None:
        """
        Assert that the given file exists in the dummy file system.
        """
        assert self._exists(FileItem(filename=filename, path=path))

    def _get_size(self, item: FileItem) -> int:
        """
        Return the size (in bytes) of the given file.
        """
        return len(self.files[item.url_path].contents)

    def assert_get_size(self, filename: str, path: Tuple[str, ...], size: int) -> None:
        """
        Assert that the given file size is equal to the anticipated size.
        """
        assert self._get_size(FileItem(filename=filename, path=path)) == size

    def _get_accessed_time(self, item: FileItem) -> datetime:
        """
        Return the access time of the given file.
        """
        return self.files[item.url_path].atime

    def assert_get_accessed_time(
        self, filename: str, path: Tuple[str, ...], date: datetime
    ) -> None:
        """
        Assert that the given file access time is equal to the anticipated time.
        """
        assert self._get_accessed_time(FileItem(filename=filename, path=path)) == date

    def _get_created_time(self, item: FileItem) -> datetime:
        """
        Return the creation time of the given file.
        """
        return self.files[item.url_path].ctime

    def assert_get_created_time(
        self, filename: str, path: Tuple[str, ...], date: datetime
    ) -> None:
        """
        Assert that the given file creation time is equal to the anticipated time.
        """
        assert self._get_created_time(FileItem(filename=filename, path=path)) == date

    def _get_modified_time(self, item: FileItem) -> datetime:
        """
        Return the modification time of the given file.
        """
        return self.files[item.url_path].mtime

    def assert_get_modified_time(
        self, filename: str, path: Tuple[str, ...], date: datetime
    ) -> None:
        """
        Assert that the given file modification time is equal to the anticipated time.
        """
        assert self._get_modified_time(FileItem(filename=filename, path=path)) == date

    def _save(self, item: FileItem) -> str:
        """
        Save the provided file to the given filename in the storage
        container. Returns the name of the file saved.
        """
        with item as f:
            self.last_save_contents = f.read()
            self.files[item.url_path] = DummyFile(
                self.last_save_contents,
                datetime.now(),
                datetime.now(),
                datetime.now(),
            )
            # Rewind so the caller can re-read the item afterwards.
            f.seek(0)
        self.last_save = item
        return item.filename

    def assert_file_contains(
        self, filename: str, path: Tuple[str, ...], data: bytes
    ) -> None:
        """
        Assert that the given file contains the given data.
        """
        item = FileItem(filename=filename, path=path)
        # BUGFIX: compare the stored bytes, not the whole DummyFile tuple
        # (a DummyFile never equals a bytes object, so the old assertion
        # always failed for any stored file).
        assert self.files[item.url_path].contents == data

    def _delete(self, item: FileItem) -> None:
        """
        Delete the given filename from the storage container, whether or not
        it exists.
        """
        # BUGFIX: pop() matches the documented "whether or not it exists"
        # contract; `del` raised KeyError for missing files.
        self.files.pop(item.url_path, None)
        self.last_delete = item
class AsyncDummyHandler(AsyncStorageHandlerBase, DummyHandler):
    """
    Dummy asynchronous storage handler for testing.

    Async variant of DummyHandler: same in-memory file store, with the
    storage operations implemented as coroutines.
    """

    allow_async = True

    def get_file_key(self, item: FileItem) -> FileItem:
        # Drop the data so the key only identifies the file location.
        return item.copy(data=None)

    async def _async_exists(self, item: FileItem) -> bool:
        """
        Indicate if the given file exists within the given folder.
        """
        return item.url_path in self.files

    def assert_exists(self, filename: str, path: Tuple[str, ...]) -> None:
        """
        Assert that the given file exists in the dummy file system.
        """
        assert self._exists(FileItem(filename=filename, path=path))

    async def _async_get_size(self, item: FileItem) -> int:
        """
        Return the size (in bytes) of the given file.
        """
        return len(self.files[item.url_path].contents)

    def assert_get_size(self, filename: str, path: Tuple[str, ...], size: int) -> None:
        """
        Assert that the given file size is equal to the anticipated size.
        """
        assert self._get_size(FileItem(filename=filename, path=path)) == size

    async def _async_get_accessed_time(self, item: FileItem) -> datetime:
        """
        Return the access time of the given file.
        """
        return self.files[item.url_path].atime

    def assert_get_accessed_time(
        self, filename: str, path: Tuple[str, ...], date: datetime
    ) -> None:
        """
        Assert that the given file access time is equal to the anticipated time.
        """
        assert self._get_accessed_time(FileItem(filename=filename, path=path)) == date

    async def _async_get_created_time(self, item: FileItem) -> datetime:
        """
        Return the creation time of the given file.
        """
        return self.files[item.url_path].ctime

    def assert_get_created_time(
        self, filename: str, path: Tuple[str, ...], date: datetime
    ) -> None:
        """
        Assert that the given file creation time is equal to the anticipated time.
        """
        assert self._get_created_time(FileItem(filename=filename, path=path)) == date

    async def _async_get_modified_time(self, item: FileItem) -> datetime:
        """
        Return the modification time of the given file.
        """
        return self.files[item.url_path].mtime

    def assert_get_modified_time(
        self, filename: str, path: Tuple[str, ...], date: datetime
    ) -> None:
        """
        Assert that the given file modification time is equal to the anticipated time.
        """
        assert self._get_modified_time(FileItem(filename=filename, path=path)) == date

    async def _async_save(self, item: FileItem) -> str:
        """
        Save the provided file to the given filename in the storage
        container. Returns the name of the file saved.
        """
        async with item as f:
            self.last_save_contents = await f.read()
            self.files[item.url_path] = DummyFile(
                self.last_save_contents,
                datetime.now(),
                datetime.now(),
                datetime.now(),
            )
            # Rewind so the caller can re-read the item afterwards.
            await f.seek(0)
        self.last_save = item
        return item.filename

    def assert_file_contains(
        self, filename: str, path: Tuple[str, ...], data: bytes
    ) -> None:
        """
        Assert that the given file contains the given data.
        """
        item = FileItem(filename=filename, path=path)
        # BUGFIX: compare the stored bytes, not the whole DummyFile tuple
        # (a DummyFile never equals a bytes object, so the old assertion
        # always failed for any stored file).
        assert self.files[item.url_path].contents == data

    async def _async_delete(self, item: FileItem) -> None:
        """
        Delete the given filename from the storage container, whether or not
        it exists.
        """
        # BUGFIX: pop() matches the documented "whether or not it exists"
        # contract; `del` raised KeyError for missing files.
        self.files.pop(item.url_path, None)
        self.last_delete = item
# reporeleaser
<!-- badges start -->
[![Build Status][travisbadge]][travislink]
[![PyPI version][pypibadge]][pypilink]
[![Maintained][Maintained]](#)
[![BuyMeCoffee][buymecoffeebadge]][buymecoffeelink]
<!-- badges end -->
_Create a new release for your repository, with a commit changelog._
This will create a new release for your repository.
In the description of that release it will list all commits since the last release.
## Installation
```bash
python3 -m pip install -U reporeleaser
```
# Example usage
```bash
reporeleaser --token aaabbbccc111222333 --repo reporeleaser --release minor
```
This example will create a new release for `reporeleaser` with this information:
**Tag name:** `0.1.0`
**Release name:** `0.1.0`
**Release description:**
```markdown
## Changes
- Use regex to find previous tag
- Fix error handing for missing regex match
- Split code into multiple methods
- Create default tag_sha for last_release
- Add failsafe for new_commits
- use username and not name
- Use login not username
- correct versioning for major / minor
[Full Changelog][changelog]
***
This release was created with [reporeleaser][reporeleaser] :tada:
[reporeleaser]: https://pypi.org/project/reporeleaser/
[changelog]: https://github.com/ludeeus/reporeleaser/compare/0.0.4...0.1.0
```
**NB!: it is recommended to run it one time with [`--test`](#option---test) option to make sure the data is correct.**
# CLI options
## option `--token`
A personal access token for your GitHub account.
This token **needs** at least access to the `repo`/`public_repo` scope.
You can create a new token in the [Developer settings](https://github.com/settings/tokens/new).

_You should have 1 access token pr integration you use._
**Example:** `--token aaabbbccc111222333`
## option `--repo`
The name of the repository you are creating a release for.
The structure needs to be `ludeeus/reporeleaser`; if this repository is on your user
account, you can just use `reporeleaser`.
**Examples:**
- `--repo ludeeus/reporeleaser`
- `--repo reporeleaser`
## option `--release`
This is the type of release you are doing, this will be used to create the new tag with the correct version number.
Possible values:
- `major`
- `minor`
- `patch`
If you do not follow this schema for you tags you can use a custom one.
**Example:** `--release patch`
**Example for custom:** `--release v.1.1.1`
## option `--test`
_This option is a flag._
When this is enabled, a release will **not** be created/published.
This will print to the console how the release will look, giving you a chance to look things over.
## option `--draft`
_This option is a flag._
This creates a release draft instead of publishing the release, this can be useful if you need to add more/change the text in the release description.
## option `--prerelease`
_This option is a flag._
This creates a release pre-release instead of publishing the release.
## option `--show_sha`
_This option is a flag._
When this is enabled every commit line in the release description will be prefixed by the first 7 characters of the SHA, which GitHub turns into a link to that commit.
An example of this can be seen here [https://github.com/ludeeus/reporeleaser/releases/tag/0.3.0](https://github.com/ludeeus/reporeleaser/releases/tag/0.3.0)
## option `--title`
The name you want to use for your release, this defaults to the tag name.
_If your name contains spaces you **need** to add quotes around it._
**Example:** `--title "My Release Title`
## option `--hide_footer`
_This option is a flag._
By default this will be added to the bottom of every release created with this tool.
```markdown
***
This release was created with [reporeleaser][reporeleaser] :tada:
[reporeleaser]: https://pypi.org/project/reporeleaser/
```
You create releases for your projects, so you should be in control, but it's much appreciated if you leave this in.
By enabling this flag, this will not be added.
## option `--hide_full_changelog`
_This option is a flag._
By default this will be added to the bottom of every release created with this tool.
The link will automatically be updated to match the repository and the correct diff for this release.
```markdown
[Full Changelog][changelog]
[changelog]: https://github.com/ludeeus/reporeleaser/compare/1.0.0...1.1.0
```
By enabling this flag, this will not be added.
## option `--version`
_This option is a flag._
Prints the installed version of `reporeleaser`.
## option `--help`
Show available options.
<!-- links start -->
[buymecoffeelink]:https://www.buymeacoffee.com/ludeeus
[buymecoffeebadge]: https://camo.githubusercontent.com/cd005dca0ef55d7725912ec03a936d3a7c8de5b5/68747470733a2f2f696d672e736869656c64732e696f2f62616467652f6275792532306d6525323061253230636f666665652d646f6e6174652d79656c6c6f772e737667
[maintained]: https://img.shields.io/maintenance/yes/2019.svg
[pypilink]: https://pypi.org/project/reporeleaser/
[pypibadge]: https://badge.fury.io/py/reporeleaser.svg
[travislink]: https://travis-ci.com/ludeeus/reporeleaser
[travisbadge]: https://travis-ci.com/ludeeus/reporeleaser.svg?branch=master
<!-- links end --> | /reporeleaser-1.4.1.tar.gz/reporeleaser-1.4.1/README.md | 0.604632 | 0.787911 | README.md | pypi |
import argparse
from .report_builder import build_report, print_report, build_df
from typing import Any
def parse_arguments() -> argparse.Namespace:
    """Parse the command-line arguments for the report builder.

    Options:
        --files: folder path with the race data (validated downstream).
        --asc / --desc: order of the driver list (default is ascending).
        --driver: show statistics for the named driver.

    Returns:
        the parsed argument namespace.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--files', help='folder_path')
    # BUGFIX: these were `type=bool`, which treats ANY non-empty value
    # (even "False") as True; store_true gives proper on/off flags.
    parser.add_argument('--asc', action='store_true', default=True,
                        help='shows list of drivers and optional order (default order is asc)')
    parser.add_argument('--desc', action='store_true', default=False,
                        help='shows list of drivers and optional order (default order is asc)')
    parser.add_argument('--driver', help='shows statistic about driver')
    return parser.parse_args()
def cli_report_builder() -> Any:
    """Build and print the Monaco 2018 race report from CLI arguments.

    Prints a report showing the top 15 racers and the rest after an
    underline (ordered ascending by default, descending with --desc), or
    the statistics of a single driver when --driver is given.

    Returns:
        whatever print_report returns.
    Raises:
        AttributeError: if argument parsing fails or the driver is unknown.
        FileNotFoundError: if no folder path was given or it is invalid.
    """
    try:
        args = parse_arguments()
    except Exception as exc:
        # Narrowed from a bare `except:` so SystemExit (e.g. argparse's
        # --help handling) is no longer swallowed and re-raised as
        # AttributeError.
        raise AttributeError('Check your arguments!') from exc
    if not args.files:
        raise FileNotFoundError('Need folder path!')
    try:
        df = build_report(build_df(args.files, False if args.desc else True))
        if args.driver:
            driver_df = df[df.driver == args.driver]
            if driver_df.empty:
                raise AttributeError('Check driver name!')
            return print_report(driver_df)
        return print_report(df)
    except FileNotFoundError:
        raise FileNotFoundError('Check your folder path!')
import base64
def get_css(color):
    """Return the report stylesheet themed with the given hex color.

    Args:
        color: six-digit hex color string without the leading '#',
            e.g. ``"125aaa"``.
    Returns:
        a CSS string styling the page body, headings, report table and a
        ``.banner<color>`` class.
    """
    # BUGFIX: the variable now carries the full `font-family:` declaration;
    # previously it was just the font list, and the banner rule still
    # contained the literal template leftover `$FONT;`.
    font = 'font-family: "Calibri", Arial, Helvetica, sans-serif'
    return f"""
    body {{ font-family: "Calibri"; font-size: 10pt; padding: 2px 7px; }}
    p, h2, h3, h4, h5, h6, TD {{ {font} }}
    h1 {{ {font}; color:#{color}; border-bottom: solid 1px }}
    .dateheader {{ padding-left: 1pt; font-size:14pt }}
    .underline {{ border-bottom: solid 1px; margin-bottom:20pt; padding-bottom: 5pt; color: #{color} }}
    .report thead tr {{ background-color: #{color}; color: white }}
    .report {{ border-collapse: collapse; margin: 10px; }}
    .banner{color} {{
        {font};
        background-color: #{color};
        color: #ffffff;
        font-size: 16pt;
        padding: 5pt 10pt;
        text-align: left;
        background: linear-gradient(45deg, #{color}, #{color}55);
    }}
    """
def banner(date, title, banner_title, banner_subtitle, imagelink, color, body = ""):
    """Build a complete HTML report page with a colored banner header.

    Args:
        date: display date string, e.g. "Monday, July 6th 2020".
        title: report title (used as browser title and underlined heading).
        banner_title: bold text shown in the banner bar.
        banner_subtitle: smaller text shown next to the banner title.
        imagelink: URL (or data URL) of the banner logo image.
        color: six-digit hex color (no '#') used to theme the page.
        body: optional HTML fragment appended below the header.
    Returns:
        the full HTML document as a string.
    """
    style = get_css(color)
    return f"""
    <html>
    <Title>{title}</Title>
    <head>
    <style>{style}</style>
    </head>
    <body>
    <table style='width: 100%'><tr>
    <td style='width:85px'><img align=left src='{imagelink}' height=70></td>
    <td class=banner{color}><B>{banner_title}</B><BR><font-size=-1>{banner_subtitle}</font></td>
    </tr>
    </table>
    <br>
    <a class=dateheader>{date}</a><BR>
    <div class='dateheader underline'><b>{title}</b></div>
    {body}
    </body>
    </html>
    """
def encode_image(filename):
    """Read the file at ``filename`` and return its base64 encoding.

    Args:
        filename: path to the (binary) file to encode.
    Returns:
        the base64-encoded file contents as an ASCII string.
    """
    # Simplified: the old version kept two pointless intermediate
    # variables (an unused "" initializer and a `file` alias).
    with open(filename, "rb") as image_file:
        return base64.b64encode(image_file.read()).decode('ascii')
def data_url(filename):
    """Return the file's contents as an inline base64 data URL (image/gif)."""
    encoded = encode_image(filename)
    return 'data:image/gif;base64,' + encoded
def color_map():
    """Return a mapping of theme-color names to six-digit hex codes."""
    colors = {
        'blue': '125AAA',
        # 'blanchedalmond': 'FFEBCD',
        'brown': 'A52A2A',
        'cadetblue': '5F9EA0',
        'chocolate': 'D2691E',
        # 'cornsilk': 'FFF8DC',
        'crimson': 'DC143C',
        'darkblue': '00008B',
        'darkmagenta': '8B008B',
        'darkorchid': '9932CC',
        'darkgreen': '006400',
        'darkslateblue': '483D8B',
        'darkslategray': '2F4F4F',
        'firebrick': 'B22222',
        'indigo': '4B0082',
        'orangered': 'FF4500',
        'teal': '008080',
        'gold': 'FFD700',
    }
    return colors
if __name__ == "__main__":
    # Demo driver: render one combined page with every theme plus one page
    # per theme color.
    # NOTE(review): assumes an existing ./output directory -- confirm or create it.
    date,title,banner_title, banner_subtitle = 'Monday, July 6th 2020', 'Fancy sales report', 'Team Name', 'Sales & Marketing'
    imagelink, color = '', '125aaa'
    colors = color_map()
    # Single file with all themed banners appended back-to-back.
    with open(f'output/test-all.html','w') as f:
        for name,color in colors.items():
            print(color, name)
            html = banner(date,title,banner_title , f"{banner_subtitle} - theme = {name}", imagelink, color)
            f.write(html)
    # One standalone file per theme color.
    for name,color in colors.items():
        with open(f'output/test-{name}.html','w') as f:
            print(color, name)
            html = banner(date,title,banner_title, banner_subtitle, imagelink, color)
            f.write(html)
from typing import List, Optional, Tuple, Union
import numpy as np
from scipy import ndimage
try:
import numpy.typing as npt
except ImportError: # pragma: no cover
pass
"""
Extract lesion candidates from a softmax prediction
Authors: anindox8, matinhz, joeranbosma
"""
def extract_lesion_candidates_static(
    softmax: "npt.NDArray[np.float_]",
    threshold: float = 0.10,
    min_voxels_detection: int = 10,
    max_prob_round_decimals: Optional[int] = 4
) -> "Tuple[npt.NDArray[np.float_], List[Tuple[int, float]], npt.NDArray[np.int_]]":
    """
    Extract lesion candidates from a softmax volume using a static threshold.

    Voxels below `threshold` are suppressed, the remaining probability mass is
    split into 26-connected components, components with too few voxels are
    discarded, and each surviving component is flattened to its peak value.
    """
    # Suppress everything below the detection threshold.
    thresholded = softmax.copy()
    thresholded[softmax < threshold] = 0

    # Label the surviving voxels into 26-connected components.
    blobs_index, num_blobs = ndimage.label(thresholded, structure=np.ones((3, 3, 3)))

    all_hard_blobs = np.zeros_like(softmax)
    confidences = []
    for component_id in range(1, num_blobs + 1):
        component = (blobs_index == component_id)

        # Discard candidates that are too small to count as a detection.
        if np.count_nonzero(component) <= min_voxels_detection:
            blobs_index[component] = 0
            continue

        # Flatten the component to its (optionally rounded) peak probability.
        peak = np.max(thresholded[component])
        if max_prob_round_decimals is not None:
            peak = np.round(peak, max_prob_round_decimals)
        all_hard_blobs[component] = peak
        confidences.append((component_id, peak))
    return all_hard_blobs, confidences, blobs_index
def extract_lesion_candidates_dynamic(
    softmax: "npt.NDArray[np.float_]",
    min_voxels_detection: int = 10,
    num_lesions_to_extract: int = 5,
    dynamic_threshold_factor: float = 2.5,
    max_prob_round_decimals: Optional[int] = None,
    remove_adjacent_lesion_candidates: bool = True,
    max_prob_failsafe_stopping_threshold: float = 0.01
) -> "Tuple[npt.NDArray[np.float_], List[Tuple[int, float]], npt.NDArray[np.int_]]":
    """
    Generate detection proposals using a dynamic threshold to determine the
    location and size of lesions.

    Repeatedly takes the most confident remaining region, thresholds it at
    (its max / `dynamic_threshold_factor`), stores it as a candidate and
    removes it from the working volume, until `num_lesions_to_extract`
    candidates are found or the remaining maximum drops below
    `max_prob_failsafe_stopping_threshold`.

    Returns the tuple (hard blobs, [(index, confidence), ...], indexed blobs).
    """
    working_softmax = softmax.copy()
    dynamic_hard_blobs = np.zeros_like(softmax)
    confidences: List[Tuple[int, float]] = []
    dynamic_indexed_blobs: "npt.NDArray[np.int_]" = np.zeros_like(softmax, dtype=int)

    while len(confidences) < num_lesions_to_extract:
        tumor_index = 1 + len(confidences)

        # determine max. softmax
        max_prob = np.max(working_softmax)
        if max_prob < max_prob_failsafe_stopping_threshold:
            break

        # set dynamic threshold relative to the current maximum
        threshold = max_prob / dynamic_threshold_factor

        # extract blobs for dynamic threshold
        all_hard_blobs, _, _ = extract_lesion_candidates_static(
            softmax=working_softmax,
            threshold=threshold,
            min_voxels_detection=min_voxels_detection,
            max_prob_round_decimals=max_prob_round_decimals
        )

        # select blob with max. confidence
        # note: max_prob should be re-computed to account for the case where the max. prob
        # was inside a 'lesion candidate' of less than min_voxels_detection, which is
        # thus removed in extract_lesion_candidates_static.
        max_prob = np.max(all_hard_blobs)
        mask_current_lesion = (all_hard_blobs == max_prob)

        # ensure that mask is only a single lesion candidate (this assumption fails
        # when multiple lesions have the same max. prob)
        mask_current_lesion_indexed, _ = ndimage.label(mask_current_lesion, structure=np.ones((3, 3, 3)))
        mask_current_lesion = (mask_current_lesion_indexed == 1)

        # create mask with its confidence
        hard_blob = (all_hard_blobs * mask_current_lesion)

        # Detect whether the extracted mask is too close to an existing lesion candidate.
        # NOTE: `ndimage.morphology.binary_dilation` lived in a deprecated namespace
        # that was removed in SciPy >= 1.15; the top-level function is identical.
        extracted_lesions_grown = ndimage.binary_dilation(dynamic_hard_blobs > 0, structure=np.ones((3, 3, 3)))
        current_lesion_has_overlap = (mask_current_lesion & extracted_lesions_grown).any()

        # Check if lesion candidate should be retained
        if remove_adjacent_lesion_candidates and current_lesion_has_overlap:
            # skip lesion candidate, as it is too close to an existing lesion candidate
            pass
        else:
            # store extracted lesion
            dynamic_hard_blobs += hard_blob
            confidences += [(tumor_index, max_prob)]
            dynamic_indexed_blobs += (mask_current_lesion * tumor_index)

        # remove extracted lesion from working-softmax
        working_softmax = (working_softmax * (~mask_current_lesion))

    return dynamic_hard_blobs, confidences, dynamic_indexed_blobs
def extract_lesion_candidates(
    softmax: "npt.NDArray[np.float_]",
    threshold: Union[str, float] = 'dynamic-fast',
    min_voxels_detection: int = 10,
    num_lesions_to_extract: int = 5,
    dynamic_threshold_factor: float = 2.5,
    max_prob_round_decimals: Optional[int] = None,
    remove_adjacent_lesion_candidates: bool = True,
) -> "Tuple[npt.NDArray[np.float_], List[Tuple[int, float]], npt.NDArray[np.int_]]":
    """
    Generate detection proposals using a dynamic or static threshold to determine the size of lesions.
    Parameters
    ----------
    softmax : npt.NDArray[np.float_]
        Softmax prediction
    threshold : Union[str, float]
        Threshold to use for the extraction of lesion candidates.
        If 'dynamic', multiple thresholds are used, based on the softmax volume.
        If 'dynamic-fast', a single threshold is used, based on the softmax volume.
        If float, a static threshold is used (as specified).
    min_voxels_detection : int
        Minimum number of voxels in a lesion candidate.
    num_lesions_to_extract : int
        Number of lesion candidates to extract.
    dynamic_threshold_factor : float
        Ratio between max. of lesion candidate and its final extent. Higher factor means larger candidates.
    max_prob_round_decimals : Optional[int]
        Number of decimals to round the max. probability of a lesion candidate.
    remove_adjacent_lesion_candidates : bool
        If True, lesion candidates that are too close to an existing lesion candidate are removed.
    Returns
    -------
    hard_blobs : npt.NDArray[np.float_]
        Hard blobs of the input image, where each connected component is set to the lesion confidence.
    confidences : List[Tuple[int, float]]
        Confidences of the extracted lesion candidates (unordered).
        Each entry is a tuple of (is_lesion, lesion confidence).
    blobs_index : npt.NDArray[np.int_]
        Volume where each connected component is set to the index of the extracted lesion candidate.
    """
    # input validation: widen half precision, narrow extended precision,
    # and reject complex input.
    if softmax.dtype == np.float16:
        softmax = softmax.astype(np.float32)
    if softmax.dtype == np.longdouble:  # float128
        softmax = softmax.astype(np.float64)
    if softmax.dtype in [np.csingle, np.cdouble, np.clongdouble]:
        # (message previously misspelled "predicitons")
        raise ValueError('Softmax predictions should be of type float.')

    if threshold == 'dynamic':
        return extract_lesion_candidates_dynamic(
            softmax=softmax,
            dynamic_threshold_factor=dynamic_threshold_factor,
            num_lesions_to_extract=num_lesions_to_extract,
            remove_adjacent_lesion_candidates=remove_adjacent_lesion_candidates,
            min_voxels_detection=min_voxels_detection,
            max_prob_round_decimals=max_prob_round_decimals
        )

    if threshold == 'dynamic-fast':
        # determine max. softmax and set a per-case 'static' threshold based on that
        threshold = float(np.max(softmax) / dynamic_threshold_factor)
    else:
        threshold = float(threshold)  # convert threshold to float, if it wasn't already

    # 'dynamic-fast' and explicit static thresholds share the same extraction path.
    return extract_lesion_candidates_static(
        softmax=softmax,
        threshold=threshold,
        min_voxels_detection=min_voxels_detection,
        max_prob_round_decimals=max_prob_round_decimals
    )
from dataclasses import dataclass
from datetime import datetime
from typing import List
@dataclass
class Racer:
    """
    A class used to represent Racer.
    Attributes
    ----------
    lap_time
        Time in road.
    car
        Model car.
    driver
        Full driver name.
    abr
        Three-letter driver abbreviation (first '_'-separated field of an
        abbreviations.txt line).
    """
    lap_time: str
    car: str
    driver: str
    abr: str
class RacingDataAnalyzer:
    """
    Class order racers by time and print report that shows the 15 racers(asc or desc) and the rest after underline.
    """
    def __init__(self, raw_data: List[List[str]]) -> None:
        """
        Parameters
        :param raw_data: text from 3 files
        start_list: list
            Data from start.log
        end_list: list
            Data from end.log
        abbreviations_list: list
            Data from abbreviations.txt
        racer_data: list
            Data from func build_report
        """
        # sorted_data and racer_data are populated lazily by sort_by_time()
        # and build_report() respectively.
        self.sorted_data = None
        self.racer_data = None
        self.start_list, self.end_list, self.abbreviations_list = raw_data
    def build_report(self) -> List[Racer]:
        """
        Lap time calculation.
        :return:
            Data with lap time, car and driver name.
        """
        time_reg = '%H:%M:%S.%f'
        racer_data = []
        for start_item in self.start_list:
            # start_item[0:7] presumably covers the 3-letter abbreviation plus
            # the first date digits, used to pair start/end lines -- TODO confirm.
            end = str([end_time for end_time in self.end_list if (start_item[0:7] in end_time)])
            # start_item[0:3] is the racer abbreviation.
            abbrev = [name.strip() for name in self.abbreviations_list if (start_item[0:3] in name)]
            # NOTE(review): end[16:28] slices the str() of a one-element list to
            # reach the time-of-day field; start_item[14:26] does the same on the
            # raw line. Both are brittle against any format change.
            lap_time = datetime.strptime(end[16:28], time_reg) - datetime.strptime(start_item[14:26], time_reg)
            # Negative laps (end before start) stringify with a '-'; skip them.
            if '-' not in str(lap_time):
                abr, driver, car = abbrev[0].split('_', 2)
                racer_data.append(Racer(str(lap_time), car, driver, abr))
        self.racer_data = racer_data
        return self.racer_data
    def print_single_racer(self, driver_name: str) -> None:
        """
        Print driver info.
        Parameters
        :param driver_name:
            Full driver name.
        """
        # NOTE(review): next(iter([...])) raises StopIteration when the driver
        # is not present -- verify callers always pass a known name.
        data_driver = next(iter([driver_info for driver_info in self.racer_data if driver_info.driver == driver_name]))
        print(f"{data_driver.driver} | {data_driver.car} | {data_driver.lap_time}")
    def print_reports(self, direction: bool) -> None:
        """
        Print report that shows the 15 racers(asc or desc) and the rest after underline.
        Parameters
        :param direction:
            Shows list of drivers and order by [--asc | --desc].
        """
        # lap_time values are strings; sorting works while the format stays fixed-width.
        sorted_data = sorted(self.racer_data, key=lambda time: time.lap_time, reverse=direction)
        enumerate_data = enumerate(sorted_data, start=1)
        for number, data_driver in enumerate_data:
            if number == 16:  # top 15 racers and the rest after underline
                print('_' * 40)
            print(f"{number}. {data_driver.driver} | {data_driver.car} | {data_driver.lap_time}")
    def sort_by_time(self, direction: bool) -> List[Racer]:
        """
        Return list that show racers(asc or desc).
        Parameters
        :param direction:
            Sort list of drivers and order by [--asc | --desc].
        """
        self.sorted_data = sorted(self.racer_data, key=lambda time: time.lap_time, reverse=direction)
        return self.sorted_data
    def enumerate_drivers(self) -> List[Racer]:
        """
        Return report with enumerate.
        """
        # Requires sort_by_time() to have been called first (sorted_data is None otherwise).
        return list(enumerate(self.sorted_data, start=1))
    def find_driver_by_code(self, driver_code: str) -> Racer:
        """
        Return driver info.
        Parameters
        :param driver_code
        """
        # NOTE(review): raises StopIteration for an unknown code, like
        # print_single_racer above.
        return next(iter([driver_info for driver_info in self.racer_data if driver_info.abr == driver_code]))
import argparse
from dataclasses import dataclass
from datetime import datetime
from datetime import timedelta
import sys
@dataclass
class Driver:
    """A racer's identity plus the start/end timestamps of their best lap."""
    abbreviation: str
    name: str
    team: str
    time_start: datetime = None
    time_end: datetime = None

    @property
    def lap_time(self):
        """Return the lap duration, or None when the log data is implausible.

        A lap is rejected when the end precedes the start (negative duration)
        or when it exceeds 20 minutes (1200 seconds) -- both indicate corrupt
        log entries.
        """
        time_delta = self.time_end - self.time_start
        # total_seconds() also catches durations of a whole day or more, which
        # the previous `days < 0 or seconds > 1200` check let through (for a
        # delta of e.g. 1 day 30 s, .seconds is only 30).
        seconds = time_delta.total_seconds()
        if seconds < 0 or seconds > 1200:
            return None
        return time_delta
def data_from_file(file_path):
    """Parse a race log file into {abbreviation: timestamp}.

    Each line looks like ``SVF2018-05-24_12:02:58.917``: a three-letter
    driver abbreviation followed by a timestamp.
    """
    times = {}
    with open(file_path) as log:
        for raw_line in log:
            entry = raw_line.rstrip()
            abbreviation, stamp = entry[:3], entry[3:]
            times[abbreviation] = datetime.strptime(stamp, '%Y-%m-%d_%H:%M:%S.%f')
    return times
def get_abbreviations(folder_path):
    """Load {abbreviation: [driver_name, team]} from abbreviations.txt.

    Lines look like ``SVF_Sebastian Vettel_FERRARI``.
    """
    abbreviations = {}
    with open(f'{folder_path}/abbreviations.txt', encoding='utf-8') as source:
        for raw_line in source:
            abbr, driver_name, team = raw_line.rstrip('\n').split('_')
            abbreviations[abbr] = [driver_name, team]
    return abbreviations
def join_info(racer_info, time_start, time_end):
    """Merge abbreviation info with start/end times into Driver records.

    Missing start/end entries become None (Driver.lap_time then fails loudly
    for that driver when accessed).
    """
    return [
        Driver(abbr, name, team, time_start.get(abbr), time_end.get(abbr))
        for abbr, (name, team) in racer_info.items()
    ]
def load_data(path):
    """Assemble Driver records from the three log files under *path*."""
    abbreviations = get_abbreviations(path)
    starts = data_from_file(f'{path}/start.log')
    ends = data_from_file(f'{path}/end.log')
    return join_info(abbreviations, starts, ends)
def build_report(folder_path):
    """Rank drivers by lap time.

    Drivers with an invalid lap (``lap_time`` is None) sort to the end
    (keyed as ``timedelta.max``) and are still listed.

    :param folder_path: folder containing the three log files.
    :return: {driver_name: [place, team, lap_time_str]} ordered by place.
    """
    drivers = load_data(folder_path)
    ranked = sorted(
        drivers,
        key=lambda d: d.lap_time if isinstance(d.lap_time, timedelta) else timedelta.max,
    )
    # enumerate replaces the previous manually-incremented `place` counter.
    return {
        driver.name: [place, driver.team, str(driver.lap_time)]
        for place, driver in enumerate(ranked, start=1)
    }
def print_report(folder_path, asc_sort=True):
    """Print the ranked report, drawing a separator line before position 16."""
    ranking = build_report(folder_path)
    next_stage_positions = 16
    lines = []
    for driver_name, (place, team, lap) in ranking.items():
        if place == next_stage_positions:
            lines.append('-' * 65)
        lines.append('{0:2}.{1:17} | {2:25} | {3}'.format(place, driver_name, team, lap))
    if not asc_sort:
        lines.reverse()
    for line in lines:
        print(line)
def get_racer_info(path, racer_name):
    """Return 'name | team | lap_time' for *racer_name*, or None if absent."""
    match = next((d for d in load_data(path) if d.name == racer_name), None)
    if match is None:
        return None
    return f'{match.name} | {match.team} | {match.lap_time}'
def parse_args(args):
    """Parse command-line arguments.

    :param args: argument list, e.g. ``sys.argv[1:]``.
    :return: Namespace with ``files`` (folder path), ``driver`` (name) and
        ``order`` ('asc', 'desc' or None).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--files', type=str, help='Folder path')
    parser.add_argument('--driver', type=str, help='Driver\'s name')
    group = parser.add_mutually_exclusive_group()
    group.add_argument('--asc', action='store_const', dest='order', const='asc', help='Ascending sort')
    # The --desc help text previously said "Ascending sort" (copy-paste error).
    group.add_argument('--desc', action='store_const', dest='order', const='desc', help='Descending sort')
    return parser.parse_args(args)
def main():
    """CLI entry point: print one driver's stats or the full report."""
    args = parse_args(sys.argv[1:])
    if args.files is None:
        return
    if args.driver:
        print(get_racer_info(args.files, args.driver))
    else:
        # Previously `asc = False if args.order else True` reversed the report
        # whenever ANY order flag was given, so --asc also produced descending
        # output. Only --desc should reverse it.
        print_report(args.files, asc_sort=(args.order != 'desc'))
# Script entry point.
if __name__ == '__main__':
    main()
def compute_reporting_interval(item_count):
    """
    Computes for a given number of items that will be processed how often the
    progress should be reported.

    Large workloads report every 1% of items, medium workloads every 10%,
    and tiny workloads (30 items or fewer) report on every item.
    """
    if item_count > 100000:
        return item_count // 100
    if item_count > 30:
        return item_count // 10
    return 1
def report_on_interval(
    iterable,
    message='processed {i} of {n} items',
    item_count=None,
    printer=print,
    get_deltas=compute_reporting_interval,
    report_at_end=False,
    extra_actions=(),
    extras_at_end=False,
):
    """
    Provides a generator of items from an iterable that prints a status report
    on a regular interval as items are produced. May also execute extra actions
    at each reporting interval
    :param: iterable:       The iterable to loop over
    :param: message:        The formatted message to print (special vars: i, n)
                            i: the index of the current iteration (+1)
                            n: the total number of iterations
    :param: item_count:     The number of items in the iterable. If None, the
                            iterable will be converted to a list so that the
                            len() method can be applied to get the item count.
                            NOTE: letting this method compute the item count
                            will hurt performance when a generator is passed as
                            the iterable (if the iterable is large)
    :param: printer:        The method used to print messages. Defaults to
                            ``print`` directed to stdout.
    :param: get_deltas:     Function to compute interval for reporting.
                            Defaults to compute_reporting_interval().
                            Should take the count of items in the iterable as
                            the only argument.
    :param: report_at_end:  Include a reporting message after the last iteration
                            Will only occur if n is not a multiple of delta
    :param: extra_actions:  Other functions to execute on the interval. Should
                            be an iterable of callables (functions) that take no
                            arguments. Defaults to an empty tuple.
    :param: extras_at_end:  Execute extra actions after last iteration
                            Will only occur if n is not a multiple of delta
    :Example:
        >>> for entry in report_on_interval(some_stuff):
        ...     do_something()
        processed 30 of 300 items
        processed 60 of 300 items
        processed 90 of 300 items
        ...
    """
    if item_count is not None:
        n = item_count
    else:
        # Materialize the iterable so len() works (costs memory for
        # large generators -- see the docstring note).
        iterable = list(iterable)
        n = len(iterable)
    delta = get_deltas(n)
    for i, item in enumerate(iterable):
        yield item
        if (i + 1) % delta == 0:
            printer(message.format(i=i + 1, n=n))
            # Plain loop instead of a side-effect list comprehension.
            for action in extra_actions:
                action()
    # Final report/actions only when the last interval did not already fire.
    if n % delta != 0:
        if report_at_end:
            printer(message.format(i=n, n=n))
        if extras_at_end:
            for action in extra_actions:
                action()
import sys
import argparse
from typing import Any
from tabulate import tabulate
from report_race.constant import ORDER, TOP_RACERS, TABLE_FORMAT
from report_race.race import Race, LogFilesValidator
class OrderedNamespace(argparse.Namespace):
    """Namespace with ordered arguments.
    Adds dictionary which saves order of input arguments in cli.
    """
    def __init__(self, **kwargs):
        """Initialize with new attribute
        Args:
            kwargs: arguments in parser
        """
        # _order will save input arguments in given order.
        # Written through __dict__ so our own __setattr__ (which reads
        # _order) is bypassed while the attribute does not exist yet.
        self.__dict__["_order"] = []
        super().__init__(**kwargs)
    def __setattr__(self, attr: str, value: Any):
        """Set attributes.
        _order will contain only attribute names, that was
        provided in cli in preserved order. If same attribute is entered more
        than once, only last one will be executed.
        Example:
            cli arguments:
                --files <path> --time --date --time --report --time
            execution order:
                --date -- report --time
        Args:
            attr: Attributes in Namespace.
            value: Values of attributes in Namespace.
        """
        super().__setattr__(attr, value)
        # Ignore attributes without data from cli
        # (None / False mean the flag was not supplied).
        if value not in [None, False]:
            if attr not in self.__dict__["_order"]:
                self.__dict__["_order"].append(attr)
            else:
                # Delete attr and append to the list to preserve
                # the order of input arguments.
                self.__dict__["_order"].remove(attr)
                self.__dict__["_order"].append(attr)
    def ordered(self) -> list[tuple]:
        """Generate values for attributes in _order.
        The mandatory "files" argument is excluded: it selects the data
        source and has no print action of its own.
        Returns:
            Return _order pairs of attributes and values.
        """
        return [(attr, getattr(self, attr)) for attr in self._order
                if attr != "files"]
    def only_file_path(self):
        """Checks argument quantity.
        Checks if any argument was given beside files path argument.
        Raises:
            UserWarning: If only files path was given as argument.
        """
        if len(self.__dict__["_order"]) == 1:
            raise UserWarning("Application should use at least one argument (except 'files' argument!)")
class CliArguments:
    """Represents command-line interface"""
    @staticmethod
    def cli():
        """Implement command-line interface.

        Uses argparse to get input from command-line and print according
        result. Exits with a usage message when only the mandatory
        ``--files`` argument was supplied.
        """
        parser = argparse.ArgumentParser(
            prog="Report of Monaco 2018 racing.",
            description="Get information about the race from the log files."
        )
        parser.add_argument(
            "-f", "--files", required=True,
            help="directory path where the race log files are located (required argument)"
        )
        parser.add_argument(
            "-r", "--report", action="store_true",
            help="report of the 1st qualification results"
        )
        parser.add_argument(
            "-d", "--date", action="store_true",
            help="date of the 1st qualification"
        )
        parser.add_argument(
            "-t", "--time", action="store_true",
            help="start time of the 1st qualifications"
        )
        parser.add_argument(
            "--racers-number", action="store_true",
            help="number of the racers in the race"
        )
        parser.add_argument(
            "--driver",
            help="information about specific driver"
        )
        # The help strings below previously embedded stray '"' characters and
        # raw newlines/indentation from broken triple-quoting; they are now
        # single clean strings.
        parser.add_argument(
            "--racers",
            const=list(ORDER.keys())[0],
            nargs="?",
            choices=ORDER.keys(),
            help="list of all racers in the race in asc or desc order (default is asc)"
        )
        parser.add_argument(
            "--teams",
            const=list(ORDER.keys())[0],
            nargs="?",
            choices=ORDER.keys(),
            help="list of all teams in the race in asc or desc order (default is asc)"
        )
        parser.add_argument(
            "--team-racers",
            help="list of all racers from specific team"
        )
        args = parser.parse_args(namespace=OrderedNamespace())
        try:
            # Check if only "files" argument was passed
            args.only_file_path()
        except UserWarning as err:
            print(err)
            parser.print_usage()
            sys.exit()
        return args
class CliEngine:
    """Cli engine.
    Class provides methods to get arguments from command-line, check input data
    and print appropriate result.
    Attributes:
        METHODS_WITH_ARGUMENTS: Methods that are called with one argument
        REPORT_HEADER: Header for report table
        RACERS_HEADER Header for racers and racers in teams table
    """
    # Arguments whose print method takes the cli value as a parameter.
    METHODS_WITH_ARGUMENTS = ["driver", "racers", "teams", "team_racers"]
    REPORT_HEADER = ["", "Full Name", "Team", "Q1 Time"]
    RACERS_HEADER = ["Abbr", "Full Name", "Team", "Q1 Time"]
    RACERS_HEADER_SHORT = ["Abbr", "Full Name"]
    def __init__(self):
        """Initialize args, race and print_func attributes"""
        self.args = CliArguments.cli()
        try:
            # Get log files from file directory and create race instance.
            self.race = CliEngine.race_from_log_files(self.args.files)
        except (TypeError, FileNotFoundError, NotADirectoryError, UserWarning) as err:
            # Any validation failure aborts the program with the error text.
            sys.exit(err)
        # Save all print functions in dictionary in order to make execution simpler.
        self._print_func = {"report": self.print_report,
                            "time": self.print_time,
                            "date": self.print_date_of_the_race,
                            "racers_number": self.print_number_of_racers,
                            "driver": self.print_driver,
                            "racers": self.print_racers,
                            "teams": self.print_teams,
                            "team_racers": self.print_racers_in_team}
    @staticmethod
    def race_from_log_files(path: str) -> Race:
        """Create Race instance
        Create Race instance from log_files in given directory.
        Args:
            path: path of directory with log files.
        """
        validator = LogFilesValidator(path)
        return validator.init_race()
    def print_result(self):
        """Call print methods according cli arguments"""
        # Go through passed arguments and execute according print functions.
        # args.ordered() preserves the order the flags were typed in.
        for arg, value in self.args.ordered():
            if arg in CliEngine.METHODS_WITH_ARGUMENTS:
                self._print_func[arg](value)
            else:
                self._print_func[arg]()
    def print_date_of_the_race(self):
        """Print date of the race."""
        print("\nDate of the race:")
        print(f"\t{self.race.get_date_of_race()}")
    def print_time(self):
        """Print start time of the 1st qualification."""
        print("\n1st qualification round started at:")
        print(f"\t{self.race.get_start_time_of_q1()}")
    def print_number_of_racers(self):
        """Print number of racers in the race."""
        print("\nNumber of the racers in the race:")
        print(f"\t{self.race.get_number_of_racers()}")
    def print_report(self):
        """Print race report.
        Prints race report in nice table look.
        """
        # Build report of the race.
        race_report = self.race.get_report()
        print("\nReport of the race:")
        # Calculate separation line length for each column.
        place_sep = "--"
        # NOTE(review): get_racers()[1] is the SECOND [abbr, name] pair, so the
        # name-column width is derived from one racer only -- verify intent.
        racer_sep = "-" * len(max(self.race.get_racers()[1], key=len))
        team_sep = "-" * len(max(self.race.get_teams(), key=len))
        time_sep = "-" * len(race_report[0][3])
        # Add separation line after the top qualifiers (TOP_RACERS rows).
        separation_line = [place_sep, racer_sep, team_sep, time_sep]
        race_report.insert(TOP_RACERS, separation_line)
        print(tabulate(race_report,
                       headers=CliEngine.REPORT_HEADER,
                       tablefmt=TABLE_FORMAT))
    def print_driver(self, name: str):
        """Print driver information.
        Args:
            name: Driver full name.
        Raises:
            UserWarning: if no racer found with given name.
        """
        try:
            driver = self.race.get_racer(name)
            print(f"\nInformation about {driver[1]}:")
            print(tabulate([driver],
                           headers=CliEngine.RACERS_HEADER_SHORT,
                           tablefmt=TABLE_FORMAT))
        except UserWarning as err:
            print(err)
    def print_racers(self, order: str):
        """Print list of racers in the race.
        Args:
            order: defines order of the result list by racer's abbreviation.
        """
        # NOTE(review): get_racers() returns [abbr, name] pairs (2 columns)
        # while RACERS_HEADER declares 4 -- confirm tabulate output is intended.
        racers = self.race.get_racers(reverse=ORDER[order])
        print(tabulate(racers,
                       headers=CliEngine.RACERS_HEADER,
                       tablefmt=TABLE_FORMAT))
    def print_teams(self, order: str):
        """Print list of teams in the race.
        Args:
            order: Flag for racers order.
        """
        teams = [[team] for team in self.race.get_teams(reverse=ORDER[order])]
        print(tabulate(teams,
                       headers=["List of teams:"],
                       tablefmt=TABLE_FORMAT))
    def print_racers_in_team(self, team: str):
        """Print racers in specific team.
        Args:
            team: Name of the team.
        """
        try:
            racers = self.race.get_racers_in_team(team)
            print(f"\nRacers from {team.upper()} team:")
            print(tabulate(racers,
                           headers=CliEngine.RACERS_HEADER,
                           tablefmt=TABLE_FORMAT, ))
        except UserWarning as err:
            print(err)
# Script entry point: build the CLI engine and print the requested output.
if __name__ == "__main__":
    cli = CliEngine()
    cli.print_result()
import os
from datetime import datetime, timedelta
from typing import Union
from report_race.constant import REPORT_FILES
class Race:
    """Class represents race.
    Args:
        racers: dictionary of racers participated in the race. example:
            {"NHR":
                {"name": "Nico Hulkenberg",
                 "team": RENAULT,
                 "q1_start": datetime of string 2018-05-24_12:02:49.914,
                 "q1_end": datetime of string 2018-05-24_12:04:02.979}}
    Attributes:
        TRAILING_ZEROS: Slice start that strips the leading '0:0' hours
            prefix from str(timedelta). (Name kept for compatibility even
            though it effectively trims the LEADING part.)
        LEADING_ZEROS: Negative slice end that strips the trailing
            microsecond zeros ('000') from str(timedelta).
    Raises:
        UserWarning:
            if get_racer and get_racers_in_team methods can't find name of the
            racer or the team in dictionary of racers.
    """
    TRAILING_ZEROS = 3
    LEADING_ZEROS = -3

    def __init__(self, racers: dict[str, dict[str, Union[str, datetime]]]):
        # Note: the annotation previously used `dict[str: ...]` which builds a
        # slice inside the subscript; `dict[str, ...]` is the correct form.
        self.racers = racers

    @staticmethod
    def _lap_time_str(racer: dict[str, Union[str, datetime]]) -> str:
        """Format a racer's Q1 lap time as e.g. '1:12.345'.

        Replaces the magic `[3: -3]` slice that was duplicated across four
        methods with a single use of the class constants.
        """
        lap = racer["q1_end"] - racer["q1_start"]
        return str(lap)[Race.TRAILING_ZEROS: Race.LEADING_ZEROS]

    def get_number_of_racers(self) -> int:
        """Get number of the racers in the race.
        Returns:
            Number of the racers in the race.
        """
        return len(self.racers)

    def get_date_of_race(self) -> datetime.date:
        """Get date in which race took place.
        Returns:
            Date of the race (taken from an arbitrary racer's Q1 start).
        """
        first_racer = next(iter(self.racers.values()))
        return first_racer["q1_start"].date()

    def get_start_time_of_q1(self) -> datetime.time:
        """Get start time of the 1st qualification.
        Returns:
            Earliest racer start time, or None when there are no racers.
        """
        if not self.racers:
            return None
        return min(racer["q1_start"].time() for racer in self.racers.values())

    def get_racers(self, reverse=False) -> list[list]:
        """Get all racer names in the race.
        Args:
            reverse: Defines order of racers in the list.
                If True than order of the racer abbreviation is reverse.
        Returns:
            List of [abbreviation, full name] pairs sorted by abbreviation.
        """
        racers = [[abbr, racer["name"]] for abbr, racer in self.racers.items()]
        return sorted(racers, key=lambda pair: pair[0], reverse=reverse)

    def get_teams(self, reverse=False) -> list[str]:
        """Get all team names in race.
        Args:
            reverse: Defines order of team names in the list.
                If True than order of the team names is reverse.
        Returns:
            List of unique team names in asc or desc order.
        """
        return sorted({racer["team"] for racer in self.racers.values()},
                      reverse=reverse)

    def get_report(self) -> list[list]:
        """Build result of the 1st qualification.
        Returns:
            List of [place, name, team, lap-time string] rows ordered by lap time.
        """
        rows = [[racer["name"], racer["team"], self._lap_time_str(racer)]
                for racer in self.racers.values()]
        # NOTE: lap times are compared as strings; this is only correct while
        # every lap stays under 10 minutes ("m:ss.mmm" is fixed-width then).
        rows.sort(key=lambda row: row[2])
        # Place must be assigned after sorting.
        for place, row in enumerate(rows, start=1):
            row.insert(0, place)
        return rows

    def get_racer(self, name: str) -> list:
        """Get racer data.
        Args:
            name: Name of the racer.
        Returns:
            Data about racer: [abbreviation, name, team, lap time].
        Raises:
            UserWarning when name of the racers is not in dictionary of racers.
        """
        for key, racer in self.racers.items():
            if racer["name"] == name:
                return [key, racer["name"], racer["team"], self._lap_time_str(racer)]
        raise UserWarning(f"Can't find racer with name: {name}")

    def get_racer_by_id(self, abbr: str) -> list:
        """Get racer data by id (abbreviation).
        Args:
            abbr: ID (abbreviation) of the racer.
        Returns:
            Data about racer: [abbreviation, name, team, lap time].
        Raises:
            UserWarning when ID of the racer is not in dictionary of racers.
        """
        racer = self.racers.get(abbr)
        if racer is not None:
            return [abbr, racer["name"], racer["team"], self._lap_time_str(racer)]
        raise UserWarning(f"Can't find racer with id: {abbr}")

    def get_racers_in_team(self, team: str, order=False) -> list[list]:
        """Get racers from the team.
        Args:
            team: Name of the team (matched case-insensitively).
            order: Defines order of racer names in the list.
                If True than order of the racer names is reverse.
        Returns:
            List of racers data sorted by abbreviation.
        Raises:
            UserWarning when team is not in dictionary of racers.
        """
        wanted = team.upper()
        members = [[abbr, racer["name"], racer["team"], self._lap_time_str(racer)]
                   for abbr, racer in self.racers.items()
                   if racer["team"] == wanted]
        if not members:
            raise UserWarning(f"Can't find team with name: {team}")
        return sorted(members, key=lambda row: row[0], reverse=order)
class LogFilesValidator:
"""Validator for Race class.
Validates log_files from which Race class could be built.
Args:
path: path of directory with log files.
Attributes:
LOOK_UP_KEYS: list of keys for log_files validation.
_racers: dictionary of racers participated in the race.
Raises:
UserError:
if file/s in the path is/are missing or "log_files" argument contains invalid data.
TypeError:
if "log_files" argument contains unacceptable types.
FileNotFoundError:
if file in path from "log_files" can't be found.
PermissionError:
if file in path from "log_files" can't be read.
"""
LOOK_UP_KEYS = ["abbreviations", "start", "end"]
def __init__(self, path: str):
"""Get "log_files" from "path" argument."""
self.log_files = self.get_path_to_files(path)
self._racers = {}
# Check "log_files" argument for valid structure and content.
self._validate_log_files_argument()
# Get information about racers.
self._data_from_abbreviation(self.log_files["abbreviations"])
# Get start and finish time of racer's best lap.
for key, file_path in self.log_files.items():
if key in ["start", "end"]:
self._data_from_log_files(key, file_path)
@staticmethod
def get_path_to_files(path: str) -> dict[str: str]:
"""Get path to the log files.
"log_files" argument must be a dictionary with three key-value pairs:
{"abbreviations": "path/to/abbreviations.txt",
"start": "path/to/start.log",
"end": "path/to/end.log"}
Returns:
Dictionary with file name as key and path to the file as value.
Raises:
UserWarning: if not all files where found in the folder.
NotADirectoryError: if path argument is not a dictionary.
FileNotFoundError: if path does not exist.
"""
# Get path of the log files from directory if file name is in lookup list
log_files = {os.path.splitext(f)[0]: os.path.join(path, f)
for f in os.listdir(path)
if f.lower() in REPORT_FILES}
# Check if dictionary has all necessary files.
if len(log_files) != len(REPORT_FILES):
raise UserWarning("File missing. Make sure that folder contains this files:"
f" {REPORT_FILES}")
return log_files
@staticmethod
def __validate_log_files_len(log_file_len: int):
    """Validate the length of the "log_files" argument.

    Args:
        log_file_len: length of log_files.

    Raises:
        UserWarning: if the "log_files" argument length is not 3.
    """
    expected_len = 3
    if log_file_len == expected_len:
        return
    raise UserWarning("log_files argument should contain three key value pairs. "
                      f"{log_file_len} was provided.")
@staticmethod
def __validate_log_files_keys(key: str):
    """Ensure ``key`` is one of the expected "log_files" keys.

    Args:
        key: key in the "log_files" argument.

    Raises:
        UserWarning: if the key is not in LOOK_UP_KEYS.
    """
    if key in LogFilesValidator.LOOK_UP_KEYS:
        return
    raise UserWarning(f"Invalid key in log_files argument: {key}")
@staticmethod
def __validate_log_files_values(file_path: str):
    """Check that the file name in the "log_files" argument is correct.

    Args:
        file_path: path to the log file.

    Raises:
        UserWarning: if the file name is not one of the expected files.
        TypeError: if ``file_path`` is not path-like (propagated from
            ``os.path.basename``).
    """
    # The previous try/except only re-raised the same exception types
    # unchanged, so it was a no-op and has been removed.
    if os.path.basename(file_path) not in REPORT_FILES:
        raise UserWarning(f"Invalid value in log_files argument {file_path}")
@staticmethod
def __validate_log_files_is_empty(file_path):
    """Check that the file at ``file_path`` is not empty.

    Args:
        file_path: path of the log file.

    Raises:
        UserWarning: if the file is empty.
    """
    file_size = os.stat(file_path).st_size
    if not file_size:
        raise UserWarning(f"File is empty: {os.path.basename(file_path)}")
def _validate_log_files_argument(self):
    """Validate the "log_files" attribute.

    "log_files" must be a dictionary with three key-value pairs:
    {"abbreviations": "path/to/abbreviations.txt",
     "start": "path/to/start.log",
     "end": "path/to/end.log"}

    Raises:
        UserWarning: if it contains invalid key/value pairs or empty files.
        TypeError: if a file_path string can't be used by os.path.basename.
    """
    # The previous try/except only re-raised the same exception types, so
    # it was a no-op and has been removed; errors propagate unchanged.
    # Check length of log_files argument.
    self.__validate_log_files_len(len(self.log_files))
    # Check if keys and values are acceptable.
    for key, file_path in self.log_files.items():
        self.__validate_log_files_keys(key)
        self.__validate_log_files_values(file_path)
        self.__validate_log_files_is_empty(file_path)
def _data_from_log_files(self, key, file_path):
    """Record the start/end time of each racer's best lap.

    Args:
        key: "start" or "end" (key in the log_files argument).
        file_path: path of the log file to read.

    Raises:
        FileNotFoundError: if file_path is not found.
        PermissionError: if the program doesn't have permission to read file_path.
    """
    # The previous try/except only re-raised the same exception types, so
    # it was a no-op and has been removed.
    with open(file_path, encoding="utf8") as file:
        for line in file:
            line = line.strip()
            if not line:
                continue
            # First three chars in the line are the abbreviation (key);
            # the rest is the lap's start or end timestamp.
            # NOTE(review): an abbreviation absent from the abbreviations
            # file raises KeyError here -- confirm that is intended.
            self._racers[line[:3]]["q1_" + key] = datetime.strptime(
                line[3:].strip(), "%Y-%m-%d_%H:%M:%S.%f")
def _data_from_abbreviation(self, file_path):
    """Read racer abbreviation, full name and team from the abbreviations file.

    Args:
        file_path: path of the abbreviations file.

    Raises:
        FileNotFoundError: if file_path is not found.
        PermissionError: if the program doesn't have permission to read file_path.
    """
    # The previous try/except only re-raised the same exception types, so
    # it was a no-op and has been removed.
    with open(file_path, encoding="utf8") as file:
        for line in file:
            line = line.strip()
            if not line:
                continue
            # Line format: "abbreviation_name of the racer_racer team".
            # maxsplit=2 keeps underscores inside the team name intact
            # (an unbounded split silently truncated such team names).
            racer = line.split("_", 2)
            self._racers[racer[0]] = {"name": racer[1], "team": racer[2]}
def init_race(self) -> Race:
    """Initialize a Race instance from the collected ``_racers`` data.

    Returns:
        Instance of the Race class.
    """
    return Race(self._racers)
from datetime import datetime
import re
import argparse
def build_report(start_log_file, end_log_file, sort=None):
    """
    Build a report based on data from start and end log files.

    :param start_log_file: (str) the path to the start log file.
    :param end_log_file: (str) the path to the end log file.
    :param sort: (str, optional) the order in which the report should be sorted.
        Accepted values 'desc'. The default value is 'asc'.
    :return: A dictionary containing the race results ordered by lap duration,
        in the format {driver_abbreviation: race_time}.
    """
    def read_log(path):
        # Each line is "<abbreviation><timestamp>"; the regex splits at the
        # first digit (abbreviations contain no digits). Blank lines are
        # skipped (previously they raised IndexError).
        entries = {}
        with open(path, 'r', encoding='utf-8') as f:
            for line in f:
                line = line.strip()
                if not line:
                    continue
                key, value = re.findall(r"(.+?\D)(\d.+)", line)[0]
                entries[key] = value
        return entries

    start_log = read_log(start_log_file)
    end_log = read_log(end_log_file)
    race_result = {}
    lap_seconds = {}
    for key in start_log:
        start_time = datetime.strptime(start_log[key], '%Y-%m-%d_%H:%M:%S.%f')
        end_time = datetime.strptime(end_log[key], '%Y-%m-%d_%H:%M:%S.%f')
        # abs() guards against swapped start/end entries in the logs.
        delta = abs(end_time - start_time)
        lap_seconds[key] = delta.total_seconds()
        total_seconds = delta.days * 86400 + delta.seconds
        minutes, whole_seconds = divmod(total_seconds, 60)
        # Format as M:SS.ffffff then drop insignificant trailing zeros only.
        # (The previous strip('0') also removed *leading* zeros, turning
        # e.g. "00:59.123000" into ":59.123".)
        formatted = '{}:{:02d}.{:06d}'.format(
            minutes, whole_seconds, delta.microseconds).rstrip('0').rstrip('.')
        race_result[key] = formatted
    # Sort numerically by lap duration; the previous lexicographic sort of
    # the formatted strings misorders laps once minutes reach two digits.
    ordered = sorted(race_result, key=lap_seconds.get, reverse=(sort == 'desc'))
    return {key: race_result[key] for key in ordered}
def print_report(abbreviations_file, build_result, driver_name=None):
    """
    Print report of a race or a specific driver.

    :param abbreviations_file: (str) the path to the file with the driver abbreviation data.
    :param build_result: (dict) dictionary with data on race results, with driver abbreviations
        as key and race time as value.
    :param driver_name: (str, optional) parameter to specify a specific driver to print race data for.
    :return: None
    """
    abbreviations = {}
    dict_result = {}
    with open(abbreviations_file, 'r', encoding='utf-8') as f:
        for line in f:
            # "ABR_Driver Name_Team Name" -> key "ABR", value "Driver Name_Team Name"
            key, value = line.strip().split('_', 1)
            abbreviations[key] = value
    for key, result in build_result.items():
        # maxsplit=1 keeps underscores inside the team name intact
        # (an unbounded split raised ValueError for such teams).
        driver, team = abbreviations[key].split('_', 1)
        if driver_name is None or driver_name == driver:
            dict_result[driver] = (team, result)
    if driver_name is None:
        for driver_place, (driver, (team, result)) in enumerate(dict_result.items(), start=1):
            # Separator between the qualified top 15 and the rest.
            if driver_place == 16:
                print('-' * 60)
            print(f"{driver:<20} | {team:<25} | {result}")
    else:
        team, result = dict_result[driver_name]
        print(f"{driver_name:<20} | {team:<25} | {result}")
def main():
    """CLI entry point: parse the arguments and print the requested report."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--files', required=True, help='folder containing the data files')
    parser.add_argument('--driver', type=str, help='name of the driver')
    parser.add_argument('--asc', action='store_true', help='sort results in ascending order')
    parser.add_argument('--desc', action='store_true', help='sort results in descending order')
    args = parser.parse_args()
    start_log_file = f'{args.files}/start.log'
    end_log_file = f'{args.files}/end.log'
    abbreviations_file = f'{args.files}/abbreviations.txt'
    ascending = build_report(start_log_file, end_log_file)
    if args.driver:
        # A named driver takes precedence over the sort flags.
        print_report(abbreviations_file, ascending, args.driver)
    elif args.desc:
        descending = build_report(start_log_file, end_log_file, sort='desc')
        print_report(abbreviations_file, descending)
    else:
        # Default (and --asc) behaviour: ascending order.
        print_report(abbreviations_file, ascending)
# Run the CLI only when executed as a script.
if __name__ == '__main__':
    main()
import argparse
from datetime import datetime, timedelta
import pathlib
from dataclasses import dataclass
@dataclass
class Pilot(object):
    """A race participant with optional name, team and lap timing data."""
    racer_name: str = None
    start_time: datetime = None
    company: str = None
    end_time: datetime = None

    @property
    def lap_time(self):
        """Lap duration, or None when timing data is missing or inconsistent."""
        if self.start_time is None or self.end_time is None:
            return None
        if self.end_time < self.start_time:
            return None
        return self.end_time - self.start_time
"""The program displays the rating of the tournament participants"""
SPLITTER = 15
DATE_TEMPLATE = '%Y-%m-%d_%H:%M:%S.%f'
def parse_files(start, end, abbreviations):
    """Build the pilots dictionary from the three open log files.

    The abbreviations file provides names and teams; the start and end logs
    provide the lap timestamps (attached via get_dates).
    """
    pilots = {}
    for entry in abbreviations:
        abbr = entry[:3]
        # "ABR_Full Name_Team" -- exactly two underscores expected.
        _, full_name, team = entry.split('_')
        pilots[abbr] = Pilot(racer_name=full_name, company=team.replace('\n', ''))
    get_dates(pilots, start, 'start_time')
    get_dates(pilots, end, 'end_time')
    return pilots
def get_dates(pilots, data, attr_name):
    """Attach parsed timestamps from ``data`` to pilots as ``attr_name``.

    Unknown abbreviations get a fresh Pilot carrying only that timestamp.
    """
    for entry in data:
        abbr = entry[:3]
        parsed = datetime.strptime(entry[3:].strip(), DATE_TEMPLATE)
        if abbr in pilots:
            setattr(pilots[abbr], attr_name, parsed)
        else:
            pilots[abbr] = Pilot(**{attr_name: parsed})
    return pilots
def build_report(pilots, reverse=False):
    """Return the Pilot objects ordered by lap time.

    Pilots without a computable lap time sort after those with one; the whole
    ordering is flipped when ``reverse`` is True.
    """
    def order_key(abbr):
        lap = pilots[abbr].lap_time
        # Valid laps form group 0, missing laps group 1 (sorted last).
        return (1, 0) if lap is None else (0, lap)

    ordered_abbrs = sorted(pilots, key=order_key, reverse=reverse)
    return [pilots[abbr] for abbr in ordered_abbrs]
def print_report(abbr_racer, reversing=False):
    """Build the standings text for the given, already-ordered pilots.

    Pilots without a lap time are rendered as errors; a separator line is
    inserted at the qualification cut-off (position depends on ``reversing``).
    """
    lines = []
    cut_index = len(abbr_racer) - 1 - SPLITTER if reversing else SPLITTER - 1
    for position, pilot in enumerate(abbr_racer):
        if pilot.lap_time is None:
            lines.append('{0: <18}{err}'.format(pilot.racer_name, err="- Error"))
        else:
            _, minutes, seconds = str(pilot.lap_time).split(':')
            lines.append('{0: <20}{1: <26}{2}'.format(
                pilot.racer_name, pilot.company, f'{minutes}:{seconds.rstrip("0")}'))
        if position == cut_index:
            lines.append('-------------------------------------------------------')
    return '\r\n'.join(lines)
def parse_arguments():
    """Define and parse the command-line arguments."""
    parser = argparse.ArgumentParser()
    for flag, options in (
        ('--files', {'type': pathlib.Path, 'required': True}),
        ('--asc', {'action': 'store_true'}),
        ('--desc', {'action': 'store_true'}),
        ('--driver', {'type': str}),
    ):
        parser.add_argument(flag, **options)
    return parser.parse_args()
def get_single_driver(data, driver_name):
    """Return {abbreviation: pilot} for the pilot whose name matches, or None.

    The values in ``data`` are Pilot instances (see parse_files), so the
    racer name must be read as an attribute -- the previous dict-style
    ``value['racer_name']`` raised TypeError on Pilot objects.
    """
    for abbr, pilot in data.items():
        if pilot.racer_name == driver_name:
            return {abbr: pilot}
    return None
def main():
    """Entry point: parse CLI arguments and load the log data.

    NOTE(review): the parsed/filtered data is never rendered here --
    build_report/print_report are not called and ``reversing`` is unused.
    Looks unfinished; confirm against the intended CLI behaviour.
    """
    arguments = parse_arguments()
    # Locate the three log files inside the given folder.
    start = list(arguments.files.glob('start.log')).pop()
    end = list(arguments.files.glob('end.log')).pop()
    abbreviations = list(arguments.files.glob('abbreviations.txt')).pop()
    data = parse_files(start.open(), end.open(), abbreviations.open())
    if arguments.asc and arguments.desc:
        raise ValueError('Should be one argument --asc or --desc')
    # NOTE(review): computed but never used below.
    reversing = arguments.desc
    if arguments.driver:
        data = get_single_driver(data, arguments.driver)
# Run the CLI only when executed as a script.
if __name__ == '__main__':
    main()
from report_ranger.errors import InputError
import logging
log = logging.getLogger(__name__)
def combine_matrix(matrix, next_matrix):
    """ Combine two risk matrices where the final risk in one is fed into the next. It is called recursively """
    # Shallow-copy so the caller's matrix is left untouched at this level.
    combined = matrix.copy()
    for stage, outcome in combined.items():
        if isinstance(outcome, dict):
            # Not a leaf yet -- combine the sub-matrix recursively.
            combined[stage] = combine_matrix(outcome, next_matrix)
            continue
        if outcome not in next_matrix:
            log.error(
                "Risk matrix validation error: In validating the risk assessment, we have not been able to find {} in the next matrix stage. Options are {}.".format(
                    outcome, next_matrix.keys()))
            raise Exception("In validating the risk assessment, we have not been able to find {} in the next matrix stage. Options are {}.".format(
                outcome, next_matrix.keys()))
        # Leaf: replace the reference with the next matrix's entry.
        combined[stage] = next_matrix[outcome]
    return combined
def validate_matrix(matrix, stages, risks):
    """ Validates that each combination of stages is in the final matrix and the risks all make sense.

    Raises Exception on the first invalid stage or risk found; returns None
    when the whole matrix is valid.
    """
    if len(stages) == 0:
        log.error(
            "Risk matrix validation error: Excessively deep risk matrix found. We have the following part of the matrix, but we're out of risk stages. {}".format(
                matrix))
        # We've gotten to the end of stages, but we still have a matrix. This isn't good.
        raise Exception("Excessively deep risk matrix found. We have the following part of the matrix, but we're out of risk stages. {}".format(
            matrix))
    current_stage = stages[0]
    future_stages = stages[1:]
    for stage in current_stage['ratings']:
        if stage not in matrix:
            log.error(
                "Risk matrix validation error: Stage {} not found in the remaining matrix. All we have is {}".format(
                    stage, matrix.keys()))
            raise Exception("Stage {} not found in the remaining matrix. All we have is {}".format(
                stage, matrix.keys()))
        elif len(future_stages) == 0:
            if matrix[stage] not in risks:
                log.error(
                    "Risk matrix validation error: Could not find {} in the risks. The risks we have are {}".format(
                        matrix[stage], risks))
                raise Exception("Could not find {} in the risks. The risks we have are {}".format(
                    matrix[stage], risks))
        else:
            # Recurse into EVERY rating's sub-matrix. (Previously this was
            # `return validate_matrix(...)`, which validated only the first
            # rating's branch and silently skipped the rest.)
            validate_matrix(matrix[stage], future_stages, risks)
def get_risk(matrix, stages, headers):
    """
    A recursive assisting function that goes through the risk assessment tree to get the final risk.

    Returns the leaf of the matrix reached by following each stage's value in
    ``headers``, or "None" when a header is missing or invalid.
    """
    if len(stages) == 0:
        return matrix
    # Make sure that the next stage is actually in the headers
    if stages[0]['id'] not in headers:
        log.warn("{} not found in the headers of the vulnerability file as required by the template. Assigning risk as None.".format(
            stages[0]['id']))
        return "None"
    stageval = headers[stages[0]['id']]
    # A single-element list is unwrapped to its value; any other list is
    # rejected. (The previous condition was inverted: it unwrapped multi-
    # element lists and rejected single-element ones.)
    if type(stageval) is list:
        if len(stageval) == 1:
            stageval = stageval[0]
        else:
            log.warn("{} in this vulnerability was a list rather than a string. Assigning risk as None.".format(
                stages[0]['id']))
            return "None"
    # If the value of the header isn't in the matrix, cast an alert
    if stageval not in matrix:
        log.warn("'{}' is not a valid value for {}. The selection is: {}".format(
            stageval, stages[0]['id'], stages[0]['ratings']))
        return "None"
    # Descend using the normalized value. (Previously this re-read the raw
    # header, so an unwrapped list value was never actually used.)
    return get_risk(matrix[stageval], stages[1:], headers)
class RiskAssessment:
    """
    Holds the risk assessment, allows easy application of the risk assessment
    Can be customised
    """

    def __init__(self, riskassessment):
        """Build the assessment from a config dict with 'name', 'id',
        'stages', 'risks', 'methodology', the 'matrix' and the optional
        'mapping'/'style_text' entries."""
        self.name = riskassessment['name']
        self.id = riskassessment['id']
        self.stages = riskassessment['stages']
        # "None" and "Closed" are implicit risks prepended to the configured ones.
        self.risks = ["None", "Closed"] + riskassessment['risks']
        self.methodology = riskassessment['methodology']
        if 'mapping' in riskassessment:
            self.mapping = riskassessment['mapping']
        else:
            self.mapping = {}
        if 'style_text' in riskassessment:
            self.style_text = riskassessment['style_text']
        else:
            self.style_text = {}
        matrix = riskassessment['matrix']
        # First we see if there's a split matrix, and if so combine them into one
        if isinstance(matrix, list):
            # We have a split matrix. Combine them together
            combinedmatrix = matrix[0]
            for next_matrix in matrix[1:]:
                combinedmatrix = combine_matrix(combinedmatrix, next_matrix)
            self.matrix = combinedmatrix
        else:
            self.matrix = matrix
        # Validate the matrix
        validate_matrix(self.matrix, self.stages, self.risks)
        # Generate the table
        self.generate_riskmatrixtable()

    def get_risk(self, headers):
        """ Get the risk from the submitted risk assessment.

        NOTE: ``headers`` is mutated in place by the mapping logic below.
        """
        # Closed findings short-circuit the whole assessment.
        if 'status' in headers and headers['status'] == 'Closed':
            return 'Closed'
        # Process the mappings
        for value, mapping in self.mapping.items():
            # We have to make sure we're not overwriting a value here
            if mapping not in headers and value in headers:
                headers[mapping] = headers[value]
        for stage in self.stages:
            # Make sure that the next stage is actually in the headers
            if stage['id'] not in headers:
                log.warn("{} not found in the headers of the vulnerability file as required by the template. Assigning risk as None.".format(
                    stage['id']))
                # Reset all stages to None if they're not there
                for s in self.stages:
                    if s['id'] not in headers:
                        headers[s['id']] = "None"
                return "None"
            if 'mapping' in stage:
                # Process the mappings
                for value, mapping in stage['mapping'].items():
                    if headers[stage['id']] == value:
                        headers[mapping] = headers[value]
        return get_risk(self.matrix, self.stages, headers)

    def generate_riskmatrixtable(self):
        """
        This displays a two stage risk matrix in a table. Note that it will only generate output if there are specifically 2 stages.
        """
        if len(self.stages) != 2:
            log.info(
                "Not auto generating a risk assessment table as there are not specifically 2 stages.")
            return ''
        sa = self.stages[0]
        sb = self.stages[1]
        # One 'thead' style marker per column for the title row.
        theaders = [['thead'] * (len(sa['ratings']) + 1)]
        ratable = [[[sb['name'], sa['name']]] + sa['ratings']]
        sbratings = sb['ratings']
        # NOTE(review): list.reverse() mutates the stage's ratings list in place.
        sbratings.reverse()
        for sbrating in sbratings:
            rs = [sbrating]
            th = ['thead']
            for sarating in sa['ratings']:
                rs.extend(
                    [self.get_risk({sa['id']: sarating, sb['id']: sbrating})])
                # Style tag per cell, e.g. "thigh" for a "High" risk.
                th.extend(
                    ['t' + self.get_risk({sa['id']: sarating, sb['id']: sbrating}).lower().replace(' ', '')])
            ratable += [rs]
            theaders += [th]
        # NOTE(review): theaders is built but never stored or returned --
        # confirm whether it was meant to be kept alongside riskmatrixtable.
        self.riskmatrixtable = ratable
from uuid import uuid4
import re
import logging
log = logging.getLogger(__name__)
class ContentAssistant:
    """Collects deferred content (counters, tables, formatted strings) during
    the first render pass and substitutes it into the markdown on a second
    pass via parse_register(). Deferred items are represented by UUID
    placeholders embedded in the markdown."""

    def __init__(self):
        self.defaulticounter = None  # UUID of the default inline counter, once registered
        self.icounters = {}  # inline counter name -> UUID
        self.defaultcounter = 0
        self.counters = {}  # counter name -> current value
        self.defaulttable = []
        self.tables = {}  # table name -> list of rows
        self._register = {}  # UUID -> deferred function + args for pass two
        self._string_register = {}  # UUID -> literal replacement string
        self._table_register = {}  # For holding table formatting
        self._function_register = {}  # For holding simple function calls

    def _display_table(self, uuid, markdown, of, tablename="", *args, **kwargs):
        """Replace the placeholder ``uuid`` in ``markdown`` with the named
        table rendered through the output formatter ``of``."""
        if tablename == "":
            table = self.defaulttable
        elif not isinstance(tablename, str):
            log.warn(
                "CA table name you're trying to display to is not a string, skipping: {}".format(tablename))
            return ""
        elif tablename not in self.tables:
            log.warning(
                "Table {} has not had any rows added to it. Skipping the display of this table.".format(tablename))
            return ""
        else:
            table = self.tables[tablename]
        return markdown.replace(uuid, of.table(table, *args, **kwargs))

    def _display_icounter(self, uuid, markdown, of):
        """Convert the inline counter UUIDs in the document to numbers.

        Markers are "x<uuid>" (increment then print), "y<uuid>" (print
        without incrementing) and "z<uuid>" (reset to zero, print nothing).
        """
        searchre = re.compile("(x|y|z){}".format(uuid))
        counter = 0
        # Hard cap to guard against a runaway substitution loop.
        remaining = 10000
        while remaining > 0:
            remaining -= 1
            match = searchre.search(markdown)
            if match is None:  # was `== None`
                return markdown
            if match.group()[0] == 'x':
                counter += 1
            if match.group()[0] == 'z':
                counter = 0
                markdown = searchre.sub("", markdown, 1)
            else:
                markdown = searchre.sub(str(counter), markdown, 1)
        log.warn("Max counter size of 10000 reached.")
        return markdown

    def _register_string(self, string):
        """Register a literal string for the second pass; returns its UUID."""
        newuuid = str(uuid4())
        self._string_register[newuuid] = string
        return newuuid

    def _register_ca(self, function, *args, **kwargs):
        """ Add a function to the register. This can be called later so that we can display content on the second pass
        """
        newuuid = str(uuid4())
        self._register[newuuid] = {
            'function': function, 'args': args, 'kwargs': kwargs}
        return newuuid

    def _register_formattable(self, *args, **kwargs):
        """ Add table-formatting arguments to the register for the second pass.
        """
        newuuid = str(uuid4())
        self._table_register[newuuid] = {'args': args, 'kwargs': kwargs}
        return newuuid

    def parse_register(self, of, markdown):
        """ Go through the register and print out the results of the functions
        """
        log.info("Adding Content Assistant content")
        # CA register
        for uuid, f in self._register.items():
            markdown = f['function'](
                uuid, markdown, of, *f['args'], **f['kwargs'])
        # Now to go through calls to ft(). The below code will replace the markdown table with a table() call.
        t_r_row = r'\|?([^\|\r\n]+\|)+[^\|\r\n]*'
        t_r_header_sep = r'\|?(\:?\s+-+\s*\:?\s*\|)*'
        table_regex = r'[\r\s\n]*((' + t_r_row + \
            r')?[\r\s\n]*(' + t_r_header_sep + \
            r')?([\r\s\n]*' + t_r_row + ')+)'
        # Table register
        for uuid, f in self._table_register.items():
            m = re.search(uuid + table_regex, markdown)
            if not m:
                log.warn(
                    "Table formatting string does not seem to be attached to a markdown table")
                markdown = markdown.replace(uuid, '')
                continue
            table_markdown = m.group(1)
            table_output = of.table(table_markdown, *f['args'], **f['kwargs'])
            start, end = m.span()
            markdown = markdown[:start] + table_output + markdown[end:]
        # String register: replace longer UUIDs first so no key is
        # clobbered by a prefix of another.
        if len(self._string_register) > 0:
            regex = re.compile('|'.join(re.escape(str(key)) for key in sorted(
                self._string_register.keys(), key=lambda item: - len(item))))
            markdown = regex.sub(lambda match: self._string_register[match.group()], markdown)
        return markdown

    def icounter(self, countername="", mode="x"):
        """ This function allows counters throughout the document. Multiple counters can be used by specifying counter names.
        """
        if mode not in ['x', 'y', 'z']:
            log.warn("Unknown mode for icounter {}".format(countername))
            return ""
        if countername == "":
            if self.defaulticounter is None:  # was `== None`
                self.defaulticounter = self._register_ca(
                    self._display_icounter)
            return "{}{}".format(mode, self.defaulticounter)
        elif not isinstance(countername, str):
            log.warn("Counter name not a string: {}".format(countername))
            return 0
        else:
            if countername not in self.icounters:
                self.icounters[countername] = self._register_ca(
                    self._display_icounter)
            return "{}{}".format(mode, self.icounters[countername])

    def get_icounter(self, countername=""):
        """ This function retrieves the value of the counter WITHOUT iterating it.
        """
        return self.icounter(countername, "y")

    def reset_icounter(self, countername=""):
        """Reset an inline counter to 0 (emits a 'z' marker)."""
        return self.icounter(countername, "z")

    def counter(self, countername="", iterate=True):
        """ This function allows counters throughout the document. Multiple counters can be used by specifying counter names.
        """
        if countername == "":
            if iterate:
                self.defaultcounter += 1
            return self.defaultcounter
        elif not isinstance(countername, str):
            log.warn("Counter name not a string: {}".format(countername))
            return 0
        else:
            if countername not in self.counters:
                self.counters[countername] = 0
            if iterate:
                self.counters[countername] += 1
            return self.counters[countername]

    def get_counter(self, countername=""):
        """ This function retrieves the value of the counter WITHOUT iterating it.
        """
        return self.counter(countername, False)

    def reset_counter(self, countername=""):
        """ Reset a counter to 0
        """
        if countername == "":
            self.defaultcounter = 0
        elif not isinstance(countername, str):
            log.warn(
                "Counter name you're trying to reset is not a string, skipping: {}".format(countername))
        else:
            self.counters[countername] = 0
        return ""

    def table_row(self, tablename="", row=None):
        """ Add a row to a content assistant table, which can then be displayed with display_table.

        ``row`` defaults to a fresh empty list per call. (The previous
        mutable default argument ``row=[]`` shared one list object across
        calls and instances, so mutating one "empty" row mutated them all.)
        """
        if row is None:
            row = []
        if tablename == "":
            self.defaulttable.append(row)
        elif not isinstance(tablename, str):
            log.warn(
                "CA table name you're trying to add a row to is not a string, skipping: {}".format(tablename))
        else:
            if tablename not in self.tables:
                self.tables[tablename] = []
            self.tables[tablename].append(row)
        return ""

    def table_rows(self, tablename="", rows=None):
        """ Add multiple rows to a content assistant table.

        ``rows`` defaults to a fresh empty list per call (same mutable
        default argument fix as table_row).
        """
        if rows is None:
            rows = []
        if tablename == "":
            self.defaulttable += rows
        elif not isinstance(tablename, str):
            log.warn(
                "CA table name you're trying to add rows to is not a string, skipping: {}".format(tablename))
        else:
            if tablename not in self.tables:
                self.tables[tablename] = []
            self.tables[tablename] += rows
        return ""

    def display_table(self, *args, **kwargs):
        """Register a deferred table display; returns its placeholder UUID."""
        return self._register_ca(self._display_table, *args, **kwargs)

    def format_table(self, *args, **kwargs):
        """ Format the markdown table that's below. This allows simpler syntax for fancy formatting of markdown tables, all in jinja2 templating.
        It requires two passes, with this function returning a UUID and the second pass grepping the UUID and markdown to call of.table.
        """
        return self._register_formattable(*args, **kwargs)
import cerberus
import logging
log = logging.getLogger(__name__)
def make_list(value):
    """ If it's a list, return value, otherwise return [value]
    """
    # Deliberately an exact type check (matching the original), so list
    # subclasses pass through while tuples/strings/etc. get wrapped.
    if type(value) is not list:
        return [value]
    return value
def filter_rows(table, validation, return_fails=False):
    """Filter table rows based on the supplied validation
    """
    validator = cerberus.Validator()
    validator.allow_unknown = True
    validator.schema = validation
    kept = []
    for row in table:
        passed = validator.validate(row)
        # Keep rows that pass normally; keep the failing rows instead when
        # return_fails is set.
        if bool(passed) != bool(return_fails):
            kept.append(row)
    return kept
def sort_table(table, column, mapper=None, reverse=False):
    """Sort the table rows by the content of the chosen column.

    ``mapper``, when non-empty, translates cell values before comparison
    (values missing from the mapper compare as-is). An empty table is
    returned unchanged.
    """
    if not table:
        return table
    if not mapper:
        return sorted(table, key=lambda row: row[column], reverse=reverse)

    def mapped_key(row):
        cell = row[column]
        return mapper[cell] if cell in mapper else cell

    return sorted(table, key=mapped_key, reverse=reverse)
def table_aggregate_value(table, column, aggtype, map_values=None):
    """Get aggregate value from the supplied table.

    aggtype can be 'min', 'max', 'mean', 'sum', or 'count'.
    map_values is a dict {value:mapping}. It will substitute value for mapping if found in the column.

    Returns None for an unknown aggtype, and for a 'mean' over zero matching
    rows (previously a ZeroDivisionError).
    """
    val = None
    total = 0
    count = 0
    for row in table:
        # Dict rows are addressed by key; list rows by a bounds-checked index.
        if type(row) is dict:
            if column not in row:
                continue
            cv = row[column]
        else:
            if not (0 <= column < len(row)):
                continue
            cv = row[column]
        if map_values and cv in map_values:
            cv = map_values[cv]
        if aggtype == 'min':
            if val is None or cv < val:
                val = cv
        elif aggtype == 'max':
            if val is None or cv > val:
                val = cv
        elif aggtype == 'count':
            count += 1
        elif aggtype == 'sum':
            total += cv
        elif aggtype == 'mean':
            total += cv
            count += 1
        else:
            log.warning(
                "Invalid aggtype encountered for table_aggregate_value: {}".format(aggtype))
            return None
    if aggtype in ('min', 'max'):
        return val
    if aggtype == 'count':
        return count
    if aggtype == 'sum':
        return total
    if aggtype == 'mean':
        # Guard the empty/no-matching-rows case.
        return total / count if count else None
    return None
def _get_aggregate(cgbv, prevrows, groupby, dict_list, agg, map_values):
    """Build one aggregated output row for a finished group.

    NOTE: ``cgbv`` (the current group's groupby values -- a dict for dict
    rows, a list for list rows) is aliased as the output row and mutated in
    place, then returned.
    """
    nr = cgbv
    # Do the aggregation: one value per requested (column, aggtype) pair.
    for column, aggtype in agg.items():
        if dict_list:
            # Output key is the column name suffixed with the aggregate
            # type, e.g. "price_sum".
            nr["{}_{}".format(column, aggtype)
               ] = table_aggregate_value(prevrows, column, aggtype, map_values)
        else:
            nr.append(table_aggregate_value(
                prevrows, column, aggtype, map_values))
    return nr
def table_aggregate(table, groupby, agg, map_values=None):
    """Get aggregate information from a table with groupings specified in groupby.

    groupby should be either a single column or a list of columns. If the
    table is a list of dicts the columns should be column heading names. If
    it is a list of lists it should be the index number of the table.

    Returns one aggregated row per distinct groupby combination (see
    _get_aggregate for the output row shape).
    """
    # Go backwards sorting with groupby so that we have everything sorted.
    if type(groupby) is str or type(groupby) is int:
        # If it's a single column make it a list of one.
        groupby = [groupby]
    st = table
    for gb in reversed(groupby):
        st = sort_table(st, gb)
    if len(st) == 0:
        return []
    dict_list = type(st[0]) is dict

    def _group_values(row):
        # Snapshot of the row's groupby values, in the shape _get_aggregate
        # expects for the output row (dict for dict rows, list otherwise).
        if dict_list:
            return {gb: row[gb] for gb in groupby}
        return [row[gb] for gb in groupby]

    def _same_group(row, current):
        if dict_list:
            return all(row[gb] == current[gb] for gb in groupby)
        # Compare positionally: `current` holds the values in groupby order.
        # (The previous code indexed `current[gb]` with the *column index*,
        # which was wrong -- or an IndexError -- unless groupby happened to
        # be [0, 1, ...].)
        return all(row[gb] == val for gb, val in zip(groupby, current))

    cgbv = _group_values(st[0])
    prevrows = []
    new_table = []
    for row in st:
        if not _same_group(row, cgbv):
            # We've hit the next block to aggregate; flush the previous one.
            if len(prevrows) > 0:
                new_table.append(_get_aggregate(
                    cgbv, prevrows, groupby, dict_list, agg, map_values))
            cgbv = _group_values(row)
            prevrows = []
        prevrows.append(row)
    # Flush the final group.
    new_table.append(_get_aggregate(
        cgbv, prevrows, groupby, dict_list, agg, map_values))
    return new_table
def table_to_dict(table, keyindex=None, valindex=None, keys=None, default=None):
    """ Convert tabular data into a dict of key:value pairs taking the keys and values from the keyindex and valindex columns.

    This is used for feeding tabular data into charts.

    keys provides a filter for keys. If default is set, then keys that don't
    appear in the table will appear in the returned dict with the default value.

    NOTE(review): automatic keyindex/valindex detection reads
    ``table[0].keys()``, so it assumes dict rows -- confirm that list rows
    always pass explicit indices.
    """
    d = {}
    # Pre-fill the requested keys so the ones missing from the table keep
    # the default. (The previous code duplicated this fill for the empty-
    # table case; the single fill above covers it.)
    if keys is not None and default is not None:
        for k in keys:
            d[k] = default
    if len(table) == 0:
        # Nothing to read; d already holds any defaults (or is empty).
        return d
    if keyindex is None:
        if len(table[0]) < 1:
            log.warning(
                "keyindex for table_to_dict is not set and the first row has less than 1 value so it cannot be automatically set")
            return {}
        keyindex = list(table[0].keys())[0]
    if valindex is None:
        if len(table[0]) < 2:
            log.warning(
                "valindex for table_to_dict is not set and the first row has less than 2 values so it cannot be automatically set")
            return {}
        valindex = list(table[0].keys())[1]
    for row in table:
        if keyindex not in row or valindex not in row:
            continue
        # A non-empty keys list acts as a filter (empty/None means no filter,
        # matching the original truthiness check).
        if keys:
            if row[keyindex] not in keys:
                continue
        d[row[keyindex]] = row[valindex]
    return d
def tables_outer_join(table1, table2, column1, column2=None):
    """ Match the data in column1 for table1 to data in column2 for table2 to join two tables.

    Tables can be either lists of lists or lists of dicts. If it is a list of
    lists then the column should be an int, otherwise it should refer to the
    dictionary key of the column. If column2 is not specified, column1 will
    be used for table2 as well.

    Note: despite the name, only matching row pairs are emitted (i.e. this
    behaves like an inner join -- unmatched rows from either side are dropped).
    """
    if len(table1) == 0:
        return []
    right_column = column1 if column2 is None else column2
    dict_list = type(table1[0]) is dict
    joined = []
    for left in table1:
        for right in table2:
            if left[column1] != right[right_column]:
                continue
            joined.append(left | right if dict_list else left + right)
    return joined
def table_separate_column(table, column, separator, columnmapper=None, separatormapper=None, separator_list=None, dict_list=False):
    """ Separate out the content of one column into separate columns for each unique value of the content in separator.

    This is useful for displaying compliance stuff. For instance, you have a list of Essential 8 controls in a spreadsheet
    and you want a list of controls for each maturity level. You can use this to show them in three columns.
    If the content of separator is integers the output will be in order of the integer.
    columnmapper is used to change the content of the column in the output. For instance changing "Passed"/"Failed" to "P"/"F".
    separatormapper can be used in the same way for the separator. This can be used to map two pieces of content to the same
    thing or to map content to integers to have them ordered in the right way.
    separator_list can manually force the columns by using this list.
    """
    if separator_list is not None:
        separators = separator_list
    else:
        separators = []
        # First we need to go through and get all the separators
        # (only discovered when no explicit separator_list forces them).
        for row in table:
            if separatormapper is not None and row[separator] in separatormapper:
                if separatormapper[row[separator]] not in separators:
                    separators.append(separatormapper[row[separator]])
            elif row[separator] not in separators:
                separators.append(row[separator])
    # We need to get the integers out and put them at the start in order
    orderedsep = []
    otherseps = []
    for sep in separators:
        if type(sep) is int:
            orderedsep.append(sep)
        else:
            otherseps.append(sep)
    orderedsep = sorted(orderedsep)
    separators = orderedsep + otherseps
    # We now have a list of separators with ints ordered at the front
    # To get the columns we have a dict of lists.
    columns = {}
    for sep in separators:
        columns[sep] = []
    # Let's put the column contents into the above data structure
    for row in table:
        if separatormapper is not None and row[separator] in separatormapper:
            sep = separatormapper[row[separator]]
        else:
            sep = row[separator]
        # Rows whose separator isn't in the (possibly forced) list are dropped.
        if sep not in separators:
            continue
        if columnmapper is not None and row[column] in columnmapper:
            col = columnmapper[row[column]]
        else:
            col = row[column]
        columns[sep].append(col)
    # We now have the columns in order. Create a new table, one output row at
    # a time, popping one cell from each separator's column; shorter columns
    # are padded with "".
    new_table = []
    finished = False
    while not finished:
        finished = True
        if dict_list:
            new_row = {}
        else:
            new_row = []
        for sep in columns:
            if len(columns[sep]) == 0:
                nc = ""
            else:
                nc = columns[sep].pop(0)
                # At least one cell consumed, so another row may be needed.
                finished = False
            if dict_list:
                new_row[sep] = nc
            else:
                new_row.append(nc)
        if not finished:
            new_table.append(new_row)
    return new_table
def table_separate_column_groups(table, groupby, column, separator, columnmapper=None, separatormapper=None, separator_list=None, dict_list=False):
    """Run table_separate_column once per group of rows.

    Rows are bucketed by the value of row[groupby] (rows without the key go
    into a single None bucket), then each bucket is passed through
    table_separate_column and the resulting tables are concatenated. This way
    each group starts at the same level.
    """
    # Bucket the rows by their groupby value, preserving first-seen order.
    grouped = {}
    for row in table:
        key = row[groupby] if groupby in row else None
        # NOTE(review): this compares against the *string* "None", not the
        # None singleton set above -- looks intentional (YAML string), confirm.
        if key == "None":
            key = 'default_table_separate_column_groups'
        grouped.setdefault(key, []).append(row)

    # Separate each group independently and stitch the results together.
    result = []
    for group_rows in grouped.values():
        result += table_separate_column(
            group_rows, column, separator, columnmapper, separatormapper, separator_list, dict_list)
    return result
def table_add_row(table, row=None, index=None):
    """Add a row to the table.

    If row is None an empty row matching the table's row type (dict for a
    table of dicts, list otherwise) is added. If index is None or past the
    end, the row is appended; otherwise it is inserted at index.

    Note: when the table is empty and no row is supplied, a brand-new [[]]
    table is returned and the original table object is not mutated.
    """
    if row is None:
        if len(table) == 0:
            return [[]]
        # Match the row type of the existing table.
        row = {} if isinstance(table[0], dict) else []
    if index is None or index > len(table):
        # The original compared `None > len(table)` when index was left as
        # the default, which raises TypeError on Python 3; treat a missing
        # index as "append to the end".
        table.append(row)
    else:
        table.insert(index, row)
    return table
def separate_sequences(sequence):
    """Separates out a dict into repeated keys. Useful for coloured charts.

    For example:
        {'a': '1', 'b': '2'}
    becomes
        {'a': {'a': '1'}, 'b': {'b': '2'}}
    """
    return {key: {key: value} for key, value in sequence.items()}
import re
import logging
from report_ranger.utils.mdread import process_template
from report_ranger.helpers import filter_rows as fr
log = logging.getLogger(__name__)
def style_text_match(style_text, text, regexlist=None):
    ''' See if the text matches one of the style_text patterns.

    style_text maps regex strings to styles. If regexlist (a list of
    (compiled_regex, style) tuples) is supplied it is used instead of
    compiling style_text, which lets callers precompile once for many cells.

    Returns the style of the first pattern that matches (re.match semantics,
    i.e. anchored at the start), or None for no match or non-string input.
    '''
    if not isinstance(text, str):
        return None

    if not regexlist:
        regexlist = [(re.compile(ts), cf)
                     for (ts, cf) in style_text.items()]

    # Return the style of the first matching regex.
    for pattern, style in regexlist:
        if pattern.match(text):
            return style
    return None
class Table:
    """ This class holds a table to be used in output formatters. It will hold things like table contents, headings, alignment, widths

    The table has the following state:

    table: A list of lists containing the content of each cell, always guaranteed to be square. Table will pad if necessary.
    cellstyles: A list of lists containing a string with the style of each square, always guaranteed be the same size of table.
    colalign: A list of strings, either 'l', 'c', 'r', 'j' or an empty string. Guaranteed to be the width of the table.
    cellalign: A list of lists of strings, either 'l', 'c', 'r', 'j' or an empty string, always guaranteed be the same size of table.
    colspan: A list of lists containing numbers where each number >= 1 or -1 if the cell is overwritten, always guaranteed be the same size of table. This is for when you would like a cell to overwrite the next column.
    rowspan: A list of lists containing numbers where each number >= 1 or -1 if the cell is overwritten, always guaranteed be the same size of table. This is for when you would like a cell to overwrite the next row.
    colwidths: A single list of numbers. Guaranteed to be the width of the table. This corresponds to the width of each cell.

    width/height: integer dimensions kept in sync with `table` by __init__.
    """
def _pad_matrix(self, m, width=0, height=0, pad_value=''):
""" Pad the matrix m (a list of lists) to the set width and height with the pad_value.
If the width and height is not set, make sure that the matrix m is rectangular, with each row being the same length by padding them with pad_value.
"""
if m == None:
m = []
for i in range(height):
m += [[pad_value]*width]
return m
if height == 0:
height = len(m)
if width == 0:
for i in m:
mw = len(i)
if mw > width:
width = mw
for row in m:
if len(row) < width:
row += [pad_value]*(width-len(row))
if len(row) > width:
row = row[:width]
if len(m) < height:
for i in range(height-len(m)):
m += [[pad_value]*width]
elif len(m) > height:
m = m[:height]
return m
def _pad_list(self, l, width, pad_value=''):
""" Pad the list l to the required width with pad_value """
if l == None:
l = []
if len(l) < width:
l += [pad_value]*(width-len(l))
return l
def _clear_table(self):
self.table = [[]]
self.width = 0
self.height = 0
self.cellstyles = []
self.colalign = []
self.cellalign = []
self.colspan = []
self.rowspan = []
self.colwidths = []
def _markdown_to_matrix(self, markdown):
""" Convert the markdown string into a table.
Returns """
re_tablecells = re.compile(r'(\||^)([^\|]*)(?=(\||$))')
re_isheaderline = re.compile(r'^\|?(\s*:?-+:?\s*\|?)*\s*$')
re_hastablecells = re.compile(r'\|')
lines = markdown.splitlines()
matrixtable = []
colalign = []
has_heading = False
for line in lines:
if not re_hastablecells.match(line):
continue
cells = re_tablecells.findall(line)
# Is it the heading line?
if re_isheaderline.match(line):
has_heading = True
for cell in cells:
ic = cell[1].strip()
if len(ic) == 0:
continue
if ic[0] == ':':
if ic[-1] == ':':
colalign.append('c')
else:
colalign.append('l')
elif ic[-1] == ':':
colalign.append('r')
else:
colalign.append('l')
else:
row = []
# Check to see if the first line has a |, if so skip the last one
if cells[0][0] == '|':
endpipe = True
for cell in cells:
row.append(cell[1].strip())
if endpipe: # Kill last empty cell
if row[-1] == '':
row = row[:-1]
matrixtable.append(row)
return matrixtable, colalign, has_heading
def _process_list_of_dicts(self, table, append_column, env):
"""Convert a list of dicts to a matrix with a header line
append_column will be appended to the end of each row.
"""
tableheadings = None
for r in range(len(table)):
if not isinstance(table[r], dict):
log.warn(
f"Table defined as dicts has something which is not a dict, replacing with empty row: {table[r]}")
table[r] = dict()
# Process append_column
if isinstance(append_column, dict):
append_column_done = True
newcols = {}
for key in append_column:
newcols[key] = process_template(
table[r], append_column[key], env=env, name="append_column")
table[r].update(newcols)
tableheadings = []
# Get the headings from the dicts
for row in table:
for h in row.keys():
if h not in tableheadings:
tableheadings.append(h)
newtable = []
# Headings will always be the first row of the new table
newtable.append(tableheadings)
for row in table:
newrow = []
for h in tableheadings:
if h in row.keys():
newrow.append(row[h])
else:
# This heading isn't in this row, add a blank
newrow.append("")
newtable.append(newrow)
return newtable, tableheadings
def __init__(self, table, env=None, filter_rows=None, header=[], headings=None, cellstyles=None, colstyles=None, colalign=None, cellalign=None, colspan=None, rowspan=None, colwidths=None, append_column=None, colpicker=None, rowpicker=None, style_text={}):
# Is it just a markdown table?
if isinstance(table, str):
table, newcolalign, has_heading = self._markdown_to_matrix(table)
# Append colalign if it's not complete
if colalign == None:
colalign = []
colalign += newcolalign[len(colalign):]
# Set a heading row if it has one
if has_heading == True and headings == None:
headings = 'top'
# The table now has to be a list. If it is not a list, warn and skip
if not isinstance(table, list):
log.warn(f"Table not a list. Trying to make a table out of {table}.")
self._clear_table()
return
# If there's no rows there's nothing to display
if len(table) == 0:
log.warn("Table has no rows, skipping table.")
self._clear_table()
return
# We perform validation now, after converting a blank markdown table and before converting list of dicts to list of lists
if filter_rows:
table = fr(table, filter_rows)
if len(table) == 0:
log.warning("Table has no rows after filtering")
self._clear_table()
return
append_column_done = False
# We are representing the table in a dict form and we need to translate that into a table. For instance:
# affected_hosts:
# - hostname: host.com
# port: 80
# - hostname: host2.com
# port: 80
tableheadings = None
if isinstance(table[0], dict):
table, tableheadings = self._process_list_of_dicts(table, append_column, env)
append_column_done = True
# We should now have a list of lists. Let's just make sure!
newtable = []
for row in table:
if not isinstance(row, list):
log.warn(f"Table row not a list. Skipping table row: {row}")
else:
newtable.append(row)
table = newtable
# If we have a list of lists, then append_column won't be done yet. Let's do that.
if append_column and not append_column_done:
if isinstance(append_column, list):
for row in range(len(table)):
rowdict = {}
# We need to index for each column
for col in range(len(row)):
rowdict['col' + str(col)] = table[row][col]
for key in range(len(append_column)):
table[row].append(process_template(
rowdict, append_column[key], env=env, name="append_column"))
elif isinstance(append_column, dict):
log.warning(f"append_column a dict for a table of lists, skipping: {append_column}")
else:
log.warning(f"append_column not a list, skipping: {append_column}")
# Pad everything
self.table = self._pad_matrix(table, pad_value='')
width, height = len(table[0]), len(table)
self.width, self.height = width, height
# Handle rowpicker and colpicker
# Rowpicker first, since filtering rows is easier and quicker than filtering columns
if rowpicker != None and rowpicker != []:
if not isinstance(rowpicker, list):
log.warn("Rowpicker variable not a list, ignoring.")
else:
newtable = []
for row in rowpicker:
if not isinstance(row, int):
log.warn(
"Entry in rowpicker was not int. Found: {}".format(row))
elif row < 0 or row >= len(self.table):
log.warn("Entry in rowpicker outside the range of the table length {}. Found: {}".format(
len(self.table), row))
else:
newtable.append(self.table[row])
self.table = newtable
self.height = len(self.table)
if colpicker != None and colpicker != []:
if not isinstance(colpicker, list):
log.warn("Colpicker variable not a list, ignoring.")
else:
newtable = []
for i in range(len(self.table)):
newtable.append([])
for col in colpicker:
if isinstance(col, str):
# Check to see if we're referring to a column heading
if col in self.table[0]:
col = self.table[0].index(col)
else:
log.warn(
"Entry in colpicker is a string but not a column heading. Found: {}".format(col))
continue
elif not isinstance(col, int):
log.warn(
"Entry in colpicker was not int. Found: {}".format(col))
continue
if col < 0 or col >= len(self.table[0]):
log.warn("Entry in colpicker outside the range of the table length {}. Found: {}".format(
len(self.table[0]), col))
continue
for trow in range(len(self.table)):
newtable[trow].append(self.table[trow][col])
self.table = newtable
if len(self.table) > 0: # Do we actually still have a table?
self.width = len(self.table[0])
else:
self.width = 0
# If there's no table anymore due to rowpicker or colpicker then get rid of the rest, just cancel it out
if self.width == 0:
log.warn("rowpicker and colpicker resulted in an empty table.")
self._clear_table()
return
# Put in the header if it's been supplied
if header != []:
if isinstance(header, list):
# If there's tableheadings from a dict already, remove them
if tableheadings:
self.table = [header] + self.table[1:]
else:
self.table = [header] + self.table
# Do we need to repad?
if width != len(header):
self.table = self._pad_matrix(self.table, pad_value='')
self.width = len(self.table[0])
self.height = len(self.table)
else:
log.warn(
"Header of the table is not a list. Trying to add {}.".format(header))
# colalign must be one of 'l' 'c' 'r' or 'j' or an empty string
colalign = self._pad_list(colalign, self.width, '')
for i in range(self.width):
if colalign[i] not in 'lcrj':
colalign[i] = ''
cellalign = self._pad_matrix(cellalign, self.width, self.height, '')
for i in range(self.height):
for j in range(self.width):
if cellalign[i][j] not in 'lcrj':
cellalign[i][j] = ''
self.colalign = colalign
self.cellalign = cellalign
# Handle colspan and rowspan
colspan = self._pad_matrix(
colspan, self.width, self.height, 1) # Pad to size of table
rowspan = self._pad_matrix(
rowspan, self.width, self.height, 1) # Pad to size of table
# Handle colspan
for i in range(self.height):
for j in range(self.width):
if not int(colspan[i][j]): # Validate it's an int
colspan[i][j] = 1
if colspan[i][j] >= 1: # We have a colspan!
# Does it go over the side of the table?
if j + colspan[i][j] > self.width:
colspan[i][j] = self.width - j # Snip it off
# We blank out the rest of the span
for span in range(1, colspan[i][j]):
colspan[i][j+span] = -1
# Handle rowspan. This is equivalent of the above, just swapping row and column
for i in range(self.height):
for j in range(self.width):
if not int(rowspan[i][j]):
rowspan[i][j] = 1
if rowspan[i][j] >= 1:
if i + rowspan[i][j] > self.height:
rowspan[i][j] = self.height - i
for span in range(1, rowspan[i][j]):
rowspan[i+span][j] = -1
self.colspan = colspan
self.rowspan = rowspan
self.colwidths = self._pad_list(colwidths, self.width, 0)
cellstyles = self._pad_matrix(cellstyles, self.width, self.height, '')
# Allow the 'left', 'top', and 'left-top' headings settings
if not isinstance(headings, list) and not headings == None:
headingslist = headings.split('-')
if 'left' in headingslist:
for row in cellstyles:
if row[0] == '':
row[0] = 'h'
if 'right' in headingslist:
for row in cellstyles:
if row[-1] == '':
row[-1] = 'h'
if 'top' in headingslist:
for i in range(len(cellstyles[0])):
if cellstyles[0][i] == '':
cellstyles[0][i] = 'h'
if 'bottom' in headingslist:
for i in range(len(cellstyles[-1])):
if cellstyles[-1][i] == '':
cellstyles[-1][i] = 'h'
else:
cellstyles = self._pad_matrix(
cellstyles, self.width, self.height, '')
# Do the col styles if set. Go through and set each entire column in cellstyles as per what is set by the user in colstyles.
# Note that this won't overwrite headers
if colstyles:
for row in range(len(cellstyles)):
for col in range(len(cellstyles[row])):
# Don't overwrite existing cellstyles, only fill in if it's blank
if (cellstyles[row][col] == '' or cellstyles[row][col] == None) and col < len(colstyles) and colstyles[col] and colstyles[col] != '':
cellstyles[row][col] = colstyles[col]
# If there's headings text sync then add them
if style_text:
# Get all the regexes
stregexlist = [(re.compile(ts), cf)
for (ts, cf) in style_text.items()]
for r in range(self.height):
for c in range(self.width):
# First check to see if there's a style in cellstyles
if type(cellstyles[r][c]) is dict:
# If it's a dict, treat it as a style text dict
st_return = style_text_match(
cellstyles[r][c], self.table[r][c])
if st_return:
cellstyles[r][c] = st_return
else:
cellstyles[r][c] = ''
elif type(cellstyles[r][c]) is str and cellstyles[r][c] != '':
# Leave it as is if there's something filled in
continue
elif style_text:
# Now check master style_text
cellstyle = style_text_match(
style_text, self.table[r][c], stregexlist)
if cellstyle:
cellstyles[r][c] = cellstyle
else:
cellstyles[r][c] = ''
self.cellstyles = cellstyles | /report_ranger-2.2-py3-none-any.whl/report_ranger/table.py | 0.555315 | 0.468365 | table.py | pypi |
from copy import deepcopy
import logging
log = logging.getLogger(__name__)
class Environment:
    """ Store environment variables

    This class operates as a stack. You can push a bunch of variables onto the stack when you go down a layer
    (say when a file is included). You can then pop off the stack when we finish with that file.
    """

    def __init__(self, other=None):
        self._static = {}
        self._variable = [{}]
        self._private = [{}]
        if other is not None:
            # We can duplicate another environment. This involves deep copying
            # so we don't accidentally modify the other environment.
            # NOTE(review): _private is deliberately left fresh rather than
            # copied -- private state appears to be per-environment; confirm.
            self._static = deepcopy(other._static)
            self._variable = deepcopy(other._variable)

    def set_private(self, key, value):
        # Set the private variable in the most recent stack layer.
        # _private allows a set of hidden variables that can be kept track of
        # through files. These variables cannot be accessed within the report.
        self._private[-1][key] = value

    def get_private(self, key):
        # Go backwards through self._private looking for the variable.
        # Returns None if it is not found in any layer.
        for ve in reversed(self._private):
            if key in ve:
                return ve[key]
        return None

    def set_static(self, key, value):
        '''Set a static variable in the environment

        Static variables cannot be overwritten, they always win in a clash. This means they survive pushes and pops.
        '''
        self._static[key] = value

    def set_variable(self, key, value):
        '''Set a variable in the environment

        This variable will be set in the current layer of the stack. For instance, if pop is called the variable is lost.
        '''
        # When we set a variable we only set it to the last piece of the stack
        self._variable[-1][key] = value

    def set_variables(self, env=None):
        '''Set multiple variables in the environment

        These variables should be passed as a dict.
        '''
        # env=None default instead of a (shared) mutable dict default.
        if env:
            self._variable[-1].update(env)

    def get(self, key):
        '''Get the variable with the key "key".'''
        # Static wins in a variable clash, so see if it's there first
        if key in self._static:
            return self._static[key]

        # Go backwards through self._variable looking to see if the variable is there
        for ve in reversed(self._variable):
            if key in ve:
                return ve[key]

        # We haven't found this variable
        return None

    def get_env(self):
        '''Get the current variables from the stack

        This function returns a dict full of key value pairs representing the current state of the environment.'''
        # We get the environment by overlaying each layer of the stack and
        # then returning what is at the end.
        env = {}
        for varenv in self._variable:
            env.update(varenv)

        # Static always wins, so finally overlay that
        env.update(self._static)
        return env

    def push(self, newenv=None):
        '''Add a layer to the stack

        This allows an isolated environment, where the variables you set can be removed once it's done.
        For instance, if you include a file, the headers of the included file will not affect the outer file.
        '''
        # A fresh dict per call: the original mutable default argument meant
        # every push() without arguments (across ALL Environment instances)
        # shared -- and leaked variables through -- one and the same dict.
        self._variable.append({} if newenv is None else newenv)
        self._private.append({})

    def pop(self):
        '''Take a layer off the stack

        This will remove all variables that have been added since the last time push() was called. Overwritten variables
        will be replaced with what was there before.
        '''
        self._variable.pop()
        self._private.pop()
import mistune
import re
import logging
log = logging.getLogger(__name__)
class TypstRenderer(mistune.HTMLRenderer):
    """Mistune renderer that emits Typst markup instead of HTML."""
    NAME = 'typst'
    IS_TREE = False
    ordered = False  # set by list(); read by list_item() to pick the bullet
    escape_text = False

    # Characters with special meaning in Typst and their escaped forms.
    _CONV = {
        '&': r'\&',
        '%': r'\%',
        '$': r'\$',
        '#': r'\#',
        '_': r'\_',
        '{': r'\{',
        '}': r'\}',
        '[': r'\[',
        ']': r'\]',
        '@': r'\@',
        '*': r'\*',
        '\\': r'\\'
    }
    # Compiled once at class creation; the original rebuilt this regex on
    # every escape() call. Longest keys first (all length 1 here, but kept
    # for safety if the table grows).
    _CONV_RE = re.compile('|'.join(re.escape(str(key)) for key in sorted(
        _CONV.keys(), key=lambda item: - len(item))))

    def escape(self, text):
        ''' Escape the given text for display in Typst output

        :param text: a plain text message
        :return: the message escaped to appear correctly in Typst
        '''
        return self._CONV_RE.sub(lambda match: self._CONV[match.group()], text)

    def __init__(self, escape=True):
        super(TypstRenderer, self).__init__()
        self.escape_text = escape

    def text(self, text):
        # `== True` comparison replaced by a plain truthiness test.
        return self.escape(text) if self.escape_text else text

    def link(self, link, text=None, title=None):
        if text is None or text == '':
            return f'#link("{link}")'
        return f'#link("{link}")[{text}]'

    def image(self, src, alt="", title=None):
        if alt != "":
            return f'#figure(image("{src}"), caption: [{alt}])'
        else:
            return f'#image("{src}")'

    def emphasis(self, text):
        return f'_{text}_'

    def strong(self, text):
        return f'*{text}*'

    def codespan(self, text):
        return self.block_code(text)

    def linebreak(self):
        return '\n\n'

    def inline_html(self, html):
        return html

    def paragraph(self, text):
        return text + '\n\n'

    def heading(self, text, level):
        return '=' * level + f' {text}\n\n'

    def newline(self):
        return '\n'

    def thematic_break(self):
        return ''

    def block_text(self, text):
        return text

    def block_code(self, code, info=None):
        return f'```\n{code}\n```'

    def block_quote(self, text):
        return text

    def block_html(self, html):
        return html

    def block_error(self, html):
        return html

    def list(self, text, ordered, level, start=None):
        # Remember ordering for the list_item() calls that follow.
        self.ordered = ordered
        return f'{text}\n'

    def list_item(self, text, level):
        if self.ordered:
            return level*'+' + f' {text}\n'
        return level*'-' + f' {text}\n'
import mistune
import re
import logging
log = logging.getLogger(__name__)
class LatexTableRenderer(mistune.HTMLRenderer):
    """Mistune renderer that emits LaTeX (for table cells) instead of HTML."""
    NAME = 'latex'
    IS_TREE = False

    # LaTeX-special characters and their escaped forms.
    _CONV = {
        '&': r'\&',
        '%': r'\%',
        '$': r'\$',
        '#': r'\#',
        '_': r'\_',
        '{': r'\{',
        '}': r'\}',
        '~': r'{\textasciitilde}',
        # Was r'\^': a bare \^ is the circumflex *accent* command and would
        # modify the following character; use the text symbol instead,
        # consistent with the \textasciitilde/\textbackslash entries.
        '^': r'{\textasciicircum}',
        '\\': r'{\textbackslash}',
        '<': r'{\textless}',
        '>': r'{\textgreater}',
    }
    # Compiled once at class creation (the original rebuilt the regex on
    # every call), longest keys first.
    _CONV_RE = re.compile('|'.join(re.escape(str(key)) for key in sorted(
        _CONV.keys(), key=lambda item: - len(item))))

    # Escapes used inside \lstinline codespans (different rules apply there).
    _CODESPAN_CONV = {
        '&': r'\&',
        '%': r'\%',
        '#': r'\#',
        '_': r'\_',
        '{': r'\{',
        '}': r'\}',
        '~': r'\~',
        '\\': r'\\\\'
    }
    _CODESPAN_RE = re.compile('|'.join(re.escape(str(key)) for key in sorted(
        _CODESPAN_CONV.keys(), key=lambda item: - len(item))))

    def _escape_latex(self, text):
        ''' Escape the given text for display in Latex output

        :param text: a plain text message
        :return: the message escaped to appear correctly in LaTeX
        '''
        return self._CONV_RE.sub(lambda match: self._CONV[match.group()], text)

    def _escape_latex_codespan(self, text):
        ''' Escape the given text for codespans in latex. This has different rules to elsewhere.

        Returns the escaped text wrapped in a separator character that does
        not occur in the text, or None when no usable separator exists.
        '''
        sepchar = None
        for char in "!\"'()*,-./:;_":
            if char not in text:
                sepchar = char
                break

        if not sepchar:
            return None

        return sepchar + self._CODESPAN_RE.sub(
            lambda match: self._CODESPAN_CONV[match.group()], text) + sepchar

    def __init__(self, escape=True):
        super(LatexTableRenderer, self).__init__()
        self._escape = escape

    def text(self, text):
        return self._escape_latex(text)

    def link(self, link, text=None, title=None):
        if text is None or text == '':
            return '\\url{{{}}}'.format(self._escape_latex(link))
        s = '\\href{' + \
            self._escape_latex(link) + '}{' + self._escape_latex(text) + '}'
        return s

    def image(self, src, alt="", title=None):
        # These strings are not run through .format(), so the doubled braces
        # of the original produced literal "{{figure}}" in the output, and
        # the includegraphics format string had a stray extra '}'.
        s = '\\begin{figure}'
        s += '\\centering'
        s += '\\includegraphics{{{}}}'.format(self._escape_latex(src))
        s += '\\caption{{{}}}'.format(self._escape_latex(alt))
        s += '\\end{figure}'
        return s

    def emphasis(self, text):
        return '\\emph{' + text + '}'

    def strong(self, text):
        return '\\textbf{' + text + '}'

    def codespan(self, text):
        # Check for a usable separator BEFORE concatenating: the original
        # concatenated str + None (TypeError) whenever no separator existed.
        escaped = self._escape_latex_codespan(text)
        if escaped is not None:
            return '\\passthrough{\\lstinline' + escaped + '}'
        return self.block_code(text)

    def linebreak(self):
        return ' \\\\ '

    def inline_html(self, html):
        return self._escape_latex(html)

    def paragraph(self, text):
        return text + ' \\\\ '

    def heading(self, text, level):
        # TODO DOES NOT PROPERLY DO HEADINGS
        return text

    def newline(self):
        return ''

    def thematic_break(self):
        # TODO NO THEMATIC BREAK
        return ''

    def block_text(self, text):
        return text

    def block_code(self, code, info=None):
        code = code.replace('\n', '^^J\n')
        code = code.replace('{', '\\{')
        code = code.replace('}', '\\}')
        return f"""\\begin{{lstlisting}}^^J
{code}^^J\\end{{lstlisting}}"""

    def block_quote(self, text):
        return text

    def block_html(self, html):
        return html

    def block_error(self, html):
        return html

    def list(self, text, ordered, level, start=None):
        if ordered:
            return "\n\\begin{{enumerate}}\n{}\\end{{enumerate}}".format(text)
        else:
            return "\n\\begin{{varwidth}}[t]{{\\linewidth}}\n\\begin{{itemize}}[topsep = 0pt, parsep = 0pt]\n{}\\strut\\end{{itemize}}\\end{{varwidth}}\n".format(text)

    def list_item(self, text, level):
        return " \\item {}\n".format(text)
from report_ranger.output_formatter.outputformatter import OutputFormatter
import re
import mistune
from report_ranger.markdown_renderer.csvrenderer import CSVRenderer
from report_ranger.table import Table
import logging
import csv
from tabulate import tabulate
log = logging.getLogger(__name__)
# Default column mapping for the CSV export: CSV column title -> source
# attribute. 'markdown' is the rendered vulnerability body; 'section' the
# section name; the rest are looked up in the vulnerability headers.
csv_defaults = {
    'columns': {
        'Section': 'section',
        'Name': 'name',
        'Risk': 'risk',
        'Details': 'markdown'
    }
}

# Mapping of legacy header names to their canonical form (currently none).
header_aliases = {
}
class CSVFormatter(OutputFormatter):
    """Output formatter that writes the report's vulnerabilities to a CSV
    file, one row per vulnerability, with columns configured through the
    'csv' template header."""

    def __init__(self, templateheaders=dict(), timer=None, watcher=None):
        OutputFormatter.__init__(self, templateheaders, timer, watcher=watcher)
        self.figformat = "png"

        # Merge template-supplied 'csv' options over the defaults.
        csvoptions = {}
        csvoptions.update(csv_defaults)

        if 'csv' in templateheaders:
            csvoptions.update(templateheaders['csv'])

        self.columns = csvoptions['columns']

    def escape(self, text):
        ''' Escape the given text based on the format we're working with

        :param text: a plain text message
        :return: the message escaped to appear correctly in CSV
        '''
        # NOTE(review): this maps a single quote to backslash-quote pairs,
        # which is not standard CSV quoting (csv.writer already quotes);
        # confirm this is the intended escaping.
        conv = {
            '\'': r'\'\''
        }

        regex = re.compile('|'.join(re.escape(str(key)) for key in sorted(
            conv.keys(), key=lambda item: - len(item))))
        return regex.sub(lambda match: conv[match.group()], text)

    def table(self, table, options={}, **tableargs):
        ''' This function formats the table in either latex or markdown, depending on the output.'''
        t = Table(table, env=self.env, **tableargs)

        # We're going to do a markdown table with tabulate
        colalign = t.colalign

        # We need to convert the 'l' and 'r' in colalign if it exists
        for col in range(len(colalign)):
            if colalign[col] == 'l':
                colalign[col] = 'left'
            elif colalign[col] == 'c':
                colalign[col] = 'center'
            elif colalign[col] == 'r':
                colalign[col] = 'right'
            else:
                colalign[col] = ''

        # NOTE(review): `table` here is the raw argument (a list or markdown
        # string) and has no .header attribute, so this branch raises
        # AttributeError -- it likely should inspect tableargs/t instead.
        # Also, tabulate's `stralign` expects a single string; per-column
        # alignment is the `colalign` parameter. Confirm against tabulate.
        if len(table.header) == 0:
            return tabulate(t.table, stralign=colalign, tablefmt="github")
        else:
            return tabulate(t.table[1:], t.table[0], stralign=colalign, tablefmt="github")

    def headers(self):
        # The CSV output has no front-matter headers.
        return ''

    def output(self, markdown, outputfile=''):
        """ Output vulnerability details to CSV.

        This involves getting the vulnerabilities, splitting the content between vulnerability headings, stripping formatting, and outputting into CSV.
        """
        # Get vulnerability information
        rows = []
        rows.append(list(self.columns.keys()))
        vuln_list = self.env.get('vulnerabilities')
        for section in vuln_list.sections:
            for vuln in section.vulnerabilities:
                lr = CSVRenderer()
                lrm = mistune.create_markdown(renderer=lr)
                potcols = {}
                # NOTE(review): this rebinds the `markdown` parameter (the
                # incoming argument is never used); 'mardown_rendered' is a
                # typo for 'markdown_rendered'.
                markdown = vuln.markdown
                mardown_rendered = lrm(markdown)
                potcols['markdown'] = mardown_rendered
                potcols.update(lr.heading_text)
                potcols.update(vuln.headers)
                potcols['section'] = section.name
                # Build the row in the configured column order; missing
                # attributes become blank cells.
                row = []
                for col in self.columns:
                    if self.columns[col] in potcols:
                        row.append(potcols[self.columns[col]])
                    else:
                        row.append('')
                rows.append(row)

        log.info("Writing CSV file")
        # NOTE(review): csv.writer recommends open(..., newline='') to avoid
        # blank lines on Windows -- confirm and adjust.
        with open(outputfile, 'w') as fh:
            csvw = csv.writer(fh)
            csvw.writerows(rows)
return rows | /report_ranger-2.2-py3-none-any.whl/report_ranger/output_formatter/csvformatter.py | 0.671794 | 0.187467 | csvformatter.py | pypi |
from report_ranger.output_formatter.outputformatter import OutputFormatter
import yaml
import html
from report_ranger.table import Table
import logging
import subprocess
log = logging.getLogger(__name__)
# Defaults merged into the 'html' template headers before report overrides.
default_html_headers = {'template': ''}

# Mapping of legacy 'html' header names to canonical ones (currently none).
html_header_aliases = {
}
class HTMLFormatter(OutputFormatter):
    """Output formatter that renders the report to HTML via pandoc."""

    def __init__(self, templateheaders=dict(), timer=None, watcher=None):
        # NOTE: templateheaders=dict() is a mutable default but is only read.
        OutputFormatter.__init__(self, templateheaders, timer, watcher=watcher)
        self.templatefile = self.templateheaders['html_template']
        self.figformat = "svg"

    def escape(self, text):
        ''' Escape the given text based on the format we're working with

        :param text: a plain text message
        :return: the message escaped to appear correctly in HTML
        '''
        return html.escape(text)

    def table(self, table, options={}, **tableargs):
        ''' Format the table as an HTML <table>, honouring spans, styles and
        alignment from the Table object. '''
        markdown = ""

        t = Table(table, env=self.env, **tableargs)

        markdown += "<table>"
        for i in range(len(t.table)):
            markdown += "\n<tr>"
            for j in range(len(t.table[i])):
                attrs = ""
                # Cells swallowed by an earlier col/rowspan are skipped.
                if t.colspan[i][j] < 1 or t.rowspan[i][j] < 1:
                    continue
                if t.colspan[i][j] > 1:
                    attrs += " colspan={}".format(t.colspan[i][j])
                if t.rowspan[i][j] > 1:
                    attrs += " rowspan={}".format(t.rowspan[i][j])
                if t.cellstyles[i][j] != '':
                    attrs += " class='{}'".format(t.cellstyles[i][j])

                # cell alignment, first try cellalign then colalign
                if t.cellalign[i][j] != '':
                    alignment = t.cellalign[i][j]
                elif t.colalign[j] != '':
                    alignment = t.colalign[j]
                else:
                    alignment = ''

                if alignment != '':
                    if alignment == 'l':
                        attrs += " style='text-align: left'"
                    elif alignment == 'c':
                        attrs += " style='text-align: center'"
                    elif alignment == 'r':
                        attrs += " style='text-align: right'"
                    elif alignment == 'j':
                        attrs += " style='text-align: justified'"

                markdown += "<td{}>{}</td>".format(attrs,
                                                   self.escape(str(t.table[i][j])))
            markdown += "</tr>"
        markdown += "</table>"

        return markdown

    def headers(self):
        """Build the YAML front-matter headers for the pandoc template."""
        markdown = ''
        headers = dict()
        headers['title'] = '{{title}}'
        headers['date'] = '{{date.strftime("%-d %B %Y")}}'

        # Put in the defaults for the html template
        headers.update(default_html_headers)

        if "html" in self.template:
            # Rename any legacy header aliases to their canonical names.
            for header in html_header_aliases.keys():
                if header in self.template['html']:
                    self.template['html'][html_header_aliases[header]
                                          ] = self.template['html'][header]
                    del self.template['html'][header]
            headers.update(self.template['html'])

        # Overwrite the headers with the report headers if they are set
        htmlheaders = self.env.get("html")
        if htmlheaders is not None:
            for header in html_header_aliases.keys():
                if header in htmlheaders:
                    htmlheaders[html_header_aliases[header]
                                ] = htmlheaders[header]
                    # Delete only the aliased key. The original `del
                    # htmlheaders` removed the whole local name, so the next
                    # access raised NameError (compare the parallel loop for
                    # self.template['html'] above).
                    del htmlheaders[header]
            headers.update(htmlheaders)

        markdown += yaml.dump(headers)
        return markdown

    def output(self, markdown, outputfile=''):
        """Render the markdown through pandoc into an HTML file and return
        the assembled markdown."""
        output = self._build_markdown(markdown)

        log.info("Writing HTML")

        # Use pandoc to render the markdown into HTML with our template.
        pandoc_arguments = ['pandoc', '--from', 'markdown', '--to', 'html',
                            '--template', self.templatefile, '--listings', '-o', outputfile]
        log.info("Running pandoc with arguments {}".format(pandoc_arguments))
        process = subprocess.run(pandoc_arguments,
                                 input=output,
                                 universal_newlines=True)

        return output
import csv
import re
from dataclasses import astuple
from datetime import date, datetime
from decimal import Decimal
from pathlib import Path
from typing import Final, Iterable, Literal, TypeVar, cast, overload
from PyQt5.QtWidgets import QTableWidget
from report_tool.exports.formats import (
AccountInfo,
DataToExport,
ExportableSummary,
ExportableTransaction,
Transaction,
)
from report_tool.utils.constants import get_export_dir
from report_tool.utils.settings import read_config
# Captures the text between an opening '>' and the next '<' (the content of
# an HTML element); non-greedy so only the first such span is taken.
RE_TEXT_BETWEEN_TAGS: Final[re.Pattern[str]] = re.compile(r">(.*?)<")

# Generic type variable for the clean_value() overloads.
T = TypeVar("T")
class NothingToExport(Exception):
    """Raised when there is nothing to export (e.g. no transactions)."""
def make_comment_transactions(
    transactions: Iterable[ExportableTransaction],
) -> str:
    """Build the '#Transactions from <first> to <last>' comment line.

    Dates are parsed from each transaction's dd/mm/yy `date` field.
    NOTE(review): an empty iterable raises IndexError on dates[0].
    """
    parsed = [datetime.strptime(t.date, "%d/%m/%y").date() for t in transactions]
    parsed.sort()
    return f"#Transactions from {parsed[0]} to {parsed[-1]}"
class ExportToExcel:
    """An exporter that saves account data to an Excel-compatible text file."""
    def __init__(self, data: DataToExport) -> None:
        """Initialize the exporter.

        Args:
            data: The data to export.
        """
        self._data_to_export: DataToExport = data
        # User preferences (separator, what_to_export, ...) read from disk.
        self.config: dict = read_config()
@staticmethod
@overload
def clean_value(value: str) -> str:
...
@staticmethod
@overload
def clean_value(value: T) -> T:
...
@staticmethod
def clean_value(value: str | T) -> str | T:
"""Clean value. Remove html tags in strings.
Args:
value: value to clean.
Returns:
Cleaned value.
"""
if isinstance(value, str):
if (groups := RE_TEXT_BETWEEN_TAGS.search(value)) is not None:
return groups.group(1)
return value
    def _get_filename(
        self,
        what_to_export: Literal["all", "transactions", "summary"],
        account_info: AccountInfo,
    ) -> str:
        """Return filename to export to.

        Raises:
            NothingToExport: when a transaction-range filename is requested
                but there are no transactions to derive dates from.
        """
        acc_name: str = account_info["Account name: "].lower()
        acc_type: str = account_info["Account type: "].lower()
        if what_to_export == "summary":
            # NOTE(review): with what_to_export == "summary" this produces a
            # doubled "..._summary_summary.txt" -- confirm that's intended.
            return f"report_tool_{acc_type}_{acc_name}_{what_to_export}_summary.txt"

        # constructs a header with date range
        dates: list[date] = self.get_transaction_dates(
            self._data_to_export["transactions"].values()
        )
        if not dates:
            raise NothingToExport("No transactions to export")

        # construct fixed file name
        # NOTE(review): "report tool_" (with a space) is inconsistent with
        # the "report_tool_" prefix used for summaries above -- confirm.
        return f"report tool_{acc_type}_{acc_name}_{what_to_export}_from {dates[0]:%Y-%m-%d} to {dates[-1]:%Y-%m-%d}.txt"
def export(self, widget_pos: QTableWidget) -> None:
"""Export data to file."""
config = self.config
what_to_export: Literal["all", "transactions", "summary"] = config[
"what_to_export"
].lower()
try:
filename: str = self._get_filename(
what_to_export, self._data_to_export["current_acc"]
)
filepath: Path = get_export_dir() / filename
except NothingToExport as exc:
print(exc)
return
start_capital = self._data_to_export["start_capital"]
if what_to_export in ["all", "transactions"]:
transactions: list[
ExportableTransaction
] = self._get_exportable_transactions(widget_pos)
self.write_comment_transactions(filepath, transactions=transactions)
self.write_transactions(filepath, transactions, sep=config["separator"])
if what_to_export in ["all", "summary"]:
summary = self._get_exportable_summary()
self.write_comment_summary(
filepath, start_capital=start_capital, config=config
)
self.write_summary(filepath, summary, sep=config["separator"])
def _get_exportable_transactions(
self, widget_pos: QTableWidget
) -> list[ExportableTransaction]:
"""Get exportable transactions from widget."""
nb_row: int = widget_pos.rowCount()
nb_col: int = widget_pos.columnCount()
return [
ExportableTransaction(
*[
cell.text()
for j in range(nb_col)
if (cell := widget_pos.item(i, j)) is not None
]
)
for i in range(nb_row)
]
def _get_exportable_summary(self) -> list[ExportableSummary]:
"""Get exportable summary from internal data.
Returns:
An exportable summary.
"""
return [
ExportableSummary(key=key, value=self.clean_value(cast(str, value)))
for key, value in self._data_to_export["summary"].items()
]
@staticmethod
def get_transaction_dates(
transactions: Iterable[Transaction],
) -> list[date]:
"""Get the dates of transactions.
Args:
transactions: Transactions to get dates from.
Returns:
A list of dates.
"""
return sorted(
datetime.strptime(t["date"], "%d/%m/%y").date() for t in transactions
)
@staticmethod
def make_comment_summary(
*,
is_aggregated: bool,
currency_symbol: str,
is_included: bool,
result_type: str,
start_capital: Decimal,
is_auto_capital: bool,
) -> str:
"""Make a comment for the summary."""
return (
f"#Summary calculated in {result_type.lower()}"
f" | interest {'' if is_included else 'not '}included"
f" | positions {'' if is_aggregated else 'not '}aggregated"
f" | capital initial = {start_capital}{currency_symbol}"
f"{'(auto)' if is_auto_capital else '(manual)'}"
)
def write_comment_transactions(
self, filename: Path, *, transactions: list[ExportableTransaction]
) -> None:
"""Write a comment with the date range of transactions."""
# constructs a header with options
comment = make_comment_transactions(transactions)
with filename.open("a", encoding="utf-8") as f:
f.write(comment + "\n")
@staticmethod
def write_transactions(
filename: Path,
transactions: list[ExportableTransaction],
*,
sep: str = ";",
) -> None:
"""Write transactions to a file."""
with filename.open("a") as fp:
# create csv writer
writer = csv.writer(fp, delimiter=sep, lineterminator="\n")
# write header
writer.writerow(
[
"Date",
"Market",
"Direction",
"Open Size",
"Open",
"Close",
"Points",
"Points/lot",
"Profit/Loss",
]
)
# write transactions
writer.writerows(astuple(t) for t in transactions)
def write_comment_summary(
self, filename: Path, *, start_capital: Decimal, config: dict
) -> None:
"""Write a comment about the summary."""
comment = self.make_comment_summary(
is_aggregated=config["aggregate"] == 2,
currency_symbol=config["currency_symbol"],
is_included=config["include"],
result_type=config["result_in"],
start_capital=start_capital,
is_auto_capital=config["auto_calculate"] == 2,
)
with filename.open("a", encoding="utf-8") as f:
f.write(comment + "\n")
@staticmethod
def write_summary(
filename: Path, summary: list[ExportableSummary], *, sep: str = ";"
) -> None:
"""Write summary to file"""
with filename.open("a") as fp:
# create csv writer
writer = csv.writer(fp, delimiter=sep, lineterminator="\n")
# write summary
writer.writerows(astuple(s) for s in summary) | /report_tool-3.0.0a3.tar.gz/report_tool-3.0.0a3/report_tool/exports/excel.py | 0.883192 | 0.266506 | excel.py | pypi |
import collections
import logging
import queue
import socket
import threading
import time
import urllib.error
import urllib.parse
import urllib.request
from typing import Generator
import requests
from report_tool.logger.handlers import ReportToolFileHandler
from report_tool.utils.constants import get_logs_dir
# Module-level log setup: everything sent to the "lightstreamer" logger is
# written to a dedicated, daily-rotated file (7 backups kept).
logs_dir = get_logs_dir()
# NOTE(review): day/month/year are never read in this module — presumably
# leftovers from an older log-file naming scheme; confirm before removing.
day = time.strftime("%d")
month = time.strftime("%m")
year = time.strftime("%Y")
logs_dir.mkdir(exist_ok=True)
LOG = logging.getLogger("lightstreamer")
hdlr = ReportToolFileHandler("lightstreamer.log", when="D", backupCount=7)
formatter = logging.Formatter("%(asctime)s %(levelname)s %(message)s")
hdlr.setFormatter(formatter)
LOG.addHandler(hdlr)
LOG.setLevel(logging.DEBUG)
# Minimum time to wait between retry attempts, in seconds. Subsequent
# reconnects repeatedly double this up to a maximum.
RETRY_WAIT_SECS = 0.125
# Maximum time in seconds between reconnects. Repeatedly failing connects will
# cap the retry backoff at this maximum.
RETRY_WAIT_MAX_SECS = 30.0
# Create and activate a new table. The item group specified in the LS_id
# parameter will be subscribed to and Lightstreamer Server will start sending
# realtime updates to the client immediately.
OP_ADD = "add"
# Creates a new table. The item group specified in the LS_id parameter will be
# subscribed to but Lightstreamer Server will not start sending realtime
# updates to the client immediately.
OP_ADD_SILENT = "add_silent"
# Activate a table previously created with an "add_silent" operation.
# Lightstreamer Server will start sending realtime updates to the client
# immediately.
OP_START = "start"
# Deletes the specified table. All the related items will be unsubscribed to
# and Lightstreamer Server will stop sending realtime updates to the client
# immediately.
OP_DELETE = "delete"
# Session management; forcing closure of an existing session.
OP_DESTROY = "destroy"
# All the itemEvent coming from the Data Adapter must be sent to the client
# unchanged.
MODE_RAW = "RAW"
# The source provides updates of a persisting state (e.g. stock quote updates).
# The absence of a field in an itemEvent is interpreted as "unchanged data".
# Any "holes" must be filled by copying each field with the value in the last
# itemEvent where the field had a value. Not all the itemEvents from the Data
# Adapter need to be sent to the client.
MODE_MERGE = "MERGE"
# The source provides events of the same type (e.g. statistical samplings or
# news). The itemEvents coming from the Data Adapter must be sent to the client
# unchanged. Not all the itemEvents need to be sent to the client.
MODE_DISTINCT = "DISTINCT"
# The itemEvents are interpreted as commands that indicate how to progressively
# modify a list. In the schema there are two fields that are required to
# interpret the itemEvent. The "key" field contains the key that unequivocally
# identifies a line of the list generated from the Item. The "command" field
# contains the command associated with the itemEvent, having a value of "ADD",
# "UPDATE" or "DELETE".
MODE_COMMAND = "COMMAND"
# A session does not yet exist, we're in the process of connecting for the
# first time. Control messages cannot be sent yet.
STATE_CONNECTING = "connecting Lightstreamer session"
# Connected and forwarding messages.
STATE_CONNECTED = "connected Lightstreamer session"
# A session exists, we're just in the process of reconnecting. A healthy
# connection will alternate between RECONNECTING and CONNECTED states as
# LS_content_length is exceeded.
STATE_RECONNECTING = "reconnecting Lightstreamer session"
# Could not connect and will not retry because the server indicated a permanent
# error. After entering this state the thread stops, and session information is
# cleared. You must call create_session() to restart the session. This is the
# default state.
STATE_DISCONNECTED = "disconnected from Lightstreamer"
# Called when the server indicates its internal message queue overflowed.
EVENT_OVERFLOW = "on_overflow"
# Called when an attempted push message could not be delivered.
EVENT_PUSH_ERROR = "on_push_error"
# Human-readable descriptions for the numeric causes sent with an END message.
CAUSE_MAP = {
    "31": 'closed by the administrator through a "destroy" request',
    "32": "closed by the administrator through JMX",
    "35": "Adapter does not allow more than one session for the user",
    "40": "A manual rebind to the same session has been performed",
}
class Error(Exception):
    """Raised when any operation fails for objects in this module."""

    def __init__(self, fmt=None, *args):
        # Apply printf-style arguments if any were given; otherwise fall back
        # to the class docstring as the default message.
        message = fmt % args if args else fmt
        Exception.__init__(self, message or self.__doc__)


class TransientError(Error):
    """A request failed, but a later retry may succeed (e.g. network error)."""


class SessionExpired(Error):
    """Server indicated our session has expired."""
def encode_dict(dct):
    """Make a dict out of the given key/value pairs, but only include values
    that are not None."""
    pairs = [(key, value) for key, value in dct.items() if value is not None]
    return urllib.parse.urlencode(pairs)
def _replace_url_host(url, hostname=None):
"""Return the given URL with its host part replaced with `hostname` if it
is not None, otherwise simply return the original URL."""
if not hostname:
return url
parsed = urllib.parse.urlparse(url)
new = [parsed[0], hostname] + list(parsed[2:])
return urllib.parse.urlunparse(new)
def _decode_field(s, prev=None):
"""
Decode a single field according to the Lightstreamer encoding rules.
1. Literal '$' is the empty string.
2. Literal '#' is null (None).
3. Literal '' indicates unchanged since previous update.
4. If the string starts with either '$' or '#', but is not length 1,
trim the first character.
5. Unicode escapes of the form uXXXX are unescaped.
Returns the decoded Unicode string.
"""
if s == "$":
return ""
elif s == "#":
return None
elif s == "":
return prev
elif s[0] in "$#":
s = s[1:]
return s.decode("unicode_escape")
def run_and_log(func, *args, **kwargs):
    """Invoke a function, logging any raised exceptions. Returns False if an
    exception was raised, otherwise True."""
    try:
        func(*args, **kwargs)
        return True
    except Exception:
        LOG.exception("While invoking %r(*%r, **%r)", func, args, kwargs)
        # Previously fell through and returned None; return False explicitly
        # to match the documented contract (equivalent truthiness for callers).
        return False
def dispatch(lst, *args, **kwargs):
    """Invoke every function in `lst` as func(*args, **kwargs), logging any
    exceptions and removing the exception-raising functions."""
    # Iterate over a snapshot so removal from `lst` is safe mid-loop.
    for func in tuple(lst):
        succeeded = run_and_log(func, *args, **kwargs)
        if not succeeded:
            lst.remove(func)
class WorkQueue(object):
    """Manage a thread and associated queue. The thread executes functions on
    the queue as requested."""

    def __init__(self):
        """Create an instance and start the worker thread."""
        self.log = logging.getLogger("WorkQueue")
        self.queue = queue.Queue()
        self.thread = threading.Thread(target=self._main)
        # Thread.setDaemon() is deprecated since Python 3.10; set the
        # attribute directly instead.
        self.thread.daemon = True
        self.thread.start()

    def stop(self):
        """Request the thread stop, then wait for it to comply."""
        # None acts as the shutdown sentinel consumed by _main().
        self.queue.put(None)
        self.thread.join()

    def push(self, func, *args, **kwargs):
        """Request the thread execute func(*args, **kwargs)."""
        self.queue.put((func, args, kwargs))

    def _main(self):
        """Thread main; sleep waiting for a function to dispatch."""
        while True:
            tup = self.queue.get()
            if tup is None:
                self.log.info("Got shutdown semaphore; exiting.")
                return
            func, args, kwargs = tup
            run_and_log(func, *args, **kwargs)
class Event(object):
    """Manage a list of functions."""

    def __init__(self):
        """Create an instance with no subscribers."""
        self.listeners = []

    def listen(self, func):
        """Subscribe `func` to the event (no-op if already subscribed)."""
        if func in self.listeners:
            return
        self.listeners.append(func)

    def unlisten(self, func):
        """Unsubscribe `func` from the event (no-op if not subscribed)."""
        if func in self.listeners:
            self.listeners.remove(func)

    def fire(self, *args, **kwargs):
        """Call all registered functions, passing `args` and `kwargs`."""
        dispatch(self.listeners, *args, **kwargs)
def event_property(name, doc):
    """Return a property that evaluates to an Event, which is stored in the
    class instance on first access."""
    attr = "_" + name

    def fget(self):
        # Lazily create and cache the Event on the instance.
        existing = getattr(self, attr, None)
        if existing:
            return existing
        event = Event()
        setattr(self, attr, event)
        return event

    return property(fget, doc=doc)
class Table(object):
    """Lightstreamer table.
    Abstracts management of a single table, and wraps incoming data in a
    `item_factory` to allow simple conversion to the user's native data format.
    Callers should subscribe a function at least to on_update() to receive row
    updates, and possibly also on_end_of_snapshot() to know when the first set
    of complete rows has been received.
    The table is registered with the given `LsClient` during construction.
    """
    on_update = event_property(
        "on_update",
        """Fired when the client receives a new update message (i.e. data).
        Receives 2 arguments: item_id, and msg.""",
    )
    on_end_of_snapshot = event_property(
        "on_end_of_snapshot",
        """Fired when the server indicates the first set of update messages
        representing a snapshot have been sent successfully.""",
    )
    def __init__(
        self,
        client,
        item_ids,
        mode=None,
        data_adapter=None,
        buffer_size=None,
        item_factory=None,
        max_frequency=None,
        schema=None,
        selector=None,
        silent=False,
        snapshot=False,
    ):
        """Create a new table.
            `data_adapter`: optional data adapter name.
            `item_ids`: ID of the item group that the table contains.
            `schema`: ID of the schema table items should conform to.
            `selector`: optional ID of a selector for table items.
            `mode`: MODE_* constant describing the subscription mode.
            `buffer_size`: requested size of the transmit queue measured in events;
                defaults to 1 for MODE_MERGE and MODE_DISTINCT. Set to 0 to
                indicate 'maximum possible size'.
            `max_frequency`: requested maximum updates per second for table items;
                set to "unfiltered" to forward all messages without loss (only
                valid for MODE_MERGE, MODE_DISTINCT, MODE_COMMAND), set to 0 for
                "no frequency limit", or integer number of updates per second.
            `snapshot` indicates whether server should send a snapshot at
                subscription time. False for no, True for yes, or integer >= 1 for
                'yes, but only send N items.
            `silent`: If True, server won't start delivering events until
                start() is called.
            `item_factory`: Passed a sequence of strings-or-None for each row
                received, expected to return some object representing the row.
                Defaults to tuple().
        """
        assert mode in (None, MODE_RAW, MODE_MERGE, MODE_DISTINCT, MODE_COMMAND)
        assert item_ids, "item_ids required"
        self.client = client
        self.item_ids = item_ids
        self.mode = mode or MODE_MERGE
        self.data_adapter = data_adapter
        self.buffer_size = buffer_size
        self.item_factory = item_factory or tuple
        self.max_frequency = max_frequency
        self.schema = schema
        self.selector = selector
        self.silent = silent
        self.snapshot = snapshot
        # Maps item_id -> last decoded field list, so "unchanged" ('')
        # fields in later updates can be filled in from the previous row.
        self._last_item_map = {}
        #: This is a dict mapping item IDs to the last known value for
        #: the particular item. Note that if no updates have been received
        #: for a specified item ID, it will have no entry here.
        self.items = {}
        # Registration assigns table_id and (if connected) subscribes server-side.
        client._register(self)
    def _dispatch_update(self, item_id, item):
        """Called by LsClient to dispatch a table update line."""
        if item == "EOS":
            self.on_end_of_snapshot.fire()
            return
        # Merge with the previous row: empty-string fields decode to the
        # previous value at the same position (see _decode_field rule 3).
        last = dict(enumerate(self._last_item_map.get(item_id, [])))
        fields = [_decode_field(s, last.get(i)) for i, s in enumerate(item)]
        self._last_item_map[item_id] = fields
        # NOTE(review): with the lines below commented out, self.items is
        # never populated and item_factory is never applied, despite what the
        # class documentation says — confirm this local change is intended.
        # fields.insert(0, self.item_ids)  # insert table ids to know wha it's sent
        # self.items[item_id] = self.item_factory(fields)
        # self.on_update.fire(item_id, self.items[item_id])
        # print(fields)
        self.on_update.fire(self.item_ids, fields)  # send item_ids to identify it
class LsClient(object):
    """Manages a single Lightstreamer session. Callers are expected to:
    * Create an instance and subscribe to on_state().
    * Call create_session().
    * Create lightstreamer.Table instances, or manually call allocate().
    * Subscribe to each Table's on_update().
    * Call destroy() to shut down.
    create_session() and send_control() calls are completed asynchronously on a
    private thread.
    """
    on_state = event_property(
        "on_state",
        """Subscribe `func` to connection state changes. Sole argument, `state`
        is one of the STATE_* constants.""",
    )
    on_heartbeat = event_property(
        "on_heartbeat",
        """Subscribe `func` to heartbeats. The function is called with no
        arguments each time the connection receives any data.""",
    )
    def __init__(
        self,
        base_url,
        work_queue=None,
        content_length=None,
        timeout_grace=None,
        polling_ms=None,
        proxies=None,
    ):
        """Create an instance using `base_url` as the root of the Lightstreamer
        server. If `timeout_grace` is given, indicates number of seconds grace
        to allow after an expected keepalive fails to arrive before considering
        the connection dead."""
        self.base_url = base_url
        self._work_queue = work_queue or WorkQueue()
        self.content_length = content_length
        self._timeout_grace = timeout_grace or 1.0
        # NOTE(review): the `polling_ms` argument is ignored — the polling
        # interval is hard-coded to 10000ms. Confirm whether that is intended.
        self._polling_ms = 10000
        self.proxies = proxies
        self._lock = threading.Lock()
        self._control_url = None
        self.log = logging.getLogger("lightstreamer.LsClient")
        self._table_id = 0
        self._table_map = {}
        self._session = {}
        self._state = STATE_DISCONNECTED
        self._control_queue = collections.deque()
        self._thread = None
    def _set_state(self, state):
        """Emit an event indicating the connection state has changed, taking
        care not to emit duplicate events."""
        if self._state == state:
            return
        self._state = state
        if state == STATE_DISCONNECTED:
            self._control_queue.clear()
        self.log.debug("New state: %r", state)
        self.on_state.fire(state)
    def _get_request_timeout(self):
        """Examine the current session to figure out how long we should wait
        for any data on the receive connection."""
        ms = float(self._session.get("KeepaliveMillis", "0")) or 5000
        return self._timeout_grace + (ms / 1000)
    def _post(self, suffix, data, base_url=None):
        """Perform an HTTP post to `suffix`, logging before and after. If an
        HTTP exception is thrown, log an error and return the exception."""
        url = urllib.parse.urljoin(base_url or self.base_url, suffix)
        try:
            # NOTE(review): TLS certificate verification is disabled here.
            return requests.post(url, data=data, verify=False, proxies=self.proxies)
        # NOTE(review): requests raises requests.exceptions.RequestException,
        # not urllib.error.HTTPError, so this handler looks unreachable — a
        # likely leftover from a urllib-based implementation; confirm.
        except urllib.error.HTTPError as e:
            self.log.error("HTTP %d for %r", e.getcode(), url)
            return e
    def _dispatch_update(self, line):
        """Parse an update event line from Lightstreamer, merging it into the
        previous version of the row it represents, then dispatch it to the
        table's associated listener."""
        if not line:
            return
        bits = line.rstrip("\r\n").split("|")
        if bits[0].count(",") < 1:
            self.log.warning("Dropping strange update line: %r", line)
            return
        table_info = bits[0].split(",")
        table_id, item_id = int(table_info[0]), int(table_info[1])
        table = self._table_map.get(table_id)
        if not table:
            self.log.debug("Unknown table %r; dropping row", table_id)
            return
        if table_info[-1] == "EOS":
            run_and_log(table._dispatch_update, item_id, "EOS")
        else:
            run_and_log(table._dispatch_update, item_id, bits[1:])
            table_name = table.item_ids  # NOTE(review): assigned but never used
    # Constants for _recv_line -> _do_recv communication.
    R_OK, R_RECONNECT, R_END = 0, 1, 2
    def _recv_line(self, line):
        """Parse a line from Lightstreamer and act accordingly. Returns True to
        keep the connection alive, False to indicate time to reconnect, or
        raises Terminated to indicate the doesn't like us any more."""
        self.on_heartbeat.fire()
        if line.startswith("PROBE"):
            self.log.debug("Received server probe.")
            return self.R_OK
        elif line.startswith("LOOP"):
            self.log.debug("Server indicated length exceeded; reconnecting.")
            return self.R_RECONNECT
        elif line.startswith("END"):
            cause = CAUSE_MAP.get(line.split()[-1], line)
            self.log.info("Session permanently closed; cause: %r", cause)
            return self.R_END
        else:
            # Update event.
            self._dispatch_update(line)
            return self.R_OK
    def _do_recv(self):
        """Connect to bind_session.txt and dispatch messages until the server
        tells us to stop or an error occurs."""
        self.log.debug("Attempting to connect..")
        self._set_state(STATE_CONNECTING)
        sessionnum = encode_dict({"LS_session": self._session["SessionId"]})
        # NOTE: relies on self.control_url having been set by
        # _parse_session_info() / _create_session_impl() beforehand.
        req = requests.post(
            self.control_url + "bind_session.txt",
            data=sessionnum,
            stream=True,
            verify=False,
            proxies=self.proxies,
        )
        line_it = req.iter_lines(chunk_size=1, decode_unicode=True)
        self._parse_and_raise_status(req, line_it)
        self._parse_session_info(line_it)
        self._set_state(STATE_CONNECTED)
        self.log.debug(
            "Server reported Content-length: %s", req.headers.get("Content-length")
        )
        for line in line_it:
            status = self._recv_line(line)
            if status == self.R_END:
                return False
            elif status == self.R_RECONNECT:
                return True
    def _is_transient_error(self, e):
        # Errors worth retrying: network-level failures wrapped by urllib,
        # raw socket errors, or our own TransientError.
        if isinstance(e, urllib.error.URLError) and isinstance(e.reason, socket.error):
            return True
        return isinstance(e, (socket.error, TransientError))
    def _recv_main(self):
        """Receive thread main function. Calls _do_recv() in a loop, optionally
        delaying if a transient error occurs."""
        self.log.debug("receive thread running.")
        fail_count = 0
        running = True
        while running:
            try:
                running = self._do_recv()
                fail_count = 0
            except Exception as e:
                if not self._is_transient_error(e):
                    self.log.exception("_do_recv failure")
                    break
                # Exponential backoff capped at RETRY_WAIT_MAX_SECS.
                fail_wait = min(
                    RETRY_WAIT_MAX_SECS, RETRY_WAIT_SECS * (2**fail_count)
                )
                fail_count += 1
                self.log.info(
                    "Error: %s: %s (reconnect in %.2fs)",
                    e.__class__.__name__,
                    e,
                    fail_wait,
                )
                self._set_state(STATE_CONNECTING)
                time.sleep(fail_wait)
        self._set_state(STATE_DISCONNECTED)
        self._thread = None
        self._session.clear()
        # NOTE(review): the rest of this class reads/writes `self.control_url`
        # (no underscore); clearing `_control_url` here leaves the attribute
        # actually in use untouched — looks like a stale name; confirm.
        self._control_url = None
        self.log.debug("Receive thread exiting")
    @staticmethod
    def _parse_and_raise_status(req, line_it: Generator[str, None, None]):
        """Parse the status part of a control/session create/bind response.
        Either a single "OK", or "ERROR" followed by the error description. If
        ERROR, raise RequestFailed.
        """
        if req.status_code != 200:
            raise TransientError("HTTP status %d", req.status_code)
        status = next(line_it)
        if status.startswith("SYNC ERROR"):
            raise SessionExpired()
        if not status.startswith("OK"):
            raise TransientError("%s %s: %s" % (status, next(line_it), next(line_it)))
    def _parse_session_info(self, line_it):
        """Parse the headers from `fp` sent immediately following an OK
        message, and store them in self._session."""
        # Requests' iter_lines() has some issues with \r.
        blanks = 0
        for line in line_it:
            if line:
                blanks = 0
                key, value = line.rstrip().split(":", 1)
                self._session[key] = value
            else:
                blanks += 1
                if blanks == 2:
                    break
        self.control_url = _replace_url_host(
            self.base_url, self._session.get("ControlAddress")
        )
        assert self._session, "Session parse failure"
    def _create_session_impl(self, dct):
        """Worker for create_session()."""
        assert self._state == STATE_DISCONNECTED
        self._set_state(STATE_CONNECTING)
        try:
            req = self._post("create_session.txt", encode_dict(dct))
            # Scan the full response body for the control address line.
            for line in req.text.splitlines():
                if line.startswith("ControlAddress:"):
                    _, control_address = line.split(":", maxsplit=1)
                    break
            else:
                raise ValueError(
                    "`ControlAddress` not found. Frame was: `%s`", req.text
                )
            line_it = req.iter_lines(chunk_size=1, decode_unicode=True)
            self._parse_and_raise_status(req, line_it)
            self.control_url = control_address
        except Exception:
            self._set_state(STATE_DISCONNECTED)
            raise
        self._parse_session_info(line_it)
        # Re-subscribe any tables registered before the session existed.
        for table in self._table_map.values():
            self._enqueue_table_create(table)
        self._thread = threading.Thread(target=self._recv_main)
        # NOTE(review): Thread.setDaemon() is deprecated since Python 3.10;
        # prefer `self._thread.daemon = True`.
        self._thread.setDaemon(True)
        self._thread.start()
    def create_session(
        self,
        username,
        adapter_set,
        password=None,
        max_bandwidth_kbps=None,
        content_length=None,
        keepalive_ms=None,
    ):
        """Begin authenticating with Lightstreamer and start the receive
        thread.
            `username` is the Lightstreamer username (required).
            `adapter_set` is the adapter set name to use (required).
            `password` is the Lightstreamer password.
            `max_bandwidth_kbps` indicates the highest transmit rate of the
                server in Kbps. Server's default is used if unspecified.
            `content_length` is the maximum size of the HTTP entity body before the
                server requests we reconnect; larger values reduce jitter. Server's
                default is used if unspecified.
            `keepalive_ms` is the minimum time in milliseconds between PROBE
                messages when the server otherwise has nothing to say. Server's
                default is used if unspecified.
        """
        assert self._state == STATE_DISCONNECTED, (
            "create_session() called while state %r" % self._state
        )
        self._work_queue.push(
            self._create_session_impl,
            {
                "LS_user": username,
                # NOTE(review): `adapter_set` is accepted but deliberately not
                # sent (line below disabled) — confirm this is intended.
                # 'LS_adapter_set': adapter_set,
                "LS_report_info": "true",
                "LS_polling": "true",
                "LS_polling_millis": self._polling_ms,
                "LS_password": password,
                "LS_requested_max_bandwidth": max_bandwidth_kbps,
                "LS_content_length": content_length,
                "LS_keepalive_millis": keepalive_ms,
            },
        )
    def join(self):
        """Wait for the receive thread to terminate."""
        if self._thread:
            self._thread.join()
    def _send_control_impl(self):
        """Worker function for send_control()."""
        assert self._session["SessionId"]
        if not self._control_queue:
            return
        limit = int(self._session.get("RequestLimit", "50000"))
        bits = []
        size = 0
        with self._lock:
            # Batch as many queued operations as fit under the server's
            # request size limit; the rest stay queued for the next call.
            while self._control_queue:
                op = self._control_queue[0]
                op["LS_session"] = self._session["SessionId"]
                encoded = encode_dict(op)
                if (size + len(encoded) + 2) > limit:
                    break
                bits.append(encoded)
                size += len(encoded) + 2
                self._control_queue.popleft()
        req = self._post(
            "control.txt", data="\r\n".join(bits), base_url=self.control_url
        )
        self._parse_and_raise_status(req, req.iter_lines(decode_unicode=True))
        self.log.debug("Control message successful.")
    def _enqueue_table_create(self, table):
        # Queue the subscription ("add") control message for a table.
        self._send_control(
            {
                "LS_table": table.table_id,
                "LS_op": OP_ADD_SILENT if table.silent else OP_ADD,
                "LS_data_adapter": table.data_adapter,
                "LS_id": table.item_ids,
                "LS_schema": table.schema,
                "LS_selector": table.selector,
                "LS_mode": table.mode,
                "LS_requested_buffer_size": table.buffer_size,
                "LS_requested_max_frequency": table.max_frequency,
                "LS_snapshot": table.snapshot and "true",
            }
        )
    def _register(self, table):
        """Register `table` with the session."""
        with self._lock:
            self._table_id += 1
            table.table_id = self._table_id
            self._table_map[table.table_id] = table
        if self._state in (STATE_CONNECTED, STATE_RECONNECTING):
            self._enqueue_table_create(table)
    def start(self, table):
        """If a table was created with silent=True, instruct the server to
        start delivering updates."""
        with self._lock:
            self._send_control({"LS_op": OP_START, "LS_table": table.table_id})
    def delete(self, table):
        """Instruct the server and LsClient to discard the given table."""
        with self._lock:
            # NOTE(review): with the pop below disabled, the table is never
            # removed from _table_map, so updates keep dispatching locally.
            # self._table_map.pop(table_id, None)
            self._send_control({"LS_op": OP_DELETE, "LS_table": table.table_id})
    def _send_control(self, dct):
        # Queue the operation and wake the work queue to flush it.
        self._control_queue.append(dct)
        self._work_queue.push(self._send_control_impl)
    def destroy(self):
        """Request the server destroy our session."""
        self._send_control({"LS_op": OP_DESTROY})
import json
import logging
from datetime import date, datetime, time
from decimal import Decimal
from pathlib import Path, PosixPath
from typing import Any, Callable, Mapping, TypedDict, TypeVar
from PyQt5.QtCore import QByteArray
# strftime/strptime formats shared by the round-trip encoder and decoder.
DATE_FORMAT = "%Y-%m-%d"
TIME_FORMAT = "%H:%M:%S.%f"
DATETIME_FORMAT = f"{DATE_FORMAT} {TIME_FORMAT}"

logger = logging.getLogger(__name__)


class EncodedValue(TypedDict):
    """A value that can be encoded in JSON."""

    _type: str
    value: str


class RoundTripEncoder(json.JSONEncoder):
    """JSON encoder tagging non-JSON types so `RoundTripDecoder` can restore them."""

    def default(self, obj: Any) -> EncodedValue:
        """Encode an unsupported object as a {"_type", "value"} tagged dict.

        Raises:
            TypeError: for objects of any other, unsupported type.
        """
        # Check against Path, not PosixPath: on Windows concrete paths are
        # WindowsPath instances, which the original PosixPath check missed.
        if isinstance(obj, Path):
            return {
                "_type": "path",
                "value": str(obj),
            }
        # datetime must be tested before date: datetime is a date subclass.
        if isinstance(obj, datetime):
            return {
                "_type": "datetime.datetime",
                "value": obj.strftime(DATETIME_FORMAT),
            }
        if isinstance(obj, date):
            return {
                "_type": "datetime.date",
                "value": obj.strftime(DATE_FORMAT),
            }
        if isinstance(obj, time):
            return {
                "_type": "datetime.time",
                "value": obj.strftime(TIME_FORMAT),
            }
        if isinstance(obj, Decimal):
            return {
                "_type": "decimal.Decimal",
                "value": str(obj),
            }
        if isinstance(obj, QByteArray):
            return {
                "_type": "PyQt5.QtCore.QByteArray",
                "value": obj.toBase64().data().decode(),
            }
        raise TypeError(
            f"Object of type {obj.__class__.__name__} is not JSON serializable"
        )
InputT = TypeVar("InputT", bound=Mapping[str, Any])
class RoundTripDecoder(json.JSONDecoder):
def __init__(
self,
*,
object_hook: Callable[[dict[str, Any]], Any | None] | None = None,
parse_float: Callable[[str], Any | None] | None = None,
parse_int: Callable[[str], Any | None] | None = None,
parse_constant: Callable[[str], Any | None] | None = None,
strict: bool = True,
object_pairs_hook: Callable[[list[tuple[str, Any]]], Any | None] | None = None,
) -> None:
if object_hook is None:
object_hook = self.object_hook
super().__init__(
object_hook=object_hook,
parse_float=parse_float,
parse_int=parse_int,
parse_constant=parse_constant,
strict=strict,
object_pairs_hook=object_pairs_hook,
)
@staticmethod
def object_hook(
obj: InputT,
) -> InputT | datetime | date | time | Decimal | Path | None:
if "_type" not in obj:
return obj
type_ = obj["_type"]
if type_ == "datetime.datetime":
return datetime.strptime(obj["value"], DATETIME_FORMAT)
if type_ == "datetime.date":
return datetime.strptime(obj["value"], DATE_FORMAT).date()
if type_ == "datetime.time":
return datetime.strptime(obj["value"], TIME_FORMAT).time()
if type_ == "decimal.Decimal":
return Decimal(obj["value"])
if type_ == "path":
return Path(obj["value"])
if type_ == "PyQt5.QtCore.QByteArray":
return QByteArray.fromBase64(obj["value"].encode())
logger.warning(f"Unknown type for Json Decoded: {type_}.")
return obj
if __name__ == "__main__":
    # Self-test: a value survives an encode/decode round trip unchanged.
    data = {
        "name": "Report O'Toole",
        "dt": datetime.now(),
        "d": datetime.now().date(),
        "t": datetime.now().time(),
        "value": Decimal("42.23"),
    }
    json_str = json.dumps(data, cls=RoundTripEncoder, indent=2)
    print(json_str)
    data_out = json.loads(json_str, cls=RoundTripDecoder)
    assert data == data_out
    print("Success")
__author__ = "Mariano Reingart <reingart@gmail.com>"
__copyright__ = "Copyright (C) 2010 Mariano Reingart"
__license__ = "LGPL 3.0"
# Inspired by tuto5.py and several examples from fpdf.org, html2fpdf, etc.
import html
import logging
from html.parser import HTMLParser
from .enums import XPos, YPos
LOGGER = logging.getLogger(__name__)
BULLET_WIN1252 = "\x95" # BULLET character in Windows-1252 encoding
DEFAULT_HEADING_SIZES = dict(h1=24, h2=18, h3=14, h4=12, h5=10, h6=8)
COLOR_DICT = {
"black": "#000000",
"navy": "#000080",
"darkblue": "#00008b",
"mediumblue": "#0000cd",
"blue": "#0000ff",
"darkgreen": "#006400",
"green": "#008000",
"teal": "#008080",
"darkcyan": "#008b8b",
"deepskyblue": "#00bfff",
"darkturquoise": "#00ced1",
"mediumspringgreen": "#00fa9a",
"lime": "#00ff00",
"springgreen": "#00ff7f",
"aqua": "#00ffff",
"cyan": "#00ffff",
"midnightblue": "#191970",
"dodgerblue": "#1e90ff",
"lightseagreen": "#20b2aa",
"forestgreen": "#228b22",
"seagreen": "#2e8b57",
"darkslategray": "#2f4f4f",
"darkslategrey": "#2f4f4f",
"limegreen": "#32cd32",
"mediumseagreen": "#3cb371",
"turquoise": "#40e0d0",
"royalblue": "#4169e1",
"steelblue": "#4682b4",
"darkslateblue": "#483d8b",
"mediumturquoise": "#48d1cc",
"indigo": "#4b0082",
"darkolivegreen": "#556b2f",
"cadetblue": "#5f9ea0",
"cornflowerblue": "#6495ed",
"rebeccapurple": "#663399",
"mediumaquamarine": "#66cdaa",
"dimgray": "#696969",
"dimgrey": "#696969",
"slateblue": "#6a5acd",
"olivedrab": "#6b8e23",
"slategray": "#708090",
"slategrey": "#708090",
"lightslategray": "#778899",
"lightslategrey": "#778899",
"mediumslateblue": "#7b68ee",
"lawngreen": "#7cfc00",
"chartreuse": "#7fff00",
"aquamarine": "#7fffd4",
"maroon": "#800000",
"purple": "#800080",
"olive": "#808000",
"gray": "#808080",
"grey": "#808080",
"skyblue": "#87ceeb",
"lightskyblue": "#87cefa",
"blueviolet": "#8a2be2",
"darkred": "#8b0000",
"darkmagenta": "#8b008b",
"saddlebrown": "#8b4513",
"darkseagreen": "#8fbc8f",
"lightgreen": "#90ee90",
"mediumpurple": "#9370db",
"darkviolet": "#9400d3",
"palegreen": "#98fb98",
"darkorchid": "#9932cc",
"yellowgreen": "#9acd32",
"sienna": "#a0522d",
"brown": "#a52a2a",
"darkgray": "#a9a9a9",
"darkgrey": "#a9a9a9",
"lightblue": "#add8e6",
"greenyellow": "#adff2f",
"paleturquoise": "#afeeee",
"lightsteelblue": "#b0c4de",
"powderblue": "#b0e0e6",
"firebrick": "#b22222",
"darkgoldenrod": "#b8860b",
"mediumorchid": "#ba55d3",
"rosybrown": "#bc8f8f",
"darkkhaki": "#bdb76b",
"silver": "#c0c0c0",
"mediumvioletred": "#c71585",
"indianred": "#cd5c5c",
"peru": "#cd853f",
"chocolate": "#d2691e",
"tan": "#d2b48c",
"lightgray": "#d3d3d3",
"lightgrey": "#d3d3d3",
"thistle": "#d8bfd8",
"orchid": "#da70d6",
"goldenrod": "#daa520",
"palevioletred": "#db7093",
"crimson": "#dc143c",
"gainsboro": "#dcdcdc",
"plum": "#dda0dd",
"burlywood": "#deb887",
"lightcyan": "#e0ffff",
"lavender": "#e6e6fa",
"darksalmon": "#e9967a",
"violet": "#ee82ee",
"palegoldenrod": "#eee8aa",
"lightcoral": "#f08080",
"khaki": "#f0e68c",
"aliceblue": "#f0f8ff",
"honeydew": "#f0fff0",
"azure": "#f0ffff",
"sandybrown": "#f4a460",
"wheat": "#f5deb3",
"beige": "#f5f5dc",
"whitesmoke": "#f5f5f5",
"mintcream": "#f5fffa",
"ghostwhite": "#f8f8ff",
"salmon": "#fa8072",
"antiquewhite": "#faebd7",
"linen": "#faf0e6",
"lightgoldenrodyellow": "#fafad2",
"oldlace": "#fdf5e6",
"red": "#ff0000",
"fuchsia": "#ff00ff",
"magenta": "#ff00ff",
"deeppink": "#ff1493",
"orangered": "#ff4500",
"tomato": "#ff6347",
"hotpink": "#ff69b4",
"coral": "#ff7f50",
"darkorange": "#ff8c00",
"lightsalmon": "#ffa07a",
"orange": "#ffa500",
"lightpink": "#ffb6c1",
"pink": "#ffc0cb",
"gold": "#ffd700",
"peachpuff": "#ffdab9",
"navajowhite": "#ffdead",
"moccasin": "#ffe4b5",
"bisque": "#ffe4c4",
"mistyrose": "#ffe4e1",
"blanchedalmond": "#ffebcd",
"papayawhip": "#ffefd5",
"lavenderblush": "#fff0f5",
"seashell": "#fff5ee",
"cornsilk": "#fff8dc",
"lemonchiffon": "#fffacd",
"floralwhite": "#fffaf0",
"snow": "#fffafa",
"yellow": "#ffff00",
"lightyellow": "#ffffe0",
"ivory": "#fffff0",
"white": "#ffffff",
}
def px2mm(px, dpi=72):
    """Convert a pixel length into millimeters.

    Args:
        px (int | str): length in pixels; HTML attribute values arrive as
            strings and are converted with int(), so fractional values raise.
        dpi (int): resolution assumed for the conversion. Defaults to 72,
            preserving the historical behavior of this helper.

    Returns:
        float: the length in millimeters (25.4 mm per inch)
    """
    return int(px) * 25.4 / dpi
def color_as_decimal(color="#000000"):
    """Convert a CSS color name or a "#rgb"/"#rrggbb" hex string
    into an (r, g, b) tuple of integers, or None for a falsy input."""
    if not color:
        return None
    # Resolve named colors; anything unknown is assumed to already be hex.
    hexcolor = COLOR_DICT.get(color.lower(), color)
    if len(hexcolor) == 4:
        # Short "#rgb" form: each hex digit is doubled ("#f0a" -> "#ff00aa")
        r, g, b = (int(digit * 2, 16) for digit in hexcolor[1:])
        return r, g, b
    r, g, b = (int(hexcolor[i : i + 2], 16) for i in (1, 3, 5))
    return r, g, b
class HTML2FPDF(HTMLParser):
    """Render basic HTML to FPDF"""
    def __init__(
        self,
        pdf,
        image_map=None,
        li_tag_indent=5,
        table_line_separators=False,
        ul_bullet_char=BULLET_WIN1252,
        heading_sizes=None,
        **_,
    ):
        """
        Args:
            pdf (FPDF): an instance of `fpdf.FPDF`
            image_map (function): an optional one-argument function that map <img> "src"
                to new image URLs
            li_tag_indent (int): numeric indentation of <li> elements
            table_line_separators (bool): enable horizontal line separators in <table>
            ul_bullet_char (str): bullet character for <ul> elements
            heading_sizes (dict): font size per heading tag name (e.g. "h1"),
                merged over DEFAULT_HEADING_SIZES
        """
        super().__init__()
        self.pdf = pdf
        self.image_map = image_map or (lambda src: src)
        self.li_tag_indent = li_tag_indent
        self.table_line_separators = table_line_separators
        self.ul_bullet_char = ul_bullet_char
        # Current bold/italic/underline state:
        self.style = dict(b=False, i=False, u=False)
        self.href = ""
        self.align = ""
        self.page_links = {}
        # Stack of (face, size, color) tuples saved when entering <font>/<pre>/<h*>:
        self.font_stack = []
        self.indent = 0
        # One entry per nested list; a str for <ul> bullets, an int counter for <ol>:
        self.bullet = []
        self.font_size = pdf.font_size_pt
        self.set_font(pdf.font_family or "times", size=self.font_size)
        self.font_color = 0, 0, 0  # initialize font color, r,g,b format
        self.table = None  # table attributes
        self.table_col_width = None  # column (header) widths
        self.table_col_index = None  # current column index
        self.td = None  # inside a <td>, attributes dict
        self.th = None  # inside a <th>, attributes dict
        self.tr = None  # inside a <tr>, attributes dict
        self.thead = None  # inside a <thead>, attributes dict
        self.tfoot = None  # inside a <tfoot>, attributes dict
        self.tr_index = None  # row index
        self.theader = None  # table header cells
        self.tfooter = None  # table footer cells
        self.theader_out = self.tfooter_out = False
        self.table_row_height = 0
        self.heading_level = None
        self.heading_sizes = dict(**DEFAULT_HEADING_SIZES)
        if heading_sizes:
            self.heading_sizes.update(heading_sizes)
        self._only_imgs_in_td = False
    def width2unit(self, length):
        "Handle conversion of % measures into the measurement unit used"
        # NOTE(review): int() rejects fractional strings like "12.5" - confirm
        # whether such HTML attribute values are expected upstream.
        if length[-1] == "%":
            total = self.pdf.w - self.pdf.r_margin - self.pdf.l_margin
            if self.table["width"][-1] == "%":
                total *= int(self.table["width"][:-1]) / 100
            return int(length[:-1]) * total / 100
        return int(length)
    def handle_data(self, data):
        """HTMLParser callback for text nodes: route text to the current
        table cell, aligned multi-cell, hyperlink, or plain write()."""
        if self.td is not None:  # drawing a table?
            self._insert_td(data)
        elif self.table is not None:
            # ignore anything else than td inside a table
            pass
        elif self.align:
            LOGGER.debug("align '%s'", data.replace("\n", "\\n"))
            self.pdf.multi_cell(
                0,
                self.h,
                data,
                border=0,
                new_x=XPos.LMARGIN,
                new_y=YPos.NEXT,
                align=self.align[0].upper(),
                link=self.href,
            )
        else:
            data = data.replace("\n", " ")
            if self.href:
                self.put_link(data)
            else:
                if self.heading_level:
                    # Headings also feed the document outline (table of contents):
                    self.pdf.start_section(data, self.heading_level - 1)
                LOGGER.debug("write '%s' h=%d", data.replace("\n", "\\n"), self.h)
                self.pdf.write(self.h, data)
    def _insert_td(self, data=""):
        """Render one table cell: compute its width/height/border/alignment,
        record it in theader/tfooter while parsing <thead>/<tfoot>,
        handle page breaks, then draw the cell."""
        self._only_imgs_in_td = False
        width = self._td_width()
        height = int(self.td.get("height", 0)) // 4 or self.h * 1.30
        if not self.table_row_height:
            self.table_row_height = height
        elif self.table_row_height > height:
            height = self.table_row_height
        border = int(self.table.get("border", 0))
        if self.th:
            self.set_style("B", True)
            border = border or "B"
            align = self.td.get("align", "C")[0].upper()
        else:
            align = self.td.get("align", "L")[0].upper()
            border = border and "LR"
        bgcolor = color_as_decimal(self.td.get("bgcolor", self.tr.get("bgcolor", "")))
        # parsing table header/footer (drawn later):
        if self.thead is not None:
            self.theader.append(
                (
                    dict(
                        w=width,
                        h=height,
                        txt=data,
                        border=border,
                        new_x=XPos.RIGHT,
                        new_y=YPos.TOP,
                        align=align,
                    ),
                    bgcolor,
                )
            )
        if self.tfoot is not None:
            self.tfooter.append(
                (
                    dict(
                        w=width,
                        h=height,
                        txt=data,
                        border=border,
                        new_x=XPos.RIGHT,
                        new_y=YPos.TOP,
                        align=align,
                    ),
                    bgcolor,
                )
            )
        # check if reached end of page, add table footer and header:
        if self.tfooter:
            height += self.tfooter[0][0]["h"]
        if self.pdf.y + height > self.pdf.page_break_trigger and not self.th:
            self.output_table_footer()
            self.pdf.add_page(same=True)
            self.theader_out = self.tfooter_out = False
        if self.tfoot is None and self.thead is None:
            if not self.theader_out:
                self.output_table_header()
        self.box_shadow(width, height, bgcolor)
        # self.pdf.x may have shifted due to <img> inside <td>:
        self.pdf.set_x(self._td_x())
        LOGGER.debug(
            "td cell x=%d width=%d height=%d border=%s align=%s '%s'",
            self.pdf.x,
            width,
            height,
            border,
            align,
            data.replace("\n", "\\n"),
        )
        self.pdf.cell(
            width,
            height,
            data,
            border=border,
            align=align,
            new_x=XPos.RIGHT,
            new_y=YPos.TOP,
        )
    def _td_x(self):
        "Return the current table cell left side horizontal position"
        prev_cells_total_width = sum(
            self.width2unit(width)
            for width in self.table_col_width[: self.table_col_index]
        )
        return self.table_offset + prev_cells_total_width
    def _td_width(self):
        "Return the current table cell width"
        # pylint: disable=raise-missing-from
        if "width" in self.td:
            column_widths = [self.td["width"]]
        elif "colspan" in self.td:
            # A spanning cell is as wide as the columns it covers:
            i = self.table_col_index
            colspan = int(self.td["colspan"])
            column_widths = self.table_col_width[i : i + colspan]
        else:
            try:
                column_widths = [self.table_col_width[self.table_col_index]]
            except IndexError:
                raise ValueError(
                    f"Width not specified for table column {self.table_col_index},"
                    " unable to continue"
                )
        return sum(self.width2unit(width) for width in column_widths)
    def box_shadow(self, w, h, bgcolor):
        """Fill a w x h rectangle at the current position with bgcolor,
        restoring the previous fill color afterwards."""
        LOGGER.debug("box_shadow w=%d h=%d bgcolor=%s", w, h, bgcolor)
        if bgcolor:
            fill_color = self.pdf.fill_color
            self.pdf.set_fill_color(*bgcolor)
            self.pdf.rect(self.pdf.x, self.pdf.y, w, h, "F")
            self.pdf.fill_color = fill_color
    def output_table_header(self):
        """Draw the cells recorded while parsing <thead>, in bold,
        then mark the header as emitted for the current page."""
        if self.theader:
            b = self.style.get("b")
            self.pdf.set_x(self.table_offset)
            self.set_style("b", True)
            for celldict, bgcolor in self.theader:
                self.box_shadow(celldict["w"], celldict["h"], bgcolor)
                self.pdf.cell(**celldict)  # includes the border
            self.set_style("b", b)
            self.pdf.ln(self.theader[0][0]["h"])
            self.pdf.set_x(self.table_offset)
        # self.pdf.set_x(prev_x)
        self.theader_out = True
    def output_table_footer(self):
        """Draw the cells recorded while parsing <tfoot>, restoring the
        X position afterwards, and mark the footer as emitted."""
        if self.tfooter:
            x = self.pdf.x
            self.pdf.set_x(self.table_offset)
            for celldict, bgcolor in self.tfooter:
                self.box_shadow(celldict["w"], celldict["h"], bgcolor)
                self.pdf.cell(**celldict)
            self.pdf.ln(self.tfooter[0][0]["h"])
            self.pdf.set_x(x)
        if self.table.get("border"):
            self.output_table_sep()
        self.tfooter_out = True
    def output_table_sep(self):
        "Draw a horizontal line spanning the full table width at the current Y."
        x1 = self.pdf.x
        y1 = self.pdf.y
        width = sum(self.width2unit(length) for length in self.table_col_width)
        self.pdf.line(x1, y1, x1 + width, y1)
    def handle_starttag(self, tag, attrs):
        """HTMLParser callback for opening tags: update style/font/table
        state and emit layout commands on the PDF."""
        attrs = dict(attrs)
        LOGGER.debug("STARTTAG %s %s", tag, attrs)
        if tag in ("b", "i", "u"):
            self.set_style(tag, True)
        if tag == "a":
            self.href = attrs["href"]
        if tag == "br":
            self.pdf.ln(self.h)
        if tag == "p":
            self.pdf.ln(self.h)
            if attrs:
                self.align = attrs.get("align")
        # <h1>..<h6>: save the font state and switch to the heading size/color
        if tag in self.heading_sizes:
            self.font_stack.append((self.font_face, self.font_size, self.font_color))
            self.heading_level = int(tag[1:])
            hsize = self.heading_sizes[tag]
            self.pdf.set_text_color(150, 0, 0)
            self.set_font(size=hsize)
            self.pdf.ln(self.h)
            if attrs:
                self.align = attrs.get("align")
        if tag == "hr":
            # NOTE(review): <hr> triggers a page break here rather than
            # drawing a horizontal rule - confirm this is intended.
            self.pdf.add_page(same=True)
        if tag == "pre":
            self.font_stack.append((self.font_face, self.font_size, self.font_color))
            self.set_font("courier", 11)
        if tag == "blockquote":
            self.pdf.set_text_color(100, 0, 45)
            self.indent += 1
            self.pdf.ln(3)
        if tag == "ul":
            self.indent += 1
            self.bullet.append(self.ul_bullet_char)
        if tag == "ol":
            self.indent += 1
            self.bullet.append(0)  # int marker: numbered bullets
        if tag == "li":
            self.pdf.ln(self.h + 2)
            self.pdf.set_text_color(190, 0, 0)
            bullet = self.bullet[self.indent - 1]
            if not isinstance(bullet, str):
                # <ol> item: increment the stored counter and render "N. "
                bullet += 1
                self.bullet[self.indent - 1] = bullet
                bullet = f"{bullet}. "
            self.pdf.write(self.h, f"{' ' * self.li_tag_indent * self.indent}{bullet} ")
            self.set_text_color(*self.font_color)
        if tag == "font":
            # save previous font state:
            self.font_stack.append((self.font_face, self.font_size, self.font_color))
            if "color" in attrs:
                color = color_as_decimal(attrs["color"])
                self.font_color = color
            if "face" in attrs:
                face = attrs.get("face").lower()
                try:
                    self.pdf.set_font(face)
                    self.font_face = face
                except RuntimeError:
                    pass  # font not found, ignore
            if "size" in attrs:
                self.font_size = int(attrs.get("size"))
            self.set_font()
            self.set_text_color(*self.font_color)
        if tag == "table":
            self.table = {k.lower(): v for k, v in attrs.items()}
            if "width" not in self.table:
                self.table["width"] = "100%"
            if self.table["width"][-1] == "%":
                # Center the table horizontally within the page margins:
                w = self.pdf.w - self.pdf.r_margin - self.pdf.l_margin
                w *= int(self.table["width"][:-1]) / 100
                self.table_offset = (self.pdf.w - w) / 2
            self.table_col_width = []
            self.theader_out = self.tfooter_out = False
            self.theader = []
            self.tfooter = []
            self.thead = None
            self.tfoot = None
            self.pdf.ln()
        if tag == "tr":
            self.tr_index = 0 if self.tr_index is None else (self.tr_index + 1)
            self.tr = {k.lower(): v for k, v in attrs.items()}
            self.table_col_index = 0
            self.table_row_height = 0
            self.pdf.set_x(self.table_offset)
            # Adding an horizontal line separator between rows:
            if self.table_line_separators and self.tr_index > 0:
                self.output_table_sep()
        if tag == "td":
            self.td = {k.lower(): v for k, v in attrs.items()}
            # Column widths are collected from the first row that declares them:
            if "width" in self.td and self.table_col_index >= len(self.table_col_width):
                assert self.table_col_index == len(
                    self.table_col_width
                ), f"table_col_index={self.table_col_index} #table_col_width={len(self.table_col_width)}"
                self.table_col_width.append(self.td["width"])
            if attrs:
                self.align = attrs.get("align")
            self._only_imgs_in_td = False
        if tag == "th":
            self.td = {k.lower(): v for k, v in attrs.items()}
            self.th = True
            if "width" in self.td and self.table_col_index >= len(self.table_col_width):
                assert self.table_col_index == len(
                    self.table_col_width
                ), f"table_col_index={self.table_col_index} #table_col_width={len(self.table_col_width)}"
                self.table_col_width.append(self.td["width"])
        if tag == "thead":
            self.thead = {}
        if tag == "tfoot":
            self.tfoot = {}
        if tag == "img" and "src" in attrs:
            width = px2mm(attrs.get("width", 0))
            height = px2mm(attrs.get("height", 0))
            if self.pdf.y + height > self.pdf.page_break_trigger:
                self.pdf.add_page(same=True)
            y = self.pdf.get_y()
            if self.table_col_index is not None:
                self._only_imgs_in_td = True
                # <img> in a <td>: its width must not exceed the cell width:
                td_width = self._td_width()
                if not width or width > td_width:
                    if width:  # Preserving image aspect ratio:
                        height *= td_width / width
                    width = td_width
                x = self._td_x()
                if self.align and self.align[0].upper() == "C":
                    x += (td_width - width) / 2
            else:
                x = self.pdf.get_x()
                if self.align and self.align[0].upper() == "C":
                    x = self.pdf.w / 2 - width / 2
            LOGGER.debug(
                'image "%s" x=%d y=%d width=%d height=%d',
                attrs["src"],
                x,
                y,
                width,
                height,
            )
            self.pdf.image(
                self.image_map(attrs["src"]), x, y, width, height, link=self.href
            )
            self.pdf.set_x(x + width)
            if self.table_col_index is not None:
                # <img> in a <td>: we grow the cell height according to the image height:
                if height > self.table_row_height:
                    self.table_row_height = height
            else:
                self.pdf.set_y(y + height)
        if tag in ("b", "i", "u"):
            # NOTE(review): duplicate of the b/i/u handling at the top of this
            # method (harmless - set_style is idempotent) - confirm & deduplicate.
            self.set_style(tag, True)
        if tag == "center":
            self.align = "Center"
        if tag == "toc":
            self.pdf.insert_toc_placeholder(
                self.render_toc, pages=int(attrs.get("pages", 1))
            )
    def handle_endtag(self, tag):
        """HTMLParser callback for closing tags: restore the style/font
        state saved by handle_starttag and flush pending table output."""
        # Closing tag
        LOGGER.debug("ENDTAG %s", tag)
        if tag in self.heading_sizes:
            self.heading_level = None
            face, size, color = self.font_stack.pop()
            self.set_font(face, size)
            self.set_text_color(*color)
            self.pdf.ln(self.h)
            self.align = None
        if tag == "pre":
            face, size, color = self.font_stack.pop()
            self.set_font(face, size)
            self.set_text_color(*color)
        if tag == "blockquote":
            self.set_text_color(*self.font_color)
            self.indent -= 1
            self.pdf.ln(3)
        # NOTE(review): </strong> & </em> are mapped to b/i here, but
        # handle_starttag has no matching mapping for the opening tags - confirm.
        if tag == "strong":
            tag = "b"
        if tag == "em":
            tag = "i"
        if tag in ("b", "i", "u"):
            self.set_style(tag, False)
        if tag == "a":
            self.href = ""
        if tag == "p":
            self.pdf.ln(self.h)
            self.align = ""
        if tag in ("ul", "ol"):
            self.indent -= 1
            self.bullet.pop()
        if tag == "table":
            if not self.tfooter_out:
                self.output_table_footer()
            self.table = None
            self.th = False
            self.theader = None
            self.tfooter = None
            self.pdf.ln(self.h)
            self.tr_index = None
        if tag == "thead":
            self.thead = None
            self.tr_index = None
        if tag == "tfoot":
            self.tfoot = None
            self.tr_index = None
        if tag == "tbody":
            # NOTE(review): self.tbody is never initialized in __init__ nor
            # set in handle_starttag - this assignment looks vestigial; confirm.
            self.tbody = None
            self.tr_index = None
        if tag == "tr":
            if self.tfoot is None:
                self.pdf.ln(self.table_row_height)
            self.table_col_index = None
            self.tr = None
        if tag in ("td", "th"):
            if self.th:
                LOGGER.debug("revert style")
                self.set_style("b", False)  # revert style
            elif self._only_imgs_in_td:
                # Cell contained only images: no handle_data call happened,
                # so the cell frame/background still has to be drawn:
                self._insert_td()
            self.table_col_index += int(self.td.get("colspan", "1"))
            self.td = None
            self.th = False
        if tag == "font":
            # recover last font state
            face, size, color = self.font_stack.pop()
            self.font_color = color
            self.set_font(face, size)
            self.set_text_color(*self.font_color)
        if tag == "center":
            self.align = None
    def set_font(self, face=None, size=None):
        """Update the tracked font face/size (and line height self.h when a
        size is given), then apply face+style+size to the PDF if changed."""
        if face:
            self.font_face = face
        if size:
            self.font_size = size
            # Line height in mm for a font size given in points:
            self.h = size / 72 * 25.4
            LOGGER.debug("H %s", self.h)
        style = "".join(s for s in ("b", "i", "u") if self.style.get(s)).upper()
        if (self.font_face, style) != (self.pdf.font_family, self.pdf.font_style):
            self.pdf.set_font(self.font_face, style, self.font_size)
        if self.font_size != self.pdf.font_size:
            self.pdf.set_font_size(self.font_size)
    def set_style(self, tag=None, enable=False):
        # Modify style and select corresponding font
        if tag:
            self.style[tag.lower()] = enable
        style = "".join(s for s in ("b", "i", "u") if self.style.get(s))
        LOGGER.debug("SET_FONT_STYLE %s", style)
        self.pdf.set_font(style=style)
    def set_text_color(self, r=None, g=0, b=0):
        "Forward a text color change to the underlying FPDF instance."
        self.pdf.set_text_color(r, g, b)
    def put_link(self, txt):
        # Put a hyperlink
        self.set_text_color(0, 0, 255)
        self.set_style("u", True)
        self.pdf.write(self.h, txt, self.href)
        self.set_style("u", False)
        self.set_text_color(*self.font_color)
    def render_toc(self, pdf, outline):
        "This method can be overriden by subclasses to customize the Table of Contents style."
        pdf.ln()
        for section in outline:
            link = pdf.add_link()
            pdf.set_link(link, page=section.page_number)
            # Dots leader padding between the section name and its page number:
            text = f'{" " * section.level * 2} {section.name}'
            text += f' {"." * (60 - section.level*2 - len(section.name))} {section.page_number}'
            pdf.multi_cell(
                w=pdf.epw,
                h=pdf.font_size,
                txt=text,
                new_x=XPos.LMARGIN,
                new_y=YPos.NEXT,
                link=link,
            )
    # Subclasses of _markupbase.ParserBase must implement this:
    def error(self, message):
        raise RuntimeError(message)
class HTMLMixin:
    """Mixin adding HTML rendering (`write_html`) to an FPDF subclass."""

    HTML2FPDF_CLASS = HTML2FPDF

    def write_html(self, text, *args, **kwargs):
        """Parse HTML and convert it to PDF.

        Args:
            text (str): HTML content (entities are unescaped before parsing)
            *args, **kwargs: forwarded to `HTML2FPDF_CLASS`; keyword arguments
                override same-named instance attributes.
        """
        # Bugfix: vars(self) returns the *live* instance __dict__, so updating
        # it in place used to permanently pollute the FPDF instance with the
        # method's keyword arguments. Work on a copy instead.
        kwargs2 = dict(vars(self))
        # Method arguments must override class & instance attributes:
        kwargs2.update(kwargs)
        h2p = self.HTML2FPDF_CLASS(self, *args, **kwargs2)
        text = html.unescape(text)  # To deal with HTML entities
        h2p.feed(text)
from abc import ABC
from binascii import hexlify
from codecs import BOM_UTF16_BE
import re
from .util import object_id_for_page
def clear_empty_fields(d):
    """Return a copy of dict *d* keeping only the entries with a truthy value."""
    result = {}
    for key, value in d.items():
        if value:
            result[key] = value
    return result
def create_dictionary_string(
    dict_,
    open_dict="<<",
    close_dict=">>",
    field_join="\n",
    key_value_join=" ",
    has_empty_fields=False,
):
    """format dictionary as PDF dictionary
    @param dict_: dictionary of values to render
    @param open_dict: string to open PDF dictionary
    @param close_dict: string to close PDF dictionary
    @param field_join: string to join fields with
    @param key_value_join: string to join key to value with
    @param has_empty_fields: whether or not to clear_empty_fields first.
    """
    if has_empty_fields:
        dict_ = clear_empty_fields(dict_)
    fields = field_join.join(
        f"{key}{key_value_join}{value}" for key, value in dict_.items()
    )
    return f"{open_dict}{fields}{close_dict}"
def create_list_string(list_):
    """format list of strings as PDF array"""
    return "[" + " ".join(list_) + "]"
def iobj_ref(n):
    """format an indirect PDF Object reference from its id number"""
    return str(n) + " 0 R"
def create_stream(stream):
    """Wrap *stream* content between the "stream"/"endstream" PDF keywords.
    Binary content is decoded as latin-1 so every byte maps to one char."""
    if isinstance(stream, (bytearray, bytes)):
        stream = str(stream, "latin-1")
    return f"stream\n{stream}\nendstream"
class Raw(str):
    """str subclass signifying raw data to be directly emitted to PDF without transformation."""
    # NOTE(review): carries no behavior of its own - serializers presumably
    # check isinstance(..., Raw) to skip escaping/encoding; confirm at call sites.
class Name(str):
    """str subclass signifying a PDF name, which are emitted differently than normal strings."""

    # Matches every byte that is NOT a printable ASCII "regular character"
    # (delimiters ()<>[]{}/% and the escape char # are excluded as well):
    NAME_ESC = re.compile(
        b"[^" + bytes(v for v in range(33, 127) if v not in b"()<>[]{}/%#\\") + b"]"
    )

    def pdf_repr(self) -> str:
        """Serialize as "/Name", escaping irregular bytes as #XX hex pairs."""
        sanitized = self.NAME_ESC.sub(
            lambda match: b"#%02X" % match[0][0], self.encode()
        )
        return "/" + sanitized.decode()
class PDFObject:
    """
    Main features of this class:
    * delay ID assignement
    * implement serializing
    To ensure consistency on how the serialize() method operates,
    child classes must define a __slots__ attribute.
    """
    # pylint: disable=redefined-builtin
    def __init__(self, id=None):
        self._id = id
    @property
    def id(self):
        # Fail loudly on first access instead of silently serializing "None 0 R":
        if self._id is None:
            raise AttributeError(
                f"{self.__class__.__name__} has not been assigned an ID yet"
            )
        return self._id
    @id.setter
    def id(self, n):
        self._id = n
    @property
    def ref(self):
        "Indirect reference to this object in PDF syntax, e.g. '3 0 R'"
        return iobj_ref(self.id)
    def serialize(self, fpdf=None, obj_dict=None):
        """
        Serialize this object as "N 0 obj << ... >> endobj" in PDF syntax.
        When a FPDF instance is given, output lines are appended to its
        buffer via fpdf._out (and an empty string is returned); otherwise
        the full serialization is returned as a string.
        """
        output = []
        if fpdf:
            # pylint: disable=protected-access
            appender = fpdf._out
            # _newobj() must hand out exactly the ID pre-assigned to this object:
            assert (
                fpdf._newobj() == self.id
            ), "Something went wrong in StructTree object IDs assignment"
        else:
            appender = output.append
        appender(f"{self.id} 0 obj")
        appender("<<")
        if not obj_dict:
            obj_dict = self._build_obj_dict()
        appender(create_dictionary_string(obj_dict, open_dict="", close_dict=""))
        appender(">>")
        appender("endobj")
        return "\n".join(output)
    def _build_obj_dict(self):
        """
        Build the PDF Object associative map to serialize,
        based on this class instance properties.
        The property names are converted from snake_case to CamelCase,
        and prefixed with a slash character "/".
        """
        return build_obj_dict({key: getattr(self, key) for key in dir(self)})
def build_obj_dict(key_values):
    """
    Build the PDF Object associative map to serialize, based on a key-values dict.
    The property names are converted from snake_case to CamelCase,
    and prefixed with a slash character "/".
    """
    obj_dict = {}
    for key, value in key_values.items():
        # Skip methods, private/bookkeeping attributes and unset values:
        skip = (
            callable(value)
            or value is None
            or key in ("id", "ref")
            or key.startswith("_")
        )
        if skip:
            continue
        if hasattr(value, "value"):  # e.g. Enum subclass
            value = value.value
        if isinstance(value, PDFObject):  # indirect object reference
            value = value.ref
        elif hasattr(value, "pdf_repr"):  # e.g. Name
            value = value.pdf_repr()
        elif hasattr(value, "serialize"):  # e.g. PDFArray & PDFString
            value = value.serialize()
        elif isinstance(value, bool):
            value = str(value).lower()
        obj_dict[f"/{camel_case(key)}"] = value
    return obj_dict
def camel_case(snake_case):
    """Convert a snake_case identifier into CamelCase."""
    return snake_case.title().replace("_", "")
class PDFString(str):
    # When True, strings are emitted in the "Hexadecimal String" form.
    # Setting this to False can reduce the encoded strings size,
    # but then there can be a risk of badly encoding some unicode strings - cf. issue #458
    USE_HEX_ENCODING = True

    def serialize(self):
        """Serialize as a PDF string: hexadecimal "<...>" form by default,
        literal "(...)" form otherwise."""
        if not self.USE_HEX_ENCODING:
            return f'({self.encode("UTF-16").decode("latin-1")})'
        # Using the "Hexadecimal String" format defined in the PDF spec:
        encoded = BOM_UTF16_BE + self.encode("utf-16-be")
        return "<" + hexlify(encoded).decode("latin-1") + ">"
class PDFArray(list):
    def serialize(self):
        """Serialize as a PDF array: object references one per line,
        or plain integers space-separated."""
        if all(isinstance(item, PDFObject) for item in self):
            inner = "\n".join(item.ref for item in self)
        elif all(isinstance(item, int) for item in self):
            inner = " ".join(str(item) for item in self)
        else:
            raise NotImplementedError(f"PDFArray.serialize with self={self}")
        return f"[{inner}]"
# cf. section 8.2.1 "Destinations" of the 2006 PDF spec 1.7:
class Destination(ABC):
    """Abstract base class for PDF destinations (explicit link targets)."""
    def as_str(self, pdf=None):
        """
        Serialize this destination in PDF syntax.
        Args:
            pdf (FPDF): optional document instance, used by subclasses to
                convert coordinates from document units into points.
        """
        raise NotImplementedError
class DestinationXYZ(Destination):
    """A "/XYZ" destination: target page plus left/top coordinates and zoom."""

    def __init__(self, page, x=0, y=0, zoom="null", page_as_obj_id=True):
        self.page = page
        self.x = x
        self.y = y
        self.zoom = zoom
        # When True, the page is serialized as an indirect object reference;
        # otherwise it is emitted as-is (e.g. a page index):
        self.page_as_obj_id = page_as_obj_id

    def __repr__(self):
        return f'DestinationXYZ(page={self.page}, x={self.x}, y={self.y}, zoom="{self.zoom}", page_as_obj_id={self.page_as_obj_id})'

    def as_str(self, pdf=None):
        """Serialize as a "[page /XYZ left top zoom]" PDF destination array."""
        if pdf:
            # Scale to points and flip the Y axis (PDF origin is bottom-left):
            left = self.x * pdf.k
            top = pdf.h_pt - self.y * pdf.k
        else:
            left = self.x
            top = self.y
        if isinstance(left, float):
            left = round(left, 2)
        if isinstance(top, float):
            top = round(top, 2)
        if self.page_as_obj_id:
            page = iobj_ref(object_id_for_page(self.page))
        else:
            page = self.page
        return f"[{page} /XYZ {left} {top} {self.zoom}]"
from typing import NamedTuple, Optional
from .syntax import Destination, PDFObject, PDFString
from .structure_tree import StructElem
class OutlineSection(NamedTuple):
    """A single entry of the document outline (bookmarks)."""

    name: str
    # Fixed annotation: the nesting level is used as an integer
    # (cf. build_outline: "section.level - 1"), not a string.
    level: int
    page_number: int
    dest: Destination
    struct_elem: Optional[StructElem] = None
class OutlineItemDictionary(PDFObject):
    """A PDF outline item ("bookmark") dictionary."""
    # cf. PDFObject docstring: child classes must define __slots__
    __slots__ = (
        "_id",
        "title",
        "parent",
        "prev",
        "next",
        "first",
        "last",
        "count",
        "dest",
        "struct_elem",
    )
    def __init__(
        self, title: str, dest: str = None, struct_elem: StructElem = None, **kwargs
    ):
        """
        Args:
            title (str): text displayed for this outline entry
            dest (str): serialized destination (cf. Destination.as_str)
            struct_elem (StructElem): optional related structure tree element
        """
        super().__init__(**kwargs)
        self.title = PDFString(title)
        # Hierarchy links, assigned later by build_outline:
        self.parent = None
        self.prev = None
        self.next = None
        self.first = None
        self.last = None
        self.count = 0  # number of direct children (cf. build_outline)
        self.dest = dest
        self.struct_elem = struct_elem
class OutlineDictionary(PDFObject):
    """The PDF outline root dictionary (/Type /Outlines)."""
    # cf. PDFObject docstring: child classes must define __slots__
    __slots__ = ("_id", "type", "first", "last", "count")
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.type = "/Outlines"
        # First & last top-level outline items, assigned by build_outline:
        self.first = None
        self.last = None
        self.count = 0  # number of direct children (cf. build_outline)
def serialize_outline(sections, first_object_id=1, fpdf=None):
    """
    Assign object IDs to the whole outline hierarchy and return its
    serialization as a multi-lines string in PDF syntax.
    ID assignment starts at `first_object_id`, which is given to the
    Outlines root object; the remaining IDs are handed out in an
    arbitrary order, but all are assigned before output generation
    starts, since the objects heavily cross-reference each other.
    If a FPDF instance is provided, its `_newobj` & `_out` methods are
    called and this function's return value is meaningless.
    Args:
        sections (sequence): list of OutlineSection
    """
    root, items = build_outline(sections, first_object_id, fpdf)
    return outline_as_str(root, items, fpdf)
def build_outline(sections, first_object_id, fpdf):
    """
    Build the Outlines root dictionary plus one OutlineItemDictionary per
    section, linked through their parent/prev/next/first/last references.
    Returns an (outline, outline_items) tuple.
    """
    outline = OutlineDictionary(id=first_object_id)
    n = first_object_id + 1
    outline_items = []
    # Maps a nesting level to the last item created at that level; used to
    # link same-level siblings and to find a new item's parent:
    last_outline_item_per_level = {}
    for section in sections:
        outline_item = OutlineItemDictionary(
            title=section.name,
            dest=section.dest.as_str(fpdf),
            struct_elem=section.struct_elem,
            id=n,
        )
        n += 1
        if section.level in last_outline_item_per_level:
            # Chain with the previous sibling at the same level:
            last_outline_item_at_level = last_outline_item_per_level[section.level]
            last_outline_item_at_level.next = outline_item
            outline_item.prev = last_outline_item_at_level
        if section.level - 1 in last_outline_item_per_level:
            parent_outline_item = last_outline_item_per_level[section.level - 1]
        else:
            # No enclosing section: attach directly to the outline root
            parent_outline_item = outline
        outline_item.parent = parent_outline_item
        if parent_outline_item.first is None:
            parent_outline_item.first = outline_item
        parent_outline_item.last = outline_item
        parent_outline_item.count += 1
        outline_items.append(outline_item)
        last_outline_item_per_level[section.level] = outline_item
        # Drop deeper levels so that a later item cannot be linked as a
        # sibling of an item belonging to a previous, now-closed subtree:
        last_outline_item_per_level = {
            level: oitem
            for level, oitem in last_outline_item_per_level.items()
            if level <= section.level
        }
    return outline, outline_items
def outline_as_str(outline, outline_items, fpdf):
    """Serialize the outline root followed by every outline item,
    joined by newlines."""
    serialized = [outline.serialize(fpdf)]
    serialized.extend(item.serialize(fpdf) for item in outline_items)
    return "\n".join(serialized)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.