repository_name
stringclasses 316
values | func_path_in_repository
stringlengths 6
223
| func_name
stringlengths 1
134
| language
stringclasses 1
value | func_code_string
stringlengths 57
65.5k
| func_documentation_string
stringlengths 1
46.3k
| split_name
stringclasses 1
value | func_code_url
stringlengths 91
315
| called_functions
listlengths 1
156
⌀ | enclosing_scope
stringlengths 2
1.48M
|
|---|---|---|---|---|---|---|---|---|---|
basilfx/flask-daapserver
|
examples/SoundcloudServer.py
|
download_file
|
python
|
def download_file(url, file_name):
    """
    Helper for downloading a remote file to disk.

    The file is only downloaded if `file_name` does not exist yet.

    :param str url: URL to download.
    :param str file_name: Target path on disk.
    :return: Number of bytes written (zero if the file already existed and
        nothing was downloaded).
    :rtype: int
    :raises Exception: if the HTTP response indicates failure.
    """

    logger.info("Downloading URL: %s", url)

    file_size = 0

    if not os.path.isfile(file_name):
        response = requests.get(url, stream=True)

        # Verify the response before creating the target file. Opening the
        # file first would leave an empty file behind on failure, which makes
        # every subsequent call think the download already succeeded.
        if not response.ok:
            raise Exception("Download exception. Will fail.")

        with open(file_name, "wb") as fp:
            for block in response.iter_content(1024):
                if not block:
                    break

                fp.write(block)
                file_size += len(block)

        logger.info("Download finished, size is %d bytes.", file_size)

    return file_size
|
Helper for downloading a remote file to disk.
|
train
|
https://github.com/basilfx/flask-daapserver/blob/ca595fcbc5b657cba826eccd3be5cebba0a1db0e/examples/SoundcloudServer.py#L129-L153
| null |
from daapserver.models import Server, Database, Item, Container, ContainerItem
from daapserver import DaapServer, provider
import os
import sys
import gevent
import logging
import tempfile
import requests
import soundcloud
# Logger instance
logger = logging.getLogger(__name__)
class RemoteItem(Item):
    """
    A standard Item does not have a field for item URL and artwork URL. This
    class extends Item by adding the fields. Note that `__slots__` is used for
    memory efficiency.
    """

    # Extra per-item attributes: the track's stream URL and the URL of its
    # artwork image. Extending `Item.__slots__` keeps instances compact
    # (no per-instance `__dict__`).
    __slots__ = Item.__slots__ + ("file_url", "album_art_url")
class SoundcloudProvider(provider.LocalFileProvider):
    """
    Provide a quick-and-dirty in-memory content provider that uses Soundcloud
    as backend for providing data.

    This provider is not efficient, as it will download the file before use.
    Long tracks take some time before they actually start playing.
    Furthermore, this provider does not cleanup files after server exits.
    """

    def __init__(self, client_id, usernames):
        """
        Construct a new SoundcloudProvider.

        :param str client_id: Soundcloud API client id.
        :param usernames: Iterable of Soundcloud usernames to index.
        """

        super(SoundcloudProvider, self).__init__()

        # It's important that `self.server' is initialized, since it is used
        # throughout the class.
        self.server = server = Server(name="DAAPServer")

        # Add example data to the library. Note that everything should be
        # added in the right order. For instance, you cannot add an item to a
        # database that has not been added to a server yet.
        self.database = database = Database(id=1, name="Soundcloud Library")
        server.databases.add(database)

        self.container = container = Container(
            id=1, name="My Music", is_base=True)
        database.containers.add(container)

        # Prepare Soundcloud connection.
        self.temp_directory = tempfile.mkdtemp()
        self.client = soundcloud.Client(client_id=client_id)

        # Fetch tracks, asynchronous.
        gevent.spawn(self.get_tracks, usernames)

        # Inform provider that the structure is ready.
        self.update()

    def get_tracks(self, usernames):
        """
        Fetch the tracks of the given usernames and add them to the library.

        :param usernames: Iterable of Soundcloud usernames.
        """

        logger.info("Fetching tracks for usernames: %s", usernames)

        for username in usernames:
            logger.info("Fetching tracks for user '%s'", username)

            try:
                tracks = self.client.get("/users/%s/tracks" % username)
                logger.info(
                    "Found %d tracks for user '%s'.", len(tracks), username)
            except Exception:
                # Only skip failed API calls -- a bare `except:` would also
                # swallow greenlet kill exceptions.
                logger.warning("Failed to get tracks for user '%s'", username)
                continue

            for index, track in enumerate(tracks):
                track = track.obj

                item = RemoteItem(
                    id=track["id"], artist=track["user"].get("username"),
                    name=track.get("title"), duration=track.get("duration"),
                    file_type="audio/mp3", file_suffix="mp3",
                    file_url="/tracks/%d/stream" % track["id"],
                    album_art_url=track["user"].get("avatar_url"),
                    album_art=True)
                container_item = ContainerItem(
                    id=len(self.container.container_items) + 1,
                    container_id=self.container.id,
                    item_id=item.id)

                # Add item to database
                self.database.items.add(item)
                self.container.container_items.add(container_item)

        # The server and database have to be re-added so they are marked as
        # updated.
        self.server.databases.add(self.database)
        self.database.containers.add(self.container)

        # Inform provider of new tracks.
        self.update()

    def get_item_data(self, session, item, byte_range=None):
        """
        Download the item's audio file (if not yet on disk) and stream it
        from disk.
        """

        item.file_name = os.path.join(self.temp_directory, "%s.mp3" % item.id)
        file_size = download_file(
            self.client.get(item.file_url, allow_redirects=False).location,
            item.file_name)

        if file_size is not None:
            item.file_size = file_size

        # Stream actual item file from disk.
        return super(SoundcloudProvider, self).get_item_data(
            session, item, byte_range)

    def get_artwork_data(self, session, item):
        """
        Download the item's artwork (if not yet on disk) and stream it from
        disk.
        """

        item.album_art = os.path.join(self.temp_directory, "%s.jpg" % item.id)

        # Replacing https:// is just a workaround SSL issues with OpenSSL and
        # requests problems.
        download_file(
            item.album_art_url.replace("https://", "http://"), item.album_art)

        # Stream actual artwork file from disk.
        return super(SoundcloudProvider, self).get_artwork_data(session, item)
def main():
    """
    Script entry point: configure logging, validate the command line and run
    a DAAP server backed by a Soundcloud provider.
    """

    log_format = "%(asctime)s - %(levelname)s - %(name)s - %(message)s"
    logging.basicConfig(level=logging.DEBUG, format=log_format)

    # Require at least a client id and one username.
    if len(sys.argv) < 3:
        usage = "%s: <client_id> <user_1> .. <user_n>\n" % sys.argv[0]
        sys.stdout.write(usage)
        return 1

    # Create a server.
    soundcloud_provider = SoundcloudProvider(sys.argv[1], sys.argv[2:])
    server = DaapServer(
        provider=soundcloud_provider, port=3688, debug=True)

    # Start a server and wait until CTRL + C is pressed.
    server.serve_forever()


# E.g. `python SoundcloudServer.py <client_id> <user_1> .. <user_n>'
if __name__ == "__main__":
    sys.exit(main())
|
basilfx/flask-daapserver
|
daapserver/server.py
|
create_server_app
|
python
|
def create_server_app(provider, password=None, cache=True, cache_timeout=3600,
                      debug=False):
    """
    Create a DAAP server, based around a Flask application. The server
    requires a content provider, server name and optionally, a password. The
    content provider should return raw object data.

    Object responses can be cached. This may dramatically speed up connections
    for multiple clients. However, this is only limited to objects, not file
    servings.

    Note: in case the server is mounted as a WSGI app, make sure the server
    passes the authorization header.
    """

    # Create Flask App
    app = Flask(__name__, static_folder=None)
    app.debug = debug

    # Setup cache
    if cache:
        # `isinstance` covers bool subclasses too; behavior is otherwise the
        # same as the old `type(cache) == bool` check.
        if isinstance(cache, bool):
            cache = SimpleCache()
        else:
            # Assume is a user-provided cache with a get-set method.
            pass
    else:
        cache = False

    #
    # Context-aware helpers and decorators
    #
    def daap_wsgi_app(func):
        """
        WSGI middleware which will modify the environment and strip 'daap://'
        from the path. This way, Flask can route the request properly.
        """

        @wraps(func)
        def _inner(environment, start_response):
            if environment["PATH_INFO"].startswith("daap://") or \
                    environment["PATH_INFO"].startswith("http://"):
                environment["PATH_INFO"] = "/" + \
                    environment["PATH_INFO"].split("/", 3)[3]
            return func(environment, start_response)
        return _inner
    app.wsgi_app = daap_wsgi_app(app.wsgi_app)

    def daap_trace(func):
        """
        Utility method for tracing function calls. Helps debugging malicious
        requests (e.g. protocol changes). Is only enabled when `debug` is
        True.

        Normally, exceptions are caught by Flask and handled as Bad Requests.
        Any debugging is therefore lost.
        """

        # Do not apply when debug is False.
        if not debug:
            return func

        @wraps(func)
        def _inner(*args, **kwargs):
            try:
                start = time.time()
                result = func(*args, **kwargs)
                logger.debug(
                    "Request handling took %.6f seconds",
                    time.time() - start)

                return result
            except:
                # Bare except on purpose: every exception is logged and
                # immediately re-raised, so nothing is swallowed.
                logger.exception(
                    "Caught exception before raising it to Flask.")
                raise

        return _inner

    def daap_unpack_args(func):
        """
        Strip query string arguments and add them to the method as keyword
        arguments. Since the query string keys are defined, values will be
        converted to their appropriate format. An exception will be thrown in
        case a requested argument is not available, or if the value could not
        be converted.
        """

        # Create a function specific mapping, only for arguments appearing in
        # the function declaration.
        args, _, _, _ = inspect.getargspec(func)
        mappings = [mapping for mapping in QS_MAPPING if mapping[1] in args]

        @wraps(func)
        def _inner(*args, **kwargs):
            for key, kwarg, casting in mappings:
                kwargs[kwarg] = casting(request.args[key])
            return func(*args, **kwargs)
        return _inner

    def daap_authenticate(func):
        """
        Check authorization header, if authorization is given. Returns 401
        response if the authentication failed.
        """

        # Do not apply when no password is set
        if not password:
            return func

        @wraps(func)
        def _inner(*args, **kwargs):
            auth = request.authorization

            if not auth or not auth.password == password:
                return Response(None, 401, {
                    "WWW-Authenticate": "Basic realm=\"%s\"" %
                    provider.server.name})
            return func(*args, **kwargs)
        return _inner
    app.authenticate = daap_authenticate

    def daap_cache_response(func):
        """
        Cache object responses if the cache has been initialized. The cache
        key is based on the request path and the semi-constant request
        arguments. The response is cached for as long as possible, which
        should not be a problem if the cache is cleared if the provider has
        new data.
        """

        # Do not apply when cache is False.
        if not cache:
            return func

        @wraps(func)
        def _inner(*args, **kwargs):
            # Create hash key via hashlib. We use MD5 since it is slightly
            # faster than SHA1. Note that we don't require cryptographically
            # strong hashes -- we just want to have a short and
            # computationally unique key.
            key = hashlib.md5()

            # Add basic info
            key.update(func.__name__)
            key.update(request.path)

            for k, v in request.args.iteritems():
                if k not in QS_IGNORE_CACHE:
                    key.update(v)

            # Hit the cache
            key = key.digest()
            value = cache.get(key)

            if value is None:
                value = func(*args, **kwargs)
                cache.set(key, value, timeout=cache_timeout)
            elif debug:
                logger.debug("Loaded response from cache.")
            return value
        return _inner

    #
    # Request handlers
    #
    @app.after_request
    def after_request(response):
        """
        Append default response headers, independent of the return type.
        """

        response.headers["DAAP-Server"] = provider.server.name
        response.headers["Content-Language"] = "en_us"
        response.headers["Accept-Ranges"] = "bytes"

        return response

    @app.route("/server-info", methods=["GET"])
    @daap_trace
    @daap_cache_response
    def server_info():
        """
        Serve the server info object.
        """

        data = responses.server_info(provider, provider.server.name, password)

        return ObjectResponse(data)

    @app.route("/content-codes", methods=["GET"])
    @daap_trace
    @daap_cache_response
    def content_codes():
        """
        Serve the content codes object.
        """

        data = responses.content_codes(provider)

        return ObjectResponse(data)

    @app.route("/login", methods=["GET"])
    @daap_trace
    @daap_authenticate
    def login():
        """
        Create a new session and serve the login object.
        """

        session_id = provider.create_session(
            user_agent=request.headers.get("User-Agent"),
            remote_address=request.remote_addr,
            client_version=request.headers.get(
                "Client-DAAP-Version"))
        data = responses.login(provider, session_id)

        return ObjectResponse(data)

    @app.route("/logout", methods=["GET"])
    @daap_trace
    @daap_authenticate
    @daap_unpack_args
    def logout(session_id):
        """
        Destroy the client's session.
        """

        provider.destroy_session(session_id)

        return Response(None, status=204)

    @app.route("/activity", methods=["GET"])
    @daap_trace
    @daap_authenticate
    @daap_unpack_args
    def activity(session_id):
        """
        Keep-alive endpoint; no action required.
        """

        return Response(None, status=200)

    @app.route("/update", methods=["GET"])
    @daap_trace
    @daap_authenticate
    @daap_unpack_args
    def update(session_id, revision, delta):
        """
        Serve the update object for the next revision.
        """

        revision = provider.get_next_revision(session_id, revision, delta)
        data = responses.update(provider, revision)

        return ObjectResponse(data)

    @app.route("/fp-setup", methods=["POST"])
    @daap_trace
    @daap_authenticate
    def fp_setup():
        """
        Fairplay validation, as sent by iTunes 11+. It will be unlikely this
        will be ever implemented.
        """

        raise NotImplementedError("Fairplay not supported.")

    @app.route("/databases", methods=["GET"])
    @daap_trace
    @daap_authenticate
    @daap_cache_response
    @daap_unpack_args
    def databases(session_id, revision, delta):
        """
        Serve the databases object, as a diff between two revisions.
        """

        new, old = provider.get_databases(session_id, revision, delta)
        added, removed, is_update = utils.diff(new, old)
        data = responses.databases(
            provider, new, old, added, removed, is_update)

        return ObjectResponse(data)

    @app.route(
        "/databases/<int:database_id>/items/<int:item_id>/extra_data/artwork",
        methods=["GET"])
    @daap_trace
    @daap_unpack_args
    def database_item_artwork(database_id, item_id, session_id):
        """
        Serve artwork data for a single item.
        """

        data, mimetype, total_length = provider.get_artwork(
            session_id, database_id, item_id)

        # Setup response
        response = Response(
            data, 200, mimetype=mimetype,
            direct_passthrough=not isinstance(data, basestring))

        if total_length:
            response.headers["Content-Length"] = total_length

        return response

    @app.route(
        "/databases/<int:database_id>/groups/<int:group_id>/extra_data/"
        "artwork", methods=["GET"])
    @daap_trace
    @daap_unpack_args
    def database_group_artwork(database_id, group_id, session_id, revision,
                               delta):
        """
        Group artwork is not supported by this server.
        """

        # Was `raise NotImplemented(...)`: `NotImplemented` is a constant,
        # not an exception, so raising it fails with a TypeError.
        raise NotImplementedError("Groups not supported.")

    @app.route(
        "/databases/<int:database_id>/items/<int:item_id>.<suffix>",
        methods=["GET"])
    @daap_trace
    @daap_unpack_args
    def database_item(database_id, item_id, suffix, session_id):
        """
        Serve (a byte range of) a single item's file data.
        """

        range_header = request.headers.get("Range", None)

        if range_header:
            begin, end = http.parse_range_header(range_header).ranges[0]
            data, mimetype, total_length = provider.get_item(
                session_id, database_id, item_id, byte_range=(begin, end))
            begin, end = (begin or 0), (end or total_length)

            # Setup response
            response = Response(
                data, 206, mimetype=mimetype,
                direct_passthrough=not isinstance(data, basestring))

            # A streaming response with unknown content length, Range x-*
            # as per RFC2616 section 14.16
            if total_length <= 0:
                response.headers["Content-Range"] = "bytes %d-%d/*" % (
                    begin, end - 1)
            elif total_length > 0:
                response.headers["Content-Range"] = "bytes %d-%d/%d" % (
                    begin, end - 1, total_length)
                response.headers["Content-Length"] = end - begin
        else:
            data, mimetype, total_length = provider.get_item(
                session_id, database_id, item_id)

            # Setup response
            response = Response(
                data, 200, mimetype=mimetype,
                direct_passthrough=not isinstance(data, basestring))

            if total_length > 0:
                response.headers["Content-Length"] = total_length

        return response

    @app.route("/databases/<int:database_id>/items", methods=["GET"])
    @daap_trace
    @daap_authenticate
    @daap_cache_response
    @daap_unpack_args
    def database_items(database_id, session_id, revision, delta, type):
        """
        Serve the items of a database, as a diff between two revisions.
        """

        new, old = provider.get_items(session_id, database_id, revision, delta)
        added, removed, is_update = utils.diff(new, old)
        data = responses.items(provider, new, old, added, removed, is_update)

        return ObjectResponse(data)

    @app.route("/databases/<int:database_id>/containers", methods=["GET"])
    @daap_trace
    @daap_authenticate
    @daap_cache_response
    @daap_unpack_args
    def database_containers(database_id, session_id, revision, delta):
        """
        Serve the containers of a database, as a diff between two revisions.
        """

        new, old = provider.get_containers(
            session_id, database_id, revision, delta)
        added, removed, is_update = utils.diff(new, old)
        data = responses.containers(
            provider, new, old, added, removed, is_update)

        return ObjectResponse(data)

    @app.route("/databases/<int:database_id>/groups", methods=["GET"])
    @daap_trace
    @daap_authenticate
    @daap_cache_response
    @daap_unpack_args
    def database_groups(database_id, session_id, revision, delta, type):
        """
        Groups are not supported by this server.
        """

        raise NotImplementedError("Groups not supported.")

    @app.route(
        "/databases/<int:database_id>/containers/<int:container_id>/items",
        methods=["GET"])
    @daap_trace
    @daap_authenticate
    @daap_cache_response
    @daap_unpack_args
    def database_container_item(database_id, container_id, session_id,
                                revision, delta):
        """
        Serve the items of a container, as a diff between two revisions.
        """

        new, old = provider.get_container_items(
            session_id, database_id, container_id, revision, delta)
        added, removed, is_update = utils.diff(new, old)
        data = responses.container_items(
            provider, new, old, added, removed, is_update)

        return ObjectResponse(data)

    # Return the app
    return app
|
Create a DAAP server, based around a Flask application. The server requires
a content provider, server name and optionally, a password. The content
provider should return raw object data.
Object responses can be cached. This may dramatically speed up connections
for multiple clients. However, this is only limited to objects, not file
servings.
Note: in case the server is mounted as a WSGI app, make sure the server
passes the authorization header.
|
train
|
https://github.com/basilfx/flask-daapserver/blob/ca595fcbc5b657cba826eccd3be5cebba0a1db0e/daapserver/server.py#L49-L472
|
[
"def daap_wsgi_app(func):\n \"\"\"\n WSGI middleware which will modify the environment and strip 'daap://'\n from the path. This way, Flask can route the request properly.\n \"\"\"\n\n @wraps(func)\n def _inner(environment, start_response):\n if environment[\"PATH_INFO\"].startswith(\"daap://\") or \\\n environment[\"PATH_INFO\"].startswith(\"http://\"):\n environment[\"PATH_INFO\"] = \"/\" + \\\n environment[\"PATH_INFO\"].split(\"/\", 3)[3]\n return func(environment, start_response)\n return _inner\n",
"def daap_trace(func):\n \"\"\"\n Utility method for tracing function calls. Helps debugging malicious\n requests (e.g. protocol changes). Is only enabled when `debug` is True.\n Normally, exceptions are caught by Flask and handled as Bad Requests.\n Any debugging is therefore lost.\n \"\"\"\n\n # Do not apply when debug is False.\n if not debug:\n return func\n\n @wraps(func)\n def _inner(*args, **kwargs):\n try:\n start = time.time()\n result = func(*args, **kwargs)\n logger.debug(\n \"Request handling took %.6f seconds\",\n time.time() - start)\n\n return result\n except:\n logger.exception(\n \"Caught exception before raising it to Flask.\")\n raise\n\n return _inner\n",
"def daap_unpack_args(func):\n \"\"\"\n Strip query string arguments and add them to the method as keyword\n arguments. Since the query string keys are defined, values will be\n converted to their approriate format. An exception will be thrown in\n case a requested argument is not available, or if the value could not\n be converted.\n \"\"\"\n\n # Create a function specific mapping, only for arguments appearing in\n # the function declaration.\n args, _, _, _ = inspect.getargspec(func)\n mappings = [mapping for mapping in QS_MAPPING if mapping[1] in args]\n\n @wraps(func)\n def _inner(*args, **kwargs):\n for key, kwarg, casting in mappings:\n kwargs[kwarg] = casting(request.args[key])\n return func(*args, **kwargs)\n return _inner\n",
"def daap_authenticate(func):\n \"\"\"\n Check authorization header, if authorization is given. Returns 401\n response if the authentication failed.\n \"\"\"\n\n # Do not apply when no password is set\n if not password:\n return func\n\n @wraps(func)\n def _inner(*args, **kwargs):\n auth = request.authorization\n\n if not auth or not auth.password == password:\n return Response(None, 401, {\n \"WWW-Authenticate\": \"Basic realm=\\\"%s\\\"\" %\n provider.server.name})\n return func(*args, **kwargs)\n return _inner\n",
"def daap_cache_response(func):\n \"\"\"\n Cache object responses if the cache has been initialized. The cache key\n is based on the request path and the semi-constant request arguments.\n The response is caches for as long as possible, which should not be a\n problem if the cache is cleared if the provider has new data.\n \"\"\"\n\n # Do not apply when cache is False.\n if not cache:\n return func\n\n @wraps(func)\n def _inner(*args, **kwargs):\n # Create hash key via hashlib. We use MD5 since it is slightly\n # faster than SHA1. Note that we don't require cryptographically\n # strong hashes -- we just want to have a short and computationally\n # unique key.\n key = hashlib.md5()\n\n # Add basic info\n key.update(func.__name__)\n key.update(request.path)\n\n for k, v in request.args.iteritems():\n if k not in QS_IGNORE_CACHE:\n key.update(v)\n\n # Hit the cache\n key = key.digest()\n value = cache.get(key)\n\n if value is None:\n value = func(*args, **kwargs)\n cache.set(key, value, timeout=cache_timeout)\n elif debug:\n logger.debug(\"Loaded response from cache.\")\n return value\n return _inner\n"
] |
from daapserver import responses, utils
from flask import Flask, Response, request
from werkzeug.contrib.cache import SimpleCache
from werkzeug import http
from functools import wraps
import hashlib
import inspect
import logging
import time
# Logger instance
logger = logging.getLogger(__name__)
# Mapping for query string arguments to function arguments. Used by the
# daap_unpack_args decorator.
QS_MAPPING = [
("session-id", "session_id", int),
("revision-number", "revision", int),
("delta", "delta", int),
("type", "type", str),
("meta", "meta", lambda x: x.split(","))
]
# Query string arguments ignored for generating a cache key. Used by the
# daap_cache. This makes sure that identical requests from different sessions
# will yield from cache.
QS_IGNORE_CACHE = [
"session-id",
]
class ObjectResponse(Response):
    """
    DAAP object response. Encodes a DAAPObject to raw bytes and sets the
    content type.
    """

    def __init__(self, data, *args, **kwargs):
        """
        Construct a new ObjectResponse.

        :param data: object exposing an `encode()` method that returns the
            raw response body (e.g. a DAAPObject).
        """

        # Set DAAP content type
        kwargs["mimetype"] = "application/x-dmap-tagged"

        # Instantiate response
        super(ObjectResponse, self).__init__(data.encode(), *args, **kwargs)
|
draperunner/fjlc
|
fjlc/preprocessing/filters/filters.py
|
Filters.string_chain
|
python
|
def string_chain(text, filters):
    """
    Chain several filters after each other, applies the filter on the entire
    string.

    :param text: String to format
    :param filters: Sequence of filters to apply on String
    :return: The formatted String
    """
    # No filters at all: pass the text through unchanged.
    if filters is None:
        return text

    result = text
    for apply_filter in filters:
        result = apply_filter(result)

    return result
|
Chain several filters after each other, applies the filter on the entire string
:param text: String to format
:param filters: Sequence of filters to apply on String
:return: The formatted String
|
train
|
https://github.com/draperunner/fjlc/blob/d2cc8cf1244984e7caf0bf95b11ed1677a94c994/fjlc/preprocessing/filters/filters.py#L25-L38
| null |
class Filters:
    """
    Collection of string/token level filters used during tweet preprocessing.

    Instances hold two filter pipelines: `string_filters` applied to the whole
    text, and `token_filters` applied per token.
    """

    # Placeholder tokens inserted in place of usernames, hashtags, retweet
    # tags and URLs.
    USERNAME_PLACEHOLDER = " ||username|| "
    HASHTAG_PLACEHOLDER = " ||hashtag|| "
    RTTAG_PLACEHOLDER = " ||rt|| "
    URL_PLACEHOLDER = " ||url|| "

    def __init__(self, string_filters, token_filters):
        self.string_filters = string_filters
        self.token_filters = token_filters

    def apply(self, text):
        """
        Apply the string filters to the entire text, then the token filters
        to each individual token; strip the result.
        """
        text = self.string_chain(text, self.string_filters)
        return self.token_chain(text, self.token_filters).strip()

    @staticmethod
    def string_chain(text, filters):
        """
        Chain several filters after each other, applies the filter on the entire string
        :param text: String to format
        :param filters: Sequence of filters to apply on String
        :return: The formatted String
        """
        if filters is None:
            return text

        for filter_function in filters:
            text = filter_function(text)

        return text

    @staticmethod
    def token_chain(text, filters):
        """
        Chain several filters after each other, applying filters only on non special class tokens as detected by
        {@link ClassifierOptions#isSpecialClassWord(String)}
        :param text: String to format
        :param filters: Sequence of filters to apply to tokens
        :return: The formatted String
        """
        if filters is None:
            return text

        sb = ""
        for token in RegexFilters.WHITESPACE.split(text):
            if not classifier_options.is_special_class_word(token):
                token = Filters.string_chain(token, filters)
            sb += token + " "

        return sb

    @staticmethod
    def html_unescape(text):
        """
        Returns HTML unescaped string
        :param text: String to format (f.ex. "&lt;3")
        :return: The formatted String (f.ex. "<3")
        """
        return html.unescape(text)

    @staticmethod
    def normalize_form(text):
        """
        Normalizes String to Latin characters if possible. WARNING: This also applies non-ASCII filter to the entire string
        :param text: String to format (f.ex. "A strîng wìth fúnny chäracters")
        :return: The formatted String (f.ex. "A string with funny characters")
        """
        return normalizr.remove_accent_marks(text)

    @staticmethod
    def remove_repeated_whitespace(text):
        """
        Removes repeated whitespace
        :param text: String to format (f.ex. "A string with maany spaces ")
        :return: The formatted String (f.ex. "A string with many spaces ")
        """
        return RegexFilters.replace_whitespace(text, " ")

    @staticmethod
    def parse_unicode_emojis_to_alias(text):
        """
        Uses {@link EmojiParser#parseFromUnicode(String, EmojiParser.EmojiTransformer)} to parse unicode emojis to ASCII
        string " ||emoji_alias|| ".
        :param text: String to format (f.ex. "Hey \uD83D\uDC66\uD83C\uDFFF!")
        :return: The formatted String (f.ex. "Hey ||boy|| !")
        """
        # return EmojiParser.parseFromUnicode(text, EMOJI_ALIAS_TRANSFORMER)
        return text

    @staticmethod
    def remove_unicode_emoticons(text):
        return normalizr.replace_emojis(text)

    @staticmethod
    def parse_emoticons(text):
        return RegexFilters.replace_emoticons(text, " ||$1|| ")

    @staticmethod
    def remove_emoticons(text):
        return RegexFilters.replace_emoticons(text, "")

    @staticmethod
    def remove_username(text):
        return RegexFilters.replace_username(text, "")

    @staticmethod
    def placeholder_username(text):
        return RegexFilters.replace_username(text, Filters.USERNAME_PLACEHOLDER)

    @staticmethod
    def remove_email(text):
        return re.sub(RegexFilters.TWITTER_EMAIL, "", text)

    @staticmethod
    def remove_hashtag(text):
        return RegexFilters.replace_hashtag(text, "")

    @staticmethod
    def placeholder_hashtag(text):
        return RegexFilters.replace_hashtag(text, Filters.HASHTAG_PLACEHOLDER)

    @staticmethod
    def hashtag_to_word(text):
        return RegexFilters.replace_hashtag(text, "$1")

    @staticmethod
    def protect_hashtag(text):
        return RegexFilters.replace_hashtag(text, " ||#$1|| ")

    @staticmethod
    def remove_rt_tag(text):
        return RegexFilters.replace_rt_tag(text, "")

    @staticmethod
    def placeholder_rt_tag(text):
        return RegexFilters.replace_rt_tag(text, Filters.RTTAG_PLACEHOLDER)

    @staticmethod
    def remove_url(text):
        return RegexFilters.replace_url(text, "")

    @staticmethod
    def placeholder_url(text):
        return RegexFilters.replace_url(text, Filters.URL_PLACEHOLDER)

    @staticmethod
    def remove_inner_word_characters(text):
        """
        Removes characters which are often part of a word (mostly apostrophes)
        :param text: String to format (f.ex. "Here's a sentence!")
        :return: The formatted String (f.ex. "Heres a sentence!")
        """
        return RegexFilters.replace_inner_word_characters(text, "")

    @staticmethod
    def remove_non_syntactical_text(text):
        """
        Removes all non-alphabetic or basic punctuation characters (!?,. )
        :param text: String to format (f.ex. "This is' a #crazy tæst")
        :return: The formatted String (f.ex. "This is a crazy tst")
        """
        return RegexFilters.replace_non_syntactical_text(text, " ")

    @staticmethod
    def remove_non_syntactical_text_plus(text):
        return RegexFilters.replace_non_syntactical_text_plus(text, " ")

    @staticmethod
    def remove_non_alphanumerical_text(text):
        """
        Removes non-alphanumerical characters
        :param text: String to format (f.ex "It's very nice!")
        :return: The formatted String (f.ex "It s very nice ")
        """
        return RegexFilters.replace_non_alphanumerical_text(text, " ")

    @staticmethod
    def remove_non_alphabetic_text(text):
        """
        Removes non alphabetic characters
        :param text: String to format (f.ex "Hey, m8!")
        :return: The formatted String (f.ex. "Hey m")
        """
        return RegexFilters.replace_non_alphabetic_text(text, "")

    @staticmethod
    def remove_free_digits(text):
        """
        Removes free standing digits (digits not part of a word)
        :param text: String to format (f.ex. "Only 90s kids will get this 1337 m8")
        :return: The formatted String (f.ex. "Only 90s kids will get this m8")
        """
        return RegexFilters.replace_free_digits(text, " ")

    @staticmethod
    def remove_repeating_characters(text):
        """
        Replaces repeating characters in String
        :param text: String to format (f.ex. "Today is a greeeeeeaaaaaaat dayy!")
        :return: The formatted String (f.ex. "Today is a great day!")
        """
        return RegexFilters.replace_repeating_characters(text, "$1")
|
draperunner/fjlc
|
fjlc/preprocessing/filters/filters.py
|
Filters.token_chain
|
python
|
def token_chain(text, filters):
    """
    Chain several filters after each other, applying filters only on non
    special class tokens as detected by
    {@link ClassifierOptions#isSpecialClassWord(String)}

    :param text: String to format
    :param filters: Sequence of filters to apply to tokens
    :return: The formatted String
    """
    if filters is None:
        return text

    pieces = []
    for token in RegexFilters.WHITESPACE.split(text):
        # Special class tokens (placeholders) pass through untouched.
        if not classifier_options.is_special_class_word(token):
            token = Filters.string_chain(token, filters)
        pieces.append(token + " ")

    return "".join(pieces)
|
Chain several filters after each other, applying filters only on non special class tokens as detected by
{@link ClassifierOptions#isSpecialClassWord(String)}
:param text: String to format
:param filters: Sequence of filters to apply to tokens
:return: The formatted String
|
train
|
https://github.com/draperunner/fjlc/blob/d2cc8cf1244984e7caf0bf95b11ed1677a94c994/fjlc/preprocessing/filters/filters.py#L41-L60
|
[
"def string_chain(text, filters):\n \"\"\"\n Chain several filters after each other, applies the filter on the entire string\n :param text: String to format\n :param filters: Sequence of filters to apply on String\n :return: The formatted String\n \"\"\"\n if filters is None:\n return text\n\n for filter_function in filters:\n text = filter_function(text)\n\n return text\n"
] |
class Filters:
    """
    Collection of string/token level filters used during tweet preprocessing.

    Instances hold two filter pipelines: `string_filters` applied to the whole
    text, and `token_filters` applied per token.
    """

    # Placeholder tokens inserted in place of usernames, hashtags, retweet
    # tags and URLs.
    USERNAME_PLACEHOLDER = " ||username|| "
    HASHTAG_PLACEHOLDER = " ||hashtag|| "
    RTTAG_PLACEHOLDER = " ||rt|| "
    URL_PLACEHOLDER = " ||url|| "

    def __init__(self, string_filters, token_filters):
        self.string_filters = string_filters
        self.token_filters = token_filters

    def apply(self, text):
        """
        Apply the string filters to the entire text, then the token filters
        to each individual token; strip the result.
        """
        text = self.string_chain(text, self.string_filters)
        return self.token_chain(text, self.token_filters).strip()

    @staticmethod
    def string_chain(text, filters):
        """
        Chain several filters after each other, applies the filter on the entire string
        :param text: String to format
        :param filters: Sequence of filters to apply on String
        :return: The formatted String
        """
        if filters is None:
            return text

        for filter_function in filters:
            text = filter_function(text)

        return text

    @staticmethod
    def token_chain(text, filters):
        """
        Chain several filters after each other, applying filters only on non special class tokens as detected by
        {@link ClassifierOptions#isSpecialClassWord(String)}
        :param text: String to format
        :param filters: Sequence of filters to apply to tokens
        :return: The formatted String
        """
        if filters is None:
            return text

        sb = ""
        for token in RegexFilters.WHITESPACE.split(text):
            if not classifier_options.is_special_class_word(token):
                token = Filters.string_chain(token, filters)
            sb += token + " "

        return sb

    @staticmethod
    def html_unescape(text):
        """
        Returns HTML unescaped string
        :param text: String to format (f.ex. "&lt;3")
        :return: The formatted String (f.ex. "<3")
        """
        return html.unescape(text)

    @staticmethod
    def normalize_form(text):
        """
        Normalizes String to Latin characters if possible. WARNING: This also applies non-ASCII filter to the entire string
        :param text: String to format (f.ex. "A strîng wìth fúnny chäracters")
        :return: The formatted String (f.ex. "A string with funny characters")
        """
        return normalizr.remove_accent_marks(text)

    @staticmethod
    def remove_repeated_whitespace(text):
        """
        Removes repeated whitespace
        :param text: String to format (f.ex. "A string with maany spaces ")
        :return: The formatted String (f.ex. "A string with many spaces ")
        """
        return RegexFilters.replace_whitespace(text, " ")

    @staticmethod
    def parse_unicode_emojis_to_alias(text):
        """
        Uses {@link EmojiParser#parseFromUnicode(String, EmojiParser.EmojiTransformer)} to parse unicode emojis to ASCII
        string " ||emoji_alias|| ".
        :param text: String to format (f.ex. "Hey \uD83D\uDC66\uD83C\uDFFF!")
        :return: The formatted String (f.ex. "Hey ||boy|| !")
        """
        # return EmojiParser.parseFromUnicode(text, EMOJI_ALIAS_TRANSFORMER)
        return text

    @staticmethod
    def remove_unicode_emoticons(text):
        return normalizr.replace_emojis(text)

    @staticmethod
    def parse_emoticons(text):
        return RegexFilters.replace_emoticons(text, " ||$1|| ")

    @staticmethod
    def remove_emoticons(text):
        return RegexFilters.replace_emoticons(text, "")

    @staticmethod
    def remove_username(text):
        return RegexFilters.replace_username(text, "")

    @staticmethod
    def placeholder_username(text):
        return RegexFilters.replace_username(text, Filters.USERNAME_PLACEHOLDER)

    @staticmethod
    def remove_email(text):
        return re.sub(RegexFilters.TWITTER_EMAIL, "", text)

    @staticmethod
    def remove_hashtag(text):
        return RegexFilters.replace_hashtag(text, "")

    @staticmethod
    def placeholder_hashtag(text):
        return RegexFilters.replace_hashtag(text, Filters.HASHTAG_PLACEHOLDER)

    @staticmethod
    def hashtag_to_word(text):
        return RegexFilters.replace_hashtag(text, "$1")

    @staticmethod
    def protect_hashtag(text):
        return RegexFilters.replace_hashtag(text, " ||#$1|| ")

    @staticmethod
    def remove_rt_tag(text):
        return RegexFilters.replace_rt_tag(text, "")

    @staticmethod
    def placeholder_rt_tag(text):
        return RegexFilters.replace_rt_tag(text, Filters.RTTAG_PLACEHOLDER)

    @staticmethod
    def remove_url(text):
        return RegexFilters.replace_url(text, "")

    @staticmethod
    def placeholder_url(text):
        return RegexFilters.replace_url(text, Filters.URL_PLACEHOLDER)

    @staticmethod
    def remove_inner_word_characters(text):
        """
        Removes characters which are often part of a word (mostly apostrophes)
        :param text: String to format (f.ex. "Here's a sentence!")
        :return: The formatted String (f.ex. "Heres a sentence!")
        """
        return RegexFilters.replace_inner_word_characters(text, "")

    @staticmethod
    def remove_non_syntactical_text(text):
        """
        Removes all non-alphabetic or basic punctuation characters (!?,. )
        :param text: String to format (f.ex. "This is' a #crazy tæst")
        :return: The formatted String (f.ex. "This is a crazy tst")
        """
        return RegexFilters.replace_non_syntactical_text(text, " ")

    @staticmethod
    def remove_non_syntactical_text_plus(text):
        return RegexFilters.replace_non_syntactical_text_plus(text, " ")

    @staticmethod
    def remove_non_alphanumerical_text(text):
        """
        Removes non-alphanumerical characters
        :param text: String to format (f.ex "It's very nice!")
        :return: The formatted String (f.ex "It s very nice ")
        """
        return RegexFilters.replace_non_alphanumerical_text(text, " ")

    @staticmethod
    def remove_non_alphabetic_text(text):
        """
        Removes non alphabetic characters
        :param text: String to format (f.ex "Hey, m8!")
        :return: The formatted String (f.ex. "Hey m")
        """
        return RegexFilters.replace_non_alphabetic_text(text, "")

    @staticmethod
    def remove_free_digits(text):
        """
        Removes free standing digits (digits not part of a word)
        :param text: String to format (f.ex. "Only 90s kids will get this 1337 m8")
        :return: The formatted String (f.ex. "Only 90s kids will get this m8")
        """
        return RegexFilters.replace_free_digits(text, " ")

    @staticmethod
    def remove_repeating_characters(text):
        """
        Replaces repeating characters in String
        :param text: String to format (f.ex. "Today is a greeeeeeaaaaaaat dayy!")
        :return: The formatted String (f.ex. "Today is a great day!")
        """
        return RegexFilters.replace_repeating_characters(text, "$1")
|
draperunner/fjlc
|
fjlc/classifier/sentence/lexical_parser.py
|
lexically_parse_tweet
|
python
|
def lexically_parse_tweet(tweet, phrase_tree):
lexical_tokens = []
prev = 0
while True:
match = RegexFilters.SENTENCE_END_PUNCTUATION.search(tweet[prev:])
if match is None:
break
span = match.span()
sentence = tweet[prev:prev + span[0]]
punctuation = match.group(0)
prev += span[1]
lexical_tokens.extend(parse_sentence(sentence, punctuation, phrase_tree))
lexical_tokens.extend(parse_sentence(tweet[prev:], None, phrase_tree))
return lexical_tokens
|
Returns list of LexicalTokens found in tweet. The list contains all the words in original tweet, but are
optimally grouped up to form largest matching n-grams from lexicon. If no match is found, token is added as
singleton.
@param tweet Tweet to lexically parse
@param phrase_tree Token tree that contains all the lexical n-grams
@return List of LexicalTokens
|
train
|
https://github.com/draperunner/fjlc/blob/d2cc8cf1244984e7caf0bf95b11ed1677a94c994/fjlc/classifier/sentence/lexical_parser.py#L10-L36
|
[
"def parse_sentence(sentence, punctuation, phrase_tree):\n sentence_tokens = RegexFilters.WHITESPACE.split(sentence)\n\n tokenized_sentence = phrase_tree.find_optimal_tokenization(sentence_tokens)\n tokens = list(map(lambda s: LexicalToken(s), tokenized_sentence))\n\n if len(tokens) > 0:\n tokens[len(tokens) - 1].set_at_end_of_sentence(True)\n\n if punctuation is not None and \"!\" in punctuation:\n exclamation_intensifier = classifier_options.get_variable(classifier_options.Variable.EXCLAMATION_INTENSIFIER)\n for token in tokens:\n token.intensify_token(exclamation_intensifier)\n\n elif punctuation is not None and \"?\" in punctuation:\n question_intensifier = classifier_options.get_variable(classifier_options.Variable.QUESTION_INTENSIFIER)\n for token in tokens:\n token.intensify_token(question_intensifier)\n\n return tokens\n"
] |
"""
import com.freva.masteroppgave.lexicon.container.TokenTrie;
"""
import fjlc.classifier.classifier_options as classifier_options
from fjlc.classifier.sentence.lexical_token import LexicalToken
from fjlc.preprocessing.filters.regex_filters import RegexFilters
def parse_sentence(sentence, punctuation, phrase_tree):
sentence_tokens = RegexFilters.WHITESPACE.split(sentence)
tokenized_sentence = phrase_tree.find_optimal_tokenization(sentence_tokens)
tokens = list(map(lambda s: LexicalToken(s), tokenized_sentence))
if len(tokens) > 0:
tokens[len(tokens) - 1].set_at_end_of_sentence(True)
if punctuation is not None and "!" in punctuation:
exclamation_intensifier = classifier_options.get_variable(classifier_options.Variable.EXCLAMATION_INTENSIFIER)
for token in tokens:
token.intensify_token(exclamation_intensifier)
elif punctuation is not None and "?" in punctuation:
question_intensifier = classifier_options.get_variable(classifier_options.Variable.QUESTION_INTENSIFIER)
for token in tokens:
token.intensify_token(question_intensifier)
return tokens
|
draperunner/fjlc
|
fjlc/lexicon/container/token_trie.py
|
TokenTrie.has_tokens
|
python
|
def has_tokens(self, phrase):
if len(phrase) == 1 and classifier_options.is_special_class_word(phrase[0]):
return True
tree = self.root
for token in phrase:
if not tree.has_child(token):
return False
tree = tree.get_child(token)
return True if tree.is_end_of_phrase() else None
|
Checks if phrase or sub-phrase exists in the tree.
If set of phrases contains phrases such as: "state", "of the" and "state of the art", look up on:
"state" returns true, "of" returns null, "of the art" returns false.
:param phrase: Phrase or sub-phrase to look up.
:type: phrase: list of str
:return: Returns true if phrase in its entirety is in the tree,
null if part of the phrase matches a larger tokenSequence,
false if phrases matches no other phrase entirely and not part any longer phrase.
|
train
|
https://github.com/draperunner/fjlc/blob/d2cc8cf1244984e7caf0bf95b11ed1677a94c994/fjlc/lexicon/container/token_trie.py#L27-L50
| null |
class TokenTrie:
def __init__(self, sentences):
"""
Creates a phrase trie for efficient sub-phrase look up
@param sentences List of Strings of all the phrases which are whitespace delimited n-grams
"""
self.root = TokenTrie.Node()
for sentence in sentences:
words = RegexFilters.WHITESPACE.split(sentence)
self.add_token_sequence(words)
def add_token_sequence(self, token_sequence):
tree = self.root
for token in token_sequence:
if not tree.has_child(token):
tree.add_child(token)
tree = tree.get_child(token)
tree.set_phrase_end(True)
def find_tracked_words(self, tokens):
"""
Finds word-ranges all of phrases in tokens stored in TokenTrie
:param tokens: Sequence of tokens to find phrases in
:type tokens: list of str
:return: List of Tokens found in tokens
"""
tracked_words = []
for i in range(len(tokens)):
for j in range(i + 1, len(tokens) + 1):
phrase = tokens[i:j]
status = self.has_tokens(phrase)
if status is not None:
if status is True:
tracked_words.append(TokenTrie.Token(phrase, i, j - 1))
elif status is False:
break
return tracked_words
def find_optimal_allocation(self, tokens):
"""
Finds longest, non-overlapping word-ranges of phrases in tokens stored in TokenTrie
:param tokens: tokens tokenize
:type tokens: list of str
:return: Optimal allocation of tokens to phrases
:rtype: list of TokenTrie.Token
"""
token_ranges = self.find_tracked_words(tokens)
token_ranges.sort()
for offset in range(1, len(token_ranges)):
to_be_removed = []
for candidate in token_ranges[offset:]:
for i in range(offset):
if token_ranges[i].overlaps_with(candidate):
to_be_removed.append(candidate)
break
token_ranges = [token for token in token_ranges if token not in to_be_removed]
token_ranges.sort(key=lambda token: token.get_start_index())
return token_ranges
def find_optimal_tokenization(self, tokens):
"""
Similar to {@link #findOptimalAllocation(String[])}, but also includes the words not matching any longer n-gram
in TokenTrie as singletons.
:param tokens: tokens to tokenize
:return: Optimal allocation of tokens to phrases, with non matching tokens as singletons.
"""
token_ranges = self.find_optimal_allocation(tokens)
tokenized_sentence = []
set_index = 0
for token in token_ranges:
while set_index < token.get_start_index():
tokenized_sentence.append(tokens[set_index])
set_index += 1
tokenized_sentence.append(" ".join(token.get_token_sequence()))
set_index = token.get_end_index() + 1
while set_index < len(tokens):
tokenized_sentence.append(tokens[set_index])
set_index += 1
return tokenized_sentence
@functools.total_ordering
class Token:
def __init__(self, token_sequence, start_index, end_index):
self.token_sequence = token_sequence
self.start_index = start_index
self.end_index = end_index
def get_token_sequence(self):
return self.token_sequence
def get_start_index(self):
return self.start_index
def get_end_index(self):
return self.end_index
def get_phrase_length(self):
return self.end_index - self.start_index
def overlaps_with(self, other):
"""
Checks if two phrases overlap
:param other: The other token_sequence
:return: True if overlap, False otherwise
"""
return self.get_start_index() <= other.get_end_index() and other.get_start_index() <= self.get_end_index()
def __lt__(self, other):
size_diff = other.get_phrase_length() - self.get_phrase_length()
diff = size_diff if size_diff != 0 else other.get_start_index() - self.get_start_index()
return diff < 0
def __eq__(self, other):
size_diff = other.get_phrase_length() - self.get_phrase_length()
diff = size_diff if size_diff != 0 else other.get_start_index() - self.get_start_index()
return diff == 0
def __str__(self):
return str(self.token_sequence)
def __repr__(self):
return "Token(" + str(self.start_index) + "," + str(self.end_index) + ")<" + str(self.token_sequence) + ">"
class Node:
def __init__(self):
self.children = {}
self.end_of_phrase = False
def has_child(self, value):
return value in self.children
def add_child(self, value):
self.children[value] = TokenTrie.Node()
def get_child(self, value):
return self.children[value]
def set_phrase_end(self, phrase_end):
self.end_of_phrase = phrase_end
def is_end_of_phrase(self):
return self.end_of_phrase
|
draperunner/fjlc
|
fjlc/lexicon/container/token_trie.py
|
TokenTrie.find_tracked_words
|
python
|
def find_tracked_words(self, tokens):
tracked_words = []
for i in range(len(tokens)):
for j in range(i + 1, len(tokens) + 1):
phrase = tokens[i:j]
status = self.has_tokens(phrase)
if status is not None:
if status is True:
tracked_words.append(TokenTrie.Token(phrase, i, j - 1))
elif status is False:
break
return tracked_words
|
Finds word-ranges all of phrases in tokens stored in TokenTrie
:param tokens: Sequence of tokens to find phrases in
:type tokens: list of str
:return: List of Tokens found in tokens
|
train
|
https://github.com/draperunner/fjlc/blob/d2cc8cf1244984e7caf0bf95b11ed1677a94c994/fjlc/lexicon/container/token_trie.py#L52-L73
|
[
"def has_tokens(self, phrase):\n \"\"\"\n Checks if phrase or sub-phrase exists in the tree.\n\n If set of phrases contains phrases such as: \"state\", \"of the\" and \"state of the art\", look up on:\n \"state\" returns true, \"of\" returns null, \"of the art\" returns false.\n\n :param phrase: Phrase or sub-phrase to look up.\n :type: phrase: list of str\n :return: Returns true if phrase in its entirety is in the tree,\n null if part of the phrase matches a larger tokenSequence,\n false if phrases matches no other phrase entirely and not part any longer phrase.\n \"\"\"\n\n if len(phrase) == 1 and classifier_options.is_special_class_word(phrase[0]):\n return True\n\n tree = self.root\n for token in phrase:\n if not tree.has_child(token):\n return False\n tree = tree.get_child(token)\n\n return True if tree.is_end_of_phrase() else None\n"
] |
class TokenTrie:
def __init__(self, sentences):
"""
Creates a phrase trie for efficient sub-phrase look up
@param sentences List of Strings of all the phrases which are whitespace delimited n-grams
"""
self.root = TokenTrie.Node()
for sentence in sentences:
words = RegexFilters.WHITESPACE.split(sentence)
self.add_token_sequence(words)
def add_token_sequence(self, token_sequence):
tree = self.root
for token in token_sequence:
if not tree.has_child(token):
tree.add_child(token)
tree = tree.get_child(token)
tree.set_phrase_end(True)
def has_tokens(self, phrase):
"""
Checks if phrase or sub-phrase exists in the tree.
If set of phrases contains phrases such as: "state", "of the" and "state of the art", look up on:
"state" returns true, "of" returns null, "of the art" returns false.
:param phrase: Phrase or sub-phrase to look up.
:type: phrase: list of str
:return: Returns true if phrase in its entirety is in the tree,
null if part of the phrase matches a larger tokenSequence,
false if phrases matches no other phrase entirely and not part any longer phrase.
"""
if len(phrase) == 1 and classifier_options.is_special_class_word(phrase[0]):
return True
tree = self.root
for token in phrase:
if not tree.has_child(token):
return False
tree = tree.get_child(token)
return True if tree.is_end_of_phrase() else None
def find_optimal_allocation(self, tokens):
"""
Finds longest, non-overlapping word-ranges of phrases in tokens stored in TokenTrie
:param tokens: tokens tokenize
:type tokens: list of str
:return: Optimal allocation of tokens to phrases
:rtype: list of TokenTrie.Token
"""
token_ranges = self.find_tracked_words(tokens)
token_ranges.sort()
for offset in range(1, len(token_ranges)):
to_be_removed = []
for candidate in token_ranges[offset:]:
for i in range(offset):
if token_ranges[i].overlaps_with(candidate):
to_be_removed.append(candidate)
break
token_ranges = [token for token in token_ranges if token not in to_be_removed]
token_ranges.sort(key=lambda token: token.get_start_index())
return token_ranges
def find_optimal_tokenization(self, tokens):
"""
Similar to {@link #findOptimalAllocation(String[])}, but also includes the words not matching any longer n-gram
in TokenTrie as singletons.
:param tokens: tokens to tokenize
:return: Optimal allocation of tokens to phrases, with non matching tokens as singletons.
"""
token_ranges = self.find_optimal_allocation(tokens)
tokenized_sentence = []
set_index = 0
for token in token_ranges:
while set_index < token.get_start_index():
tokenized_sentence.append(tokens[set_index])
set_index += 1
tokenized_sentence.append(" ".join(token.get_token_sequence()))
set_index = token.get_end_index() + 1
while set_index < len(tokens):
tokenized_sentence.append(tokens[set_index])
set_index += 1
return tokenized_sentence
@functools.total_ordering
class Token:
def __init__(self, token_sequence, start_index, end_index):
self.token_sequence = token_sequence
self.start_index = start_index
self.end_index = end_index
def get_token_sequence(self):
return self.token_sequence
def get_start_index(self):
return self.start_index
def get_end_index(self):
return self.end_index
def get_phrase_length(self):
return self.end_index - self.start_index
def overlaps_with(self, other):
"""
Checks if two phrases overlap
:param other: The other token_sequence
:return: True if overlap, False otherwise
"""
return self.get_start_index() <= other.get_end_index() and other.get_start_index() <= self.get_end_index()
def __lt__(self, other):
size_diff = other.get_phrase_length() - self.get_phrase_length()
diff = size_diff if size_diff != 0 else other.get_start_index() - self.get_start_index()
return diff < 0
def __eq__(self, other):
size_diff = other.get_phrase_length() - self.get_phrase_length()
diff = size_diff if size_diff != 0 else other.get_start_index() - self.get_start_index()
return diff == 0
def __str__(self):
return str(self.token_sequence)
def __repr__(self):
return "Token(" + str(self.start_index) + "," + str(self.end_index) + ")<" + str(self.token_sequence) + ">"
class Node:
def __init__(self):
self.children = {}
self.end_of_phrase = False
def has_child(self, value):
return value in self.children
def add_child(self, value):
self.children[value] = TokenTrie.Node()
def get_child(self, value):
return self.children[value]
def set_phrase_end(self, phrase_end):
self.end_of_phrase = phrase_end
def is_end_of_phrase(self):
return self.end_of_phrase
|
draperunner/fjlc
|
fjlc/lexicon/container/token_trie.py
|
TokenTrie.find_optimal_allocation
|
python
|
def find_optimal_allocation(self, tokens):
token_ranges = self.find_tracked_words(tokens)
token_ranges.sort()
for offset in range(1, len(token_ranges)):
to_be_removed = []
for candidate in token_ranges[offset:]:
for i in range(offset):
if token_ranges[i].overlaps_with(candidate):
to_be_removed.append(candidate)
break
token_ranges = [token for token in token_ranges if token not in to_be_removed]
token_ranges.sort(key=lambda token: token.get_start_index())
return token_ranges
|
Finds longest, non-overlapping word-ranges of phrases in tokens stored in TokenTrie
:param tokens: tokens tokenize
:type tokens: list of str
:return: Optimal allocation of tokens to phrases
:rtype: list of TokenTrie.Token
|
train
|
https://github.com/draperunner/fjlc/blob/d2cc8cf1244984e7caf0bf95b11ed1677a94c994/fjlc/lexicon/container/token_trie.py#L75-L98
|
[
"def find_tracked_words(self, tokens):\n \"\"\"\n Finds word-ranges all of phrases in tokens stored in TokenTrie\n\n :param tokens: Sequence of tokens to find phrases in\n :type tokens: list of str\n :return: List of Tokens found in tokens\n \"\"\"\n tracked_words = []\n\n for i in range(len(tokens)):\n for j in range(i + 1, len(tokens) + 1):\n phrase = tokens[i:j]\n status = self.has_tokens(phrase)\n\n if status is not None:\n if status is True:\n tracked_words.append(TokenTrie.Token(phrase, i, j - 1))\n elif status is False:\n break\n\n return tracked_words\n"
] |
class TokenTrie:
def __init__(self, sentences):
"""
Creates a phrase trie for efficient sub-phrase look up
@param sentences List of Strings of all the phrases which are whitespace delimited n-grams
"""
self.root = TokenTrie.Node()
for sentence in sentences:
words = RegexFilters.WHITESPACE.split(sentence)
self.add_token_sequence(words)
def add_token_sequence(self, token_sequence):
tree = self.root
for token in token_sequence:
if not tree.has_child(token):
tree.add_child(token)
tree = tree.get_child(token)
tree.set_phrase_end(True)
def has_tokens(self, phrase):
"""
Checks if phrase or sub-phrase exists in the tree.
If set of phrases contains phrases such as: "state", "of the" and "state of the art", look up on:
"state" returns true, "of" returns null, "of the art" returns false.
:param phrase: Phrase or sub-phrase to look up.
:type: phrase: list of str
:return: Returns true if phrase in its entirety is in the tree,
null if part of the phrase matches a larger tokenSequence,
false if phrases matches no other phrase entirely and not part any longer phrase.
"""
if len(phrase) == 1 and classifier_options.is_special_class_word(phrase[0]):
return True
tree = self.root
for token in phrase:
if not tree.has_child(token):
return False
tree = tree.get_child(token)
return True if tree.is_end_of_phrase() else None
def find_tracked_words(self, tokens):
"""
Finds word-ranges all of phrases in tokens stored in TokenTrie
:param tokens: Sequence of tokens to find phrases in
:type tokens: list of str
:return: List of Tokens found in tokens
"""
tracked_words = []
for i in range(len(tokens)):
for j in range(i + 1, len(tokens) + 1):
phrase = tokens[i:j]
status = self.has_tokens(phrase)
if status is not None:
if status is True:
tracked_words.append(TokenTrie.Token(phrase, i, j - 1))
elif status is False:
break
return tracked_words
def find_optimal_tokenization(self, tokens):
"""
Similar to {@link #findOptimalAllocation(String[])}, but also includes the words not matching any longer n-gram
in TokenTrie as singletons.
:param tokens: tokens to tokenize
:return: Optimal allocation of tokens to phrases, with non matching tokens as singletons.
"""
token_ranges = self.find_optimal_allocation(tokens)
tokenized_sentence = []
set_index = 0
for token in token_ranges:
while set_index < token.get_start_index():
tokenized_sentence.append(tokens[set_index])
set_index += 1
tokenized_sentence.append(" ".join(token.get_token_sequence()))
set_index = token.get_end_index() + 1
while set_index < len(tokens):
tokenized_sentence.append(tokens[set_index])
set_index += 1
return tokenized_sentence
@functools.total_ordering
class Token:
def __init__(self, token_sequence, start_index, end_index):
self.token_sequence = token_sequence
self.start_index = start_index
self.end_index = end_index
def get_token_sequence(self):
return self.token_sequence
def get_start_index(self):
return self.start_index
def get_end_index(self):
return self.end_index
def get_phrase_length(self):
return self.end_index - self.start_index
def overlaps_with(self, other):
"""
Checks if two phrases overlap
:param other: The other token_sequence
:return: True if overlap, False otherwise
"""
return self.get_start_index() <= other.get_end_index() and other.get_start_index() <= self.get_end_index()
def __lt__(self, other):
size_diff = other.get_phrase_length() - self.get_phrase_length()
diff = size_diff if size_diff != 0 else other.get_start_index() - self.get_start_index()
return diff < 0
def __eq__(self, other):
size_diff = other.get_phrase_length() - self.get_phrase_length()
diff = size_diff if size_diff != 0 else other.get_start_index() - self.get_start_index()
return diff == 0
def __str__(self):
return str(self.token_sequence)
def __repr__(self):
return "Token(" + str(self.start_index) + "," + str(self.end_index) + ")<" + str(self.token_sequence) + ">"
class Node:
def __init__(self):
self.children = {}
self.end_of_phrase = False
def has_child(self, value):
return value in self.children
def add_child(self, value):
self.children[value] = TokenTrie.Node()
def get_child(self, value):
return self.children[value]
def set_phrase_end(self, phrase_end):
self.end_of_phrase = phrase_end
def is_end_of_phrase(self):
return self.end_of_phrase
|
draperunner/fjlc
|
fjlc/lexicon/container/token_trie.py
|
TokenTrie.find_optimal_tokenization
|
python
|
def find_optimal_tokenization(self, tokens):
token_ranges = self.find_optimal_allocation(tokens)
tokenized_sentence = []
set_index = 0
for token in token_ranges:
while set_index < token.get_start_index():
tokenized_sentence.append(tokens[set_index])
set_index += 1
tokenized_sentence.append(" ".join(token.get_token_sequence()))
set_index = token.get_end_index() + 1
while set_index < len(tokens):
tokenized_sentence.append(tokens[set_index])
set_index += 1
return tokenized_sentence
|
Similar to {@link #findOptimalAllocation(String[])}, but also includes the words not matching any longer n-gram
in TokenTrie as singletons.
:param tokens: tokens to tokenize
:return: Optimal allocation of tokens to phrases, with non matching tokens as singletons.
|
train
|
https://github.com/draperunner/fjlc/blob/d2cc8cf1244984e7caf0bf95b11ed1677a94c994/fjlc/lexicon/container/token_trie.py#L100-L123
|
[
"def find_optimal_allocation(self, tokens):\n \"\"\"\n Finds longest, non-overlapping word-ranges of phrases in tokens stored in TokenTrie\n\n :param tokens: tokens tokenize\n :type tokens: list of str\n :return: Optimal allocation of tokens to phrases\n :rtype: list of TokenTrie.Token\n \"\"\"\n token_ranges = self.find_tracked_words(tokens)\n token_ranges.sort()\n\n for offset in range(1, len(token_ranges)):\n to_be_removed = []\n for candidate in token_ranges[offset:]:\n for i in range(offset):\n if token_ranges[i].overlaps_with(candidate):\n to_be_removed.append(candidate)\n break\n\n token_ranges = [token for token in token_ranges if token not in to_be_removed]\n\n token_ranges.sort(key=lambda token: token.get_start_index())\n return token_ranges\n"
] |
class TokenTrie:
def __init__(self, sentences):
"""
Creates a phrase trie for efficient sub-phrase look up
@param sentences List of Strings of all the phrases which are whitespace delimited n-grams
"""
self.root = TokenTrie.Node()
for sentence in sentences:
words = RegexFilters.WHITESPACE.split(sentence)
self.add_token_sequence(words)
def add_token_sequence(self, token_sequence):
tree = self.root
for token in token_sequence:
if not tree.has_child(token):
tree.add_child(token)
tree = tree.get_child(token)
tree.set_phrase_end(True)
def has_tokens(self, phrase):
"""
Checks if phrase or sub-phrase exists in the tree.
If set of phrases contains phrases such as: "state", "of the" and "state of the art", look up on:
"state" returns true, "of" returns null, "of the art" returns false.
:param phrase: Phrase or sub-phrase to look up.
:type: phrase: list of str
:return: Returns true if phrase in its entirety is in the tree,
null if part of the phrase matches a larger tokenSequence,
false if phrases matches no other phrase entirely and not part any longer phrase.
"""
if len(phrase) == 1 and classifier_options.is_special_class_word(phrase[0]):
return True
tree = self.root
for token in phrase:
if not tree.has_child(token):
return False
tree = tree.get_child(token)
return True if tree.is_end_of_phrase() else None
def find_tracked_words(self, tokens):
"""
Finds word-ranges all of phrases in tokens stored in TokenTrie
:param tokens: Sequence of tokens to find phrases in
:type tokens: list of str
:return: List of Tokens found in tokens
"""
tracked_words = []
for i in range(len(tokens)):
for j in range(i + 1, len(tokens) + 1):
phrase = tokens[i:j]
status = self.has_tokens(phrase)
if status is not None:
if status is True:
tracked_words.append(TokenTrie.Token(phrase, i, j - 1))
elif status is False:
break
return tracked_words
def find_optimal_allocation(self, tokens):
"""
Finds longest, non-overlapping word-ranges of phrases in tokens stored in TokenTrie
:param tokens: tokens tokenize
:type tokens: list of str
:return: Optimal allocation of tokens to phrases
:rtype: list of TokenTrie.Token
"""
token_ranges = self.find_tracked_words(tokens)
token_ranges.sort()
for offset in range(1, len(token_ranges)):
to_be_removed = []
for candidate in token_ranges[offset:]:
for i in range(offset):
if token_ranges[i].overlaps_with(candidate):
to_be_removed.append(candidate)
break
token_ranges = [token for token in token_ranges if token not in to_be_removed]
token_ranges.sort(key=lambda token: token.get_start_index())
return token_ranges
@functools.total_ordering
class Token:
def __init__(self, token_sequence, start_index, end_index):
self.token_sequence = token_sequence
self.start_index = start_index
self.end_index = end_index
def get_token_sequence(self):
return self.token_sequence
def get_start_index(self):
return self.start_index
def get_end_index(self):
return self.end_index
def get_phrase_length(self):
return self.end_index - self.start_index
def overlaps_with(self, other):
"""
Checks if two phrases overlap
:param other: The other token_sequence
:return: True if overlap, False otherwise
"""
return self.get_start_index() <= other.get_end_index() and other.get_start_index() <= self.get_end_index()
def __lt__(self, other):
size_diff = other.get_phrase_length() - self.get_phrase_length()
diff = size_diff if size_diff != 0 else other.get_start_index() - self.get_start_index()
return diff < 0
def __eq__(self, other):
size_diff = other.get_phrase_length() - self.get_phrase_length()
diff = size_diff if size_diff != 0 else other.get_start_index() - self.get_start_index()
return diff == 0
def __str__(self):
return str(self.token_sequence)
def __repr__(self):
return "Token(" + str(self.start_index) + "," + str(self.end_index) + ")<" + str(self.token_sequence) + ">"
class Node:
def __init__(self):
self.children = {}
self.end_of_phrase = False
def has_child(self, value):
return value in self.children
def add_child(self, value):
self.children[value] = TokenTrie.Node()
def get_child(self, value):
return self.children[value]
def set_phrase_end(self, phrase_end):
self.end_of_phrase = phrase_end
def is_end_of_phrase(self):
return self.end_of_phrase
|
draperunner/fjlc
|
fjlc/main.py
|
LexiconClassifier.classify
|
python
|
def classify(self, tweets):
if type(tweets) == str:
return self.classifier.classify(tweets)
return list(map(lambda tweet: self.classifier.classify(tweet), tweets))
|
Classify tweet or tweets
:param tweets: String or array of strings to classify.
:return: String or array of strings depicting sentiment. Sentiment can be POSITIVE, NEGATIVE or NEUTRAL.
|
train
|
https://github.com/draperunner/fjlc/blob/d2cc8cf1244984e7caf0bf95b11ed1677a94c994/fjlc/main.py#L78-L87
| null |
class LexiconClassifier:
def __init__(self,
lexicon=path.join(path.abspath(path.dirname(__file__)), "res/data/lexicon.pmi.json"),
options=path.join(path.abspath(path.dirname(__file__)), "res/data/options.pmi.json"),
dictionary=path.join(path.abspath(path.dirname(__file__)), "res/data/canonical.json")):
self.lexicon = lexicon
self.options = options
self.dictionary = dictionary
classifier_options.load_options(self.options)
canonical_form.load_dictionary(self.dictionary)
self.prior_polarity_lexicon = PriorPolarityLexicon(self.lexicon)
self.classifier = Classifier(self.prior_polarity_lexicon, lexical_classifier.CLASSIFIER_FILTERS)
def calculate_sentiment(self, tweets):
"""
Classify tweet or tweets
:param tweets: String or array of strings to classify.
:return: Float or array of floats depicting sentiment value.
"""
if type(tweets) == str:
return self.classifier.calculate_sentiment(tweets)
return list(map(lambda tweet: self.classifier.calculate_sentiment(tweet), tweets))
|
draperunner/fjlc
|
fjlc/utils/map_utils.py
|
normalize_map_between
|
python
|
def normalize_map_between(dictionary, norm_min, norm_max):
if len(dictionary) < 2:
return {}
values = list(dictionary.values())
norm_range = norm_max - norm_min
map_min = min(values)
map_range = max(values) - map_min
range_factor = norm_range / float(map_range)
normalized_map = {}
for key, value in dictionary.items():
normalized_map[key] = norm_min + (value - map_min) * range_factor
return normalized_map
|
Performs linear normalization of all values in Map between normMin and normMax
:param: map Map to normalize values for
:param: normMin Smallest normalized value
:param: normMax Largest normalized value
:return: A new map with double values within [normMin, normMax]
|
train
|
https://github.com/draperunner/fjlc/blob/d2cc8cf1244984e7caf0bf95b11ed1677a94c994/fjlc/utils/map_utils.py#L11-L34
| null |
def sort_map_by_value(dictionary):
"""
Sorts Map by value. Map values must implement Comparable.
:param dictionary: Map to sort
:return: Sorted map
"""
return sorted(dictionary, key=dictionary.get)
"""
/**
* Sorts map given a comparator
*
* @param map Map to sort
* @param comparator Comparator used to sort elements
* @return Sorted map
*/
public static <K, V> Map<K, V> sortMapWithComparator(Map<K, V> map, Comparator<Map.Entry<K, V>> comparator) {
LinkedList<Map.Entry<K, V>> list = new LinkedList<>(map.entrySet());
Collections.sort(list, comparator);
Map<K, V> sortedHashMap = new LinkedHashMap<>();
for (Map.Entry<K, V> entry : list) {
sortedHashMap.put(entry.getKey(), entry.getValue());
}
return sortedHashMap;
}
/**
* Increments value of key by increment if present in the list, otherwise initializes the value to increment.
*
* @param map Map to increment key for
* @param key Key to increment
* @param increment Value to increment by
*/
public synchronized static <T> void incrementMapByValue(Map<T, Integer> map, T key, int increment) {
map.put(key, map.getOrDefault(key, 0) + increment);
}
/**
* Removes elements from map that are strictly smaller than the threshold element
*
* @param map Map to remove items from
* @param thresh Threshold element
*/
public static <K, V extends Comparable<V>> void removeInfrequentItems(Map<K, V> map, V thresh) {
Iterator<Map.Entry<K, V>> iter = map.entrySet().iterator();
while (iter.hasNext()) {
Map.Entry<K, V> entry = iter.next();
if (entry.getValue().compareTo(thresh) < 0) {
iter.remove();
}
}
}
/**
* Extracts the map entries with key values found in the toExtract set
*
* @param map Map to extract items from
* @param toExtract The key values
*/
public static <K, V> Map<K, V> extractItems(Map<K, V> map, Set<K> toExtract) {
Map<K, V> extractedItems = new HashMap<>();
toExtract.stream().filter(map::containsKey).forEach(key -> extractedItems.put(key, map.get(key)));
return extractedItems;
}
/**
* Merges two maps into a new map
*
* @param map1 Map to merge
* @param map2 Map to merge
*/
public static <K, V> Map<K, V> mergeMaps(Map<K, V> map1, Map<K, V> map2) {
Map<K, V> mergedMap = new HashMap<>();
mergedMap.putAll(map1);
mergedMap.putAll(map2);
return mergedMap;
}
}
"""
|
draperunner/fjlc
|
fjlc/classifier/classifier_options.py
|
load_options
|
python
|
def load_options(file_name):
words = from_json(read_entire_file_into_string(file_name))
global options, intensifiers, negators, stop_words
options = words["options"]
intensifiers = words["intensifiers"]
negators = words["negators"]
stop_words = words["stopWords"]
|
Loads options from a JSON file. The file should contain general classifier options, intensifier words with their
intensification values, negation words and stop words.
@param file_name Name of file containing the options
@throws IOException
|
train
|
https://github.com/draperunner/fjlc/blob/d2cc8cf1244984e7caf0bf95b11ed1677a94c994/fjlc/classifier/classifier_options.py#L65-L79
| null |
import copy
from enum import Enum
from fjlc.utils.file_utils import read_entire_file_into_string
from fjlc.utils.json_utils import from_json
options = {}
intensifiers = {}
negators = set()
stop_words = set()
def is_stop_word(word):
return word in stop_words
def is_negation(word):
return word in negators
def is_intensifier(word):
return word in intensifiers
def contains_stop_words(words):
for word in words:
if is_stop_word(word):
return True
return False
def contains_negation(words):
for word in words:
if is_negation(word):
return True
return False
def contains_intensifier(words):
for word in words:
if is_intensifier(word):
return True
return False
def get_variable(variable):
return options[variable.name]
def set_variable(variable, value):
options[variable.name] = value
def get_options():
return copy.deepcopy(options)
def is_special_class_word(word):
return word.startswith("||") and word.endswith("||")
def get_intensifier_value(word):
intensifier = intensifiers.get(word, 0.0)
mult = get_variable(Variable.AMPLIFIER_SCALAR) if intensifier > 0 else get_variable(
Variable.DOWNTONER_SCALAR)
return mult * intensifier
class Variable(Enum):
NEGATION_VALUE = 0,
EXCLAMATION_INTENSIFIER = 1,
QUESTION_INTENSIFIER = 2,
NEGATION_SCOPE_LENGTH = 3,
DOWNTONER_SCALAR = 4,
AMPLIFIER_SCALAR = 5,
CLASSIFICATION_THRESHOLD_LOWER = 6,
CLASSIFICATION_THRESHOLD_HIGHER = 7
|
draperunner/fjlc
|
fjlc/lexicon/lexicon_creator.py
|
LexiconCreator.create_lexicon
|
python
|
def create_lexicon(self, data_set_reader, n_grams, min_total_occurrences, min_sentiment_value, filters):
counter = self.count_n_grams_py_polarity(data_set_reader, n_grams, filters)
lexicon = {}
pos = sum(map(lambda i: i.num_positive, counter.values()))
neg = sum(map(lambda i: i.num_negative, counter.values()))
ratio = neg / float(pos)
for key, value in counter.items():
if value.get_total_occurrences() <= min_total_occurrences:
continue
over = value.num_positive
under = value.num_negative
sentiment_value = math.log(ratio * over / under)
if abs(sentiment_value) >= min_sentiment_value:
lexicon[key] = sentiment_value
if RegexFilters.WHITESPACE.split(key).length == 1 and not classifier_options.is_special_class_word(key):
for related_word in adjectives.get_adverb_and_adjectives(key):
if related_word in counter and related_word not in lexicon:
lexicon[related_word] = sentiment_value
return map_utils.normalize_map_between(lexicon, -5, 5)
|
Generates sentiment lexicon using PMI on words and classification of context they are in.
:param: dataSetReader Dataset containing tweets and their sentiment classification
:param: nGrams n-grams to calculate sentiment for (with n>1, singletons are calculated automatically)
:param: minTotalOccurrences minimum number of times n-gram must have appeared in dataset before a sentiment value
is assigned (higher value gives more accurate sentiment value)
:param: minSentimentValue minimum sentiment value required to be included in lexicon (values close to 0 are
often words that are used equally in positive or negative context, possibly even
different words, but with same spelling, and thus having uncertain value)
:param: filters filters to apply to tweets before searching for n-grams
:return: map of n-grams and their sentiment values, sentiment values are in [-5, 5]
|
train
|
https://github.com/draperunner/fjlc/blob/d2cc8cf1244984e7caf0bf95b11ed1677a94c994/fjlc/lexicon/lexicon_creator.py#L15-L52
|
[
"def count_n_grams_py_polarity(self, data_set_reader, n_grams, filters):\n \"\"\"\n Returns a map of n-gram and the number of times it appeared in positive context and the number of times it\n appeared in negative context in dataset file.\n\n :param data_set_reader: Dataset containing tweets and their classification\n :param n_grams: n-grams to count occurrences for\n :param filters: filters to apply to tweets in dataset before searching for n-grams\n :return: Map of Counter instances for n-grams in nGrams Collection\n \"\"\"\n self.data_set_reader = data_set_reader\n token_trie = TokenTrie(n_grams)\n\n counter = {}\n\n # Todo: parallelize\n for entry in data_set_reader.items():\n tweet = filters.apply(entry.get_tweet())\n tokens = token_trie.find_optimal_tokenization(RegexFilters.WHITESPACE.split(tweet))\n\n for n_gram in tokens:\n n_gram_words = RegexFilters.WHITESPACE.split(n_gram)\n if self.contains_illegal_word(n_gram_words):\n continue\n if not n_gram in counter:\n counter[n_gram] = self.Counter()\n\n if entry.get_classification().is_positive():\n counter[n_gram].num_positive += 1\n elif entry.get_classification().is_negative():\n counter[n_gram].num_negative += 1\n\n return counter\n"
] |
class LexiconCreator:
def __init__(self):
self.data_set_reader = None
def create_lexicon(self, data_set_reader, n_grams, min_total_occurrences, min_sentiment_value, filters):
"""
Generates sentiment lexicon using PMI on words and classification of context they are in.
:param: dataSetReader Dataset containing tweets and their sentiment classification
:param: nGrams n-grams to calculate sentiment for (with n>1, singletons are calculated automatically)
:param: minTotalOccurrences minimum number of times n-gram must have appeared in dataset before a sentiment value
is assigned (higher value gives more accurate sentiment value)
:param: minSentimentValue minimum sentiment value required to be included in lexicon (values close to 0 are
often words that are used equally in positive or negative context, possibly even
different words, but with same spelling, and thus having uncertain value)
:param: filters filters to apply to tweets before searching for n-grams
:return: map of n-grams and their sentiment values, sentiment values are in [-5, 5]
"""
counter = self.count_n_grams_py_polarity(data_set_reader, n_grams, filters)
lexicon = {}
pos = sum(map(lambda i: i.num_positive, counter.values()))
neg = sum(map(lambda i: i.num_negative, counter.values()))
ratio = neg / float(pos)
for key, value in counter.items():
if value.get_total_occurrences() <= min_total_occurrences:
continue
over = value.num_positive
under = value.num_negative
sentiment_value = math.log(ratio * over / under)
if abs(sentiment_value) >= min_sentiment_value:
lexicon[key] = sentiment_value
if RegexFilters.WHITESPACE.split(key).length == 1 and not classifier_options.is_special_class_word(key):
for related_word in adjectives.get_adverb_and_adjectives(key):
if related_word in counter and related_word not in lexicon:
lexicon[related_word] = sentiment_value
return map_utils.normalize_map_between(lexicon, -5, 5)
def count_n_grams_py_polarity(self, data_set_reader, n_grams, filters):
"""
Returns a map of n-gram and the number of times it appeared in positive context and the number of times it
appeared in negative context in dataset file.
:param data_set_reader: Dataset containing tweets and their classification
:param n_grams: n-grams to count occurrences for
:param filters: filters to apply to tweets in dataset before searching for n-grams
:return: Map of Counter instances for n-grams in nGrams Collection
"""
self.data_set_reader = data_set_reader
token_trie = TokenTrie(n_grams)
counter = {}
# Todo: parallelize
for entry in data_set_reader.items():
tweet = filters.apply(entry.get_tweet())
tokens = token_trie.find_optimal_tokenization(RegexFilters.WHITESPACE.split(tweet))
for n_gram in tokens:
n_gram_words = RegexFilters.WHITESPACE.split(n_gram)
if self.contains_illegal_word(n_gram_words):
continue
if not n_gram in counter:
counter[n_gram] = self.Counter()
if entry.get_classification().is_positive():
counter[n_gram].num_positive += 1
elif entry.get_classification().is_negative():
counter[n_gram].num_negative += 1
return counter
@staticmethod
def contains_illegal_word(n_gram):
return classifier_options.is_stop_word(n_gram[-1] or classifier_options.contains_intensifier(n_gram))
def get_progress(self):
return 0 if self.data_set_reader is None else self.data_set_reader.get_progress()
class Counter:
def __init__(self):
self.num_positive = 4
self.num_negative = 4
def get_total_occurrences(self):
return self.num_positive + self.num_negative
|
draperunner/fjlc
|
fjlc/lexicon/lexicon_creator.py
|
LexiconCreator.count_n_grams_py_polarity
|
python
|
def count_n_grams_py_polarity(self, data_set_reader, n_grams, filters):
self.data_set_reader = data_set_reader
token_trie = TokenTrie(n_grams)
counter = {}
# Todo: parallelize
for entry in data_set_reader.items():
tweet = filters.apply(entry.get_tweet())
tokens = token_trie.find_optimal_tokenization(RegexFilters.WHITESPACE.split(tweet))
for n_gram in tokens:
n_gram_words = RegexFilters.WHITESPACE.split(n_gram)
if self.contains_illegal_word(n_gram_words):
continue
if not n_gram in counter:
counter[n_gram] = self.Counter()
if entry.get_classification().is_positive():
counter[n_gram].num_positive += 1
elif entry.get_classification().is_negative():
counter[n_gram].num_negative += 1
return counter
|
Returns a map of n-gram and the number of times it appeared in positive context and the number of times it
appeared in negative context in dataset file.
:param data_set_reader: Dataset containing tweets and their classification
:param n_grams: n-grams to count occurrences for
:param filters: filters to apply to tweets in dataset before searching for n-grams
:return: Map of Counter instances for n-grams in nGrams Collection
|
train
|
https://github.com/draperunner/fjlc/blob/d2cc8cf1244984e7caf0bf95b11ed1677a94c994/fjlc/lexicon/lexicon_creator.py#L54-L86
|
[
"def contains_illegal_word(n_gram):\n return classifier_options.is_stop_word(n_gram[-1] or classifier_options.contains_intensifier(n_gram))\n"
] |
class LexiconCreator:
def __init__(self):
self.data_set_reader = None
def create_lexicon(self, data_set_reader, n_grams, min_total_occurrences, min_sentiment_value, filters):
"""
Generates sentiment lexicon using PMI on words and classification of context they are in.
:param: dataSetReader Dataset containing tweets and their sentiment classification
:param: nGrams n-grams to calculate sentiment for (with n>1, singletons are calculated automatically)
:param: minTotalOccurrences minimum number of times n-gram must have appeared in dataset before a sentiment value
is assigned (higher value gives more accurate sentiment value)
:param: minSentimentValue minimum sentiment value required to be included in lexicon (values close to 0 are
often words that are used equally in positive or negative context, possibly even
different words, but with same spelling, and thus having uncertain value)
:param: filters filters to apply to tweets before searching for n-grams
:return: map of n-grams and their sentiment values, sentiment values are in [-5, 5]
"""
counter = self.count_n_grams_py_polarity(data_set_reader, n_grams, filters)
lexicon = {}
pos = sum(map(lambda i: i.num_positive, counter.values()))
neg = sum(map(lambda i: i.num_negative, counter.values()))
ratio = neg / float(pos)
for key, value in counter.items():
if value.get_total_occurrences() <= min_total_occurrences:
continue
over = value.num_positive
under = value.num_negative
sentiment_value = math.log(ratio * over / under)
if abs(sentiment_value) >= min_sentiment_value:
lexicon[key] = sentiment_value
if RegexFilters.WHITESPACE.split(key).length == 1 and not classifier_options.is_special_class_word(key):
for related_word in adjectives.get_adverb_and_adjectives(key):
if related_word in counter and related_word not in lexicon:
lexicon[related_word] = sentiment_value
return map_utils.normalize_map_between(lexicon, -5, 5)
@staticmethod
def contains_illegal_word(n_gram):
return classifier_options.is_stop_word(n_gram[-1] or classifier_options.contains_intensifier(n_gram))
def get_progress(self):
return 0 if self.data_set_reader is None else self.data_set_reader.get_progress()
class Counter:
def __init__(self):
self.num_positive = 4
self.num_negative = 4
def get_total_occurrences(self):
return self.num_positive + self.num_negative
|
draperunner/fjlc
|
fjlc/lexicon/container/adjectives.py
|
form_adverb_from_adjective
|
python
|
def form_adverb_from_adjective(adjective):
# If the adjective ends in -able, -ible, or -le, replace the -e with -y
if adjective.endswith("able") or adjective.endswith("ible") or adjective.endswith("le"):
return adjective[:-1] + "y"
# If the adjective ends in -y, replace the y with i and add -ly
elif adjective.endswith("y"):
return adjective[:-1] + "ily"
# If the adjective ends in -ic, add -ally
elif adjective.endswith("ic"):
return adjective[:-2] + "ally"
# In most cases, an adverb is formed by adding -ly to an adjective
return adjective + "ly"
|
Forms an adverb from the input adjective, f.ex. "happy" => "happily".
Adverbs are generated using rules from: http://www.edufind.com/english-grammar/forming-adverbs-adjectives/
:param adjective: adjective
:return: adverb form of the input adjective
|
train
|
https://github.com/draperunner/fjlc/blob/d2cc8cf1244984e7caf0bf95b11ed1677a94c994/fjlc/lexicon/container/adjectives.py#L87-L109
| null |
VOWELS = "aeiouy"
SEPARATOR = ","
def get_adverb_and_adjectives(word):
if not consists_only_of_alphabetical_characters(word):
return []
adjectives = get_comparative_and_superlative_adjectives(word)
if len(adjectives) == 0:
return [form_adverb_from_adjective(word)]
else:
adjectives = form_adverb_from_adjective(word) + SEPARATOR + adjectives
return adjectives.split(SEPARATOR)
def get_comparative_and_superlative_adjectives(word):
"""
Returns a comma separated string with the comparative and the superlative forms of the input adjective. F.ex.
"good" => "better,best", "happy" => "happier,happiest". If the comparative and superlative forms are
"more [word]" and "most [word]", empty string is returned, f.ex. "careful" => "".
Forms are generated using rules from: http://www.eflnet.com/tutorials/adjcompsup.php
:param word: adjective
:return: comma separated comparative and the superlative forms of the input adjective
"""
if word == "good":
return "better,best"
if word == "bad":
return "worse,worst"
if word == "far":
return "farther,farthest"
if word == "little":
return "less,least"
if word == "slow":
return "slower,slowest"
return normal_comparative_and_superlative_adjectives(word)
def normal_comparative_and_superlative_adjectives(word):
number_of_syllables = get_number_of_syllables(word)
sb = ""
if number_of_syllables == 1: # If one-syllable adjective
last_letter = word[-1]
# If the adjective ends with an e, just add –r for the comparative form and –st for the superlative form
if word.endswith("e"):
sb += word + "r"
sb += SEPARATOR + word + "st"
# If the adjective ends with –y, change the y to i and add –er for the comparative form.
# For the superlative form change the y to i and add –est.
elif word.endswith("y"):
stub = word[:-1]
sb += stub + "ier"
sb += SEPARATOR + stub + "iest"
# If the adjective ends with a single consonant with a vowel before it, double the consonant and add –er
# for the comparative form; and double the consonant and add –est for the superlative form
elif is_vowel(word[-2]) and not is_vowel(last_letter):
sb += word + last_letter + "er"
sb += SEPARATOR + word + last_letter + "est"
# Otherwise just add -er for the comparative form and -est for the superlative form
else:
sb += word + "er"
sb += SEPARATOR + word + "est"
elif number_of_syllables == 2: # If two-syllable adjective
# If the adjective ends with –y, change the y to i and add –er for the comparative form.
# For the superlative form change the y to i and add –est.
if word.endswith("y"):
stub = word[:-1]
sb += stub + "ier"
sb += SEPARATOR + stub + "iest"
# If the adjective ending in –er, -le, or –ow, add –er and –est to form the comparative and superlative forms
elif word.endswith("er") or word.endswith("le") or word.endswith("ow"):
sb += word + "er"
sb += SEPARATOR + word + "est"
return sb
def get_number_of_syllables(word):
count = 0
last_is_consonant = True
for i in range(len(word) - 1):
if is_vowel(word[i]):
if last_is_consonant:
count += 1
last_is_consonant = False
else:
last_is_consonant = True
return count
def is_vowel(character):
return character in VOWELS
def consists_only_of_alphabetical_characters(name):
return name.isalpha()
|
draperunner/fjlc
|
fjlc/classifier/classifier.py
|
Classifier.classify
|
python
|
def classify(self, tweet):
sentiment_value = self.calculate_sentiment(tweet)
return Classification.classify_from_thresholds(sentiment_value,
classifier_options.get_variable(
classifier_options.Variable.CLASSIFICATION_THRESHOLD_LOWER),
classifier_options.get_variable(
classifier_options.Variable.CLASSIFICATION_THRESHOLD_HIGHER))
|
Classifies the tweet into one of three classes (negative, neutral or positive) depending on the sentiment value
of the tweet and the thresholds specified in the classifier_options
:param tweet: String tweet to classify
:return: Sentiment classification (negative, neutral or positive)
|
train
|
https://github.com/draperunner/fjlc/blob/d2cc8cf1244984e7caf0bf95b11ed1677a94c994/fjlc/classifier/classifier.py#L26-L40
|
[
"def calculate_sentiment(self, tweet):\n if self.filters is not None:\n tweet = self.filters.apply(tweet)\n\n lexical_tokens = lexical_parser.lexically_parse_tweet(tweet, self.phrase_tree)\n for i in range(len(lexical_tokens)):\n token = lexical_tokens[i]\n phrase = token.get_phrase()\n\n if self.lexicon.has_token(phrase):\n token.set_lexical_value(self.lexicon.get_token_polarity(phrase))\n\n elif classifier_options.is_negation(phrase):\n propagate_negation(lexical_tokens, i)\n\n elif classifier_options.is_intensifier(phrase):\n intensify_next(lexical_tokens, i, classifier_options.get_intensifier_value(phrase))\n\n return sum(map(lambda t: t.get_sentiment_value(), lexical_tokens))\n"
] |
class Classifier:
def __init__(self, lexicon, filters=None):
self.lexicon = lexicon
self.filters = filters
self.phrase_tree = TokenTrie(lexicon.get_subjective_words())
def calculate_sentiment(self, tweet):
if self.filters is not None:
tweet = self.filters.apply(tweet)
lexical_tokens = lexical_parser.lexically_parse_tweet(tweet, self.phrase_tree)
for i in range(len(lexical_tokens)):
token = lexical_tokens[i]
phrase = token.get_phrase()
if self.lexicon.has_token(phrase):
token.set_lexical_value(self.lexicon.get_token_polarity(phrase))
elif classifier_options.is_negation(phrase):
propagate_negation(lexical_tokens, i)
elif classifier_options.is_intensifier(phrase):
intensify_next(lexical_tokens, i, classifier_options.get_intensifier_value(phrase))
return sum(map(lambda t: t.get_sentiment_value(), lexical_tokens))
|
draperunner/fjlc
|
fjlc/utils/json_utils.py
|
to_json
|
python
|
def to_json(data, pretty):
if pretty:
return json.dumps(data, sort_keys=True, indent=4, separators=(',', ': '))
return json.dumps(data)
|
Converts object to JSON formatted string with typeToken adapter
:param data: A dictionary to convert to JSON string
:param pretty: A boolean deciding whether or not to pretty format the JSON string
:return: The JSON string
|
train
|
https://github.com/draperunner/fjlc/blob/d2cc8cf1244984e7caf0bf95b11ed1677a94c994/fjlc/utils/json_utils.py#L6-L15
| null |
import json
import fjlc.utils.file_utils as file_utils
def to_json_file(file, data, pretty):
"""
Writes object instance in JSON formatted String to file
:param file: File to write JSON string ot
:param data: Object to convert to JSON
:param pretty: Use pretty formatting or not
"""
json_string = to_json(data, pretty)
file_utils.write_to_file(file, json_string)
def from_json(json_string):
"""
Parses JSON String and returns corresponding dictionary
:param json_string:
:return:
"""
return json.loads(json_string)
def from_json_file(file_name):
"""
Parses JSON from file and returns corresponding instance
:param file_name: File containing JSON formatted object
:return: A dictionary representing the JSON
"""
return from_json(file_utils.read_entire_file_into_string(file_name))
|
draperunner/fjlc
|
fjlc/utils/json_utils.py
|
to_json_file
|
python
|
def to_json_file(file, data, pretty):
json_string = to_json(data, pretty)
file_utils.write_to_file(file, json_string)
|
Writes object instance in JSON formatted String to file
:param file: File to write JSON string ot
:param data: Object to convert to JSON
:param pretty: Use pretty formatting or not
|
train
|
https://github.com/draperunner/fjlc/blob/d2cc8cf1244984e7caf0bf95b11ed1677a94c994/fjlc/utils/json_utils.py#L18-L27
|
[
"def to_json(data, pretty):\n \"\"\"\n Converts object to JSON formatted string with typeToken adapter\n :param data: A dictionary to convert to JSON string\n :param pretty: A boolean deciding whether or not to pretty format the JSON string\n :return: The JSON string\n \"\"\"\n if pretty:\n return json.dumps(data, sort_keys=True, indent=4, separators=(',', ': '))\n return json.dumps(data)\n"
] |
import json
import fjlc.utils.file_utils as file_utils
def to_json(data, pretty):
"""
Converts object to JSON formatted string with typeToken adapter
:param data: A dictionary to convert to JSON string
:param pretty: A boolean deciding whether or not to pretty format the JSON string
:return: The JSON string
"""
if pretty:
return json.dumps(data, sort_keys=True, indent=4, separators=(',', ': '))
return json.dumps(data)
def to_json_file(file, data, pretty):
"""
Writes object instance in JSON formatted String to file
:param file: File to write JSON string ot
:param data: Object to convert to JSON
:param pretty: Use pretty formatting or not
"""
json_string = to_json(data, pretty)
file_utils.write_to_file(file, json_string)
def from_json(json_string):
"""
Parses JSON String and returns corresponding dictionary
:param json_string:
:return:
"""
return json.loads(json_string)
def from_json_file(file_name):
"""
Parses JSON from file and returns corresponding instance
:param file_name: File containing JSON formatted object
:return: A dictionary representing the JSON
"""
return from_json(file_utils.read_entire_file_into_string(file_name))
|
draperunner/fjlc
|
fjlc/preprocessing/preprocessors/tweet_n_grams_pmi.py
|
TweetNGramsPMI.get_frequent_n_grams
|
python
|
def get_frequent_n_grams(self, input_reader, n, min_frequency, min_pmi, filters):
line_counter = 0
TweetNGramsPMI.tweet_reader = input_reader
TweetNGramsPMI.n_gram_tree = self.NGramTree()
# Todo: Parallelize
for tweet in self.tweet_reader:
line_counter += 1
if line_counter % 200000 == 0:
TweetNGramsPMI.n_gram_tree.prune_infrequent(math.ceil(min_frequency * line_counter / 2.))
tweet = filters.apply(tweet)
for sentence in RegexFilters.SENTENCE_END_PUNCTUATION.split(tweet):
tokens = RegexFilters.WHITESPACE.split(sentence.strip())
if len(tokens) == 1:
continue
for i in range(len(tokens)):
self.n_gram_tree.increment_n_gram(tokens[i:min(i + n, len(tokens))])
return self.n_gram_tree.get_n_grams(int(min_frequency * line_counter), min_pmi)
|
Finds all frequent (and meaningful) n-grams in a file, treating each new line as a new document.
:param input_reader: LineReader initialized on file with documents to generate n-grams for
:param n: Maximum n-gram length
:param min_frequency: Smallest required frequency to include n-gram
:param min_pmi: Minimum PMI value for n-gram to be included
:param filters: List of filters to apply to document before generating n-grams
:return: Map of n-grams as key and number of occurrences as value
|
train
|
https://github.com/draperunner/fjlc/blob/d2cc8cf1244984e7caf0bf95b11ed1677a94c994/fjlc/preprocessing/preprocessors/tweet_n_grams_pmi.py#L12-L42
| null |
class TweetNGramsPMI:
tweet_reader = None
n_gram_tree = None
def get_frequent_n_grams(self, input_reader, n, min_frequency, min_pmi, filters):
"""
Finds all frequent (and meaningful) n-grams in a file, treating each new line as a new document.
:param input_reader: LineReader initialized on file with documents to generate n-grams for
:param n: Maximum n-gram length
:param min_frequency: Smallest required frequency to include n-gram
:param min_pmi: Minimum PMI value for n-gram to be included
:param filters: List of filters to apply to document before generating n-grams
:return: Map of n-grams as key and number of occurrences as value
"""
line_counter = 0
TweetNGramsPMI.tweet_reader = input_reader
TweetNGramsPMI.n_gram_tree = self.NGramTree()
# Todo: Parallelize
for tweet in self.tweet_reader:
line_counter += 1
if line_counter % 200000 == 0:
TweetNGramsPMI.n_gram_tree.prune_infrequent(math.ceil(min_frequency * line_counter / 2.))
tweet = filters.apply(tweet)
for sentence in RegexFilters.SENTENCE_END_PUNCTUATION.split(tweet):
tokens = RegexFilters.WHITESPACE.split(sentence.strip())
if len(tokens) == 1:
continue
for i in range(len(tokens)):
self.n_gram_tree.increment_n_gram(tokens[i:min(i + n, len(tokens))])
return self.n_gram_tree.get_n_grams(int(min_frequency * line_counter), min_pmi)
@staticmethod
def get_progress():
return 0 if TweetNGramsPMI.tweet_reader is None else TweetNGramsPMI.tweet_reader.get_progress()
class NGramTree:
def __init__(self):
self.root = TweetNGramsPMI.Node("")
def increment_n_gram(self, n_gram):
current = self.root
current.num_occurrences += 1
for word in n_gram:
if not current.has_child(word):
current.add_child(word)
current = current.get_child(word)
current.num_occurrences += 1
def get_node(self, phrase):
current = self.root
for word in RegexFilters.WHITESPACE.split(phrase):
if not current.has_child(word):
return None
current = current.get_child(word)
return current
def prune_infrequent(self, limit):
self.root.prune_infrequent(limit)
def get_n_grams(self, limit, inclusion_threshold):
all_n_grams = {}
for child in self.root.children.values():
child.add_frequent_phrases(all_n_grams, limit, child.phrase)
filtered_n_grams = []
for next_key, next_value in all_n_grams.items():
n_gram_tokens = RegexFilters.WHITESPACE.split(next_key)
if next_value >= inclusion_threshold and not classifier_options.contains_intensifier(n_gram_tokens) and not classifier_options.is_stop_word(n_gram_tokens[-1]):
filtered_n_grams.append(next_key)
return filtered_n_grams
class Node:
def __init__(self, phrase):
self.children = {}
self.phrase = phrase
self.num_occurrences = 0
self.log_score = 0.0
def has_child(self, value):
return value in self.children
def add_child(self, value):
self.children[value] = TweetNGramsPMI.Node(value)
def get_child(self, value):
return self.children[value]
def get_log_score(self):
if self.log_score == 0.0:
self.log_score = math.log(self.num_occurrences)
return self.log_score
def prune_infrequent(self, limit):
self.children = [child for child in self.children if child.get_value().num_occurrences >= limit]
for child in self.children:
child.get_value().prune_infrequent(limit)
def add_frequent_phrases(self, dictionary, limit, prefix):
for child in self.children.values():
if child.num_occurrences < limit:
continue
last_word = TweetNGramsPMI.n_gram_tree.get_node(child.phrase)
if last_word is not None and last_word.num_occurrences >= limit:
temp = TweetNGramsPMI.n_gram_tree.root.get_log_score() + child.get_log_score() - self.get_log_score() - last_word.get_log_score()
candidate = prefix + " " + child.phrase
dictionary[candidate] = temp
child.add_frequent_phrases(dictionary, limit, candidate)
|
inspirehep/inspire-utils
|
inspire_utils/dedupers.py
|
dedupe_list_of_dicts
|
python
|
def dedupe_list_of_dicts(ld):
def _freeze(o):
"""Recursively freezes a dict into an hashable object.
Adapted from http://stackoverflow.com/a/21614155/374865.
"""
if isinstance(o, dict):
return frozenset((k, _freeze(v)) for k, v in six.iteritems(o))
elif isinstance(o, (list, tuple)):
return tuple(_freeze(v) for v in o)
else:
return o
result = []
seen = set()
for d in ld:
f = _freeze(d)
if f not in seen:
result.append(d)
seen.add(f)
return result
|
Remove duplicates from a list of dictionaries preserving the order.
We can't use the generic list helper because a dictionary isn't hashable.
Adapted from http://stackoverflow.com/a/9427216/374865.
|
train
|
https://github.com/inspirehep/inspire-utils/blob/b0b5983c58700735dfde75e4c8bd32834f2473d4/inspire_utils/dedupers.py#L43-L70
|
[
"def _freeze(o):\n \"\"\"Recursively freezes a dict into an hashable object.\n\n Adapted from http://stackoverflow.com/a/21614155/374865.\n \"\"\"\n if isinstance(o, dict):\n return frozenset((k, _freeze(v)) for k, v in six.iteritems(o))\n elif isinstance(o, (list, tuple)):\n return tuple(_freeze(v) for v in o)\n else:\n return o\n"
] |
# -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2014-2017 CERN.
#
# INSPIRE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE. If not, see <http://www.gnu.org/licenses/>.
#
# In applying this license, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
from __future__ import absolute_import, division, print_function
import six
def dedupe_list(l):
"""Remove duplicates from a list preserving the order.
We might be tempted to use the list(set(l)) idiom, but it doesn't preserve
the order, which hinders testability.
"""
result = []
for el in l:
if el not in result:
result.append(el)
return result
|
inspirehep/inspire-utils
|
inspire_utils/helpers.py
|
force_list
|
python
|
def force_list(data):
if data is None:
return []
elif not isinstance(data, (list, tuple, set)):
return [data]
elif isinstance(data, (tuple, set)):
return list(data)
return data
|
Force ``data`` to become a list.
You should use this method whenever you don't want to deal with the
fact that ``NoneType`` can't be iterated over. For example, instead
of writing::
bar = foo.get('bar')
if bar is not None:
for el in bar:
...
you can write::
for el in force_list(foo.get('bar')):
...
Args:
data: any Python object.
Returns:
list: a list representation of ``data``.
Examples:
>>> force_list(None)
[]
>>> force_list('foo')
['foo']
>>> force_list(('foo', 'bar'))
['foo', 'bar']
>>> force_list(['foo', 'bar', 'baz'])
['foo', 'bar', 'baz']
|
train
|
https://github.com/inspirehep/inspire-utils/blob/b0b5983c58700735dfde75e4c8bd32834f2473d4/inspire_utils/helpers.py#L30-L70
| null |
# -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2014-2017 CERN.
#
# INSPIRE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE. If not, see <http://www.gnu.org/licenses/>.
#
# In applying this license, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
from __future__ import absolute_import, division, print_function
from lxml import etree
import six
def maybe_float(el):
"""Return a ``float`` if possible, otherwise ``None``.
Args:
el: any Python object.
Returns:
float: a ``float`` parsed from the object, or ``None``.
Examples:
>>> maybe_float('35.0499505')
35.0499505
"""
try:
return float(el)
except (TypeError, ValueError):
pass
def maybe_int(el):
"""Return an ``int`` if possible, otherwise ``None``.
Args:
el: any Python object.
Returns:
Union[int, NoneType]: an ``int`` parsed from the object, or ``None``.
Examples:
>>> maybe_int('10')
10
"""
try:
return int(el)
except (TypeError, ValueError):
pass
def remove_tags(dirty, allowed_tags=(), allowed_trees=(), strip=None):
"""Selectively remove tags.
This removes all tags in ``dirty``, stripping also the contents of tags
matching the XPath selector in ``strip``, and keeping all tags that are
subtags of tags in ``allowed_trees`` and tags in ``allowed_tags``.
Args:
dirty(Union[str, scrapy.selector.Selector, lxml.etree._Element]): the
input to clean up.
allowed_tags(Container): tags to be kept in the output, but not necessarily
its subtags.
allowed_trees(Container): tags to be kept, along with all its subtags,
in the output.
strip(str): optional xpath selector. If it matches a tag, its
contents will also be stripped. Useful axes are ``@`` for attribute access
and ``self`` to select a given tag.
Returns:
str: the textual content of ``dirty``, with some tags kept and some text
removed.
Examples:
>>> tag = '<p><b><i>Only</i></b> this text remains.<span class="hidden">Not this one.</span></p>'
>>> remove_tags(tag, allowed_tree=('b',), strip='@class="hidden"')
u'<b><i>Only</i></b> this text remains.'
>>> remove_tags(tag, allowed_tags=('b',), strip='@class="hidden"')
u'<b>Only</b> this text remains.'
>>> remove_tags(tag, allowed_tags=('b',), strip='self::span')
u'<b>Only</b> this text remains.'
"""
if isinstance(dirty, six.string_types):
element = etree.fromstring(u''.join(('<DUMMYROOTTAG>', dirty, '</DUMMYROOTTAG>')))
elif isinstance(dirty, etree._Element):
element = dirty
else: # assuming scrapy Selector
element = dirty.root
if element.tag in allowed_trees:
return etree.tostring(element, encoding='unicode')
tail = element.tail or u''
if strip and element.xpath(strip):
return tail
subtext = u''.join(
remove_tags(child, allowed_tags=allowed_tags, allowed_trees=allowed_trees, strip=strip)
for child in element
)
text = element.text or u''
if element.tag in allowed_tags:
for child in element:
element.remove(child)
element.text = u''.join((text, subtext))
return etree.tostring(element, encoding='unicode')
return u''.join((text, subtext, tail))
|
inspirehep/inspire-utils
|
inspire_utils/helpers.py
|
remove_tags
|
python
|
def remove_tags(dirty, allowed_tags=(), allowed_trees=(), strip=None):
if isinstance(dirty, six.string_types):
element = etree.fromstring(u''.join(('<DUMMYROOTTAG>', dirty, '</DUMMYROOTTAG>')))
elif isinstance(dirty, etree._Element):
element = dirty
else: # assuming scrapy Selector
element = dirty.root
if element.tag in allowed_trees:
return etree.tostring(element, encoding='unicode')
tail = element.tail or u''
if strip and element.xpath(strip):
return tail
subtext = u''.join(
remove_tags(child, allowed_tags=allowed_tags, allowed_trees=allowed_trees, strip=strip)
for child in element
)
text = element.text or u''
if element.tag in allowed_tags:
for child in element:
element.remove(child)
element.text = u''.join((text, subtext))
return etree.tostring(element, encoding='unicode')
return u''.join((text, subtext, tail))
|
Selectively remove tags.
This removes all tags in ``dirty``, stripping also the contents of tags
matching the XPath selector in ``strip``, and keeping all tags that are
subtags of tags in ``allowed_trees`` and tags in ``allowed_tags``.
Args:
dirty(Union[str, scrapy.selector.Selector, lxml.etree._Element]): the
input to clean up.
allowed_tags(Container): tags to be kept in the output, but not necessarily
its subtags.
allowed_trees(Container): tags to be kept, along with all its subtags,
in the output.
strip(str): optional xpath selector. If it matches a tag, its
contents will also be stripped. Useful axes are ``@`` for attribute access
and ``self`` to select a given tag.
Returns:
str: the textual content of ``dirty``, with some tags kept and some text
removed.
Examples:
>>> tag = '<p><b><i>Only</i></b> this text remains.<span class="hidden">Not this one.</span></p>'
>>> remove_tags(tag, allowed_tree=('b',), strip='@class="hidden"')
u'<b><i>Only</i></b> this text remains.'
>>> remove_tags(tag, allowed_tags=('b',), strip='@class="hidden"')
u'<b>Only</b> this text remains.'
>>> remove_tags(tag, allowed_tags=('b',), strip='self::span')
u'<b>Only</b> this text remains.'
|
train
|
https://github.com/inspirehep/inspire-utils/blob/b0b5983c58700735dfde75e4c8bd32834f2473d4/inspire_utils/helpers.py#L113-L171
| null |
# -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2014-2017 CERN.
#
# INSPIRE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE. If not, see <http://www.gnu.org/licenses/>.
#
# In applying this license, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
from __future__ import absolute_import, division, print_function
from lxml import etree
import six
def force_list(data):
"""Force ``data`` to become a list.
You should use this method whenever you don't want to deal with the
fact that ``NoneType`` can't be iterated over. For example, instead
of writing::
bar = foo.get('bar')
if bar is not None:
for el in bar:
...
you can write::
for el in force_list(foo.get('bar')):
...
Args:
data: any Python object.
Returns:
list: a list representation of ``data``.
Examples:
>>> force_list(None)
[]
>>> force_list('foo')
['foo']
>>> force_list(('foo', 'bar'))
['foo', 'bar']
>>> force_list(['foo', 'bar', 'baz'])
['foo', 'bar', 'baz']
"""
if data is None:
return []
elif not isinstance(data, (list, tuple, set)):
return [data]
elif isinstance(data, (tuple, set)):
return list(data)
return data
def maybe_float(el):
"""Return a ``float`` if possible, otherwise ``None``.
Args:
el: any Python object.
Returns:
float: a ``float`` parsed from the object, or ``None``.
Examples:
>>> maybe_float('35.0499505')
35.0499505
"""
try:
return float(el)
except (TypeError, ValueError):
pass
def maybe_int(el):
"""Return an ``int`` if possible, otherwise ``None``.
Args:
el: any Python object.
Returns:
Union[int, NoneType]: an ``int`` parsed from the object, or ``None``.
Examples:
>>> maybe_int('10')
10
"""
try:
return int(el)
except (TypeError, ValueError):
pass
|
inspirehep/inspire-utils
|
inspire_utils/logging.py
|
getStackTraceLogger
|
python
|
def getStackTraceLogger(*args, **kwargs):
logger = logging.getLogger(*args, **kwargs)
return StackTraceLogger(logger)
|
Returns a :class:`StackTrace` logger that wraps a Python logger instance.
|
train
|
https://github.com/inspirehep/inspire-utils/blob/b0b5983c58700735dfde75e4c8bd32834f2473d4/inspire_utils/logging.py#L45-L48
| null |
# -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2014-2017 CERN.
#
# INSPIRE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE. If not, see <http://www.gnu.org/licenses/>.
#
# In applying this license, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
from __future__ import absolute_import, division, print_function
import logging
class StackTraceLogger(object):
def __init__(self, logger):
self.logger = logger
def __getattr__(self, item):
"""Preserve Python logger interface."""
return getattr(self.logger, item)
def error(self, message, *args, **kwargs):
"""Log error with stack trace and locals information.
By default, enables stack trace information in logging messages, so that stacktrace and locals appear in Sentry.
"""
kwargs.setdefault('extra', {}).setdefault('stack', True)
return self.logger.error(message, *args, **kwargs)
|
inspirehep/inspire-utils
|
inspire_utils/logging.py
|
StackTraceLogger.error
|
python
|
def error(self, message, *args, **kwargs):
kwargs.setdefault('extra', {}).setdefault('stack', True)
return self.logger.error(message, *args, **kwargs)
|
Log error with stack trace and locals information.
By default, enables stack trace information in logging messages, so that stacktrace and locals appear in Sentry.
|
train
|
https://github.com/inspirehep/inspire-utils/blob/b0b5983c58700735dfde75e4c8bd32834f2473d4/inspire_utils/logging.py#L36-L42
| null |
class StackTraceLogger(object):
def __init__(self, logger):
self.logger = logger
def __getattr__(self, item):
"""Preserve Python logger interface."""
return getattr(self.logger, item)
|
inspirehep/inspire-utils
|
inspire_utils/config.py
|
load_config
|
python
|
def load_config(paths=DEFAULT_CONFIG_PATHS):
config = Config()
for path in paths:
if os.path.isfile(path):
config.load_pyfile(path)
return config
|
Attempt to load config from paths, in order.
Args:
paths (List[string]): list of paths to python files
Return:
Config: loaded config
|
train
|
https://github.com/inspirehep/inspire-utils/blob/b0b5983c58700735dfde75e4c8bd32834f2473d4/inspire_utils/config.py#L75-L89
|
[
"def load_pyfile(self, path):\n \"\"\"Load python file as config.\n\n Args:\n path (string): path to the python file\n \"\"\"\n with open(path) as config_file:\n contents = config_file.read()\n try:\n exec(compile(contents, path, 'exec'), self)\n except Exception as e:\n raise MalformedConfig(path, six.text_type(e))\n"
] |
# -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2018 CERN.
#
# INSPIRE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE. If not, see <http://www.gnu.org/licenses/>.
#
# In applying this license, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
"""INSPIRE configuration loader.
Inspired by the Flask configuration loader:
https://github.com/pallets/flask/blob/40745bb338c45498ca19010175f341332ab2eefb/flask/config.py
"""
from __future__ import absolute_import, division, print_function
import os
import six
DEFAULT_CONFIG_PATHS = (
'./var/inspirehep-instance/inspirehep.cfg',
'./inspirehep.cfg',
)
class MalformedConfig(Exception):
def __init__(self, file_path, cause):
"""Exception to be raised if pased file is invalid.
Args:
file_path (string): path to bad config
cause (string): reason of failure, i.e. what exactly was the
problem while parsing
"""
message = six.text_type("Malformed config at {}: {}").format(
file_path,
cause
)
super(MalformedConfig, self).__init__(message)
class Config(dict):
def __init__(self, defaults=None):
super(Config, self).__init__(defaults or {})
def load_pyfile(self, path):
"""Load python file as config.
Args:
path (string): path to the python file
"""
with open(path) as config_file:
contents = config_file.read()
try:
exec(compile(contents, path, 'exec'), self)
except Exception as e:
raise MalformedConfig(path, six.text_type(e))
|
inspirehep/inspire-utils
|
inspire_utils/config.py
|
Config.load_pyfile
|
python
|
def load_pyfile(self, path):
with open(path) as config_file:
contents = config_file.read()
try:
exec(compile(contents, path, 'exec'), self)
except Exception as e:
raise MalformedConfig(path, six.text_type(e))
|
Load python file as config.
Args:
path (string): path to the python file
|
train
|
https://github.com/inspirehep/inspire-utils/blob/b0b5983c58700735dfde75e4c8bd32834f2473d4/inspire_utils/config.py#L61-L72
| null |
class Config(dict):
def __init__(self, defaults=None):
super(Config, self).__init__(defaults or {})
|
inspirehep/inspire-utils
|
inspire_utils/record.py
|
get_value
|
python
|
def get_value(record, key, default=None):
def getitem(k, v, default):
if isinstance(v, string_types):
raise KeyError
elif isinstance(v, dict):
return v[k]
elif ']' in k:
k = k[:-1].replace('n', '-1')
# Work around for list indexes and slices
try:
return v[int(k)]
except IndexError:
return default
except ValueError:
return v[slice(*map(
lambda x: int(x.strip()) if x.strip() else None,
k.split(':')
))]
else:
tmp = []
for inner_v in v:
try:
tmp.append(getitem(k, inner_v, default))
except KeyError:
continue
return tmp
# Wrap a top-level list in a dict
if isinstance(record, list):
record = {'record': record}
key = '.'.join(['record', key])
# Check if we are using python regular keys
try:
return record[key]
except KeyError:
pass
keys = SPLIT_KEY_PATTERN.split(key)
value = record
for k in keys:
try:
value = getitem(k, value, default)
except KeyError:
return default
return value
|
Return item as `dict.__getitem__` but using 'smart queries'.
.. note::
Accessing one value in a normal way, meaning d['a'], is almost as
fast as accessing a regular dictionary. But using the special
name convention is a bit slower than using the regular access:
.. code-block:: python
>>> %timeit x = dd['a[0].b']
100000 loops, best of 3: 3.94 us per loop
>>> %timeit x = dd['a'][0]['b']
1000000 loops, best of 3: 598 ns per loop
|
train
|
https://github.com/inspirehep/inspire-utils/blob/b0b5983c58700735dfde75e4c8bd32834f2473d4/inspire_utils/record.py#L33-L91
|
[
"def getitem(k, v, default):\n if isinstance(v, string_types):\n raise KeyError\n elif isinstance(v, dict):\n return v[k]\n elif ']' in k:\n k = k[:-1].replace('n', '-1')\n # Work around for list indexes and slices\n try:\n return v[int(k)]\n except IndexError:\n return default\n except ValueError:\n return v[slice(*map(\n lambda x: int(x.strip()) if x.strip() else None,\n k.split(':')\n ))]\n else:\n tmp = []\n for inner_v in v:\n try:\n tmp.append(getitem(k, inner_v, default))\n except KeyError:\n continue\n return tmp\n"
] |
# -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2014-2017 CERN.
#
# INSPIRE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE. If not, see <http://www.gnu.org/licenses/>.
#
# In applying this license, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
from __future__ import absolute_import, division, print_function
from six import string_types
import re
SPLIT_KEY_PATTERN = re.compile(r'\.|\[')
def get_values_for_schema(elements, schema):
"""Return all values from elements having a given schema.
Args:
elements(Iterable[dict]): an iterable of elements, which are all dicts
having at least the ``schema`` and ``value`` keys.
schema(str): the schema that the values need to follow.
Returns:
list: all values conforming to the given schema.
Example:
>>> elements = [
... {'schema': 'TWITTER', 'value': 's_w_hawking'},
... {'schema': 'WIKIPEDIA', 'value': 'Stephen_Hawking'}
... ]
>>> get_values_for_schema(elements, 'TWITTER')
['s_w_hawking']
"""
return [element['value'] for element in elements if element['schema'] == schema]
|
inspirehep/inspire-utils
|
inspire_utils/name.py
|
_prepare_nameparser_constants
|
python
|
def _prepare_nameparser_constants():
constants = Constants()
roman_numeral_suffixes = [u'v', u'vi', u'vii', u'viii', u'ix', u'x',
u'xii', u'xiii', u'xiv', u'xv']
titles = [u'Dr', u'Prof', u'Professor', u'Sir', u'Editor', u'Ed', u'Mr',
u'Mrs', u'Ms', u'Chair', u'Co-Chair', u'Chairs', u'co-Chairs']
constants.titles.remove(*constants.titles).add(*titles)
constants.suffix_not_acronyms.add(*roman_numeral_suffixes)
return constants
|
Prepare nameparser Constants.
Remove nameparser's titles and use our own and add as suffixes the roman numerals.
Configuration is the same for all names (i.e. instances).
|
train
|
https://github.com/inspirehep/inspire-utils/blob/b0b5983c58700735dfde75e4c8bd32834f2473d4/inspire_utils/name.py#L41-L54
| null |
# -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2014-2017 CERN.
#
# INSPIRE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE. If not, see <http://www.gnu.org/licenses/>.
#
# In applying this license, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
from __future__ import absolute_import, division, print_function
from itertools import product
from nameparser import HumanName
from nameparser.config import Constants
import six
from unidecode import unidecode
from .logging import getStackTraceLogger
LOGGER = getStackTraceLogger(__name__)
_LASTNAME_NON_LASTNAME_SEPARATORS = [u' ', u', ']
_NAMES_MAX_NUMBER_THRESHOLD = 5
"""Threshold for skipping the combinatorial expansion of names (when generating name variations). """
class ParsedName(object):
"""Class for representing a name.
After construction, the instance exposes the fields exposed by `HumanName` instance, i.e.
`title`, `first`, `middle`, `last`, `suffix`.
"""
constants = _prepare_nameparser_constants()
"""The default constants configuration for `HumanName` to use for parsing all names."""
def __init__(self, name, constants=None):
"""Create a ParsedName instance.
Args:
name (Union[str, HumanName]): The name to be parsed (must be non empty nor None).
constants (:class:`nameparser.config.Constants`): Configuration for `HumanName` instantiation.
(Can be None, if provided it overwrites the default one generated in
:method:`prepare_nameparser_constants`.)
"""
if not constants:
constants = ParsedName.constants
if isinstance(name, HumanName):
self._parsed_name = name
else:
self._parsed_name = HumanName(name, constants=constants)
self._parsed_name.capitalize()
def __iter__(self):
return self._parsed_name
def __len__(self):
return len(self._parsed_name)
def __repr__(self):
return repr(self._parsed_name)
def __str__(self):
return str(self._parsed_name)
@property
def first_initials(self):
return u' '.join(self.first_initials_list)
@property
def first(self):
return u'{} {}'.format(self._parsed_name.first, self._parsed_name.middle).strip()
@property
def first_initials_list(self):
names = self.first_list
return [(name[0] + u'.') for name in names]
@property
def first_list(self):
return self._parsed_name.first_list + self._parsed_name.middle_list
@property
def last(self):
return self._parsed_name.last
@property
def last_list(self):
return self._parsed_name.last_list
@property
def suffix(self):
return self._parsed_name.suffix
@property
def suffix_list(self):
return self._parsed_name.suffix_list
@classmethod
def loads(cls, name):
"""Load a parsed name from a string.
Raises:
TypeError: when name isn't a type of `six.string_types`.
ValueError: when name is empty or None.
"""
if not isinstance(name, six.string_types):
raise TypeError(u'arguments to {classname} must be of type {string_types}'.format(
classname=cls.__name__, string_types=repr(six.string_types)
))
if not name or name.isspace():
raise ValueError('name must not be empty')
return cls(name)
def dumps(self):
"""Dump the name to string, after normalizing it."""
def _is_initial(author_name):
return len(author_name) == 1 or u'.' in author_name
def _ensure_dotted_initials(author_name):
if _is_initial(author_name) \
and u'.' not in author_name:
seq = (author_name, u'.')
author_name = u''.join(seq)
return author_name
def _ensure_dotted_suffixes(author_suffix):
if u'.' not in author_suffix:
seq = (author_suffix, u'.')
author_suffix = u''.join(seq)
return author_suffix
def _is_roman_numeral(suffix):
"""Controls that the user's input only contains valid roman numerals"""
valid_roman_numerals = [u'M', u'D', u'C', u'L', u'X',
u'V', u'I', u'(', u')']
return all(letters in valid_roman_numerals
for letters in suffix.upper())
first_and_middle_names = iter(_ensure_dotted_initials(name) for name in self.first_list)
try:
prev = next(first_and_middle_names)
except StopIteration:
LOGGER.warning(u"Cannot process %s properly",
self._parsed_name.original)
prev = self._parsed_name.original
names_with_spaces = [prev]
for name in first_and_middle_names:
if not _is_initial(name) or not _is_initial(prev):
names_with_spaces.append(' ')
prev = name
names_with_spaces.append(prev)
normalized_names = u''.join(names_with_spaces)
if _is_roman_numeral(self.suffix):
suffix = self.suffix.upper()
else:
suffix = _ensure_dotted_suffixes(self.suffix)
final_name = u', '.join(
part for part in (self.last, normalized_names.strip(), suffix)
if part)
# Replace unicode curly apostrophe to normal apostrophe.
final_name = final_name.replace(u'’', '\'')
return final_name
def pprint(self, initials_only=False):
"""Pretty print the name.
Args:
initials_only (bool): ``True`` if we want the first names to be displayed with
only the initial followed by a dot. ``False`` otherwise.
Examples:
>>> ParsedName('Lieber, Stanley Martin').pprint()
u'Stanley Martin Lieber'
>>> ParsedName('Lieber, Stanley Martin').pprint(initials_only=True)
u'S. M. Lieber'
>>> ParsedName('Downey, Robert Jr.').pprint(initials_only=True)
u'R. Downey Jr.'
"""
last_name = self.last
suffixes = ', ' + self.suffix if self.suffix else ''
if initials_only and last_name != u'':
first_names = self.first_initials
else:
first_names = self.first
return u'{} {}{}'.format(first_names, last_name, suffixes).strip()
@classmethod
def from_parts(
cls,
first=None,
last=None,
middle=None,
suffix=None,
title=None
):
name = HumanName()
name.first = first
name.middle = middle
name.last = last
name.suffix = suffix
name.title = title
return ParsedName(name)
def normalize_name(name):
"""Normalize name.
Args:
name (six.text_type): The name to be normalized.
Returns:
str: The normalized name.
"""
if not name or name.isspace():
return None
return ParsedName.loads(name).dumps()
def _generate_non_lastnames_variations(non_lastnames):
"""Generate variations for all non-lastnames.
E.g. For 'John Richard', this method generates: [
'John', 'J', 'Richard', 'R', 'John Richard', 'John R', 'J Richard', 'J R',
]
"""
if not non_lastnames:
return []
# Generate name transformations in place for all non lastnames. Transformations include:
# 1. Drop non last name, 2. use initial, 3. use full non lastname
for idx, non_lastname in enumerate(non_lastnames):
non_lastnames[idx] = (u'', non_lastname[0], non_lastname)
# Generate the cartesian product of the transformed non lastnames and flatten them.
return [
(u' '.join(var_elem for var_elem in variation if var_elem)).strip()
for variation in product(*non_lastnames)
]
def _generate_lastnames_variations(lastnames):
"""Generate variations for lastnames.
Note:
This method follows the assumption that the first last name is the main one.
E.g. For 'Caro Estevez', this method generates: ['Caro', 'Caro Estevez'].
In the case the lastnames are dashed, it splits them in two.
"""
if not lastnames:
return []
split_lastnames = [split_lastname for lastname in lastnames for split_lastname in lastname.split('-')]
lastnames_variations = split_lastnames
if len(split_lastnames) > 1:
# Generate lastnames concatenation if there are more than one lastname after split.
lastnames_variations.append(u' '.join([lastname for lastname in split_lastnames]))
return lastnames_variations
def generate_name_variations(name):
"""Generate name variations for a given name.
Args:
name (six.text_type): The name whose variations are to be generated.
Returns:
list: All the name variations for the given name.
Notes:
Uses `unidecode` for doing unicode characters transliteration to ASCII ones. This was chosen so that we can map
both full names of authors in HEP records and user's input to the same space and thus make exact queries work.
"""
def _update_name_variations_with_product(set_a, set_b):
name_variations.update([
unidecode((names_variation[0] +
separator +
names_variation[1]).strip(''.join(_LASTNAME_NON_LASTNAME_SEPARATORS))).lower()
for names_variation
in product(set_a, set_b)
for separator
in _LASTNAME_NON_LASTNAME_SEPARATORS
])
parsed_name = ParsedName.loads(name)
# Handle rare-case of single-name
if len(parsed_name) == 1:
return [parsed_name.dumps().lower()]
name_variations = set()
# We need to filter out empty entries, since HumanName for this name `Perelstein,, Maxim` returns a first_list with
# an empty string element.
non_lastnames = [
non_lastname
for non_lastname
in parsed_name.first_list + parsed_name.suffix_list
if non_lastname
]
# This is needed because due to erroneous data (e.g. having many authors in a single authors field) ends up
# requiring a lot of memory (due to combinatorial expansion of all non lastnames).
# The policy is to use the input as a name variation, since this data will have to be curated.
if len(non_lastnames) > _NAMES_MAX_NUMBER_THRESHOLD or len(parsed_name.last_list) > _NAMES_MAX_NUMBER_THRESHOLD:
LOGGER.error('Skipping name variations generation - too many names in: "%s"', name)
return [name]
non_lastnames_variations = \
_generate_non_lastnames_variations(non_lastnames)
lastnames_variations = _generate_lastnames_variations(parsed_name.last_list)
# Create variations where lastnames comes first and is separated from non lastnames either by space or comma.
_update_name_variations_with_product(lastnames_variations, non_lastnames_variations)
# Second part of transformations - having the lastnames in the end.
_update_name_variations_with_product(non_lastnames_variations, lastnames_variations)
return list(name_variations)
def format_name(name, initials_only=False):
"""Format a schema-compliant name string in a human-friendy format.
This is a convenience wrapper around :ref:`ParsedName`, which should be
used instead if more features are needed.
Args:
name (str): The name to format, in pretty much any format.
initials_only (bool): ``True`` if we want the first names to be displayed with only the initial followed by a dot. ``False`` otherwise.
Examples:
>>> format_name('Lieber, Stanley Martin')
u'Stanley Martin Lieber'
>>> format_name('Lieber, Stanley Martin', initials_only=True)
u'S. M. Lieber'
>>> format_name('Downey, Robert Jr.', initials_only=True)
u'R. Downey Jr.'
"""
return ParsedName.loads(name).pprint(initials_only)
|
inspirehep/inspire-utils
|
inspire_utils/name.py
|
_generate_non_lastnames_variations
|
python
|
def _generate_non_lastnames_variations(non_lastnames):
if not non_lastnames:
return []
# Generate name transformations in place for all non lastnames. Transformations include:
# 1. Drop non last name, 2. use initial, 3. use full non lastname
for idx, non_lastname in enumerate(non_lastnames):
non_lastnames[idx] = (u'', non_lastname[0], non_lastname)
# Generate the cartesian product of the transformed non lastnames and flatten them.
return [
(u' '.join(var_elem for var_elem in variation if var_elem)).strip()
for variation in product(*non_lastnames)
]
|
Generate variations for all non-lastnames.
E.g. For 'John Richard', this method generates: [
'John', 'J', 'Richard', 'R', 'John Richard', 'John R', 'J Richard', 'J R',
]
|
train
|
https://github.com/inspirehep/inspire-utils/blob/b0b5983c58700735dfde75e4c8bd32834f2473d4/inspire_utils/name.py#L261-L280
| null |
# -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2014-2017 CERN.
#
# INSPIRE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE. If not, see <http://www.gnu.org/licenses/>.
#
# In applying this license, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
from __future__ import absolute_import, division, print_function
from itertools import product
from nameparser import HumanName
from nameparser.config import Constants
import six
from unidecode import unidecode
from .logging import getStackTraceLogger
LOGGER = getStackTraceLogger(__name__)
_LASTNAME_NON_LASTNAME_SEPARATORS = [u' ', u', ']
_NAMES_MAX_NUMBER_THRESHOLD = 5
"""Threshold for skipping the combinatorial expansion of names (when generating name variations). """
def _prepare_nameparser_constants():
"""Prepare nameparser Constants.
Remove nameparser's titles and use our own and add as suffixes the roman numerals.
Configuration is the same for all names (i.e. instances).
"""
constants = Constants()
roman_numeral_suffixes = [u'v', u'vi', u'vii', u'viii', u'ix', u'x',
u'xii', u'xiii', u'xiv', u'xv']
titles = [u'Dr', u'Prof', u'Professor', u'Sir', u'Editor', u'Ed', u'Mr',
u'Mrs', u'Ms', u'Chair', u'Co-Chair', u'Chairs', u'co-Chairs']
constants.titles.remove(*constants.titles).add(*titles)
constants.suffix_not_acronyms.add(*roman_numeral_suffixes)
return constants
class ParsedName(object):
    """Class for representing a name.

    After construction, the instance exposes the fields exposed by `HumanName` instance, i.e.
    `title`, `first`, `middle`, `last`, `suffix`.
    """
    # Shared parser configuration, built once at class-definition time.
    constants = _prepare_nameparser_constants()
    """The default constants configuration for `HumanName` to use for parsing all names."""

    def __init__(self, name, constants=None):
        """Create a ParsedName instance.

        Args:
            name (Union[str, HumanName]): The name to be parsed (must be non empty nor None).
            constants (:class:`nameparser.config.Constants`): Configuration for `HumanName` instantiation.
                (Can be None, if provided it overwrites the default one generated in
                :method:`prepare_nameparser_constants`.)
        """
        if not constants:
            constants = ParsedName.constants
        # Accept an already-parsed HumanName as-is; otherwise parse the raw string.
        if isinstance(name, HumanName):
            self._parsed_name = name
        else:
            self._parsed_name = HumanName(name, constants=constants)
        self._parsed_name.capitalize()

    def __iter__(self):
        # NOTE(review): returns the HumanName object itself rather than
        # iter(self._parsed_name); relies on HumanName being iterable — confirm.
        return self._parsed_name

    def __len__(self):
        return len(self._parsed_name)

    def __repr__(self):
        return repr(self._parsed_name)

    def __str__(self):
        return str(self._parsed_name)

    @property
    def first_initials(self):
        # Initials of first and middle names, space-separated, e.g. u'S. M.'.
        return u' '.join(self.first_initials_list)

    @property
    def first(self):
        # First plus middle names as a single space-separated string.
        return u'{} {}'.format(self._parsed_name.first, self._parsed_name.middle).strip()

    @property
    def first_initials_list(self):
        # One dotted initial per first/middle name, e.g. [u'S.', u'M.'].
        names = self.first_list
        return [(name[0] + u'.') for name in names]

    @property
    def first_list(self):
        # All first and middle name tokens, in order.
        return self._parsed_name.first_list + self._parsed_name.middle_list

    @property
    def last(self):
        return self._parsed_name.last

    @property
    def last_list(self):
        return self._parsed_name.last_list

    @property
    def suffix(self):
        return self._parsed_name.suffix

    @property
    def suffix_list(self):
        return self._parsed_name.suffix_list

    @classmethod
    def loads(cls, name):
        """Load a parsed name from a string.

        Raises:
            TypeError: when name isn't a type of `six.string_types`.
            ValueError: when name is empty or None.
        """
        if not isinstance(name, six.string_types):
            raise TypeError(u'arguments to {classname} must be of type {string_types}'.format(
                classname=cls.__name__, string_types=repr(six.string_types)
            ))
        if not name or name.isspace():
            raise ValueError('name must not be empty')
        return cls(name)

    def dumps(self):
        """Dump the name to string, after normalizing it."""
        def _is_initial(author_name):
            # A single letter, or anything already containing a dot.
            return len(author_name) == 1 or u'.' in author_name

        def _ensure_dotted_initials(author_name):
            # Append a dot to bare single-letter initials.
            if _is_initial(author_name) \
                    and u'.' not in author_name:
                seq = (author_name, u'.')
                author_name = u''.join(seq)
            return author_name

        def _ensure_dotted_suffixes(author_suffix):
            # Append a dot to suffixes, e.g. u'Jr' -> u'Jr.'.
            if u'.' not in author_suffix:
                seq = (author_suffix, u'.')
                author_suffix = u''.join(seq)
            return author_suffix

        def _is_roman_numeral(suffix):
            """Controls that the user's input only contains valid roman numerals"""
            valid_roman_numerals = [u'M', u'D', u'C', u'L', u'X',
                                    u'V', u'I', u'(', u')']
            return all(letters in valid_roman_numerals
                       for letters in suffix.upper())

        first_and_middle_names = iter(_ensure_dotted_initials(name) for name in self.first_list)

        try:
            prev = next(first_and_middle_names)
        except StopIteration:
            # No first/middle names at all; fall back to the raw input.
            LOGGER.warning(u"Cannot process %s properly",
                           self._parsed_name.original)
            prev = self._parsed_name.original

        names_with_spaces = [prev]
        for name in first_and_middle_names:
            # Consecutive initials are glued together; a space is inserted
            # whenever either neighbour is a full (non-initial) name.
            if not _is_initial(name) or not _is_initial(prev):
                names_with_spaces.append(' ')
            prev = name
            names_with_spaces.append(prev)
        normalized_names = u''.join(names_with_spaces)

        # Roman-numeral suffixes are upper-cased; others get a trailing dot.
        if _is_roman_numeral(self.suffix):
            suffix = self.suffix.upper()
        else:
            suffix = _ensure_dotted_suffixes(self.suffix)

        final_name = u', '.join(
            part for part in (self.last, normalized_names.strip(), suffix)
            if part)

        # Replace unicode curly apostrophe to normal apostrophe.
        final_name = final_name.replace(u'’', '\'')

        return final_name

    def pprint(self, initials_only=False):
        """Pretty print the name.

        Args:
            initials_only (bool): ``True`` if we want the first names to be displayed with
                only the initial followed by a dot. ``False`` otherwise.

        Examples:
            >>> ParsedName('Lieber, Stanley Martin').pprint()
            u'Stanley Martin Lieber'
            >>> ParsedName('Lieber, Stanley Martin').pprint(initials_only=True)
            u'S. M. Lieber'
            >>> ParsedName('Downey, Robert Jr.').pprint(initials_only=True)
            u'R. Downey Jr.'
        """
        last_name = self.last
        suffixes = ', ' + self.suffix if self.suffix else ''
        # Only abbreviate first names when there is a last name to anchor them.
        if initials_only and last_name != u'':
            first_names = self.first_initials
        else:
            first_names = self.first
        return u'{} {}{}'.format(first_names, last_name, suffixes).strip()

    @classmethod
    def from_parts(
        cls,
        first=None,
        last=None,
        middle=None,
        suffix=None,
        title=None
    ):
        """Build a ParsedName from individual name components.

        Returns:
            ParsedName: wraps a `HumanName` assembled from the given parts.
        """
        name = HumanName()
        name.first = first
        name.middle = middle
        name.last = last
        name.suffix = suffix
        name.title = title
        return ParsedName(name)
def normalize_name(name):
    """Normalize name.

    Args:
        name (six.text_type): The name to be normalized.

    Returns:
        str: The normalized name, or ``None`` for empty/whitespace input.
    """
    has_content = bool(name) and not name.isspace()
    if not has_content:
        return None
    parsed = ParsedName.loads(name)
    return parsed.dumps()
def _generate_lastnames_variations(lastnames):
"""Generate variations for lastnames.
Note:
This method follows the assumption that the first last name is the main one.
E.g. For 'Caro Estevez', this method generates: ['Caro', 'Caro Estevez'].
In the case the lastnames are dashed, it splits them in two.
"""
if not lastnames:
return []
split_lastnames = [split_lastname for lastname in lastnames for split_lastname in lastname.split('-')]
lastnames_variations = split_lastnames
if len(split_lastnames) > 1:
# Generate lastnames concatenation if there are more than one lastname after split.
lastnames_variations.append(u' '.join([lastname for lastname in split_lastnames]))
return lastnames_variations
def generate_name_variations(name):
    """Generate name variations for a given name.

    Args:
        name (six.text_type): The name whose variations are to be generated.

    Returns:
        list: All the name variations for the given name.

    Notes:
        Uses `unidecode` for doing unicode characters transliteration to ASCII ones. This was chosen so that we can map
        both full names of authors in HEP records and user's input to the same space and thus make exact queries work.
    """
    def _update_name_variations_with_product(set_a, set_b):
        # Join every (a, b) pair with each separator, strip stray separators,
        # transliterate to ASCII, lowercase, and add to `name_variations`.
        name_variations.update([
            unidecode((names_variation[0] +
                       separator +
                       names_variation[1]).strip(''.join(_LASTNAME_NON_LASTNAME_SEPARATORS))).lower()
            for names_variation
            in product(set_a, set_b)
            for separator
            in _LASTNAME_NON_LASTNAME_SEPARATORS
        ])

    parsed_name = ParsedName.loads(name)

    # Handle rare-case of single-name
    if len(parsed_name) == 1:
        return [parsed_name.dumps().lower()]

    name_variations = set()

    # We need to filter out empty entries, since HumanName for this name `Perelstein,, Maxim` returns a first_list with
    # an empty string element.
    non_lastnames = [
        non_lastname
        for non_lastname
        in parsed_name.first_list + parsed_name.suffix_list
        if non_lastname
    ]

    # This is needed because due to erroneous data (e.g. having many authors in a single authors field) ends up
    # requiring a lot of memory (due to combinatorial expansion of all non lastnames).
    # The policy is to use the input as a name variation, since this data will have to be curated.
    if len(non_lastnames) > _NAMES_MAX_NUMBER_THRESHOLD or len(parsed_name.last_list) > _NAMES_MAX_NUMBER_THRESHOLD:
        LOGGER.error('Skipping name variations generation - too many names in: "%s"', name)
        return [name]

    non_lastnames_variations = \
        _generate_non_lastnames_variations(non_lastnames)
    lastnames_variations = _generate_lastnames_variations(parsed_name.last_list)

    # Create variations where lastnames comes first and is separated from non lastnames either by space or comma.
    _update_name_variations_with_product(lastnames_variations, non_lastnames_variations)

    # Second part of transformations - having the lastnames in the end.
    _update_name_variations_with_product(non_lastnames_variations, lastnames_variations)

    return list(name_variations)
def format_name(name, initials_only=False):
    """Format a schema-compliant name string in a human-friendly format.

    This is a convenience wrapper around :ref:`ParsedName`, which should be
    used instead if more features are needed.

    Args:
        name (str): The name to format, in pretty much any format.
        initials_only (bool): ``True`` if we want the first names to be displayed with only the initial followed by a dot. ``False`` otherwise.

    Examples:
        >>> format_name('Lieber, Stanley Martin')
        u'Stanley Martin Lieber'
        >>> format_name('Lieber, Stanley Martin', initials_only=True)
        u'S. M. Lieber'
        >>> format_name('Downey, Robert Jr.', initials_only=True)
        u'R. Downey Jr.'
    """
    parsed = ParsedName.loads(name)
    return parsed.pprint(initials_only)
|
inspirehep/inspire-utils
|
inspire_utils/name.py
|
_generate_lastnames_variations
|
python
|
def _generate_lastnames_variations(lastnames):
if not lastnames:
return []
split_lastnames = [split_lastname for lastname in lastnames for split_lastname in lastname.split('-')]
lastnames_variations = split_lastnames
if len(split_lastnames) > 1:
# Generate lastnames concatenation if there are more than one lastname after split.
lastnames_variations.append(u' '.join([lastname for lastname in split_lastnames]))
return lastnames_variations
|
Generate variations for lastnames.
Note:
This method follows the assumption that the first last name is the main one.
E.g. For 'Caro Estevez', this method generates: ['Caro', 'Caro Estevez'].
In the case the lastnames are dashed, it splits them in two.
|
train
|
https://github.com/inspirehep/inspire-utils/blob/b0b5983c58700735dfde75e4c8bd32834f2473d4/inspire_utils/name.py#L283-L301
| null |
# -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2014-2017 CERN.
#
# INSPIRE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE. If not, see <http://www.gnu.org/licenses/>.
#
# In applying this license, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
from __future__ import absolute_import, division, print_function
from itertools import product
from nameparser import HumanName
from nameparser.config import Constants
import six
from unidecode import unidecode
from .logging import getStackTraceLogger
LOGGER = getStackTraceLogger(__name__)
_LASTNAME_NON_LASTNAME_SEPARATORS = [u' ', u', ']
_NAMES_MAX_NUMBER_THRESHOLD = 5
"""Threshold for skipping the combinatorial expansion of names (when generating name variations). """
def _prepare_nameparser_constants():
"""Prepare nameparser Constants.
Remove nameparser's titles and use our own and add as suffixes the roman numerals.
Configuration is the same for all names (i.e. instances).
"""
constants = Constants()
roman_numeral_suffixes = [u'v', u'vi', u'vii', u'viii', u'ix', u'x',
u'xii', u'xiii', u'xiv', u'xv']
titles = [u'Dr', u'Prof', u'Professor', u'Sir', u'Editor', u'Ed', u'Mr',
u'Mrs', u'Ms', u'Chair', u'Co-Chair', u'Chairs', u'co-Chairs']
constants.titles.remove(*constants.titles).add(*titles)
constants.suffix_not_acronyms.add(*roman_numeral_suffixes)
return constants
class ParsedName(object):
"""Class for representing a name.
After construction, the instance exposes the fields exposed by `HumanName` instance, i.e.
`title`, `first`, `middle`, `last`, `suffix`.
"""
constants = _prepare_nameparser_constants()
"""The default constants configuration for `HumanName` to use for parsing all names."""
def __init__(self, name, constants=None):
"""Create a ParsedName instance.
Args:
name (Union[str, HumanName]): The name to be parsed (must be non empty nor None).
constants (:class:`nameparser.config.Constants`): Configuration for `HumanName` instantiation.
(Can be None, if provided it overwrites the default one generated in
:method:`prepare_nameparser_constants`.)
"""
if not constants:
constants = ParsedName.constants
if isinstance(name, HumanName):
self._parsed_name = name
else:
self._parsed_name = HumanName(name, constants=constants)
self._parsed_name.capitalize()
def __iter__(self):
return self._parsed_name
def __len__(self):
return len(self._parsed_name)
def __repr__(self):
return repr(self._parsed_name)
def __str__(self):
return str(self._parsed_name)
@property
def first_initials(self):
return u' '.join(self.first_initials_list)
@property
def first(self):
return u'{} {}'.format(self._parsed_name.first, self._parsed_name.middle).strip()
@property
def first_initials_list(self):
names = self.first_list
return [(name[0] + u'.') for name in names]
@property
def first_list(self):
return self._parsed_name.first_list + self._parsed_name.middle_list
@property
def last(self):
return self._parsed_name.last
@property
def last_list(self):
return self._parsed_name.last_list
@property
def suffix(self):
return self._parsed_name.suffix
@property
def suffix_list(self):
return self._parsed_name.suffix_list
@classmethod
def loads(cls, name):
"""Load a parsed name from a string.
Raises:
TypeError: when name isn't a type of `six.string_types`.
ValueError: when name is empty or None.
"""
if not isinstance(name, six.string_types):
raise TypeError(u'arguments to {classname} must be of type {string_types}'.format(
classname=cls.__name__, string_types=repr(six.string_types)
))
if not name or name.isspace():
raise ValueError('name must not be empty')
return cls(name)
def dumps(self):
"""Dump the name to string, after normalizing it."""
def _is_initial(author_name):
return len(author_name) == 1 or u'.' in author_name
def _ensure_dotted_initials(author_name):
if _is_initial(author_name) \
and u'.' not in author_name:
seq = (author_name, u'.')
author_name = u''.join(seq)
return author_name
def _ensure_dotted_suffixes(author_suffix):
if u'.' not in author_suffix:
seq = (author_suffix, u'.')
author_suffix = u''.join(seq)
return author_suffix
def _is_roman_numeral(suffix):
"""Controls that the user's input only contains valid roman numerals"""
valid_roman_numerals = [u'M', u'D', u'C', u'L', u'X',
u'V', u'I', u'(', u')']
return all(letters in valid_roman_numerals
for letters in suffix.upper())
first_and_middle_names = iter(_ensure_dotted_initials(name) for name in self.first_list)
try:
prev = next(first_and_middle_names)
except StopIteration:
LOGGER.warning(u"Cannot process %s properly",
self._parsed_name.original)
prev = self._parsed_name.original
names_with_spaces = [prev]
for name in first_and_middle_names:
if not _is_initial(name) or not _is_initial(prev):
names_with_spaces.append(' ')
prev = name
names_with_spaces.append(prev)
normalized_names = u''.join(names_with_spaces)
if _is_roman_numeral(self.suffix):
suffix = self.suffix.upper()
else:
suffix = _ensure_dotted_suffixes(self.suffix)
final_name = u', '.join(
part for part in (self.last, normalized_names.strip(), suffix)
if part)
# Replace unicode curly apostrophe to normal apostrophe.
final_name = final_name.replace(u'’', '\'')
return final_name
def pprint(self, initials_only=False):
"""Pretty print the name.
Args:
initials_only (bool): ``True`` if we want the first names to be displayed with
only the initial followed by a dot. ``False`` otherwise.
Examples:
>>> ParsedName('Lieber, Stanley Martin').pprint()
u'Stanley Martin Lieber'
>>> ParsedName('Lieber, Stanley Martin').pprint(initials_only=True)
u'S. M. Lieber'
>>> ParsedName('Downey, Robert Jr.').pprint(initials_only=True)
u'R. Downey Jr.'
"""
last_name = self.last
suffixes = ', ' + self.suffix if self.suffix else ''
if initials_only and last_name != u'':
first_names = self.first_initials
else:
first_names = self.first
return u'{} {}{}'.format(first_names, last_name, suffixes).strip()
@classmethod
def from_parts(
cls,
first=None,
last=None,
middle=None,
suffix=None,
title=None
):
name = HumanName()
name.first = first
name.middle = middle
name.last = last
name.suffix = suffix
name.title = title
return ParsedName(name)
def normalize_name(name):
"""Normalize name.
Args:
name (six.text_type): The name to be normalized.
Returns:
str: The normalized name.
"""
if not name or name.isspace():
return None
return ParsedName.loads(name).dumps()
def _generate_non_lastnames_variations(non_lastnames):
"""Generate variations for all non-lastnames.
E.g. For 'John Richard', this method generates: [
'John', 'J', 'Richard', 'R', 'John Richard', 'John R', 'J Richard', 'J R',
]
"""
if not non_lastnames:
return []
# Generate name transformations in place for all non lastnames. Transformations include:
# 1. Drop non last name, 2. use initial, 3. use full non lastname
for idx, non_lastname in enumerate(non_lastnames):
non_lastnames[idx] = (u'', non_lastname[0], non_lastname)
# Generate the cartesian product of the transformed non lastnames and flatten them.
return [
(u' '.join(var_elem for var_elem in variation if var_elem)).strip()
for variation in product(*non_lastnames)
]
def generate_name_variations(name):
"""Generate name variations for a given name.
Args:
name (six.text_type): The name whose variations are to be generated.
Returns:
list: All the name variations for the given name.
Notes:
Uses `unidecode` for doing unicode characters transliteration to ASCII ones. This was chosen so that we can map
both full names of authors in HEP records and user's input to the same space and thus make exact queries work.
"""
def _update_name_variations_with_product(set_a, set_b):
name_variations.update([
unidecode((names_variation[0] +
separator +
names_variation[1]).strip(''.join(_LASTNAME_NON_LASTNAME_SEPARATORS))).lower()
for names_variation
in product(set_a, set_b)
for separator
in _LASTNAME_NON_LASTNAME_SEPARATORS
])
parsed_name = ParsedName.loads(name)
# Handle rare-case of single-name
if len(parsed_name) == 1:
return [parsed_name.dumps().lower()]
name_variations = set()
# We need to filter out empty entries, since HumanName for this name `Perelstein,, Maxim` returns a first_list with
# an empty string element.
non_lastnames = [
non_lastname
for non_lastname
in parsed_name.first_list + parsed_name.suffix_list
if non_lastname
]
# This is needed because due to erroneous data (e.g. having many authors in a single authors field) ends up
# requiring a lot of memory (due to combinatorial expansion of all non lastnames).
# The policy is to use the input as a name variation, since this data will have to be curated.
if len(non_lastnames) > _NAMES_MAX_NUMBER_THRESHOLD or len(parsed_name.last_list) > _NAMES_MAX_NUMBER_THRESHOLD:
LOGGER.error('Skipping name variations generation - too many names in: "%s"', name)
return [name]
non_lastnames_variations = \
_generate_non_lastnames_variations(non_lastnames)
lastnames_variations = _generate_lastnames_variations(parsed_name.last_list)
# Create variations where lastnames comes first and is separated from non lastnames either by space or comma.
_update_name_variations_with_product(lastnames_variations, non_lastnames_variations)
# Second part of transformations - having the lastnames in the end.
_update_name_variations_with_product(non_lastnames_variations, lastnames_variations)
return list(name_variations)
def format_name(name, initials_only=False):
    """Format a schema-compliant name string in a human-friendly format.

    This is a convenience wrapper around :ref:`ParsedName`, which should be
    used instead if more features are needed.

    Args:
        name (str): The name to format, in pretty much any format.
        initials_only (bool): ``True`` if we want the first names to be displayed with only the initial followed by a dot. ``False`` otherwise.

    Examples:
        >>> format_name('Lieber, Stanley Martin')
        u'Stanley Martin Lieber'
        >>> format_name('Lieber, Stanley Martin', initials_only=True)
        u'S. M. Lieber'
        >>> format_name('Downey, Robert Jr.', initials_only=True)
        u'R. Downey Jr.'
    """
    return ParsedName.loads(name).pprint(initials_only)
|
inspirehep/inspire-utils
|
inspire_utils/name.py
|
generate_name_variations
|
python
|
def generate_name_variations(name):
def _update_name_variations_with_product(set_a, set_b):
name_variations.update([
unidecode((names_variation[0] +
separator +
names_variation[1]).strip(''.join(_LASTNAME_NON_LASTNAME_SEPARATORS))).lower()
for names_variation
in product(set_a, set_b)
for separator
in _LASTNAME_NON_LASTNAME_SEPARATORS
])
parsed_name = ParsedName.loads(name)
# Handle rare-case of single-name
if len(parsed_name) == 1:
return [parsed_name.dumps().lower()]
name_variations = set()
# We need to filter out empty entries, since HumanName for this name `Perelstein,, Maxim` returns a first_list with
# an empty string element.
non_lastnames = [
non_lastname
for non_lastname
in parsed_name.first_list + parsed_name.suffix_list
if non_lastname
]
# This is needed because due to erroneous data (e.g. having many authors in a single authors field) ends up
# requiring a lot of memory (due to combinatorial expansion of all non lastnames).
# The policy is to use the input as a name variation, since this data will have to be curated.
if len(non_lastnames) > _NAMES_MAX_NUMBER_THRESHOLD or len(parsed_name.last_list) > _NAMES_MAX_NUMBER_THRESHOLD:
LOGGER.error('Skipping name variations generation - too many names in: "%s"', name)
return [name]
non_lastnames_variations = \
_generate_non_lastnames_variations(non_lastnames)
lastnames_variations = _generate_lastnames_variations(parsed_name.last_list)
# Create variations where lastnames comes first and is separated from non lastnames either by space or comma.
_update_name_variations_with_product(lastnames_variations, non_lastnames_variations)
# Second part of transformations - having the lastnames in the end.
_update_name_variations_with_product(non_lastnames_variations, lastnames_variations)
return list(name_variations)
|
Generate name variations for a given name.
Args:
name (six.text_type): The name whose variations are to be generated.
Returns:
list: All the name variations for the given name.
Notes:
Uses `unidecode` for doing unicode characters transliteration to ASCII ones. This was chosen so that we can map
both full names of authors in HEP records and user's input to the same space and thus make exact queries work.
|
train
|
https://github.com/inspirehep/inspire-utils/blob/b0b5983c58700735dfde75e4c8bd32834f2473d4/inspire_utils/name.py#L304-L362
|
[
"def _generate_non_lastnames_variations(non_lastnames):\n \"\"\"Generate variations for all non-lastnames.\n\n E.g. For 'John Richard', this method generates: [\n 'John', 'J', 'Richard', 'R', 'John Richard', 'John R', 'J Richard', 'J R',\n ]\n \"\"\"\n if not non_lastnames:\n return []\n\n # Generate name transformations in place for all non lastnames. Transformations include:\n # 1. Drop non last name, 2. use initial, 3. use full non lastname\n for idx, non_lastname in enumerate(non_lastnames):\n non_lastnames[idx] = (u'', non_lastname[0], non_lastname)\n\n # Generate the cartesian product of the transformed non lastnames and flatten them.\n return [\n (u' '.join(var_elem for var_elem in variation if var_elem)).strip()\n for variation in product(*non_lastnames)\n ]\n",
"def _generate_lastnames_variations(lastnames):\n \"\"\"Generate variations for lastnames.\n\n Note:\n This method follows the assumption that the first last name is the main one.\n E.g. For 'Caro Estevez', this method generates: ['Caro', 'Caro Estevez'].\n In the case the lastnames are dashed, it splits them in two.\n \"\"\"\n if not lastnames:\n return []\n\n split_lastnames = [split_lastname for lastname in lastnames for split_lastname in lastname.split('-')]\n\n lastnames_variations = split_lastnames\n if len(split_lastnames) > 1:\n # Generate lastnames concatenation if there are more than one lastname after split.\n lastnames_variations.append(u' '.join([lastname for lastname in split_lastnames]))\n\n return lastnames_variations\n",
"def error(self, message, *args, **kwargs):\n \"\"\"Log error with stack trace and locals information.\n\n By default, enables stack trace information in logging messages, so that stacktrace and locals appear in Sentry.\n \"\"\"\n kwargs.setdefault('extra', {}).setdefault('stack', True)\n return self.logger.error(message, *args, **kwargs)\n",
"def loads(cls, name):\n \"\"\"Load a parsed name from a string.\n\n Raises:\n TypeError: when name isn't a type of `six.string_types`.\n ValueError: when name is empty or None.\n \"\"\"\n if not isinstance(name, six.string_types):\n raise TypeError(u'arguments to {classname} must be of type {string_types}'.format(\n classname=cls.__name__, string_types=repr(six.string_types)\n ))\n if not name or name.isspace():\n raise ValueError('name must not be empty')\n\n return cls(name)\n",
"def dumps(self):\n \"\"\"Dump the name to string, after normalizing it.\"\"\"\n def _is_initial(author_name):\n return len(author_name) == 1 or u'.' in author_name\n\n def _ensure_dotted_initials(author_name):\n if _is_initial(author_name) \\\n and u'.' not in author_name:\n seq = (author_name, u'.')\n author_name = u''.join(seq)\n return author_name\n\n def _ensure_dotted_suffixes(author_suffix):\n if u'.' not in author_suffix:\n seq = (author_suffix, u'.')\n author_suffix = u''.join(seq)\n return author_suffix\n\n def _is_roman_numeral(suffix):\n \"\"\"Controls that the user's input only contains valid roman numerals\"\"\"\n valid_roman_numerals = [u'M', u'D', u'C', u'L', u'X',\n u'V', u'I', u'(', u')']\n return all(letters in valid_roman_numerals\n for letters in suffix.upper())\n\n first_and_middle_names = iter(_ensure_dotted_initials(name) for name in self.first_list)\n try:\n prev = next(first_and_middle_names)\n except StopIteration:\n LOGGER.warning(u\"Cannot process %s properly\",\n self._parsed_name.original)\n prev = self._parsed_name.original\n names_with_spaces = [prev]\n\n for name in first_and_middle_names:\n if not _is_initial(name) or not _is_initial(prev):\n names_with_spaces.append(' ')\n prev = name\n names_with_spaces.append(prev)\n\n normalized_names = u''.join(names_with_spaces)\n\n if _is_roman_numeral(self.suffix):\n suffix = self.suffix.upper()\n else:\n suffix = _ensure_dotted_suffixes(self.suffix)\n\n final_name = u', '.join(\n part for part in (self.last, normalized_names.strip(), suffix)\n if part)\n\n # Replace unicode curly apostrophe to normal apostrophe.\n final_name = final_name.replace(u'’', '\\'')\n\n return final_name\n",
"def _update_name_variations_with_product(set_a, set_b):\n name_variations.update([\n unidecode((names_variation[0] +\n separator +\n names_variation[1]).strip(''.join(_LASTNAME_NON_LASTNAME_SEPARATORS))).lower()\n for names_variation\n in product(set_a, set_b)\n for separator\n in _LASTNAME_NON_LASTNAME_SEPARATORS\n ])\n"
] |
# -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2014-2017 CERN.
#
# INSPIRE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE. If not, see <http://www.gnu.org/licenses/>.
#
# In applying this license, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
from __future__ import absolute_import, division, print_function
from itertools import product
from nameparser import HumanName
from nameparser.config import Constants
import six
from unidecode import unidecode
from .logging import getStackTraceLogger
LOGGER = getStackTraceLogger(__name__)
_LASTNAME_NON_LASTNAME_SEPARATORS = [u' ', u', ']
_NAMES_MAX_NUMBER_THRESHOLD = 5
"""Threshold for skipping the combinatorial expansion of names (when generating name variations). """
def _prepare_nameparser_constants():
"""Prepare nameparser Constants.
Remove nameparser's titles and use our own and add as suffixes the roman numerals.
Configuration is the same for all names (i.e. instances).
"""
constants = Constants()
roman_numeral_suffixes = [u'v', u'vi', u'vii', u'viii', u'ix', u'x',
u'xii', u'xiii', u'xiv', u'xv']
titles = [u'Dr', u'Prof', u'Professor', u'Sir', u'Editor', u'Ed', u'Mr',
u'Mrs', u'Ms', u'Chair', u'Co-Chair', u'Chairs', u'co-Chairs']
constants.titles.remove(*constants.titles).add(*titles)
constants.suffix_not_acronyms.add(*roman_numeral_suffixes)
return constants
class ParsedName(object):
"""Class for representing a name.
After construction, the instance exposes the fields exposed by `HumanName` instance, i.e.
`title`, `first`, `middle`, `last`, `suffix`.
"""
constants = _prepare_nameparser_constants()
"""The default constants configuration for `HumanName` to use for parsing all names."""
def __init__(self, name, constants=None):
"""Create a ParsedName instance.
Args:
name (Union[str, HumanName]): The name to be parsed (must be non empty nor None).
constants (:class:`nameparser.config.Constants`): Configuration for `HumanName` instantiation.
(Can be None, if provided it overwrites the default one generated in
:method:`prepare_nameparser_constants`.)
"""
if not constants:
constants = ParsedName.constants
if isinstance(name, HumanName):
self._parsed_name = name
else:
self._parsed_name = HumanName(name, constants=constants)
self._parsed_name.capitalize()
def __iter__(self):
return self._parsed_name
def __len__(self):
return len(self._parsed_name)
def __repr__(self):
return repr(self._parsed_name)
def __str__(self):
return str(self._parsed_name)
@property
def first_initials(self):
return u' '.join(self.first_initials_list)
@property
def first(self):
return u'{} {}'.format(self._parsed_name.first, self._parsed_name.middle).strip()
@property
def first_initials_list(self):
names = self.first_list
return [(name[0] + u'.') for name in names]
@property
def first_list(self):
return self._parsed_name.first_list + self._parsed_name.middle_list
@property
def last(self):
return self._parsed_name.last
@property
def last_list(self):
return self._parsed_name.last_list
@property
def suffix(self):
return self._parsed_name.suffix
@property
def suffix_list(self):
return self._parsed_name.suffix_list
@classmethod
def loads(cls, name):
"""Load a parsed name from a string.
Raises:
TypeError: when name isn't a type of `six.string_types`.
ValueError: when name is empty or None.
"""
if not isinstance(name, six.string_types):
raise TypeError(u'arguments to {classname} must be of type {string_types}'.format(
classname=cls.__name__, string_types=repr(six.string_types)
))
if not name or name.isspace():
raise ValueError('name must not be empty')
return cls(name)
def dumps(self):
"""Dump the name to string, after normalizing it."""
def _is_initial(author_name):
return len(author_name) == 1 or u'.' in author_name
def _ensure_dotted_initials(author_name):
if _is_initial(author_name) \
and u'.' not in author_name:
seq = (author_name, u'.')
author_name = u''.join(seq)
return author_name
def _ensure_dotted_suffixes(author_suffix):
if u'.' not in author_suffix:
seq = (author_suffix, u'.')
author_suffix = u''.join(seq)
return author_suffix
def _is_roman_numeral(suffix):
"""Controls that the user's input only contains valid roman numerals"""
valid_roman_numerals = [u'M', u'D', u'C', u'L', u'X',
u'V', u'I', u'(', u')']
return all(letters in valid_roman_numerals
for letters in suffix.upper())
first_and_middle_names = iter(_ensure_dotted_initials(name) for name in self.first_list)
try:
prev = next(first_and_middle_names)
except StopIteration:
LOGGER.warning(u"Cannot process %s properly",
self._parsed_name.original)
prev = self._parsed_name.original
names_with_spaces = [prev]
for name in first_and_middle_names:
if not _is_initial(name) or not _is_initial(prev):
names_with_spaces.append(' ')
prev = name
names_with_spaces.append(prev)
normalized_names = u''.join(names_with_spaces)
if _is_roman_numeral(self.suffix):
suffix = self.suffix.upper()
else:
suffix = _ensure_dotted_suffixes(self.suffix)
final_name = u', '.join(
part for part in (self.last, normalized_names.strip(), suffix)
if part)
# Replace unicode curly apostrophe to normal apostrophe.
final_name = final_name.replace(u'’', '\'')
return final_name
def pprint(self, initials_only=False):
"""Pretty print the name.
Args:
initials_only (bool): ``True`` if we want the first names to be displayed with
only the initial followed by a dot. ``False`` otherwise.
Examples:
>>> ParsedName('Lieber, Stanley Martin').pprint()
u'Stanley Martin Lieber'
>>> ParsedName('Lieber, Stanley Martin').pprint(initials_only=True)
u'S. M. Lieber'
>>> ParsedName('Downey, Robert Jr.').pprint(initials_only=True)
u'R. Downey Jr.'
"""
last_name = self.last
suffixes = ', ' + self.suffix if self.suffix else ''
if initials_only and last_name != u'':
first_names = self.first_initials
else:
first_names = self.first
return u'{} {}{}'.format(first_names, last_name, suffixes).strip()
@classmethod
def from_parts(
cls,
first=None,
last=None,
middle=None,
suffix=None,
title=None
):
name = HumanName()
name.first = first
name.middle = middle
name.last = last
name.suffix = suffix
name.title = title
return ParsedName(name)
def normalize_name(name):
"""Normalize name.
Args:
name (six.text_type): The name to be normalized.
Returns:
str: The normalized name.
"""
if not name or name.isspace():
return None
return ParsedName.loads(name).dumps()
def _generate_non_lastnames_variations(non_lastnames):
"""Generate variations for all non-lastnames.
E.g. For 'John Richard', this method generates: [
'John', 'J', 'Richard', 'R', 'John Richard', 'John R', 'J Richard', 'J R',
]
"""
if not non_lastnames:
return []
# Generate name transformations in place for all non lastnames. Transformations include:
# 1. Drop non last name, 2. use initial, 3. use full non lastname
for idx, non_lastname in enumerate(non_lastnames):
non_lastnames[idx] = (u'', non_lastname[0], non_lastname)
# Generate the cartesian product of the transformed non lastnames and flatten them.
return [
(u' '.join(var_elem for var_elem in variation if var_elem)).strip()
for variation in product(*non_lastnames)
]
def _generate_lastnames_variations(lastnames):
"""Generate variations for lastnames.
Note:
This method follows the assumption that the first last name is the main one.
E.g. For 'Caro Estevez', this method generates: ['Caro', 'Caro Estevez'].
In the case the lastnames are dashed, it splits them in two.
"""
if not lastnames:
return []
split_lastnames = [split_lastname for lastname in lastnames for split_lastname in lastname.split('-')]
lastnames_variations = split_lastnames
if len(split_lastnames) > 1:
# Generate lastnames concatenation if there are more than one lastname after split.
lastnames_variations.append(u' '.join([lastname for lastname in split_lastnames]))
return lastnames_variations
def format_name(name, initials_only=False):
"""Format a schema-compliant name string in a human-friendy format.
This is a convenience wrapper around :ref:`ParsedName`, which should be
used instead if more features are needed.
Args:
name (str): The name to format, in pretty much any format.
initials_only (bool): ``True`` if we want the first names to be displayed with only the initial followed by a dot. ``False`` otherwise.
Examples:
>>> format_name('Lieber, Stanley Martin')
u'Stanley Martin Lieber'
>>> format_name('Lieber, Stanley Martin', initials_only=True)
u'S. M. Lieber'
>>> format_name('Downey, Robert Jr.', initials_only=True)
u'R. Downey Jr.'
"""
return ParsedName.loads(name).pprint(initials_only)
|
inspirehep/inspire-utils
|
inspire_utils/name.py
|
ParsedName.loads
|
python
|
def loads(cls, name):
if not isinstance(name, six.string_types):
raise TypeError(u'arguments to {classname} must be of type {string_types}'.format(
classname=cls.__name__, string_types=repr(six.string_types)
))
if not name or name.isspace():
raise ValueError('name must not be empty')
return cls(name)
|
Load a parsed name from a string.
Raises:
TypeError: when name isn't a type of `six.string_types`.
ValueError: when name is empty or None.
|
train
|
https://github.com/inspirehep/inspire-utils/blob/b0b5983c58700735dfde75e4c8bd32834f2473d4/inspire_utils/name.py#L130-L144
| null |
class ParsedName(object):
"""Class for representing a name.
After construction, the instance exposes the fields exposed by `HumanName` instance, i.e.
`title`, `first`, `middle`, `last`, `suffix`.
"""
constants = _prepare_nameparser_constants()
"""The default constants configuration for `HumanName` to use for parsing all names."""
def __init__(self, name, constants=None):
"""Create a ParsedName instance.
Args:
name (Union[str, HumanName]): The name to be parsed (must be non empty nor None).
constants (:class:`nameparser.config.Constants`): Configuration for `HumanName` instantiation.
(Can be None, if provided it overwrites the default one generated in
:method:`prepare_nameparser_constants`.)
"""
if not constants:
constants = ParsedName.constants
if isinstance(name, HumanName):
self._parsed_name = name
else:
self._parsed_name = HumanName(name, constants=constants)
self._parsed_name.capitalize()
def __iter__(self):
return self._parsed_name
def __len__(self):
return len(self._parsed_name)
def __repr__(self):
return repr(self._parsed_name)
def __str__(self):
return str(self._parsed_name)
@property
def first_initials(self):
return u' '.join(self.first_initials_list)
@property
def first(self):
return u'{} {}'.format(self._parsed_name.first, self._parsed_name.middle).strip()
@property
def first_initials_list(self):
names = self.first_list
return [(name[0] + u'.') for name in names]
@property
def first_list(self):
return self._parsed_name.first_list + self._parsed_name.middle_list
@property
def last(self):
return self._parsed_name.last
@property
def last_list(self):
return self._parsed_name.last_list
@property
def suffix(self):
return self._parsed_name.suffix
@property
def suffix_list(self):
return self._parsed_name.suffix_list
@classmethod
def dumps(self):
"""Dump the name to string, after normalizing it."""
def _is_initial(author_name):
return len(author_name) == 1 or u'.' in author_name
def _ensure_dotted_initials(author_name):
if _is_initial(author_name) \
and u'.' not in author_name:
seq = (author_name, u'.')
author_name = u''.join(seq)
return author_name
def _ensure_dotted_suffixes(author_suffix):
if u'.' not in author_suffix:
seq = (author_suffix, u'.')
author_suffix = u''.join(seq)
return author_suffix
def _is_roman_numeral(suffix):
"""Controls that the user's input only contains valid roman numerals"""
valid_roman_numerals = [u'M', u'D', u'C', u'L', u'X',
u'V', u'I', u'(', u')']
return all(letters in valid_roman_numerals
for letters in suffix.upper())
first_and_middle_names = iter(_ensure_dotted_initials(name) for name in self.first_list)
try:
prev = next(first_and_middle_names)
except StopIteration:
LOGGER.warning(u"Cannot process %s properly",
self._parsed_name.original)
prev = self._parsed_name.original
names_with_spaces = [prev]
for name in first_and_middle_names:
if not _is_initial(name) or not _is_initial(prev):
names_with_spaces.append(' ')
prev = name
names_with_spaces.append(prev)
normalized_names = u''.join(names_with_spaces)
if _is_roman_numeral(self.suffix):
suffix = self.suffix.upper()
else:
suffix = _ensure_dotted_suffixes(self.suffix)
final_name = u', '.join(
part for part in (self.last, normalized_names.strip(), suffix)
if part)
# Replace unicode curly apostrophe to normal apostrophe.
final_name = final_name.replace(u'’', '\'')
return final_name
def pprint(self, initials_only=False):
"""Pretty print the name.
Args:
initials_only (bool): ``True`` if we want the first names to be displayed with
only the initial followed by a dot. ``False`` otherwise.
Examples:
>>> ParsedName('Lieber, Stanley Martin').pprint()
u'Stanley Martin Lieber'
>>> ParsedName('Lieber, Stanley Martin').pprint(initials_only=True)
u'S. M. Lieber'
>>> ParsedName('Downey, Robert Jr.').pprint(initials_only=True)
u'R. Downey Jr.'
"""
last_name = self.last
suffixes = ', ' + self.suffix if self.suffix else ''
if initials_only and last_name != u'':
first_names = self.first_initials
else:
first_names = self.first
return u'{} {}{}'.format(first_names, last_name, suffixes).strip()
@classmethod
def from_parts(
cls,
first=None,
last=None,
middle=None,
suffix=None,
title=None
):
name = HumanName()
name.first = first
name.middle = middle
name.last = last
name.suffix = suffix
name.title = title
return ParsedName(name)
|
inspirehep/inspire-utils
|
inspire_utils/name.py
|
ParsedName.dumps
|
python
|
def dumps(self):
def _is_initial(author_name):
return len(author_name) == 1 or u'.' in author_name
def _ensure_dotted_initials(author_name):
if _is_initial(author_name) \
and u'.' not in author_name:
seq = (author_name, u'.')
author_name = u''.join(seq)
return author_name
def _ensure_dotted_suffixes(author_suffix):
if u'.' not in author_suffix:
seq = (author_suffix, u'.')
author_suffix = u''.join(seq)
return author_suffix
def _is_roman_numeral(suffix):
"""Controls that the user's input only contains valid roman numerals"""
valid_roman_numerals = [u'M', u'D', u'C', u'L', u'X',
u'V', u'I', u'(', u')']
return all(letters in valid_roman_numerals
for letters in suffix.upper())
first_and_middle_names = iter(_ensure_dotted_initials(name) for name in self.first_list)
try:
prev = next(first_and_middle_names)
except StopIteration:
LOGGER.warning(u"Cannot process %s properly",
self._parsed_name.original)
prev = self._parsed_name.original
names_with_spaces = [prev]
for name in first_and_middle_names:
if not _is_initial(name) or not _is_initial(prev):
names_with_spaces.append(' ')
prev = name
names_with_spaces.append(prev)
normalized_names = u''.join(names_with_spaces)
if _is_roman_numeral(self.suffix):
suffix = self.suffix.upper()
else:
suffix = _ensure_dotted_suffixes(self.suffix)
final_name = u', '.join(
part for part in (self.last, normalized_names.strip(), suffix)
if part)
# Replace unicode curly apostrophe to normal apostrophe.
final_name = final_name.replace(u'’', '\'')
return final_name
|
Dump the name to string, after normalizing it.
|
train
|
https://github.com/inspirehep/inspire-utils/blob/b0b5983c58700735dfde75e4c8bd32834f2473d4/inspire_utils/name.py#L146-L200
|
[
"def _is_initial(author_name):\n return len(author_name) == 1 or u'.' in author_name\n",
"def _ensure_dotted_suffixes(author_suffix):\n if u'.' not in author_suffix:\n seq = (author_suffix, u'.')\n author_suffix = u''.join(seq)\n return author_suffix\n",
"def _is_roman_numeral(suffix):\n \"\"\"Controls that the user's input only contains valid roman numerals\"\"\"\n valid_roman_numerals = [u'M', u'D', u'C', u'L', u'X',\n u'V', u'I', u'(', u')']\n return all(letters in valid_roman_numerals\n for letters in suffix.upper())\n"
] |
class ParsedName(object):
"""Class for representing a name.
After construction, the instance exposes the fields exposed by `HumanName` instance, i.e.
`title`, `first`, `middle`, `last`, `suffix`.
"""
constants = _prepare_nameparser_constants()
"""The default constants configuration for `HumanName` to use for parsing all names."""
def __init__(self, name, constants=None):
"""Create a ParsedName instance.
Args:
name (Union[str, HumanName]): The name to be parsed (must be non empty nor None).
constants (:class:`nameparser.config.Constants`): Configuration for `HumanName` instantiation.
(Can be None, if provided it overwrites the default one generated in
:method:`prepare_nameparser_constants`.)
"""
if not constants:
constants = ParsedName.constants
if isinstance(name, HumanName):
self._parsed_name = name
else:
self._parsed_name = HumanName(name, constants=constants)
self._parsed_name.capitalize()
def __iter__(self):
return self._parsed_name
def __len__(self):
return len(self._parsed_name)
def __repr__(self):
return repr(self._parsed_name)
def __str__(self):
return str(self._parsed_name)
@property
def first_initials(self):
return u' '.join(self.first_initials_list)
@property
def first(self):
return u'{} {}'.format(self._parsed_name.first, self._parsed_name.middle).strip()
@property
def first_initials_list(self):
names = self.first_list
return [(name[0] + u'.') for name in names]
@property
def first_list(self):
return self._parsed_name.first_list + self._parsed_name.middle_list
@property
def last(self):
return self._parsed_name.last
@property
def last_list(self):
return self._parsed_name.last_list
@property
def suffix(self):
return self._parsed_name.suffix
@property
def suffix_list(self):
return self._parsed_name.suffix_list
@classmethod
def loads(cls, name):
"""Load a parsed name from a string.
Raises:
TypeError: when name isn't a type of `six.string_types`.
ValueError: when name is empty or None.
"""
if not isinstance(name, six.string_types):
raise TypeError(u'arguments to {classname} must be of type {string_types}'.format(
classname=cls.__name__, string_types=repr(six.string_types)
))
if not name or name.isspace():
raise ValueError('name must not be empty')
return cls(name)
def pprint(self, initials_only=False):
"""Pretty print the name.
Args:
initials_only (bool): ``True`` if we want the first names to be displayed with
only the initial followed by a dot. ``False`` otherwise.
Examples:
>>> ParsedName('Lieber, Stanley Martin').pprint()
u'Stanley Martin Lieber'
>>> ParsedName('Lieber, Stanley Martin').pprint(initials_only=True)
u'S. M. Lieber'
>>> ParsedName('Downey, Robert Jr.').pprint(initials_only=True)
u'R. Downey Jr.'
"""
last_name = self.last
suffixes = ', ' + self.suffix if self.suffix else ''
if initials_only and last_name != u'':
first_names = self.first_initials
else:
first_names = self.first
return u'{} {}{}'.format(first_names, last_name, suffixes).strip()
@classmethod
def from_parts(
cls,
first=None,
last=None,
middle=None,
suffix=None,
title=None
):
name = HumanName()
name.first = first
name.middle = middle
name.last = last
name.suffix = suffix
name.title = title
return ParsedName(name)
|
inspirehep/inspire-utils
|
inspire_utils/name.py
|
ParsedName.pprint
|
python
|
def pprint(self, initials_only=False):
last_name = self.last
suffixes = ', ' + self.suffix if self.suffix else ''
if initials_only and last_name != u'':
first_names = self.first_initials
else:
first_names = self.first
return u'{} {}{}'.format(first_names, last_name, suffixes).strip()
|
Pretty print the name.
Args:
initials_only (bool): ``True`` if we want the first names to be displayed with
only the initial followed by a dot. ``False`` otherwise.
Examples:
>>> ParsedName('Lieber, Stanley Martin').pprint()
u'Stanley Martin Lieber'
>>> ParsedName('Lieber, Stanley Martin').pprint(initials_only=True)
u'S. M. Lieber'
>>> ParsedName('Downey, Robert Jr.').pprint(initials_only=True)
u'R. Downey Jr.'
|
train
|
https://github.com/inspirehep/inspire-utils/blob/b0b5983c58700735dfde75e4c8bd32834f2473d4/inspire_utils/name.py#L202-L226
| null |
class ParsedName(object):
"""Class for representing a name.
After construction, the instance exposes the fields exposed by `HumanName` instance, i.e.
`title`, `first`, `middle`, `last`, `suffix`.
"""
constants = _prepare_nameparser_constants()
"""The default constants configuration for `HumanName` to use for parsing all names."""
def __init__(self, name, constants=None):
"""Create a ParsedName instance.
Args:
name (Union[str, HumanName]): The name to be parsed (must be non empty nor None).
constants (:class:`nameparser.config.Constants`): Configuration for `HumanName` instantiation.
(Can be None, if provided it overwrites the default one generated in
:method:`prepare_nameparser_constants`.)
"""
if not constants:
constants = ParsedName.constants
if isinstance(name, HumanName):
self._parsed_name = name
else:
self._parsed_name = HumanName(name, constants=constants)
self._parsed_name.capitalize()
def __iter__(self):
return self._parsed_name
def __len__(self):
return len(self._parsed_name)
def __repr__(self):
return repr(self._parsed_name)
def __str__(self):
return str(self._parsed_name)
@property
def first_initials(self):
return u' '.join(self.first_initials_list)
@property
def first(self):
return u'{} {}'.format(self._parsed_name.first, self._parsed_name.middle).strip()
@property
def first_initials_list(self):
names = self.first_list
return [(name[0] + u'.') for name in names]
@property
def first_list(self):
return self._parsed_name.first_list + self._parsed_name.middle_list
@property
def last(self):
return self._parsed_name.last
@property
def last_list(self):
return self._parsed_name.last_list
@property
def suffix(self):
return self._parsed_name.suffix
@property
def suffix_list(self):
return self._parsed_name.suffix_list
@classmethod
def loads(cls, name):
"""Load a parsed name from a string.
Raises:
TypeError: when name isn't a type of `six.string_types`.
ValueError: when name is empty or None.
"""
if not isinstance(name, six.string_types):
raise TypeError(u'arguments to {classname} must be of type {string_types}'.format(
classname=cls.__name__, string_types=repr(six.string_types)
))
if not name or name.isspace():
raise ValueError('name must not be empty')
return cls(name)
def dumps(self):
"""Dump the name to string, after normalizing it."""
def _is_initial(author_name):
return len(author_name) == 1 or u'.' in author_name
def _ensure_dotted_initials(author_name):
if _is_initial(author_name) \
and u'.' not in author_name:
seq = (author_name, u'.')
author_name = u''.join(seq)
return author_name
def _ensure_dotted_suffixes(author_suffix):
if u'.' not in author_suffix:
seq = (author_suffix, u'.')
author_suffix = u''.join(seq)
return author_suffix
def _is_roman_numeral(suffix):
"""Controls that the user's input only contains valid roman numerals"""
valid_roman_numerals = [u'M', u'D', u'C', u'L', u'X',
u'V', u'I', u'(', u')']
return all(letters in valid_roman_numerals
for letters in suffix.upper())
first_and_middle_names = iter(_ensure_dotted_initials(name) for name in self.first_list)
try:
prev = next(first_and_middle_names)
except StopIteration:
LOGGER.warning(u"Cannot process %s properly",
self._parsed_name.original)
prev = self._parsed_name.original
names_with_spaces = [prev]
for name in first_and_middle_names:
if not _is_initial(name) or not _is_initial(prev):
names_with_spaces.append(' ')
prev = name
names_with_spaces.append(prev)
normalized_names = u''.join(names_with_spaces)
if _is_roman_numeral(self.suffix):
suffix = self.suffix.upper()
else:
suffix = _ensure_dotted_suffixes(self.suffix)
final_name = u', '.join(
part for part in (self.last, normalized_names.strip(), suffix)
if part)
# Replace unicode curly apostrophe to normal apostrophe.
final_name = final_name.replace(u'’', '\'')
return final_name
@classmethod
def from_parts(
cls,
first=None,
last=None,
middle=None,
suffix=None,
title=None
):
name = HumanName()
name.first = first
name.middle = middle
name.last = last
name.suffix = suffix
name.title = title
return ParsedName(name)
|
inspirehep/inspire-utils
|
inspire_utils/date.py
|
earliest_date
|
python
|
def earliest_date(dates, full_date=False):
min_date = min(PartialDate.loads(date) for date in dates)
if not min_date.month and full_date:
min_date.month = 1
if not min_date.day and full_date:
min_date.day = 1
return min_date.dumps()
|
Return the earliest among the schema-compliant dates.
This is a convenience wrapper around :ref:`PartialDate`, which should be
used instead if more features are needed.
Args:
dates(list): List of dates from which oldest/earliest one will be returned
full_date(bool): Adds month and/or day as "01" if they are missing
Returns:
str: Earliest date from provided list
|
train
|
https://github.com/inspirehep/inspire-utils/blob/b0b5983c58700735dfde75e4c8bd32834f2473d4/inspire_utils/date.py#L244-L261
| null |
# -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2014-2017 CERN.
#
# INSPIRE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE. If not, see <http://www.gnu.org/licenses/>.
#
# In applying this license, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
"""Utils to handle dates in INSPIRE."""
from __future__ import absolute_import, division, print_function
import datetime
import itertools
from functools import total_ordering
import six
from babel import dates
from dateutil.parser import parse as parse_date
@total_ordering
@six.python_2_unicode_compatible
class PartialDate(object):
"""Class for representing a partial date.
The standard constructor assumes that all date parts are known (or not
present) and have already been converted to `int` s. For more flexibility,
see :ref:`PartialDate.from_parts` and :ref:`PartialDate.parse`.
Two `PartialDate` s can be compared and a more complete date is considered
smaller than the same date with parts removed.
Raises:
TypeError: when the date parts are not `int` s or `None`.
ValueError: when the date is not valid.
"""
def __init__(self, year, month=None, day=None):
well_typed = all(isinstance(part, int) or part is None for part in (year, month, day))
if not well_typed:
raise TypeError(u'arguments to {classname} must be of type int or None'.format(
classname=type(self).__name__))
if year is None or year < 1000:
raise ValueError('year must be an int >= 1000')
if day and not month:
raise TypeError('month must not be None if day is not None')
# delegate validation of number of months/days to datetime
completion = (part or 1 for part in (year, month, day))
datetime.date(*completion)
self.year = year
self.month = month
self.day = day
def __repr__(self):
return u'PartialDate(year={self.year}, month={self.month}, day={self.day})'.format(self=self)
def __eq__(self, other):
return self.year == other.year and self.month == other.month and self.day == other.day
def __lt__(self, other):
self_month = self.month or 99
self_day = self.day or 99
other_month = other.month or 99
other_day = other.day or 99
return (self.year, self_month, self_day) < (other.year, other_month, other_day)
def __str__(self):
return self.pprint()
@classmethod
def loads(cls, string):
"""Load a date from a string in a record.
This can also be used to validate a date.
Examples:
>>> PartialDate.loads('1686-06')
PartialDate(year=1686, month=6, day=None)
>>> PartialDate.loads('1686-42')
Traceback (most recent call last):
...
ValueError: month must be in 1..12
"""
parts = (int(part) for part in string.split('-'))
return cls(*parts)
def dumps(self):
"""Dump the date for serialization into the record.
Returns:
str: normalized date, in the form ``YYYY-MM-DD``, ``YYYY-MM`` or
``YYYY`` (depending on the information present in the date)
"""
non_empty = itertools.takewhile(bool, (self.year, self.month, self.day))
# XXX: this only handles dates after 1000, which should be sufficient
formatted = (u'{:02d}'.format(part) for part in non_empty)
date = '-'.join(formatted)
return date
@classmethod
def parse(cls, date, **kwargs):
"""Parse a date given in arbitrary format.
This attempts to parse the input date, given in an arbitrary format
Args:
date(str): date to normalize
**kwargs: these are passed to the `dateutil.parser.parse` function
which is used internally to parse the date. Most notably, the
`yearfirst` and `datefirst` flags can be used if the ordering
of the date parts is known.
Returns:
PartialDate: an object holding the parsed date.
Raises:
ValueError: when the date cannot be parsed or no year is present.
Examples:
>>> PartialDate.parse('30 Jun 1686')
PartialDate(year=1686, month=6, day=30)
"""
# In order to detect partial dates, parse twice with different defaults
# and compare the results.
default_date1 = datetime.datetime(1, 1, 1)
default_date2 = datetime.datetime(2, 2, 2)
parsed_date1 = parse_date(date, default=default_date1, **kwargs)
parsed_date2 = parse_date(date, default=default_date2, **kwargs)
has_year = parsed_date1.year == parsed_date2.year
has_month = parsed_date1.month == parsed_date2.month
has_day = parsed_date1.day == parsed_date2.day
if has_year:
year = parsed_date1.year
else:
raise ValueError('date does not contain a year')
month = parsed_date1.month if has_month else None
day = parsed_date1.day if has_day else None
return cls(year, month, day)
@classmethod
def from_parts(cls, year, month=None, day=None):
"""Build a PartialDate from its parts.
Unlike the standard constructor, the parts don't have to be `int` s but
can be strings containing textual month information.
Examples:
>>> PartialDate.from_parts('1686', 'June', '30')
PartialDate(year=1686, month=6, day=30)
"""
# XXX: 0 is not a valid year/month/day
non_empty = itertools.takewhile(
bool, (str(part) if part else None for part in (year, month, day))
)
return cls.parse(u'-'.join(non_empty), yearfirst=True)
def pprint(self):
"""Pretty print the date.
Examples:
>>> PartialDate(1686, 6, 30).pprint()
u'Jun 30, 1686'
"""
if not self.month:
return dates.format_date(datetime.date(self.year, 1, 1), 'YYYY', locale='en')
if not self.day:
return dates.format_date(datetime.date(self.year, self.month, 1), 'MMM, YYYY', locale='en')
return dates.format_date(datetime.date(self.year, self.month, self.day), 'MMM d, YYYY', locale='en')
def normalize_date(date, **kwargs):
"""Normalize a date to the be schema-compliant.
This is a convenience wrapper around :ref:`PartialDate`, which should be
used instead if more features are needed.
Note:
When ``date`` is ``None`` this returns ``None`` instead of raising
an exception because this makes ``DoJSON``'s code simpler, as it
already knows how to strip ``None`` values at the end.
Args:
date(str): date to normalize
**kwargs: these are passed to the `dateutil.parser.parse` function
that is used internally to parse the date. Most notably, the
`yearfirst` and `datefirst` flags can be used if the ordering
of the date parts is know.
Returns:
str: normalized date, in the form ``YYYY-MM-DD``, ``YYYY-MM`` or
``YYYY`` (depending on the information present in the date).
Raises:
ValueError: when the date cannot be parsed or no year is present.
Examples:
>>> normalize_date(None)
>>> normalize_date('30 Jun 1686')
'1686-06-30'
"""
if date is None:
return
return PartialDate.parse(date, **kwargs).dumps()
def format_date(date):
"""Format a schema-compliant date string in a human-friendy format.
This is a convenience wrapper around :ref:`PartialDate`, which should be
used instead if more features are needed.
"""
return PartialDate.loads(date).pprint()
|
inspirehep/inspire-utils
|
inspire_utils/urls.py
|
ensure_scheme
|
python
|
def ensure_scheme(url, default_scheme='http'):
parsed = urlsplit(url, scheme=default_scheme)
if not parsed.netloc:
parsed = SplitResult(
scheme=parsed.scheme,
netloc=parsed.path,
path='',
query=parsed.query,
fragment=parsed.fragment
)
return urlunsplit(parsed)
|
Adds a scheme to a url if not present.
Args:
url (string): a url, assumed to start with netloc
default_scheme (string): a scheme to be added
Returns:
string: URL with a scheme
|
train
|
https://github.com/inspirehep/inspire-utils/blob/b0b5983c58700735dfde75e4c8bd32834f2473d4/inspire_utils/urls.py#L31-L51
| null |
# -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2018 CERN.
#
# INSPIRE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE. If not, see <http://www.gnu.org/licenses/>.
#
# In applying this license, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
"""URL-related utils."""
from __future__ import absolute_import, division, print_function
from six import text_type
from six.moves.urllib.parse import urlsplit, urlunsplit, SplitResult
def record_url_by_pattern(pattern, recid):
"""Get a URL to a record constructing it from a pattern.
Args:
pattern (string): a URL pattern as a format-friendly string with a
`recid` field
recid (Union[string, int]): record ID
Returns:
string: built record URL
"""
return text_type(ensure_scheme(pattern)).format(recid=recid)
|
qwilka/vn-tree
|
vntree/node.py
|
Node.add_child
|
python
|
def add_child(self, node):
if not issubclass(node.__class__, Node):
raise TypeError("{}.add_child: arg «node»=«{}», type {} not valid.".format(self.__class__.__name__, node, type(node)))
self.childs.append(node)
node.parent = self
return node
|
Add a child node to the current node instance.
:param node: the child node instance.
:type node: Node
:returns: The new child node instance.
:rtype: Node
|
train
|
https://github.com/qwilka/vn-tree/blob/f08106e9c7232d8748d78d1d39b019699a7407dd/vntree/node.py#L142-L154
| null |
class Node:
"""Class for creating vntree nodes.
:param name: node name
:type name: str or None
:param parent: The parent node of this node.
:type parent: Node or None
:param data: Dictionary containing node data.
:type data: dict or None
:param treedict: Dictionary specifying a complete tree.
:type treedict: dict or None
"""
YAML_setup = False
name = NodeAttr()
_vnpkl_fpath = TreeAttr("_vntree_meta")
def __init__(self, name=None, parent=None, data=None,
treedict=None, vnpkl_fpath=None):
if data and isinstance(data, dict):
#self.data = collections.defaultdict(dict, copy.deepcopy(data))
self.data = copy.deepcopy(data)
else:
self.data = {}
if name:
self.name = str(name)
elif not getattr(self, "name", None) and name is None:
self.name = ""
self.childs = []
if parent and issubclass(parent.__class__, Node):
parent.add_child(self)
elif parent is None:
self.parent = None
else:
raise TypeError("{}.__init__: instance «{}» argument «parent» type not valid: {}".format(self.__class__.__name__, name, type(parent)))
if callable(name):
self.name = str(name(self))
if treedict and isinstance(treedict, dict):
self.from_treedict(treedict)
if vnpkl_fpath and isinstance(vnpkl_fpath, str):
self._vnpkl_fpath = vnpkl_fpath
def __str__(self):
return "{} coord={} «{}»".format(self.__class__.__name__, self._coord, self.name)
def __iter__(self):
yield self
for node in itertools.chain(*map(iter, self.childs)):
yield node
def __reversed__(self):
for node in itertools.chain(*map(reversed, self.childs)):
yield node
yield self
def copy(self):
"""Return a deep copy of the sub-tree rooted at this node instance.
:returns: Copy of the sub-tree rooted at this node instance.
:rtype: Node
"""
return copy.deepcopy(self)
def remove_child(self, idx=None, *, name=None, node=None):
"""Remove a child node from the current node instance.
:param idx: Index of child node to be removed.
:type idx: int
:param name: The first child node found with «name» will be removed.
:type name: str
:param node: Child node to be removed.
:type node: Node
:returns: The node that has been removed, or False if not successful.
:rtype: Node or False
"""
if (idx and isinstance(idx, int) and
-len(self.childs) <= idx < len(self.childs) ):
return self.childs.pop(idx)
if name and isinstance(name, str):
found_node = None
for _n in self.childs:
if _n.name == name:
found_node = _n
break
if found_node:
self.childs.remove(found_node)
return found_node
if node and node in self.childs:
self.childs.remove(node)
return node
return False
@property
def _path(self):
"""Attribute indicating the absolute node path for this node.
Note that the absolute node path starts with a forward slash
followed by the root node's name: e.g:
`/root.name/child.name/grandchild.name`
Warning: it should be noted that use of _path assumes
that sibling nodes have unique names. If unique node paths
cannot be assured, use node attribute «_coord» instead.
:returns: The absolute node path for this node.
:rtype: str
"""
_path = pathlib.PurePosixPath(self.name)
_node = self
while _node.parent:
_path = _node.parent.name / _path
_node = _node.parent
_path = pathlib.posixpath.sep / _path
return _path.as_posix()
@property
def _coord(self):
"""Attribute indicating the tree coordinates for this node.
The tree coordinates of a node are expressed as a tuple of the
indices of the node and its ancestors, for example:
A grandchild node with node path
`/root.name/root.childs[2].name/root.childs[2].childs[0].name`
would have coordinates `(2,0)`.
The root node _coord is an empty tuple: `()`
:returns: the tree coordinates for this node.
:rtype: tuple
"""
_coord = []
_node = self
while _node.parent:
_idx = _node.parent.childs.index(_node)
_coord.insert(0, _idx)
_node = _node.parent
return tuple(_coord)
@property
def _level(self):
"""Attribute indicating the tree `level` for this node instance.
Note that the root node is defined as level 1.
:returns: the node `level`.
:rtype: int
"""
return len(self._coord) + 1
def get_data(self, *keys):
"""Get a value from the instance `data` dict.
Nested values are accessed by specifying the keys in sequence.
e.g. `node.get_data("country", "city")` would access
`node.data["country"]["city"]`
:param keys: the `data` dict keys referencing the required value.
:type keys: str
:returns: the value accessed by `keys` in `data`.
"""
if not keys:
_val = self.data
_datadict = self.data
for _key in keys:
_val = _datadict.get(_key, None)
if isinstance(_val, dict):
_datadict = _val
else:
break
if isinstance(_val, dict):
_val = copy.deepcopy(_val)
return _val
def set_data(self, *keys, value):
"""Set a value in the instance `data` dict.
:param keys: the `data` dict keys referencing the value in the `data` dict.
:type keys: str
:param value: the value to be set in the `data` dict. Note that
`value` is a keyword-only argument.
:returns: `True` if successful.
"""
_datadict = self.data
for ii, _key in enumerate(keys):
if ii==len(keys)-1:
_datadict[_key] = value
else:
if _key not in _datadict:
_datadict[_key] = {}
_datadict = _datadict[_key]
return True
@property
def _root(self):
"""Attribute referencing the root node of the tree.
:returns: the root node of the tree containing this instance.
:rtype: Node
"""
_n = self
while _n.parent:
_n = _n.parent
return _n
@property
def _ancestors(self):
"""Attribute referencing the tree ancestors of the node instance.
:returns: list of node ancestors in sequence, first item is
the current node instance (`self`), the last item is root.
:rtype: list of Node references
"""
# return list of ancestor nodes starting with self.parent and ending with root
_ancestors=[]
_n = self
while _n.parent:
_n = _n.parent
_ancestors.append(_n)
return _ancestors
def get_child_by_name(self, childname):
"""Get a child node of the current instance by its name.
:param childname: the name of the required child node.
:type childname: str
:returns: the first child node found with name `childname`.
:rtype: Node or None
"""
_childs = [_child for _child in self.childs if _child.name==childname]
if len(_childs)>1:
logger.warning("%s.get_child_by_name: node:«%s» has more than 1 childnode with name=«%s»." % (self.__class__.__name__, self.name, childname))
if len(_childs)==0:
_childnode = None
else:
_childnode = _childs[0]
return _childnode
def get_node_by_path(self, path):
"""Get a node from a node path.
Warning: use of this method assumes that sibling nodes have unique names,
if this is not assured the `get_node_by_coord` method can be used instead.
| Example with absolute node path:
| `node.get_node_by_path('/root.name/child.name/gchild.name')`
| Example with relative node path:
| `node.get_node_by_path('child.name/gchild.name')`
:param path: the absolute node path, or the node path relative
to the current node instance.
:type path: str
:returns: the node corresponding to `path`.
:rtype: Node or None
"""
if path==".":
return self
elif path.lstrip().startswith((".", "./")) or not isinstance(path, str):
logger.warning("%s.get_node_by_path: arg «path»=«%s», not correctly specified." % (self.__class__.__name__, path))
return None
_pathlist = list(filter(None, path.split("/")) ) # remove blank strings
if path.startswith("/"):
_node = self._root
_pathlist.pop(0) # remove rootnode name
else:
_node = self
for _nodename in _pathlist:
_node = _node.get_child_by_name(_nodename)
if _node is None:
logger.warning("%s.get_node_by_path: node«%s», arg `path`=«%s», cannot find node." % (self.__class__.__name__, self.name, path))
return None
return _node
def get_node_by_coord(self, coord, relative=False):
"""Get a node from a node coord.
:param coord: the coordinates of the required node.
:type coord: tuple or list
:param relative: `True` if coord is relative to the node instance,
`False` for absolute coordinates.
:type relative: bool
:returns: the node corresponding to `coord`.
:rtype: Node or None
"""
if not isinstance(coord, (list, tuple)) or False in list(map(lambda i: type(i)==int, coord)):
logger.warning("%s.get_node_by_coord: node«%s», arg «coord»=«%s», «coord» must be list or tuple of integers." % (self.__class__.__name__, self.name, coord))
return None
if relative:
_node = self
else:
_node = self._root # _node = self.get_rootnode()
for idx in coord:
_node = _node.childs[idx]
if _node is None:
logger.warning("%s.get_node_by_coord: node«%s», arg «coord»=«%s» not valid." % (self.__class__.__name__, self.name, coord))
return None
return _node
def find_one_node(self, *keys, value, decend=True):
"""Find a node on the branch of the instance with a
`keys=data` item in the `data` dict.
Nested values are accessed by specifying the keys in sequence.
e.g. `node.get_data("country", "city")` would access
`node.data["country"]["city"]`
:param keys: the `data` dict key(s) referencing the required value.
:type keys: str
:param value: the value corresponding to `keys`. Note that
`value` is a keyword-only argument.
:param decend: `decend=True` traverse down the branch sub-tree
starting from `self`. `decend=False` traverse up the
branch from `self` towards root.
:type decend: bool
:returns: the first node found with `keys=data` in the `data` dict.
:rtype: Node or None
"""
if decend:
traversal = self
else:
traversal = self._ancestors
for _node in traversal:
_val = _node.get_data(*keys)
if _val == value:
return _node
return None
def to_texttree(self, indent=3, func=True, symbol='ascii'):
"""Method returning a text representation of the (sub-)tree
rooted at the current node instance (`self`).
:param indent: the indentation width for each tree level.
:type indent: int
:param func: function returning a string representation for
each node. e.g. `func=lambda n: str(n._coord)`
would show the node coordinates.
`func=True` node.name displayed for each node.
`func=False` no node representation, just
the tree structure is displayed.
:type func: function or bool
:param symbol: tuple of tree symbol characters.
`None` or 'ascii' gives a preformed ascii tree, equivalent to tuple :code:`(|, +, |, |, |, -, .)`.
'box' preformed with box-drawing characters, equivalent to tuple :code:`(┬, └, ┬, ├, └, ─, ⋅)`.
'unicode' preformed with unicode characters.
:type symbol: tuple or str or None
:returns: a string representation of the tree.
:rtype: str
"""
if indent<2:
indent=2
if func is True: # default func prints node.name
func = lambda n: "{}".format(n.name)
if isinstance(symbol, (list, tuple)):
s_root, s_branch, s_spar, s_fnode = symbol
elif symbol=="unicode":
# ┬ └ ┬ ├ └ ─ ⋅
s_root, s_branch, s_fnode, s_mnode, s_lnode, s_spar, s_level = (
"\u252c", "\u2514", "\u252c", "\u251c", "\u2514", "\u2500", "\u22c5")
elif symbol=="box": # https://en.wikipedia.org/wiki/Box-drawing_character
# ┬ └ ┬ ├ └ ─ ⋅
s_root, s_branch, s_fnode, s_mnode, s_lnode, s_spar, s_level = (
"\u252c", "\u2514", "\u252c", "\u251c", "\u2514", "\u2500", "\u22c5")
else:
s_root, s_branch, s_fnode, s_mnode, s_lnode, s_spar, s_level = (
"|", "+", "|", "|", "|", "-", ".")
_text = ""
#local_root_level = len(self.ancestors)
local_root_level = self._level
for _n in self:
#level = len(node.ancestors) - local_root_level
level = _n._level - local_root_level
if level==0:
_text += s_root
elif _n.parent.childs[0] == _n and len(_n.parent.childs)>1: # first child
#s_spar="f"
_text += ( (s_level + " "*(indent-1))*(level-1)
+ s_branch
+ s_spar*(indent-1)
+ s_fnode)
elif _n.parent.childs[-1] == _n and len(_n.childs)==0: # last child, no children
#s_spar="l"
_text += ( (s_level + " "*(indent-1))*(level)
+ s_lnode )
elif _n.parent.childs[-1] == _n: # last child, has children
#s_spar="l"
_text += ( (s_level + " "*(indent-1))*(level)
+ s_mnode )
#+ s_spar*(indent-1) )
# elif level>0:
# _text += (s_level + " "*(indent-1))*(level-1) + s_branch + s_spar*(indent-1)
else:
#_text += s_fnode
#s_spar="m"
_text += ( (s_level + " "*(indent-1))*(level)
+ s_mnode )
#+ s_spar*(indent-1) )
if func and callable(func):
_text += func(_n)
_text += "\n"
return _text
def from_treedict(self, treedict):
if "data" in treedict:
#self.data = collections.defaultdict(dict, treedict["data"])
self.data = copy.deepcopy(treedict["data"])
for key, val in treedict.items():
if key in ["parent", "childs", "data"]:
continue
setattr(self, key, val)
if "childs" in treedict.keys():
for _childdict in treedict["childs"]:
#self.childs.append( self.__class__(parent=self, treedict=_childdict) )
self.__class__(parent=self, treedict=_childdict)
def to_treedict(self, recursive=True, vntree_meta=True):
# NOTE: replace vars(self) with self.__dict__ ( and self.__class__.__dict__ ?)
_dct = {k:v for k, v in vars(self).items() if k not in ["parent", "childs"]}
if not vntree_meta and "_vntree_meta" in _dct["data"]:
_dct["data"].pop("_vntree_meta")
if recursive and self.childs:
_dct["childs"] = []
for _child in self.childs:
_dct["childs"].append( _child.to_treedict(recursive=recursive) )
return _dct
def to_json(self, filepath, default=str):
pass
# def from_json(self, filepath):
# err = ""
# _treedict = None
# if isinstance(filepath, str) and os.path.isfile(filepath):
# try:
# with open(filepath, 'r') as _fh:
# _treedict = json.load(_fh)
# except Exception as err:
# pass
# if not _treedict:
# logger.warning("%s.from_json: node«%s», cannot open «filepath»=«%s», %s." % (self.__class__.__name__, self.name, filepath, err))
# return False
# else:
# self.from_treedict(treedict=_treedict)
# return True
def tree_compare(self, othertree, vntree_meta=False):
"""Compare the (sub-)tree rooted at `self` with another tree.
`tree_compare` converts the trees being compared into JSON string
representations, and uses `difflib.SequenceMatcher().ratio()` to
calculate a measure of the similarity of the strings.
:param othertree: the other tree for comparison.
:type othertree: Node
:param vntree_meta: include private vntree metadata in comparison.
:type vntree_meta: bool
:returns: similarity of the trees as a number between 0 and 1.
:rtype: float
"""
return SequenceMatcher(None,
json.dumps(self.to_treedict(vntree_meta=vntree_meta), default=str),
json.dumps(othertree.to_treedict(vntree_meta=vntree_meta), default=str)
).ratio()
def savefile(self, filepath=None):
"""Save (dump) the tree in a pickle file.
Note that this method saves the complete tree even when invoked on
a non-root node.
It is recommended to use the extension `.vnpkl` for this type of file.
:param filepath: the file path for the pickle file.
If `filepath=None` use `self._vnpkl_fpath` attribute, if set.
:type filepath: str or None
:returns: `True` if successful.
:rtype: bool
"""
if filepath:
self._vnpkl_fpath = os.path.abspath(filepath)
# if not _pfpath:
# logger.error("%s.save: «%s» file path «%s» not valid." % (self.__class__.__name__, self.name, _pfpath))
# return False
try:
with open(self._vnpkl_fpath, "wb") as pf:
pickle.dump(self._root.to_treedict(), pf)
except Exception as err:
logger.error("%s.savefile: arg `filepath`=«%s» `self._vnpkl_fpath`=«%s» error: %s" % (self.__class__.__name__, filepath, self._vnpkl_fpath, err))
return False
return True
@classmethod
def openfile(cls, filepath):
"""Class method that opens (load) a vntree pickle file.
:param filepath: the file path for the pickle file.
:type filepath: str
:returns: root node of tree or `False` if failure.
:rtype: Node or bool
"""
if not os.path.isfile(filepath):
logger.error("%s.openfile: arg `filepath`=«%s» not valid." % (cls.__name__, filepath))
return False
try:
with open(filepath, "rb") as pf:
pkldata = pickle.load(pf)
rootnode = cls(treedict=pkldata)
rootnode._vnpkl_fpath = os.path.abspath(filepath)
except Exception as err:
logger.error("%s.openfile: data in file «%s» not valid: %s" % (cls.__name__, filepath, err))
return False
return rootnode
@classmethod
def setup_yaml(cls):
def yamlnode_constructor(loader, yamlnode) :
fields = loader.construct_mapping(yamlnode, deep=True)
return cls(**fields)
yaml.SafeLoader.add_constructor('!'+cls.__name__, yamlnode_constructor)
# def yamlnode_representer(dumper, data):
# rep = '!Node'
# return dumper.represent_scalar('!Node', '%sd%s' % data)
# yaml.add_representer(cls, yamlnode_representer)
@classmethod
def yaml2tree(cls, yamltree):
"""Class method that creates a tree from YAML.
| # Example yamltree data:
| - !Node &root
| name: "root node"
| parent: null
| data:
| testpara: 111
| - !Node &child1
| name: "child node"
| parent: *root
| - !Node &gc1
| name: "grand-child node"
| parent: *child1
:param yamltree: a string of YAML describing the nodes in the
tree, or the path to a file containing the data.
:type yamltree: str
:returns: the root node of the tree.
:rtype: Node
"""
if not cls.YAML_setup:
cls.setup_yaml()
cls.YAML_setup = True
if os.path.isfile(yamltree):
with open(yamltree) as fh:
yaml_data = fh.read()
else:
yaml_data = yamltree
list_of_nodes = yaml.safe_load(yaml_data)
yamltree_root = list_of_nodes[0]
return yamltree_root
def tree2yaml(self):
# if not self.__class__.YAML_setup:
# self.__class__.setup_yaml()
# self.__class__.YAML_setup = True
def make_anchor(node):
# if node._root is node:
# anchor = "root"
# else:
anchor = "coord"
for _c in node._coord:
anchor += "-" + str(_c)
return anchor
yltree = "# dummy \n"
for _n in self:
_ncopy = _n.copy()
delattr(_ncopy, "childs")
delattr(_ncopy, "parent")
# if _n.parent:
# _ncopy.parent = "*" + make_anchor(_n.parent)
_yl = yaml.dump(_ncopy, default_flow_style=False)
_n_yl = '!'+_n.__class__.__name__ + " &" + make_anchor(_n)
if _n.parent:
_n_yl += "\nparent: " + "*" + make_anchor(_n.parent)
# else:
# _n_yl += "\n"
_n_yl += _yl[_yl.index("\n"):]
yltree += _n_yl
yltree = textwrap.indent(yltree, " ")
yltree = yltree.strip()
yltree = yltree.replace("\n !", "\n- !")
return yltree
|
qwilka/vn-tree
|
vntree/node.py
|
Node.remove_child
|
python
|
def remove_child(self, idx=None, *, name=None, node=None):
if (idx and isinstance(idx, int) and
-len(self.childs) <= idx < len(self.childs) ):
return self.childs.pop(idx)
if name and isinstance(name, str):
found_node = None
for _n in self.childs:
if _n.name == name:
found_node = _n
break
if found_node:
self.childs.remove(found_node)
return found_node
if node and node in self.childs:
self.childs.remove(node)
return node
return False
|
Remove a child node from the current node instance.
:param idx: Index of child node to be removed.
:type idx: int
:param name: The first child node found with «name» will be removed.
:type name: str
:param node: Child node to be removed.
:type node: Node
:returns: The node that has been removed, or False if not successful.
:rtype: Node or False
|
train
|
https://github.com/qwilka/vn-tree/blob/f08106e9c7232d8748d78d1d39b019699a7407dd/vntree/node.py#L164-L191
| null |
class Node:
"""Class for creating vntree nodes.
:param name: node name
:type name: str or None
:param parent: The parent node of this node.
:type parent: Node or None
:param data: Dictionary containing node data.
:type data: dict or None
:param treedict: Dictionary specifying a complete tree.
:type treedict: dict or None
"""
YAML_setup = False
name = NodeAttr()
_vnpkl_fpath = TreeAttr("_vntree_meta")
def __init__(self, name=None, parent=None, data=None,
treedict=None, vnpkl_fpath=None):
if data and isinstance(data, dict):
#self.data = collections.defaultdict(dict, copy.deepcopy(data))
self.data = copy.deepcopy(data)
else:
self.data = {}
if name:
self.name = str(name)
elif not getattr(self, "name", None) and name is None:
self.name = ""
self.childs = []
if parent and issubclass(parent.__class__, Node):
parent.add_child(self)
elif parent is None:
self.parent = None
else:
raise TypeError("{}.__init__: instance «{}» argument «parent» type not valid: {}".format(self.__class__.__name__, name, type(parent)))
if callable(name):
self.name = str(name(self))
if treedict and isinstance(treedict, dict):
self.from_treedict(treedict)
if vnpkl_fpath and isinstance(vnpkl_fpath, str):
self._vnpkl_fpath = vnpkl_fpath
def __str__(self):
return "{} coord={} «{}»".format(self.__class__.__name__, self._coord, self.name)
def __iter__(self):
yield self
for node in itertools.chain(*map(iter, self.childs)):
yield node
def __reversed__(self):
for node in itertools.chain(*map(reversed, self.childs)):
yield node
yield self
def add_child(self, node):
"""Add a child node to the current node instance.
:param node: the child node instance.
:type node: Node
:returns: The new child node instance.
:rtype: Node
"""
if not issubclass(node.__class__, Node):
raise TypeError("{}.add_child: arg «node»=«{}», type {} not valid.".format(self.__class__.__name__, node, type(node)))
self.childs.append(node)
node.parent = self
return node
def copy(self):
"""Return a deep copy of the sub-tree rooted at this node instance.
:returns: Copy of the sub-tree rooted at this node instance.
:rtype: Node
"""
return copy.deepcopy(self)
@property
def _path(self):
"""Attribute indicating the absolute node path for this node.
Note that the absolute node path starts with a forward slash
followed by the root node's name: e.g:
`/root.name/child.name/grandchild.name`
Warning: it should be noted that use of _path assumes
that sibling nodes have unique names. If unique node paths
cannot be assured, use node attribute «_coord» instead.
:returns: The absolute node path for this node.
:rtype: str
"""
_path = pathlib.PurePosixPath(self.name)
_node = self
while _node.parent:
_path = _node.parent.name / _path
_node = _node.parent
_path = pathlib.posixpath.sep / _path
return _path.as_posix()
@property
def _coord(self):
"""Attribute indicating the tree coordinates for this node.
The tree coordinates of a node are expressed as a tuple of the
indices of the node and its ancestors, for example:
A grandchild node with node path
`/root.name/root.childs[2].name/root.childs[2].childs[0].name`
would have coordinates `(2,0)`.
The root node _coord is an empty tuple: `()`
:returns: the tree coordinates for this node.
:rtype: tuple
"""
_coord = []
_node = self
while _node.parent:
_idx = _node.parent.childs.index(_node)
_coord.insert(0, _idx)
_node = _node.parent
return tuple(_coord)
@property
def _level(self):
"""Attribute indicating the tree `level` for this node instance.
Note that the root node is defined as level 1.
:returns: the node `level`.
:rtype: int
"""
return len(self._coord) + 1
def get_data(self, *keys):
"""Get a value from the instance `data` dict.
Nested values are accessed by specifying the keys in sequence.
e.g. `node.get_data("country", "city")` would access
`node.data["country"]["city"]`
:param keys: the `data` dict keys referencing the required value.
:type keys: str
:returns: the value accessed by `keys` in `data`.
"""
if not keys:
_val = self.data
_datadict = self.data
for _key in keys:
_val = _datadict.get(_key, None)
if isinstance(_val, dict):
_datadict = _val
else:
break
if isinstance(_val, dict):
_val = copy.deepcopy(_val)
return _val
def set_data(self, *keys, value):
"""Set a value in the instance `data` dict.
:param keys: the `data` dict keys referencing the value in the `data` dict.
:type keys: str
:param value: the value to be set in the `data` dict. Note that
`value` is a keyword-only argument.
:returns: `True` if successful.
"""
_datadict = self.data
for ii, _key in enumerate(keys):
if ii==len(keys)-1:
_datadict[_key] = value
else:
if _key not in _datadict:
_datadict[_key] = {}
_datadict = _datadict[_key]
return True
@property
def _root(self):
"""Attribute referencing the root node of the tree.
:returns: the root node of the tree containing this instance.
:rtype: Node
"""
_n = self
while _n.parent:
_n = _n.parent
return _n
@property
def _ancestors(self):
"""Attribute referencing the tree ancestors of the node instance.
:returns: list of node ancestors in sequence, first item is
the current node instance (`self`), the last item is root.
:rtype: list of Node references
"""
# return list of ancestor nodes starting with self.parent and ending with root
_ancestors=[]
_n = self
while _n.parent:
_n = _n.parent
_ancestors.append(_n)
return _ancestors
def get_child_by_name(self, childname):
"""Get a child node of the current instance by its name.
:param childname: the name of the required child node.
:type childname: str
:returns: the first child node found with name `childname`.
:rtype: Node or None
"""
_childs = [_child for _child in self.childs if _child.name==childname]
if len(_childs)>1:
logger.warning("%s.get_child_by_name: node:«%s» has more than 1 childnode with name=«%s»." % (self.__class__.__name__, self.name, childname))
if len(_childs)==0:
_childnode = None
else:
_childnode = _childs[0]
return _childnode
def get_node_by_path(self, path):
"""Get a node from a node path.
Warning: use of this method assumes that sibling nodes have unique names,
if this is not assured the `get_node_by_coord` method can be used instead.
| Example with absolute node path:
| `node.get_node_by_path('/root.name/child.name/gchild.name')`
| Example with relative node path:
| `node.get_node_by_path('child.name/gchild.name')`
:param path: the absolute node path, or the node path relative
to the current node instance.
:type path: str
:returns: the node corresponding to `path`.
:rtype: Node or None
"""
if path==".":
return self
elif path.lstrip().startswith((".", "./")) or not isinstance(path, str):
logger.warning("%s.get_node_by_path: arg «path»=«%s», not correctly specified." % (self.__class__.__name__, path))
return None
_pathlist = list(filter(None, path.split("/")) ) # remove blank strings
if path.startswith("/"):
_node = self._root
_pathlist.pop(0) # remove rootnode name
else:
_node = self
for _nodename in _pathlist:
_node = _node.get_child_by_name(_nodename)
if _node is None:
logger.warning("%s.get_node_by_path: node«%s», arg `path`=«%s», cannot find node." % (self.__class__.__name__, self.name, path))
return None
return _node
def get_node_by_coord(self, coord, relative=False):
"""Get a node from a node coord.
:param coord: the coordinates of the required node.
:type coord: tuple or list
:param relative: `True` if coord is relative to the node instance,
`False` for absolute coordinates.
:type relative: bool
:returns: the node corresponding to `coord`.
:rtype: Node or None
"""
if not isinstance(coord, (list, tuple)) or False in list(map(lambda i: type(i)==int, coord)):
logger.warning("%s.get_node_by_coord: node«%s», arg «coord»=«%s», «coord» must be list or tuple of integers." % (self.__class__.__name__, self.name, coord))
return None
if relative:
_node = self
else:
_node = self._root # _node = self.get_rootnode()
for idx in coord:
_node = _node.childs[idx]
if _node is None:
logger.warning("%s.get_node_by_coord: node«%s», arg «coord»=«%s» not valid." % (self.__class__.__name__, self.name, coord))
return None
return _node
def find_one_node(self, *keys, value, decend=True):
"""Find a node on the branch of the instance with a
`keys=data` item in the `data` dict.
Nested values are accessed by specifying the keys in sequence.
e.g. `node.get_data("country", "city")` would access
`node.data["country"]["city"]`
:param keys: the `data` dict key(s) referencing the required value.
:type keys: str
:param value: the value corresponding to `keys`. Note that
`value` is a keyword-only argument.
:param decend: `decend=True` traverse down the branch sub-tree
starting from `self`. `decend=False` traverse up the
branch from `self` towards root.
:type decend: bool
:returns: the first node found with `keys=data` in the `data` dict.
:rtype: Node or None
"""
if decend:
traversal = self
else:
traversal = self._ancestors
for _node in traversal:
_val = _node.get_data(*keys)
if _val == value:
return _node
return None
def to_texttree(self, indent=3, func=True, symbol='ascii'):
"""Method returning a text representation of the (sub-)tree
rooted at the current node instance (`self`).
:param indent: the indentation width for each tree level.
:type indent: int
:param func: function returning a string representation for
each node. e.g. `func=lambda n: str(n._coord)`
would show the node coordinates.
`func=True` node.name displayed for each node.
`func=False` no node representation, just
the tree structure is displayed.
:type func: function or bool
:param symbol: tuple of tree symbol characters.
`None` or 'ascii' gives a preformed ascii tree, equivalent to tuple :code:`(|, +, |, |, |, -, .)`.
'box' preformed with box-drawing characters, equivalent to tuple :code:`(┬, └, ┬, ├, └, ─, ⋅)`.
'unicode' preformed with unicode characters.
:type symbol: tuple or str or None
:returns: a string representation of the tree.
:rtype: str
"""
if indent<2:
indent=2
if func is True: # default func prints node.name
func = lambda n: "{}".format(n.name)
if isinstance(symbol, (list, tuple)):
s_root, s_branch, s_spar, s_fnode = symbol
elif symbol=="unicode":
# ┬ └ ┬ ├ └ ─ ⋅
s_root, s_branch, s_fnode, s_mnode, s_lnode, s_spar, s_level = (
"\u252c", "\u2514", "\u252c", "\u251c", "\u2514", "\u2500", "\u22c5")
elif symbol=="box": # https://en.wikipedia.org/wiki/Box-drawing_character
# ┬ └ ┬ ├ └ ─ ⋅
s_root, s_branch, s_fnode, s_mnode, s_lnode, s_spar, s_level = (
"\u252c", "\u2514", "\u252c", "\u251c", "\u2514", "\u2500", "\u22c5")
else:
s_root, s_branch, s_fnode, s_mnode, s_lnode, s_spar, s_level = (
"|", "+", "|", "|", "|", "-", ".")
_text = ""
#local_root_level = len(self.ancestors)
local_root_level = self._level
for _n in self:
#level = len(node.ancestors) - local_root_level
level = _n._level - local_root_level
if level==0:
_text += s_root
elif _n.parent.childs[0] == _n and len(_n.parent.childs)>1: # first child
#s_spar="f"
_text += ( (s_level + " "*(indent-1))*(level-1)
+ s_branch
+ s_spar*(indent-1)
+ s_fnode)
elif _n.parent.childs[-1] == _n and len(_n.childs)==0: # last child, no children
#s_spar="l"
_text += ( (s_level + " "*(indent-1))*(level)
+ s_lnode )
elif _n.parent.childs[-1] == _n: # last child, has children
#s_spar="l"
_text += ( (s_level + " "*(indent-1))*(level)
+ s_mnode )
#+ s_spar*(indent-1) )
# elif level>0:
# _text += (s_level + " "*(indent-1))*(level-1) + s_branch + s_spar*(indent-1)
else:
#_text += s_fnode
#s_spar="m"
_text += ( (s_level + " "*(indent-1))*(level)
+ s_mnode )
#+ s_spar*(indent-1) )
if func and callable(func):
_text += func(_n)
_text += "\n"
return _text
def from_treedict(self, treedict):
if "data" in treedict:
#self.data = collections.defaultdict(dict, treedict["data"])
self.data = copy.deepcopy(treedict["data"])
for key, val in treedict.items():
if key in ["parent", "childs", "data"]:
continue
setattr(self, key, val)
if "childs" in treedict.keys():
for _childdict in treedict["childs"]:
#self.childs.append( self.__class__(parent=self, treedict=_childdict) )
self.__class__(parent=self, treedict=_childdict)
def to_treedict(self, recursive=True, vntree_meta=True):
# NOTE: replace vars(self) with self.__dict__ ( and self.__class__.__dict__ ?)
_dct = {k:v for k, v in vars(self).items() if k not in ["parent", "childs"]}
if not vntree_meta and "_vntree_meta" in _dct["data"]:
_dct["data"].pop("_vntree_meta")
if recursive and self.childs:
_dct["childs"] = []
for _child in self.childs:
_dct["childs"].append( _child.to_treedict(recursive=recursive) )
return _dct
def to_json(self, filepath, default=str):
pass
# def from_json(self, filepath):
# err = ""
# _treedict = None
# if isinstance(filepath, str) and os.path.isfile(filepath):
# try:
# with open(filepath, 'r') as _fh:
# _treedict = json.load(_fh)
# except Exception as err:
# pass
# if not _treedict:
# logger.warning("%s.from_json: node«%s», cannot open «filepath»=«%s», %s." % (self.__class__.__name__, self.name, filepath, err))
# return False
# else:
# self.from_treedict(treedict=_treedict)
# return True
def tree_compare(self, othertree, vntree_meta=False):
"""Compare the (sub-)tree rooted at `self` with another tree.
`tree_compare` converts the trees being compared into JSON string
representations, and uses `difflib.SequenceMatcher().ratio()` to
calculate a measure of the similarity of the strings.
:param othertree: the other tree for comparison.
:type othertree: Node
:param vntree_meta: include private vntree metadata in comparison.
:type vntree_meta: bool
:returns: similarity of the trees as a number between 0 and 1.
:rtype: float
"""
return SequenceMatcher(None,
json.dumps(self.to_treedict(vntree_meta=vntree_meta), default=str),
json.dumps(othertree.to_treedict(vntree_meta=vntree_meta), default=str)
).ratio()
def savefile(self, filepath=None):
"""Save (dump) the tree in a pickle file.
Note that this method saves the complete tree even when invoked on
a non-root node.
It is recommended to use the extension `.vnpkl` for this type of file.
:param filepath: the file path for the pickle file.
If `filepath=None` use `self._vnpkl_fpath` attribute, if set.
:type filepath: str or None
:returns: `True` if successful.
:rtype: bool
"""
if filepath:
self._vnpkl_fpath = os.path.abspath(filepath)
# if not _pfpath:
# logger.error("%s.save: «%s» file path «%s» not valid." % (self.__class__.__name__, self.name, _pfpath))
# return False
try:
with open(self._vnpkl_fpath, "wb") as pf:
pickle.dump(self._root.to_treedict(), pf)
except Exception as err:
logger.error("%s.savefile: arg `filepath`=«%s» `self._vnpkl_fpath`=«%s» error: %s" % (self.__class__.__name__, filepath, self._vnpkl_fpath, err))
return False
return True
@classmethod
def openfile(cls, filepath):
"""Class method that opens (load) a vntree pickle file.
:param filepath: the file path for the pickle file.
:type filepath: str
:returns: root node of tree or `False` if failure.
:rtype: Node or bool
"""
if not os.path.isfile(filepath):
logger.error("%s.openfile: arg `filepath`=«%s» not valid." % (cls.__name__, filepath))
return False
try:
with open(filepath, "rb") as pf:
pkldata = pickle.load(pf)
rootnode = cls(treedict=pkldata)
rootnode._vnpkl_fpath = os.path.abspath(filepath)
except Exception as err:
logger.error("%s.openfile: data in file «%s» not valid: %s" % (cls.__name__, filepath, err))
return False
return rootnode
@classmethod
def setup_yaml(cls):
def yamlnode_constructor(loader, yamlnode) :
fields = loader.construct_mapping(yamlnode, deep=True)
return cls(**fields)
yaml.SafeLoader.add_constructor('!'+cls.__name__, yamlnode_constructor)
# def yamlnode_representer(dumper, data):
# rep = '!Node'
# return dumper.represent_scalar('!Node', '%sd%s' % data)
# yaml.add_representer(cls, yamlnode_representer)
@classmethod
def yaml2tree(cls, yamltree):
"""Class method that creates a tree from YAML.
| # Example yamltree data:
| - !Node &root
| name: "root node"
| parent: null
| data:
| testpara: 111
| - !Node &child1
| name: "child node"
| parent: *root
| - !Node &gc1
| name: "grand-child node"
| parent: *child1
:param yamltree: a string of YAML describing the nodes in the
tree, or the path to a file containing the data.
:type yamltree: str
:returns: the root node of the tree.
:rtype: Node
"""
if not cls.YAML_setup:
cls.setup_yaml()
cls.YAML_setup = True
if os.path.isfile(yamltree):
with open(yamltree) as fh:
yaml_data = fh.read()
else:
yaml_data = yamltree
list_of_nodes = yaml.safe_load(yaml_data)
yamltree_root = list_of_nodes[0]
return yamltree_root
def tree2yaml(self):
    """Serialise the (sub-)tree rooted at this node as a YAML string,
    using coordinate-based anchors/aliases to encode parent links.

    :returns: YAML representation of the tree.
    :rtype: str
    """
    def _anchor_for(node):
        # Anchor derived from the node's tree coordinates, e.g. "coord-1-0".
        label = "coord"
        for idx in node._coord:
            label += "-" + str(idx)
        return label

    out = "# dummy \n"
    for node in self:
        # Dump a shallow stand-in without parent/childs links; the parent
        # link is re-expressed below as a YAML alias.
        stand_in = node.copy()
        delattr(stand_in, "childs")
        delattr(stand_in, "parent")
        dumped = yaml.dump(stand_in, default_flow_style=False)
        entry = '!' + node.__class__.__name__ + " &" + _anchor_for(node)
        if node.parent:
            entry += "\nparent: " + "*" + _anchor_for(node.parent)
        # Keep everything after yaml.dump's first line (the tag line).
        entry += dumped[dumped.index("\n"):]
        out += entry
    out = textwrap.indent(out, " ")
    out = out.strip()
    out = out.replace("\n !", "\n- !")
    return out
|
qwilka/vn-tree
|
vntree/node.py
|
Node._path
|
python
|
def _path(self):
    """Absolute node path for this node, e.g. `/root.name/child.name`.

    Assumes sibling nodes have unique names; otherwise use `_coord`.

    :returns: the absolute node path for this node.
    :rtype: str
    """
    names = [self.name]
    node = self
    while node.parent:
        node = node.parent
        names.append(node.name)
    joined = pathlib.PurePosixPath(*reversed(names))
    return (pathlib.posixpath.sep / joined).as_posix()
|
Attribute indicating the absolute node path for this node.
Note that the absolute node path starts with a forward slash
followed by the root node's name: e.g:
`/root.name/child.name/grandchild.name`
Warning: it should be noted that use of _path assumes
that sibling nodes have unique names. If unique node paths
cannot be assured, use node attribute «_coord» instead.
:returns: The absolute node path for this node.
:rtype: str
|
train
|
https://github.com/qwilka/vn-tree/blob/f08106e9c7232d8748d78d1d39b019699a7407dd/vntree/node.py#L194-L213
| null |
class Node:
    """Class for creating vntree nodes.

    :param name: node name
    :type name: str or None
    :param parent: the parent node of this node.
    :type parent: Node or None
    :param data: dictionary containing node data.
    :type data: dict or None
    :param treedict: dictionary specifying a complete tree.
    :type treedict: dict or None
    """

    # Class-level flag: has the !Node YAML constructor been registered yet?
    YAML_setup = False
    # Descriptors declared elsewhere in this module (NodeAttr/TreeAttr).
    name = NodeAttr()
    _vnpkl_fpath = TreeAttr("_vntree_meta")
def __init__(self, name=None, parent=None, data=None,
             treedict=None, vnpkl_fpath=None):
    """Initialise a node, optionally attaching it to *parent* and/or
    populating this node (and children) from *treedict*.

    :raises TypeError: if *parent* is neither None nor a Node subclass
        instance.
    """
    # Deep-copy caller-supplied data so the node owns its own dict.
    self.data = copy.deepcopy(data) if (data and isinstance(data, dict)) else {}
    if name:
        self.name = str(name)
    elif not getattr(self, "name", None) and name is None:
        self.name = ""
    self.childs = []
    if parent and issubclass(parent.__class__, Node):
        parent.add_child(self)
    elif parent is None:
        self.parent = None
    else:
        raise TypeError("{}.__init__: instance «{}» argument «parent» type not valid: {}".format(self.__class__.__name__, name, type(parent)))
    # A callable *name* is resolved only after the node is in the tree,
    # so the callable can inspect e.g. self._coord.
    if callable(name):
        self.name = str(name(self))
    if treedict and isinstance(treedict, dict):
        self.from_treedict(treedict)
    if vnpkl_fpath and isinstance(vnpkl_fpath, str):
        self._vnpkl_fpath = vnpkl_fpath
def __str__(self):
    """Short description: class name, tree coordinates and node name."""
    return "{} coord={} «{}»".format(self.__class__.__name__, self._coord, self.name)

def __iter__(self):
    """Depth-first, pre-order traversal of the (sub-)tree rooted here."""
    yield self
    for descendant in itertools.chain(*map(iter, self.childs)):
        yield descendant

def __reversed__(self):
    """Depth-first, post-order (bottom-up) traversal."""
    for descendant in itertools.chain(*map(reversed, self.childs)):
        yield descendant
    yield self
def add_child(self, node):
    """Attach *node* as a child of this node.

    :param node: the child node instance.
    :type node: Node
    :returns: the newly attached child node.
    :rtype: Node
    :raises TypeError: if *node* is not a Node subclass instance.
    """
    if not issubclass(node.__class__, Node):
        raise TypeError("{}.add_child: arg «node»=«{}», type {} not valid.".format(self.__class__.__name__, node, type(node)))
    node.parent = self
    self.childs.append(node)
    return node
def copy(self):
    """Return a deep copy of the sub-tree rooted at this node instance.

    :returns: copy of the sub-tree rooted at this node instance.
    :rtype: Node
    """
    # `copy` here resolves to the stdlib module at module scope, not this method.
    return copy.deepcopy(self)
def remove_child(self, idx=None, *, name=None, node=None):
    """Remove a child node from the current node instance.

    Exactly one selector should be given: positional *idx*, or the
    keyword-only *name* or *node*.

    :param idx: index of the child node to be removed.
    :type idx: int or None
    :param name: the first child node found with this name is removed.
    :type name: str or None
    :param node: child node instance to be removed.
    :type node: Node or None
    :returns: the node that has been removed, or False if unsuccessful.
    :rtype: Node or False
    """
    # BUGFIX: test `idx is not None` rather than bare truthiness — with the
    # original `if (idx and ...)`, index 0 (falsy) could never remove the
    # first child.
    if (idx is not None and isinstance(idx, int) and
            -len(self.childs) <= idx < len(self.childs)):
        return self.childs.pop(idx)
    if name and isinstance(name, str):
        for child in self.childs:
            if child.name == name:
                self.childs.remove(child)
                return child
    if node and node in self.childs:
        self.childs.remove(node)
        return node
    return False
@property
def _path(self):
    """Attribute indicating the absolute node path for this node.

    The absolute node path starts with a forward slash followed by the
    root node's name, e.g. `/root.name/child.name/grandchild.name`.
    Warning: assumes sibling nodes have unique names; if unique node
    paths cannot be assured, use node attribute «_coord» instead.

    :returns: the absolute node path for this node.
    :rtype: str
    """
    names = [self.name]
    node = self
    while node.parent:
        node = node.parent
        names.append(node.name)
    joined = pathlib.PurePosixPath(*reversed(names))
    return (pathlib.posixpath.sep / joined).as_posix()
@property
def _coord(self):
    """Attribute indicating the tree coordinates for this node.

    Coordinates are the tuple of child indices of this node and its
    ancestors, e.g. `root.childs[2].childs[0]` has coordinates `(2, 0)`.
    The root node's _coord is the empty tuple `()`.

    :returns: the tree coordinates for this node.
    :rtype: tuple
    """
    indices = []
    node = self
    while node.parent:
        indices.append(node.parent.childs.index(node))
        node = node.parent
    return tuple(reversed(indices))
@property
def _level(self):
    """Attribute indicating the tree `level` of this node instance.

    The root node is defined as level 1.

    :returns: the node `level`.
    :rtype: int
    """
    return 1 + len(self._coord)
def get_data(self, *keys):
    """Get a value from the instance `data` dict.

    Nested values are accessed by specifying keys in sequence, e.g.
    `node.get_data("country", "city")` accesses
    `node.data["country"]["city"]`. With no keys, the whole `data`
    dict is returned (as a deep copy).

    :param keys: the `data` dict keys referencing the required value.
    :type keys: str
    :returns: the value accessed by `keys` in `data` (dicts are
        returned as deep copies).
    """
    result = self.data
    container = self.data
    for key in keys:
        result = container.get(key, None)
        if isinstance(result, dict):
            container = result
        else:
            break
    if isinstance(result, dict):
        result = copy.deepcopy(result)
    return result
def set_data(self, *keys, value):
    """Set a value in the instance `data` dict.

    Intermediate dicts are created as needed for nested *keys*.

    :param keys: the `data` dict keys referencing the value location.
    :type keys: str
    :param value: the value to be set (keyword-only argument).
    :returns: `True` if successful.
    """
    target = self.data
    for pos, key in enumerate(keys):
        if pos == len(keys) - 1:
            target[key] = value
        else:
            target = target.setdefault(key, {})
    return True
@property
def _root(self):
    """Attribute referencing the root node of the tree.

    :returns: the root node of the tree containing this instance.
    :rtype: Node
    """
    node = self
    while node.parent:
        node = node.parent
    return node
@property
def _ancestors(self):
    """Attribute referencing the tree ancestors of this node.

    :returns: list of ancestors in sequence — the first item is this
        node's parent, the last item is the root node. (Note: `self`
        itself is NOT included; an earlier docstring claimed otherwise,
        but the traversal starts at the parent.)
    :rtype: list of Node references
    """
    chain = []
    node = self
    while node.parent:
        node = node.parent
        chain.append(node)
    return chain
def get_child_by_name(self, childname):
    """Get a child node of the current instance by its name.

    :param childname: the name of the required child node.
    :type childname: str
    :returns: the first child node found with name `childname`, or None.
    :rtype: Node or None
    """
    matches = [child for child in self.childs if child.name == childname]
    if len(matches) > 1:
        # Duplicate sibling names make name-based lookup ambiguous.
        logger.warning("%s.get_child_by_name: node:«%s» has more than 1 childnode with name=«%s»." % (self.__class__.__name__, self.name, childname))
    return matches[0] if matches else None
def get_node_by_path(self, path):
    """Get a node from a node path.

    Warning: assumes sibling nodes have unique names; if that cannot be
    assured, use `get_node_by_coord` instead.

    | Absolute: `node.get_node_by_path('/root.name/child.name/gchild.name')`
    | Relative: `node.get_node_by_path('child.name/gchild.name')`

    :param path: the absolute node path, or the node path relative to
        the current node instance.
    :type path: str
    :returns: the node corresponding to `path`, or None.
    :rtype: Node or None
    """
    # BUGFIX: type-check *path* before calling str methods on it — the
    # original called path.lstrip() first, so a non-string argument
    # raised AttributeError instead of logging and returning None.
    if not isinstance(path, str):
        logger.warning("%s.get_node_by_path: arg «path»=«%s», not correctly specified." % (self.__class__.__name__, path))
        return None
    if path == ".":
        return self
    if path.lstrip().startswith((".", "./")):
        logger.warning("%s.get_node_by_path: arg «path»=«%s», not correctly specified." % (self.__class__.__name__, path))
        return None
    parts = [seg for seg in path.split("/") if seg]  # drop blank segments
    if path.startswith("/"):
        node = self._root
        parts.pop(0)  # remove the root node's own name
    else:
        node = self
    for nodename in parts:
        node = node.get_child_by_name(nodename)
        if node is None:
            logger.warning("%s.get_node_by_path: node«%s», arg `path`=«%s», cannot find node." % (self.__class__.__name__, self.name, path))
            return None
    return node
def get_node_by_coord(self, coord, relative=False):
    """Get a node from a node coord.

    :param coord: the coordinates of the required node.
    :type coord: tuple or list
    :param relative: `True` if coord is relative to this node instance,
        `False` for absolute (root-relative) coordinates.
    :type relative: bool
    :returns: the node corresponding to `coord`, or None.
    :rtype: Node or None
    """
    # type(i) == int deliberately excludes bool (type(True) is bool).
    if not isinstance(coord, (list, tuple)) or not all(type(i) == int for i in coord):
        logger.warning("%s.get_node_by_coord: node«%s», arg «coord»=«%s», «coord» must be list or tuple of integers." % (self.__class__.__name__, self.name, coord))
        return None
    node = self if relative else self._root
    for idx in coord:
        node = node.childs[idx]
        if node is None:
            logger.warning("%s.get_node_by_coord: node«%s», arg «coord»=«%s» not valid." % (self.__class__.__name__, self.name, coord))
            return None
    return node
def find_one_node(self, *keys, value, decend=True):
    """Find the first node on this branch whose `data` value at *keys*
    equals *value*.

    Nested values are accessed by specifying keys in sequence, as with
    `get_data`.

    :param keys: the `data` dict key(s) referencing the required value.
    :type keys: str
    :param value: the value to match (keyword-only argument).
    :param decend: `True` → depth-first through the sub-tree rooted at
        `self`; `False` → walk up the ancestors towards root.
    :type decend: bool
    :returns: the first matching node, or None.
    :rtype: Node or None
    """
    candidates = self if decend else self._ancestors
    for node in candidates:
        if node.get_data(*keys) == value:
            return node
    return None
def to_texttree(self, indent=3, func=True, symbol='ascii'):
    """Return a text representation of the (sub-)tree rooted at this node.

    :param indent: the indentation width for each tree level (min 2).
    :type indent: int
    :param func: function returning a string representation for each
        node, e.g. `func=lambda n: str(n._coord)`. `func=True` shows
        node.name for each node; `func=False` shows structure only.
    :type func: function or bool
    :param symbol: tuple of 7 tree symbol characters
        (root, branch, first-child, mid-child, last-child, spar, level),
        or one of 'ascii' (default), 'box', 'unicode' for preformed sets.
    :type symbol: tuple or str or None
    :returns: a string representation of the tree.
    :rtype: str
    """
    if indent < 2:
        indent = 2
    if func is True:  # default func prints node.name
        func = lambda n: "{}".format(n.name)
    if isinstance(symbol, (list, tuple)):
        # BUGFIX: the original unpacked only 4 names here, leaving
        # s_mnode/s_lnode/s_level undefined (NameError when rendering);
        # a custom symbol set needs all 7 characters.
        (s_root, s_branch, s_fnode, s_mnode, s_lnode, s_spar,
         s_level) = symbol
    elif symbol == "unicode":
        # ┬ └ ┬ ├ └ ─ ⋅
        s_root, s_branch, s_fnode, s_mnode, s_lnode, s_spar, s_level = (
            "\u252c", "\u2514", "\u252c", "\u251c", "\u2514", "\u2500", "\u22c5")
    elif symbol == "box":  # https://en.wikipedia.org/wiki/Box-drawing_character
        s_root, s_branch, s_fnode, s_mnode, s_lnode, s_spar, s_level = (
            "\u252c", "\u2514", "\u252c", "\u251c", "\u2514", "\u2500", "\u22c5")
    else:
        s_root, s_branch, s_fnode, s_mnode, s_lnode, s_spar, s_level = (
            "|", "+", "|", "|", "|", "-", ".")
    _text = ""
    local_root_level = self._level
    for _n in self:
        level = _n._level - local_root_level
        if level == 0:
            _text += s_root
        elif _n.parent.childs[0] == _n and len(_n.parent.childs) > 1:  # first child
            _text += ((s_level + " " * (indent - 1)) * (level - 1)
                      + s_branch
                      + s_spar * (indent - 1)
                      + s_fnode)
        elif _n.parent.childs[-1] == _n and len(_n.childs) == 0:  # last child, no children
            _text += ((s_level + " " * (indent - 1)) * level
                      + s_lnode)
        elif _n.parent.childs[-1] == _n:  # last child, has children
            _text += ((s_level + " " * (indent - 1)) * level
                      + s_mnode)
        else:  # middle child
            _text += ((s_level + " " * (indent - 1)) * level
                      + s_mnode)
        if func and callable(func):
            _text += func(_n)
        _text += "\n"
    return _text
def from_treedict(self, treedict):
    """Populate this node (and, recursively, its children) from *treedict*.

    :param treedict: dict as produced by `to_treedict`; "parent" and
        "childs" entries are handled structurally, everything else is
        set as a node attribute.
    :type treedict: dict
    """
    if "data" in treedict:
        self.data = copy.deepcopy(treedict["data"])
    for key, val in treedict.items():
        if key not in ("parent", "childs", "data"):
            setattr(self, key, val)
    # Recursively build child nodes attached to this node.
    for childdict in treedict.get("childs", []):
        self.__class__(parent=self, treedict=childdict)
def to_treedict(self, recursive=True, vntree_meta=True):
    """Return a dict representation of this node and (optionally) its
    children.

    :param recursive: include the children under a "childs" key.
    :type recursive: bool
    :param vntree_meta: include the private "_vntree_meta" entry of `data`.
    :type vntree_meta: bool
    :returns: dict representation of the (sub-)tree.
    :rtype: dict
    """
    snapshot = {k: v for k, v in vars(self).items() if k not in ["parent", "childs"]}
    if not vntree_meta and "_vntree_meta" in snapshot["data"]:
        # BUGFIX: copy `data` before popping — the comprehension above is
        # shallow, so popping directly would delete "_vntree_meta" from
        # the live node's data dict as a side effect.
        snapshot["data"] = dict(snapshot["data"])
        snapshot["data"].pop("_vntree_meta")
    if recursive and self.childs:
        snapshot["childs"] = [child.to_treedict(recursive=recursive)
                              for child in self.childs]
    return snapshot
def to_json(self, filepath, default=str):
    """Serialise the tree to a JSON file.

    NOTE(review): not implemented in the original (stub body, plus a
    commented-out `from_json` sketch); kept as a stub so the public
    interface is unchanged.
    """
    pass
def tree_compare(self, othertree, vntree_meta=False):
    """Compare the (sub-)tree rooted at `self` with another tree.

    Both trees are converted to JSON strings and compared with
    `difflib.SequenceMatcher().ratio()`.

    :param othertree: the other tree for comparison.
    :type othertree: Node
    :param vntree_meta: include private vntree metadata in comparison.
    :type vntree_meta: bool
    :returns: similarity of the trees as a number between 0 and 1.
    :rtype: float
    """
    mine = json.dumps(self.to_treedict(vntree_meta=vntree_meta), default=str)
    theirs = json.dumps(othertree.to_treedict(vntree_meta=vntree_meta), default=str)
    return SequenceMatcher(None, mine, theirs).ratio()
def savefile(self, filepath=None):
    """Save (dump) the tree in a pickle file.

    Saves the COMPLETE tree even when invoked on a non-root node.
    The recommended file extension is `.vnpkl`.

    :param filepath: the file path for the pickle file. If None, use
        the stored `self._vnpkl_fpath` attribute, if set.
    :type filepath: str or None
    :returns: `True` if successful, `False` otherwise.
    :rtype: bool
    """
    if filepath:
        self._vnpkl_fpath = os.path.abspath(filepath)
    try:
        with open(self._vnpkl_fpath, "wb") as pf:
            pickle.dump(self._root.to_treedict(), pf)
    except Exception as err:
        logger.error("%s.savefile: arg `filepath`=«%s» `self._vnpkl_fpath`=«%s» error: %s" % (self.__class__.__name__, filepath, self._vnpkl_fpath, err))
        return False
    return True
@classmethod
def openfile(cls, filepath):
    """Class method that opens (loads) a vntree pickle file.

    :param filepath: the file path for the pickle file.
    :type filepath: str
    :returns: root node of the tree, or `False` on failure.
    :rtype: Node or bool
    """
    if not os.path.isfile(filepath):
        logger.error("%s.openfile: arg `filepath`=«%s» not valid." % (cls.__name__, filepath))
        return False
    try:
        with open(filepath, "rb") as pf:
            payload = pickle.load(pf)
        tree_root = cls(treedict=payload)
        # Remember where the tree was loaded from, for later savefile() calls.
        tree_root._vnpkl_fpath = os.path.abspath(filepath)
    except Exception as err:
        logger.error("%s.openfile: data in file «%s» not valid: %s" % (cls.__name__, filepath, err))
        return False
    return tree_root
@classmethod
def setup_yaml(cls):
    """Register a YAML constructor for the `!<ClassName>` tag on SafeLoader,
    so that `yaml.safe_load` can instantiate nodes of this class."""
    def _construct_node(loader, yamlnode):
        fields = loader.construct_mapping(yamlnode, deep=True)
        return cls(**fields)
    yaml.SafeLoader.add_constructor('!'+cls.__name__, _construct_node)
@classmethod
def yaml2tree(cls, yamltree):
    """Class method that creates a tree from YAML.

    Nodes are given as a YAML sequence of `!Node` mappings; parents are
    referenced with YAML anchors/aliases, e.g.::

        - !Node &root
          name: "root node"
          parent: null
        - !Node &child1
          name: "child node"
          parent: *root

    :param yamltree: YAML describing the tree nodes, or the path to a
        file containing that YAML.
    :type yamltree: str
    :returns: the root node of the tree.
    :rtype: Node
    """
    # Register the !Node constructor once per class.
    if not cls.YAML_setup:
        cls.setup_yaml()
        cls.YAML_setup = True
    if os.path.isfile(yamltree):
        with open(yamltree) as fh:
            yaml_data = fh.read()
    else:
        yaml_data = yamltree
    nodes = yaml.safe_load(yaml_data)
    # The first item in the sequence is, by convention, the root node.
    return nodes[0]
def tree2yaml(self):
    """Serialise the (sub-)tree rooted at this node as a YAML string,
    using coordinate-based anchors/aliases to encode parent links.

    :returns: YAML representation of the tree.
    :rtype: str
    """
    def _anchor_for(node):
        # Anchor derived from the node's tree coordinates, e.g. "coord-1-0".
        label = "coord"
        for idx in node._coord:
            label += "-" + str(idx)
        return label

    out = "# dummy \n"
    for node in self:
        # Dump a shallow stand-in without parent/childs links; the parent
        # link is re-expressed below as a YAML alias.
        stand_in = node.copy()
        delattr(stand_in, "childs")
        delattr(stand_in, "parent")
        dumped = yaml.dump(stand_in, default_flow_style=False)
        entry = '!' + node.__class__.__name__ + " &" + _anchor_for(node)
        if node.parent:
            entry += "\nparent: " + "*" + _anchor_for(node.parent)
        # Keep everything after yaml.dump's first line (the tag line).
        entry += dumped[dumped.index("\n"):]
        out += entry
    out = textwrap.indent(out, " ")
    out = out.strip()
    out = out.replace("\n !", "\n- !")
    return out
|
qwilka/vn-tree
|
vntree/node.py
|
Node._coord
|
python
|
def _coord(self):
    """Tree coordinates of this node: the tuple of child indices of this
    node and its ancestors. The root node's coordinates are `()`.

    :returns: the tree coordinates for this node.
    :rtype: tuple
    """
    indices = []
    node = self
    while node.parent:
        indices.append(node.parent.childs.index(node))
        node = node.parent
    return tuple(reversed(indices))
|
Attribute indicating the tree coordinates for this node.
The tree coordinates of a node are expressed as a tuple of the
indices of the node and its ancestors, for example:
A grandchild node with node path
`/root.name/root.childs[2].name/root.childs[2].childs[0].name`
would have coordinates `(2,0)`.
The root node _coord is an empty tuple: `()`
:returns: the tree coordinates for this node.
:rtype: tuple
|
train
|
https://github.com/qwilka/vn-tree/blob/f08106e9c7232d8748d78d1d39b019699a7407dd/vntree/node.py#L216-L235
| null |
class Node:
"""Class for creating vntree nodes.
:param name: node name
:type name: str or None
:param parent: The parent node of this node.
:type parent: Node or None
:param data: Dictionary containing node data.
:type data: dict or None
:param treedict: Dictionary specifying a complete tree.
:type treedict: dict or None
"""
YAML_setup = False
name = NodeAttr()
_vnpkl_fpath = TreeAttr("_vntree_meta")
def __init__(self, name=None, parent=None, data=None,
treedict=None, vnpkl_fpath=None):
if data and isinstance(data, dict):
#self.data = collections.defaultdict(dict, copy.deepcopy(data))
self.data = copy.deepcopy(data)
else:
self.data = {}
if name:
self.name = str(name)
elif not getattr(self, "name", None) and name is None:
self.name = ""
self.childs = []
if parent and issubclass(parent.__class__, Node):
parent.add_child(self)
elif parent is None:
self.parent = None
else:
raise TypeError("{}.__init__: instance «{}» argument «parent» type not valid: {}".format(self.__class__.__name__, name, type(parent)))
if callable(name):
self.name = str(name(self))
if treedict and isinstance(treedict, dict):
self.from_treedict(treedict)
if vnpkl_fpath and isinstance(vnpkl_fpath, str):
self._vnpkl_fpath = vnpkl_fpath
def __str__(self):
return "{} coord={} «{}»".format(self.__class__.__name__, self._coord, self.name)
def __iter__(self):
yield self
for node in itertools.chain(*map(iter, self.childs)):
yield node
def __reversed__(self):
for node in itertools.chain(*map(reversed, self.childs)):
yield node
yield self
def add_child(self, node):
"""Add a child node to the current node instance.
:param node: the child node instance.
:type node: Node
:returns: The new child node instance.
:rtype: Node
"""
if not issubclass(node.__class__, Node):
raise TypeError("{}.add_child: arg «node»=«{}», type {} not valid.".format(self.__class__.__name__, node, type(node)))
self.childs.append(node)
node.parent = self
return node
def copy(self):
"""Return a deep copy of the sub-tree rooted at this node instance.
:returns: Copy of the sub-tree rooted at this node instance.
:rtype: Node
"""
return copy.deepcopy(self)
def remove_child(self, idx=None, *, name=None, node=None):
    """Remove a child node from the current node instance.

    Exactly one selector should be given: positional *idx*, or the
    keyword-only *name* or *node*.

    :param idx: index of the child node to be removed.
    :type idx: int or None
    :param name: the first child node found with this name is removed.
    :type name: str or None
    :param node: child node instance to be removed.
    :type node: Node or None
    :returns: the node that has been removed, or False if unsuccessful.
    :rtype: Node or False
    """
    # BUGFIX: test `idx is not None` rather than bare truthiness — with the
    # original `if (idx and ...)`, index 0 (falsy) could never remove the
    # first child.
    if (idx is not None and isinstance(idx, int) and
            -len(self.childs) <= idx < len(self.childs)):
        return self.childs.pop(idx)
    if name and isinstance(name, str):
        for child in self.childs:
            if child.name == name:
                self.childs.remove(child)
                return child
    if node and node in self.childs:
        self.childs.remove(node)
        return node
    return False
@property
def _path(self):
"""Attribute indicating the absolute node path for this node.
Note that the absolute node path starts with a forward slash
followed by the root node's name: e.g:
`/root.name/child.name/grandchild.name`
Warning: it should be noted that use of _path assumes
that sibling nodes have unique names. If unique node paths
cannot be assured, use node attribute «_coord» instead.
:returns: The absolute node path for this node.
:rtype: str
"""
_path = pathlib.PurePosixPath(self.name)
_node = self
while _node.parent:
_path = _node.parent.name / _path
_node = _node.parent
_path = pathlib.posixpath.sep / _path
return _path.as_posix()
@property
def _coord(self):
    """Attribute indicating the tree coordinates for this node.

    Coordinates are the tuple of child indices of this node and its
    ancestors; the root node's _coord is the empty tuple `()`.

    NOTE(review): in this copy of the class the body of `_coord` had
    been lost — only a stray second `@property` remained stacked on
    `_level`, which both broke `_level` (a property wrapping a property
    object) and removed the `_coord` attribute that `_level` reads.
    Restored to match this record's own `_coord` implementation.

    :returns: the tree coordinates for this node.
    :rtype: tuple
    """
    _coord = []
    _node = self
    while _node.parent:
        _idx = _node.parent.childs.index(_node)
        _coord.insert(0, _idx)
        _node = _node.parent
    return tuple(_coord)

@property
def _level(self):
    """Attribute indicating the tree `level` for this node instance.

    The root node is defined as level 1.

    :returns: the node `level`.
    :rtype: int
    """
    return len(self._coord) + 1
def get_data(self, *keys):
"""Get a value from the instance `data` dict.
Nested values are accessed by specifying the keys in sequence.
e.g. `node.get_data("country", "city")` would access
`node.data["country"]["city"]`
:param keys: the `data` dict keys referencing the required value.
:type keys: str
:returns: the value accessed by `keys` in `data`.
"""
if not keys:
_val = self.data
_datadict = self.data
for _key in keys:
_val = _datadict.get(_key, None)
if isinstance(_val, dict):
_datadict = _val
else:
break
if isinstance(_val, dict):
_val = copy.deepcopy(_val)
return _val
def set_data(self, *keys, value):
"""Set a value in the instance `data` dict.
:param keys: the `data` dict keys referencing the value in the `data` dict.
:type keys: str
:param value: the value to be set in the `data` dict. Note that
`value` is a keyword-only argument.
:returns: `True` if successful.
"""
_datadict = self.data
for ii, _key in enumerate(keys):
if ii==len(keys)-1:
_datadict[_key] = value
else:
if _key not in _datadict:
_datadict[_key] = {}
_datadict = _datadict[_key]
return True
@property
def _root(self):
"""Attribute referencing the root node of the tree.
:returns: the root node of the tree containing this instance.
:rtype: Node
"""
_n = self
while _n.parent:
_n = _n.parent
return _n
@property
def _ancestors(self):
"""Attribute referencing the tree ancestors of the node instance.
:returns: list of node ancestors in sequence, first item is
the current node instance (`self`), the last item is root.
:rtype: list of Node references
"""
# return list of ancestor nodes starting with self.parent and ending with root
_ancestors=[]
_n = self
while _n.parent:
_n = _n.parent
_ancestors.append(_n)
return _ancestors
def get_child_by_name(self, childname):
"""Get a child node of the current instance by its name.
:param childname: the name of the required child node.
:type childname: str
:returns: the first child node found with name `childname`.
:rtype: Node or None
"""
_childs = [_child for _child in self.childs if _child.name==childname]
if len(_childs)>1:
logger.warning("%s.get_child_by_name: node:«%s» has more than 1 childnode with name=«%s»." % (self.__class__.__name__, self.name, childname))
if len(_childs)==0:
_childnode = None
else:
_childnode = _childs[0]
return _childnode
def get_node_by_path(self, path):
"""Get a node from a node path.
Warning: use of this method assumes that sibling nodes have unique names,
if this is not assured the `get_node_by_coord` method can be used instead.
| Example with absolute node path:
| `node.get_node_by_path('/root.name/child.name/gchild.name')`
| Example with relative node path:
| `node.get_node_by_path('child.name/gchild.name')`
:param path: the absolute node path, or the node path relative
to the current node instance.
:type path: str
:returns: the node corresponding to `path`.
:rtype: Node or None
"""
if path==".":
return self
elif path.lstrip().startswith((".", "./")) or not isinstance(path, str):
logger.warning("%s.get_node_by_path: arg «path»=«%s», not correctly specified." % (self.__class__.__name__, path))
return None
_pathlist = list(filter(None, path.split("/")) ) # remove blank strings
if path.startswith("/"):
_node = self._root
_pathlist.pop(0) # remove rootnode name
else:
_node = self
for _nodename in _pathlist:
_node = _node.get_child_by_name(_nodename)
if _node is None:
logger.warning("%s.get_node_by_path: node«%s», arg `path`=«%s», cannot find node." % (self.__class__.__name__, self.name, path))
return None
return _node
def get_node_by_coord(self, coord, relative=False):
"""Get a node from a node coord.
:param coord: the coordinates of the required node.
:type coord: tuple or list
:param relative: `True` if coord is relative to the node instance,
`False` for absolute coordinates.
:type relative: bool
:returns: the node corresponding to `coord`.
:rtype: Node or None
"""
if not isinstance(coord, (list, tuple)) or False in list(map(lambda i: type(i)==int, coord)):
logger.warning("%s.get_node_by_coord: node«%s», arg «coord»=«%s», «coord» must be list or tuple of integers." % (self.__class__.__name__, self.name, coord))
return None
if relative:
_node = self
else:
_node = self._root # _node = self.get_rootnode()
for idx in coord:
_node = _node.childs[idx]
if _node is None:
logger.warning("%s.get_node_by_coord: node«%s», arg «coord»=«%s» not valid." % (self.__class__.__name__, self.name, coord))
return None
return _node
def find_one_node(self, *keys, value, decend=True):
"""Find a node on the branch of the instance with a
`keys=data` item in the `data` dict.
Nested values are accessed by specifying the keys in sequence.
e.g. `node.get_data("country", "city")` would access
`node.data["country"]["city"]`
:param keys: the `data` dict key(s) referencing the required value.
:type keys: str
:param value: the value corresponding to `keys`. Note that
`value` is a keyword-only argument.
:param decend: `decend=True` traverse down the branch sub-tree
starting from `self`. `decend=False` traverse up the
branch from `self` towards root.
:type decend: bool
:returns: the first node found with `keys=data` in the `data` dict.
:rtype: Node or None
"""
if decend:
traversal = self
else:
traversal = self._ancestors
for _node in traversal:
_val = _node.get_data(*keys)
if _val == value:
return _node
return None
def to_texttree(self, indent=3, func=True, symbol='ascii'):
"""Method returning a text representation of the (sub-)tree
rooted at the current node instance (`self`).
:param indent: the indentation width for each tree level.
:type indent: int
:param func: function returning a string representation for
each node. e.g. `func=lambda n: str(n._coord)`
would show the node coordinates.
`func=True` node.name displayed for each node.
`func=False` no node representation, just
the tree structure is displayed.
:type func: function or bool
:param symbol: tuple of tree symbol characters.
`None` or 'ascii' gives a preformed ascii tree, equivalent to tuple :code:`(|, +, |, |, |, -, .)`.
'box' preformed with box-drawing characters, equivalent to tuple :code:`(┬, └, ┬, ├, └, ─, ⋅)`.
'unicode' preformed with unicode characters.
:type symbol: tuple or str or None
:returns: a string representation of the tree.
:rtype: str
"""
if indent<2:
indent=2
if func is True: # default func prints node.name
func = lambda n: "{}".format(n.name)
if isinstance(symbol, (list, tuple)):
s_root, s_branch, s_spar, s_fnode = symbol
elif symbol=="unicode":
# ┬ └ ┬ ├ └ ─ ⋅
s_root, s_branch, s_fnode, s_mnode, s_lnode, s_spar, s_level = (
"\u252c", "\u2514", "\u252c", "\u251c", "\u2514", "\u2500", "\u22c5")
elif symbol=="box": # https://en.wikipedia.org/wiki/Box-drawing_character
# ┬ └ ┬ ├ └ ─ ⋅
s_root, s_branch, s_fnode, s_mnode, s_lnode, s_spar, s_level = (
"\u252c", "\u2514", "\u252c", "\u251c", "\u2514", "\u2500", "\u22c5")
else:
s_root, s_branch, s_fnode, s_mnode, s_lnode, s_spar, s_level = (
"|", "+", "|", "|", "|", "-", ".")
_text = ""
#local_root_level = len(self.ancestors)
local_root_level = self._level
for _n in self:
#level = len(node.ancestors) - local_root_level
level = _n._level - local_root_level
if level==0:
_text += s_root
elif _n.parent.childs[0] == _n and len(_n.parent.childs)>1: # first child
#s_spar="f"
_text += ( (s_level + " "*(indent-1))*(level-1)
+ s_branch
+ s_spar*(indent-1)
+ s_fnode)
elif _n.parent.childs[-1] == _n and len(_n.childs)==0: # last child, no children
#s_spar="l"
_text += ( (s_level + " "*(indent-1))*(level)
+ s_lnode )
elif _n.parent.childs[-1] == _n: # last child, has children
#s_spar="l"
_text += ( (s_level + " "*(indent-1))*(level)
+ s_mnode )
#+ s_spar*(indent-1) )
# elif level>0:
# _text += (s_level + " "*(indent-1))*(level-1) + s_branch + s_spar*(indent-1)
else:
#_text += s_fnode
#s_spar="m"
_text += ( (s_level + " "*(indent-1))*(level)
+ s_mnode )
#+ s_spar*(indent-1) )
if func and callable(func):
_text += func(_n)
_text += "\n"
return _text
def from_treedict(self, treedict):
if "data" in treedict:
#self.data = collections.defaultdict(dict, treedict["data"])
self.data = copy.deepcopy(treedict["data"])
for key, val in treedict.items():
if key in ["parent", "childs", "data"]:
continue
setattr(self, key, val)
if "childs" in treedict.keys():
for _childdict in treedict["childs"]:
#self.childs.append( self.__class__(parent=self, treedict=_childdict) )
self.__class__(parent=self, treedict=_childdict)
def to_treedict(self, recursive=True, vntree_meta=True):
# NOTE: replace vars(self) with self.__dict__ ( and self.__class__.__dict__ ?)
_dct = {k:v for k, v in vars(self).items() if k not in ["parent", "childs"]}
if not vntree_meta and "_vntree_meta" in _dct["data"]:
_dct["data"].pop("_vntree_meta")
if recursive and self.childs:
_dct["childs"] = []
for _child in self.childs:
_dct["childs"].append( _child.to_treedict(recursive=recursive) )
return _dct
def to_json(self, filepath, default=str):
pass
# def from_json(self, filepath):
# err = ""
# _treedict = None
# if isinstance(filepath, str) and os.path.isfile(filepath):
# try:
# with open(filepath, 'r') as _fh:
# _treedict = json.load(_fh)
# except Exception as err:
# pass
# if not _treedict:
# logger.warning("%s.from_json: node«%s», cannot open «filepath»=«%s», %s." % (self.__class__.__name__, self.name, filepath, err))
# return False
# else:
# self.from_treedict(treedict=_treedict)
# return True
def tree_compare(self, othertree, vntree_meta=False):
"""Compare the (sub-)tree rooted at `self` with another tree.
`tree_compare` converts the trees being compared into JSON string
representations, and uses `difflib.SequenceMatcher().ratio()` to
calculate a measure of the similarity of the strings.
:param othertree: the other tree for comparison.
:type othertree: Node
:param vntree_meta: include private vntree metadata in comparison.
:type vntree_meta: bool
:returns: similarity of the trees as a number between 0 and 1.
:rtype: float
"""
return SequenceMatcher(None,
json.dumps(self.to_treedict(vntree_meta=vntree_meta), default=str),
json.dumps(othertree.to_treedict(vntree_meta=vntree_meta), default=str)
).ratio()
def savefile(self, filepath=None):
"""Save (dump) the tree in a pickle file.
Note that this method saves the complete tree even when invoked on
a non-root node.
It is recommended to use the extension `.vnpkl` for this type of file.
:param filepath: the file path for the pickle file.
If `filepath=None` use `self._vnpkl_fpath` attribute, if set.
:type filepath: str or None
:returns: `True` if successful.
:rtype: bool
"""
if filepath:
self._vnpkl_fpath = os.path.abspath(filepath)
# if not _pfpath:
# logger.error("%s.save: «%s» file path «%s» not valid." % (self.__class__.__name__, self.name, _pfpath))
# return False
try:
with open(self._vnpkl_fpath, "wb") as pf:
pickle.dump(self._root.to_treedict(), pf)
except Exception as err:
logger.error("%s.savefile: arg `filepath`=«%s» `self._vnpkl_fpath`=«%s» error: %s" % (self.__class__.__name__, filepath, self._vnpkl_fpath, err))
return False
return True
@classmethod
def openfile(cls, filepath):
"""Class method that opens (load) a vntree pickle file.
:param filepath: the file path for the pickle file.
:type filepath: str
:returns: root node of tree or `False` if failure.
:rtype: Node or bool
"""
if not os.path.isfile(filepath):
logger.error("%s.openfile: arg `filepath`=«%s» not valid." % (cls.__name__, filepath))
return False
try:
with open(filepath, "rb") as pf:
pkldata = pickle.load(pf)
rootnode = cls(treedict=pkldata)
rootnode._vnpkl_fpath = os.path.abspath(filepath)
except Exception as err:
logger.error("%s.openfile: data in file «%s» not valid: %s" % (cls.__name__, filepath, err))
return False
return rootnode
@classmethod
def setup_yaml(cls):
def yamlnode_constructor(loader, yamlnode) :
fields = loader.construct_mapping(yamlnode, deep=True)
return cls(**fields)
yaml.SafeLoader.add_constructor('!'+cls.__name__, yamlnode_constructor)
# def yamlnode_representer(dumper, data):
# rep = '!Node'
# return dumper.represent_scalar('!Node', '%sd%s' % data)
# yaml.add_representer(cls, yamlnode_representer)
@classmethod
def yaml2tree(cls, yamltree):
"""Class method that creates a tree from YAML.
| # Example yamltree data:
| - !Node &root
| name: "root node"
| parent: null
| data:
| testpara: 111
| - !Node &child1
| name: "child node"
| parent: *root
| - !Node &gc1
| name: "grand-child node"
| parent: *child1
:param yamltree: a string of YAML describing the nodes in the
tree, or the path to a file containing the data.
:type yamltree: str
:returns: the root node of the tree.
:rtype: Node
"""
if not cls.YAML_setup:
cls.setup_yaml()
cls.YAML_setup = True
if os.path.isfile(yamltree):
with open(yamltree) as fh:
yaml_data = fh.read()
else:
yaml_data = yamltree
list_of_nodes = yaml.safe_load(yaml_data)
yamltree_root = list_of_nodes[0]
return yamltree_root
def tree2yaml(self):
# if not self.__class__.YAML_setup:
# self.__class__.setup_yaml()
# self.__class__.YAML_setup = True
def make_anchor(node):
# if node._root is node:
# anchor = "root"
# else:
anchor = "coord"
for _c in node._coord:
anchor += "-" + str(_c)
return anchor
yltree = "# dummy \n"
for _n in self:
_ncopy = _n.copy()
delattr(_ncopy, "childs")
delattr(_ncopy, "parent")
# if _n.parent:
# _ncopy.parent = "*" + make_anchor(_n.parent)
_yl = yaml.dump(_ncopy, default_flow_style=False)
_n_yl = '!'+_n.__class__.__name__ + " &" + make_anchor(_n)
if _n.parent:
_n_yl += "\nparent: " + "*" + make_anchor(_n.parent)
# else:
# _n_yl += "\n"
_n_yl += _yl[_yl.index("\n"):]
yltree += _n_yl
yltree = textwrap.indent(yltree, " ")
yltree = yltree.strip()
yltree = yltree.replace("\n !", "\n- !")
return yltree
|
qwilka/vn-tree
|
vntree/node.py
|
Node.set_data
|
python
|
def set_data(self, *keys, value):
_datadict = self.data
for ii, _key in enumerate(keys):
if ii==len(keys)-1:
_datadict[_key] = value
else:
if _key not in _datadict:
_datadict[_key] = {}
_datadict = _datadict[_key]
return True
|
Set a value in the instance `data` dict.
:param keys: the `data` dict keys referencing the value in the `data` dict.
:type keys: str
:param value: the value to be set in the `data` dict. Note that
`value` is a keyword-only argument.
:returns: `True` if successful.
|
train
|
https://github.com/qwilka/vn-tree/blob/f08106e9c7232d8748d78d1d39b019699a7407dd/vntree/node.py#L275-L292
| null |
class Node:
"""Class for creating vntree nodes.
:param name: node name
:type name: str or None
:param parent: The parent node of this node.
:type parent: Node or None
:param data: Dictionary containing node data.
:type data: dict or None
:param treedict: Dictionary specifying a complete tree.
:type treedict: dict or None
"""
YAML_setup = False
name = NodeAttr()
_vnpkl_fpath = TreeAttr("_vntree_meta")
def __init__(self, name=None, parent=None, data=None,
treedict=None, vnpkl_fpath=None):
if data and isinstance(data, dict):
#self.data = collections.defaultdict(dict, copy.deepcopy(data))
self.data = copy.deepcopy(data)
else:
self.data = {}
if name:
self.name = str(name)
elif not getattr(self, "name", None) and name is None:
self.name = ""
self.childs = []
if parent and issubclass(parent.__class__, Node):
parent.add_child(self)
elif parent is None:
self.parent = None
else:
raise TypeError("{}.__init__: instance «{}» argument «parent» type not valid: {}".format(self.__class__.__name__, name, type(parent)))
if callable(name):
self.name = str(name(self))
if treedict and isinstance(treedict, dict):
self.from_treedict(treedict)
if vnpkl_fpath and isinstance(vnpkl_fpath, str):
self._vnpkl_fpath = vnpkl_fpath
def __str__(self):
return "{} coord={} «{}»".format(self.__class__.__name__, self._coord, self.name)
def __iter__(self):
yield self
for node in itertools.chain(*map(iter, self.childs)):
yield node
def __reversed__(self):
for node in itertools.chain(*map(reversed, self.childs)):
yield node
yield self
def add_child(self, node):
"""Add a child node to the current node instance.
:param node: the child node instance.
:type node: Node
:returns: The new child node instance.
:rtype: Node
"""
if not issubclass(node.__class__, Node):
raise TypeError("{}.add_child: arg «node»=«{}», type {} not valid.".format(self.__class__.__name__, node, type(node)))
self.childs.append(node)
node.parent = self
return node
def copy(self):
"""Return a deep copy of the sub-tree rooted at this node instance.
:returns: Copy of the sub-tree rooted at this node instance.
:rtype: Node
"""
return copy.deepcopy(self)
def remove_child(self, idx=None, *, name=None, node=None):
"""Remove a child node from the current node instance.
:param idx: Index of child node to be removed.
:type idx: int
:param name: The first child node found with «name» will be removed.
:type name: str
:param node: Child node to be removed.
:type node: Node
:returns: The node that has been removed, or False if not successful.
:rtype: Node or False
"""
if (idx and isinstance(idx, int) and
-len(self.childs) <= idx < len(self.childs) ):
return self.childs.pop(idx)
if name and isinstance(name, str):
found_node = None
for _n in self.childs:
if _n.name == name:
found_node = _n
break
if found_node:
self.childs.remove(found_node)
return found_node
if node and node in self.childs:
self.childs.remove(node)
return node
return False
@property
def _path(self):
"""Attribute indicating the absolute node path for this node.
Note that the absolute node path starts with a forward slash
followed by the root node's name: e.g:
`/root.name/child.name/grandchild.name`
Warning: it should be noted that use of _path assumes
that sibling nodes have unique names. If unique node paths
cannot be assured, use node attribute «_coord» instead.
:returns: The absolute node path for this node.
:rtype: str
"""
_path = pathlib.PurePosixPath(self.name)
_node = self
while _node.parent:
_path = _node.parent.name / _path
_node = _node.parent
_path = pathlib.posixpath.sep / _path
return _path.as_posix()
@property
def _coord(self):
"""Attribute indicating the tree coordinates for this node.
The tree coordinates of a node are expressed as a tuple of the
indices of the node and its ancestors, for example:
A grandchild node with node path
`/root.name/root.childs[2].name/root.childs[2].childs[0].name`
would have coordinates `(2,0)`.
The root node _coord is an empty tuple: `()`
:returns: the tree coordinates for this node.
:rtype: tuple
"""
_coord = []
_node = self
while _node.parent:
_idx = _node.parent.childs.index(_node)
_coord.insert(0, _idx)
_node = _node.parent
return tuple(_coord)
@property
def _level(self):
"""Attribute indicating the tree `level` for this node instance.
Note that the root node is defined as level 1.
:returns: the node `level`.
:rtype: int
"""
return len(self._coord) + 1
def get_data(self, *keys):
"""Get a value from the instance `data` dict.
Nested values are accessed by specifying the keys in sequence.
e.g. `node.get_data("country", "city")` would access
`node.data["country"]["city"]`
:param keys: the `data` dict keys referencing the required value.
:type keys: str
:returns: the value accessed by `keys` in `data`.
"""
if not keys:
_val = self.data
_datadict = self.data
for _key in keys:
_val = _datadict.get(_key, None)
if isinstance(_val, dict):
_datadict = _val
else:
break
if isinstance(_val, dict):
_val = copy.deepcopy(_val)
return _val
@property
def _root(self):
"""Attribute referencing the root node of the tree.
:returns: the root node of the tree containing this instance.
:rtype: Node
"""
_n = self
while _n.parent:
_n = _n.parent
return _n
@property
def _ancestors(self):
"""Attribute referencing the tree ancestors of the node instance.
:returns: list of node ancestors in sequence, first item is
the current node instance (`self`), the last item is root.
:rtype: list of Node references
"""
# return list of ancestor nodes starting with self.parent and ending with root
_ancestors=[]
_n = self
while _n.parent:
_n = _n.parent
_ancestors.append(_n)
return _ancestors
def get_child_by_name(self, childname):
"""Get a child node of the current instance by its name.
:param childname: the name of the required child node.
:type childname: str
:returns: the first child node found with name `childname`.
:rtype: Node or None
"""
_childs = [_child for _child in self.childs if _child.name==childname]
if len(_childs)>1:
logger.warning("%s.get_child_by_name: node:«%s» has more than 1 childnode with name=«%s»." % (self.__class__.__name__, self.name, childname))
if len(_childs)==0:
_childnode = None
else:
_childnode = _childs[0]
return _childnode
def get_node_by_path(self, path):
"""Get a node from a node path.
Warning: use of this method assumes that sibling nodes have unique names,
if this is not assured the `get_node_by_coord` method can be used instead.
| Example with absolute node path:
| `node.get_node_by_path('/root.name/child.name/gchild.name')`
| Example with relative node path:
| `node.get_node_by_path('child.name/gchild.name')`
:param path: the absolute node path, or the node path relative
to the current node instance.
:type path: str
:returns: the node corresponding to `path`.
:rtype: Node or None
"""
if path==".":
return self
elif path.lstrip().startswith((".", "./")) or not isinstance(path, str):
logger.warning("%s.get_node_by_path: arg «path»=«%s», not correctly specified." % (self.__class__.__name__, path))
return None
_pathlist = list(filter(None, path.split("/")) ) # remove blank strings
if path.startswith("/"):
_node = self._root
_pathlist.pop(0) # remove rootnode name
else:
_node = self
for _nodename in _pathlist:
_node = _node.get_child_by_name(_nodename)
if _node is None:
logger.warning("%s.get_node_by_path: node«%s», arg `path`=«%s», cannot find node." % (self.__class__.__name__, self.name, path))
return None
return _node
def get_node_by_coord(self, coord, relative=False):
"""Get a node from a node coord.
:param coord: the coordinates of the required node.
:type coord: tuple or list
:param relative: `True` if coord is relative to the node instance,
`False` for absolute coordinates.
:type relative: bool
:returns: the node corresponding to `coord`.
:rtype: Node or None
"""
if not isinstance(coord, (list, tuple)) or False in list(map(lambda i: type(i)==int, coord)):
logger.warning("%s.get_node_by_coord: node«%s», arg «coord»=«%s», «coord» must be list or tuple of integers." % (self.__class__.__name__, self.name, coord))
return None
if relative:
_node = self
else:
_node = self._root # _node = self.get_rootnode()
for idx in coord:
_node = _node.childs[idx]
if _node is None:
logger.warning("%s.get_node_by_coord: node«%s», arg «coord»=«%s» not valid." % (self.__class__.__name__, self.name, coord))
return None
return _node
def find_one_node(self, *keys, value, decend=True):
"""Find a node on the branch of the instance with a
`keys=data` item in the `data` dict.
Nested values are accessed by specifying the keys in sequence.
e.g. `node.get_data("country", "city")` would access
`node.data["country"]["city"]`
:param keys: the `data` dict key(s) referencing the required value.
:type keys: str
:param value: the value corresponding to `keys`. Note that
`value` is a keyword-only argument.
:param decend: `decend=True` traverse down the branch sub-tree
starting from `self`. `decend=False` traverse up the
branch from `self` towards root.
:type decend: bool
:returns: the first node found with `keys=data` in the `data` dict.
:rtype: Node or None
"""
if decend:
traversal = self
else:
traversal = self._ancestors
for _node in traversal:
_val = _node.get_data(*keys)
if _val == value:
return _node
return None
def to_texttree(self, indent=3, func=True, symbol='ascii'):
"""Method returning a text representation of the (sub-)tree
rooted at the current node instance (`self`).
:param indent: the indentation width for each tree level.
:type indent: int
:param func: function returning a string representation for
each node. e.g. `func=lambda n: str(n._coord)`
would show the node coordinates.
`func=True` node.name displayed for each node.
`func=False` no node representation, just
the tree structure is displayed.
:type func: function or bool
:param symbol: tuple of tree symbol characters.
`None` or 'ascii' gives a preformed ascii tree, equivalent to tuple :code:`(|, +, |, |, |, -, .)`.
'box' preformed with box-drawing characters, equivalent to tuple :code:`(┬, └, ┬, ├, └, ─, ⋅)`.
'unicode' preformed with unicode characters.
:type symbol: tuple or str or None
:returns: a string representation of the tree.
:rtype: str
"""
if indent<2:
indent=2
if func is True: # default func prints node.name
func = lambda n: "{}".format(n.name)
if isinstance(symbol, (list, tuple)):
s_root, s_branch, s_spar, s_fnode = symbol
elif symbol=="unicode":
# ┬ └ ┬ ├ └ ─ ⋅
s_root, s_branch, s_fnode, s_mnode, s_lnode, s_spar, s_level = (
"\u252c", "\u2514", "\u252c", "\u251c", "\u2514", "\u2500", "\u22c5")
elif symbol=="box": # https://en.wikipedia.org/wiki/Box-drawing_character
# ┬ └ ┬ ├ └ ─ ⋅
s_root, s_branch, s_fnode, s_mnode, s_lnode, s_spar, s_level = (
"\u252c", "\u2514", "\u252c", "\u251c", "\u2514", "\u2500", "\u22c5")
else:
s_root, s_branch, s_fnode, s_mnode, s_lnode, s_spar, s_level = (
"|", "+", "|", "|", "|", "-", ".")
_text = ""
#local_root_level = len(self.ancestors)
local_root_level = self._level
for _n in self:
#level = len(node.ancestors) - local_root_level
level = _n._level - local_root_level
if level==0:
_text += s_root
elif _n.parent.childs[0] == _n and len(_n.parent.childs)>1: # first child
#s_spar="f"
_text += ( (s_level + " "*(indent-1))*(level-1)
+ s_branch
+ s_spar*(indent-1)
+ s_fnode)
elif _n.parent.childs[-1] == _n and len(_n.childs)==0: # last child, no children
#s_spar="l"
_text += ( (s_level + " "*(indent-1))*(level)
+ s_lnode )
elif _n.parent.childs[-1] == _n: # last child, has children
#s_spar="l"
_text += ( (s_level + " "*(indent-1))*(level)
+ s_mnode )
#+ s_spar*(indent-1) )
# elif level>0:
# _text += (s_level + " "*(indent-1))*(level-1) + s_branch + s_spar*(indent-1)
else:
#_text += s_fnode
#s_spar="m"
_text += ( (s_level + " "*(indent-1))*(level)
+ s_mnode )
#+ s_spar*(indent-1) )
if func and callable(func):
_text += func(_n)
_text += "\n"
return _text
def from_treedict(self, treedict):
if "data" in treedict:
#self.data = collections.defaultdict(dict, treedict["data"])
self.data = copy.deepcopy(treedict["data"])
for key, val in treedict.items():
if key in ["parent", "childs", "data"]:
continue
setattr(self, key, val)
if "childs" in treedict.keys():
for _childdict in treedict["childs"]:
#self.childs.append( self.__class__(parent=self, treedict=_childdict) )
self.__class__(parent=self, treedict=_childdict)
def to_treedict(self, recursive=True, vntree_meta=True):
# NOTE: replace vars(self) with self.__dict__ ( and self.__class__.__dict__ ?)
_dct = {k:v for k, v in vars(self).items() if k not in ["parent", "childs"]}
if not vntree_meta and "_vntree_meta" in _dct["data"]:
_dct["data"].pop("_vntree_meta")
if recursive and self.childs:
_dct["childs"] = []
for _child in self.childs:
_dct["childs"].append( _child.to_treedict(recursive=recursive) )
return _dct
def to_json(self, filepath, default=str):
pass
# def from_json(self, filepath):
# err = ""
# _treedict = None
# if isinstance(filepath, str) and os.path.isfile(filepath):
# try:
# with open(filepath, 'r') as _fh:
# _treedict = json.load(_fh)
# except Exception as err:
# pass
# if not _treedict:
# logger.warning("%s.from_json: node«%s», cannot open «filepath»=«%s», %s." % (self.__class__.__name__, self.name, filepath, err))
# return False
# else:
# self.from_treedict(treedict=_treedict)
# return True
def tree_compare(self, othertree, vntree_meta=False):
"""Compare the (sub-)tree rooted at `self` with another tree.
`tree_compare` converts the trees being compared into JSON string
representations, and uses `difflib.SequenceMatcher().ratio()` to
calculate a measure of the similarity of the strings.
:param othertree: the other tree for comparison.
:type othertree: Node
:param vntree_meta: include private vntree metadata in comparison.
:type vntree_meta: bool
:returns: similarity of the trees as a number between 0 and 1.
:rtype: float
"""
return SequenceMatcher(None,
json.dumps(self.to_treedict(vntree_meta=vntree_meta), default=str),
json.dumps(othertree.to_treedict(vntree_meta=vntree_meta), default=str)
).ratio()
def savefile(self, filepath=None):
"""Save (dump) the tree in a pickle file.
Note that this method saves the complete tree even when invoked on
a non-root node.
It is recommended to use the extension `.vnpkl` for this type of file.
:param filepath: the file path for the pickle file.
If `filepath=None` use `self._vnpkl_fpath` attribute, if set.
:type filepath: str or None
:returns: `True` if successful.
:rtype: bool
"""
if filepath:
self._vnpkl_fpath = os.path.abspath(filepath)
# if not _pfpath:
# logger.error("%s.save: «%s» file path «%s» not valid." % (self.__class__.__name__, self.name, _pfpath))
# return False
try:
with open(self._vnpkl_fpath, "wb") as pf:
pickle.dump(self._root.to_treedict(), pf)
except Exception as err:
logger.error("%s.savefile: arg `filepath`=«%s» `self._vnpkl_fpath`=«%s» error: %s" % (self.__class__.__name__, filepath, self._vnpkl_fpath, err))
return False
return True
@classmethod
def openfile(cls, filepath):
"""Class method that opens (load) a vntree pickle file.
:param filepath: the file path for the pickle file.
:type filepath: str
:returns: root node of tree or `False` if failure.
:rtype: Node or bool
"""
if not os.path.isfile(filepath):
logger.error("%s.openfile: arg `filepath`=«%s» not valid." % (cls.__name__, filepath))
return False
try:
with open(filepath, "rb") as pf:
pkldata = pickle.load(pf)
rootnode = cls(treedict=pkldata)
rootnode._vnpkl_fpath = os.path.abspath(filepath)
except Exception as err:
logger.error("%s.openfile: data in file «%s» not valid: %s" % (cls.__name__, filepath, err))
return False
return rootnode
@classmethod
def setup_yaml(cls):
def yamlnode_constructor(loader, yamlnode) :
fields = loader.construct_mapping(yamlnode, deep=True)
return cls(**fields)
yaml.SafeLoader.add_constructor('!'+cls.__name__, yamlnode_constructor)
# def yamlnode_representer(dumper, data):
# rep = '!Node'
# return dumper.represent_scalar('!Node', '%sd%s' % data)
# yaml.add_representer(cls, yamlnode_representer)
@classmethod
def yaml2tree(cls, yamltree):
"""Class method that creates a tree from YAML.
| # Example yamltree data:
| - !Node &root
| name: "root node"
| parent: null
| data:
| testpara: 111
| - !Node &child1
| name: "child node"
| parent: *root
| - !Node &gc1
| name: "grand-child node"
| parent: *child1
:param yamltree: a string of YAML describing the nodes in the
tree, or the path to a file containing the data.
:type yamltree: str
:returns: the root node of the tree.
:rtype: Node
"""
if not cls.YAML_setup:
cls.setup_yaml()
cls.YAML_setup = True
if os.path.isfile(yamltree):
with open(yamltree) as fh:
yaml_data = fh.read()
else:
yaml_data = yamltree
list_of_nodes = yaml.safe_load(yaml_data)
yamltree_root = list_of_nodes[0]
return yamltree_root
def tree2yaml(self):
# if not self.__class__.YAML_setup:
# self.__class__.setup_yaml()
# self.__class__.YAML_setup = True
def make_anchor(node):
# if node._root is node:
# anchor = "root"
# else:
anchor = "coord"
for _c in node._coord:
anchor += "-" + str(_c)
return anchor
yltree = "# dummy \n"
for _n in self:
_ncopy = _n.copy()
delattr(_ncopy, "childs")
delattr(_ncopy, "parent")
# if _n.parent:
# _ncopy.parent = "*" + make_anchor(_n.parent)
_yl = yaml.dump(_ncopy, default_flow_style=False)
_n_yl = '!'+_n.__class__.__name__ + " &" + make_anchor(_n)
if _n.parent:
_n_yl += "\nparent: " + "*" + make_anchor(_n.parent)
# else:
# _n_yl += "\n"
_n_yl += _yl[_yl.index("\n"):]
yltree += _n_yl
yltree = textwrap.indent(yltree, " ")
yltree = yltree.strip()
yltree = yltree.replace("\n !", "\n- !")
return yltree
|
qwilka/vn-tree
|
vntree/node.py
|
Node._root
|
python
|
def _root(self):
_n = self
while _n.parent:
_n = _n.parent
return _n
|
Attribute referencing the root node of the tree.
:returns: the root node of the tree containing this instance.
:rtype: Node
|
train
|
https://github.com/qwilka/vn-tree/blob/f08106e9c7232d8748d78d1d39b019699a7407dd/vntree/node.py#L296-L305
| null |
class Node:
"""Class for creating vntree nodes.
:param name: node name
:type name: str or None
:param parent: The parent node of this node.
:type parent: Node or None
:param data: Dictionary containing node data.
:type data: dict or None
:param treedict: Dictionary specifying a complete tree.
:type treedict: dict or None
"""
YAML_setup = False
name = NodeAttr()
_vnpkl_fpath = TreeAttr("_vntree_meta")
def __init__(self, name=None, parent=None, data=None,
treedict=None, vnpkl_fpath=None):
if data and isinstance(data, dict):
#self.data = collections.defaultdict(dict, copy.deepcopy(data))
self.data = copy.deepcopy(data)
else:
self.data = {}
if name:
self.name = str(name)
elif not getattr(self, "name", None) and name is None:
self.name = ""
self.childs = []
if parent and issubclass(parent.__class__, Node):
parent.add_child(self)
elif parent is None:
self.parent = None
else:
raise TypeError("{}.__init__: instance «{}» argument «parent» type not valid: {}".format(self.__class__.__name__, name, type(parent)))
if callable(name):
self.name = str(name(self))
if treedict and isinstance(treedict, dict):
self.from_treedict(treedict)
if vnpkl_fpath and isinstance(vnpkl_fpath, str):
self._vnpkl_fpath = vnpkl_fpath
def __str__(self):
return "{} coord={} «{}»".format(self.__class__.__name__, self._coord, self.name)
def __iter__(self):
yield self
for node in itertools.chain(*map(iter, self.childs)):
yield node
def __reversed__(self):
for node in itertools.chain(*map(reversed, self.childs)):
yield node
yield self
def add_child(self, node):
"""Add a child node to the current node instance.
:param node: the child node instance.
:type node: Node
:returns: The new child node instance.
:rtype: Node
"""
if not issubclass(node.__class__, Node):
raise TypeError("{}.add_child: arg «node»=«{}», type {} not valid.".format(self.__class__.__name__, node, type(node)))
self.childs.append(node)
node.parent = self
return node
def copy(self):
"""Return a deep copy of the sub-tree rooted at this node instance.
:returns: Copy of the sub-tree rooted at this node instance.
:rtype: Node
"""
return copy.deepcopy(self)
def remove_child(self, idx=None, *, name=None, node=None):
"""Remove a child node from the current node instance.
:param idx: Index of child node to be removed.
:type idx: int
:param name: The first child node found with «name» will be removed.
:type name: str
:param node: Child node to be removed.
:type node: Node
:returns: The node that has been removed, or False if not successful.
:rtype: Node or False
"""
if (idx and isinstance(idx, int) and
-len(self.childs) <= idx < len(self.childs) ):
return self.childs.pop(idx)
if name and isinstance(name, str):
found_node = None
for _n in self.childs:
if _n.name == name:
found_node = _n
break
if found_node:
self.childs.remove(found_node)
return found_node
if node and node in self.childs:
self.childs.remove(node)
return node
return False
@property
def _path(self):
"""Attribute indicating the absolute node path for this node.
Note that the absolute node path starts with a forward slash
followed by the root node's name: e.g:
`/root.name/child.name/grandchild.name`
Warning: it should be noted that use of _path assumes
that sibling nodes have unique names. If unique node paths
cannot be assured, use node attribute «_coord» instead.
:returns: The absolute node path for this node.
:rtype: str
"""
_path = pathlib.PurePosixPath(self.name)
_node = self
while _node.parent:
_path = _node.parent.name / _path
_node = _node.parent
_path = pathlib.posixpath.sep / _path
return _path.as_posix()
@property
def _coord(self):
"""Attribute indicating the tree coordinates for this node.
The tree coordinates of a node are expressed as a tuple of the
indices of the node and its ancestors, for example:
A grandchild node with node path
`/root.name/root.childs[2].name/root.childs[2].childs[0].name`
would have coordinates `(2,0)`.
The root node _coord is an empty tuple: `()`
:returns: the tree coordinates for this node.
:rtype: tuple
"""
_coord = []
_node = self
while _node.parent:
_idx = _node.parent.childs.index(_node)
_coord.insert(0, _idx)
_node = _node.parent
return tuple(_coord)
@property
def _level(self):
"""Attribute indicating the tree `level` for this node instance.
Note that the root node is defined as level 1.
:returns: the node `level`.
:rtype: int
"""
return len(self._coord) + 1
def get_data(self, *keys):
"""Get a value from the instance `data` dict.
Nested values are accessed by specifying the keys in sequence.
e.g. `node.get_data("country", "city")` would access
`node.data["country"]["city"]`
:param keys: the `data` dict keys referencing the required value.
:type keys: str
:returns: the value accessed by `keys` in `data`.
"""
if not keys:
_val = self.data
_datadict = self.data
for _key in keys:
_val = _datadict.get(_key, None)
if isinstance(_val, dict):
_datadict = _val
else:
break
if isinstance(_val, dict):
_val = copy.deepcopy(_val)
return _val
def set_data(self, *keys, value):
"""Set a value in the instance `data` dict.
:param keys: the `data` dict keys referencing the value in the `data` dict.
:type keys: str
:param value: the value to be set in the `data` dict. Note that
`value` is a keyword-only argument.
:returns: `True` if successful.
"""
_datadict = self.data
for ii, _key in enumerate(keys):
if ii==len(keys)-1:
_datadict[_key] = value
else:
if _key not in _datadict:
_datadict[_key] = {}
_datadict = _datadict[_key]
return True
@property
@property
def _ancestors(self):
"""Attribute referencing the tree ancestors of the node instance.
:returns: list of node ancestors in sequence, first item is
the current node instance (`self`), the last item is root.
:rtype: list of Node references
"""
# return list of ancestor nodes starting with self.parent and ending with root
_ancestors=[]
_n = self
while _n.parent:
_n = _n.parent
_ancestors.append(_n)
return _ancestors
def get_child_by_name(self, childname):
"""Get a child node of the current instance by its name.
:param childname: the name of the required child node.
:type childname: str
:returns: the first child node found with name `childname`.
:rtype: Node or None
"""
_childs = [_child for _child in self.childs if _child.name==childname]
if len(_childs)>1:
logger.warning("%s.get_child_by_name: node:«%s» has more than 1 childnode with name=«%s»." % (self.__class__.__name__, self.name, childname))
if len(_childs)==0:
_childnode = None
else:
_childnode = _childs[0]
return _childnode
def get_node_by_path(self, path):
"""Get a node from a node path.
Warning: use of this method assumes that sibling nodes have unique names,
if this is not assured the `get_node_by_coord` method can be used instead.
| Example with absolute node path:
| `node.get_node_by_path('/root.name/child.name/gchild.name')`
| Example with relative node path:
| `node.get_node_by_path('child.name/gchild.name')`
:param path: the absolute node path, or the node path relative
to the current node instance.
:type path: str
:returns: the node corresponding to `path`.
:rtype: Node or None
"""
if path==".":
return self
elif path.lstrip().startswith((".", "./")) or not isinstance(path, str):
logger.warning("%s.get_node_by_path: arg «path»=«%s», not correctly specified." % (self.__class__.__name__, path))
return None
_pathlist = list(filter(None, path.split("/")) ) # remove blank strings
if path.startswith("/"):
_node = self._root
_pathlist.pop(0) # remove rootnode name
else:
_node = self
for _nodename in _pathlist:
_node = _node.get_child_by_name(_nodename)
if _node is None:
logger.warning("%s.get_node_by_path: node«%s», arg `path`=«%s», cannot find node." % (self.__class__.__name__, self.name, path))
return None
return _node
def get_node_by_coord(self, coord, relative=False):
"""Get a node from a node coord.
:param coord: the coordinates of the required node.
:type coord: tuple or list
:param relative: `True` if coord is relative to the node instance,
`False` for absolute coordinates.
:type relative: bool
:returns: the node corresponding to `coord`.
:rtype: Node or None
"""
if not isinstance(coord, (list, tuple)) or False in list(map(lambda i: type(i)==int, coord)):
logger.warning("%s.get_node_by_coord: node«%s», arg «coord»=«%s», «coord» must be list or tuple of integers." % (self.__class__.__name__, self.name, coord))
return None
if relative:
_node = self
else:
_node = self._root # _node = self.get_rootnode()
for idx in coord:
_node = _node.childs[idx]
if _node is None:
logger.warning("%s.get_node_by_coord: node«%s», arg «coord»=«%s» not valid." % (self.__class__.__name__, self.name, coord))
return None
return _node
def find_one_node(self, *keys, value, decend=True):
"""Find a node on the branch of the instance with a
`keys=data` item in the `data` dict.
Nested values are accessed by specifying the keys in sequence.
e.g. `node.get_data("country", "city")` would access
`node.data["country"]["city"]`
:param keys: the `data` dict key(s) referencing the required value.
:type keys: str
:param value: the value corresponding to `keys`. Note that
`value` is a keyword-only argument.
:param decend: `decend=True` traverse down the branch sub-tree
starting from `self`. `decend=False` traverse up the
branch from `self` towards root.
:type decend: bool
:returns: the first node found with `keys=data` in the `data` dict.
:rtype: Node or None
"""
if decend:
traversal = self
else:
traversal = self._ancestors
for _node in traversal:
_val = _node.get_data(*keys)
if _val == value:
return _node
return None
def to_texttree(self, indent=3, func=True, symbol='ascii'):
"""Method returning a text representation of the (sub-)tree
rooted at the current node instance (`self`).
:param indent: the indentation width for each tree level.
:type indent: int
:param func: function returning a string representation for
each node. e.g. `func=lambda n: str(n._coord)`
would show the node coordinates.
`func=True` node.name displayed for each node.
`func=False` no node representation, just
the tree structure is displayed.
:type func: function or bool
:param symbol: tuple of tree symbol characters.
`None` or 'ascii' gives a preformed ascii tree, equivalent to tuple :code:`(|, +, |, |, |, -, .)`.
'box' preformed with box-drawing characters, equivalent to tuple :code:`(┬, └, ┬, ├, └, ─, ⋅)`.
'unicode' preformed with unicode characters.
:type symbol: tuple or str or None
:returns: a string representation of the tree.
:rtype: str
"""
if indent<2:
indent=2
if func is True: # default func prints node.name
func = lambda n: "{}".format(n.name)
if isinstance(symbol, (list, tuple)):
s_root, s_branch, s_spar, s_fnode = symbol
elif symbol=="unicode":
# ┬ └ ┬ ├ └ ─ ⋅
s_root, s_branch, s_fnode, s_mnode, s_lnode, s_spar, s_level = (
"\u252c", "\u2514", "\u252c", "\u251c", "\u2514", "\u2500", "\u22c5")
elif symbol=="box": # https://en.wikipedia.org/wiki/Box-drawing_character
# ┬ └ ┬ ├ └ ─ ⋅
s_root, s_branch, s_fnode, s_mnode, s_lnode, s_spar, s_level = (
"\u252c", "\u2514", "\u252c", "\u251c", "\u2514", "\u2500", "\u22c5")
else:
s_root, s_branch, s_fnode, s_mnode, s_lnode, s_spar, s_level = (
"|", "+", "|", "|", "|", "-", ".")
_text = ""
#local_root_level = len(self.ancestors)
local_root_level = self._level
for _n in self:
#level = len(node.ancestors) - local_root_level
level = _n._level - local_root_level
if level==0:
_text += s_root
elif _n.parent.childs[0] == _n and len(_n.parent.childs)>1: # first child
#s_spar="f"
_text += ( (s_level + " "*(indent-1))*(level-1)
+ s_branch
+ s_spar*(indent-1)
+ s_fnode)
elif _n.parent.childs[-1] == _n and len(_n.childs)==0: # last child, no children
#s_spar="l"
_text += ( (s_level + " "*(indent-1))*(level)
+ s_lnode )
elif _n.parent.childs[-1] == _n: # last child, has children
#s_spar="l"
_text += ( (s_level + " "*(indent-1))*(level)
+ s_mnode )
#+ s_spar*(indent-1) )
# elif level>0:
# _text += (s_level + " "*(indent-1))*(level-1) + s_branch + s_spar*(indent-1)
else:
#_text += s_fnode
#s_spar="m"
_text += ( (s_level + " "*(indent-1))*(level)
+ s_mnode )
#+ s_spar*(indent-1) )
if func and callable(func):
_text += func(_n)
_text += "\n"
return _text
def from_treedict(self, treedict):
if "data" in treedict:
#self.data = collections.defaultdict(dict, treedict["data"])
self.data = copy.deepcopy(treedict["data"])
for key, val in treedict.items():
if key in ["parent", "childs", "data"]:
continue
setattr(self, key, val)
if "childs" in treedict.keys():
for _childdict in treedict["childs"]:
#self.childs.append( self.__class__(parent=self, treedict=_childdict) )
self.__class__(parent=self, treedict=_childdict)
def to_treedict(self, recursive=True, vntree_meta=True):
# NOTE: replace vars(self) with self.__dict__ ( and self.__class__.__dict__ ?)
_dct = {k:v for k, v in vars(self).items() if k not in ["parent", "childs"]}
if not vntree_meta and "_vntree_meta" in _dct["data"]:
_dct["data"].pop("_vntree_meta")
if recursive and self.childs:
_dct["childs"] = []
for _child in self.childs:
_dct["childs"].append( _child.to_treedict(recursive=recursive) )
return _dct
def to_json(self, filepath, default=str):
pass
# def from_json(self, filepath):
# err = ""
# _treedict = None
# if isinstance(filepath, str) and os.path.isfile(filepath):
# try:
# with open(filepath, 'r') as _fh:
# _treedict = json.load(_fh)
# except Exception as err:
# pass
# if not _treedict:
# logger.warning("%s.from_json: node«%s», cannot open «filepath»=«%s», %s." % (self.__class__.__name__, self.name, filepath, err))
# return False
# else:
# self.from_treedict(treedict=_treedict)
# return True
def tree_compare(self, othertree, vntree_meta=False):
"""Compare the (sub-)tree rooted at `self` with another tree.
`tree_compare` converts the trees being compared into JSON string
representations, and uses `difflib.SequenceMatcher().ratio()` to
calculate a measure of the similarity of the strings.
:param othertree: the other tree for comparison.
:type othertree: Node
:param vntree_meta: include private vntree metadata in comparison.
:type vntree_meta: bool
:returns: similarity of the trees as a number between 0 and 1.
:rtype: float
"""
return SequenceMatcher(None,
json.dumps(self.to_treedict(vntree_meta=vntree_meta), default=str),
json.dumps(othertree.to_treedict(vntree_meta=vntree_meta), default=str)
).ratio()
def savefile(self, filepath=None):
"""Save (dump) the tree in a pickle file.
Note that this method saves the complete tree even when invoked on
a non-root node.
It is recommended to use the extension `.vnpkl` for this type of file.
:param filepath: the file path for the pickle file.
If `filepath=None` use `self._vnpkl_fpath` attribute, if set.
:type filepath: str or None
:returns: `True` if successful.
:rtype: bool
"""
if filepath:
self._vnpkl_fpath = os.path.abspath(filepath)
# if not _pfpath:
# logger.error("%s.save: «%s» file path «%s» not valid." % (self.__class__.__name__, self.name, _pfpath))
# return False
try:
with open(self._vnpkl_fpath, "wb") as pf:
pickle.dump(self._root.to_treedict(), pf)
except Exception as err:
logger.error("%s.savefile: arg `filepath`=«%s» `self._vnpkl_fpath`=«%s» error: %s" % (self.__class__.__name__, filepath, self._vnpkl_fpath, err))
return False
return True
@classmethod
def openfile(cls, filepath):
"""Class method that opens (load) a vntree pickle file.
:param filepath: the file path for the pickle file.
:type filepath: str
:returns: root node of tree or `False` if failure.
:rtype: Node or bool
"""
if not os.path.isfile(filepath):
logger.error("%s.openfile: arg `filepath`=«%s» not valid." % (cls.__name__, filepath))
return False
try:
with open(filepath, "rb") as pf:
pkldata = pickle.load(pf)
rootnode = cls(treedict=pkldata)
rootnode._vnpkl_fpath = os.path.abspath(filepath)
except Exception as err:
logger.error("%s.openfile: data in file «%s» not valid: %s" % (cls.__name__, filepath, err))
return False
return rootnode
@classmethod
def setup_yaml(cls):
def yamlnode_constructor(loader, yamlnode) :
fields = loader.construct_mapping(yamlnode, deep=True)
return cls(**fields)
yaml.SafeLoader.add_constructor('!'+cls.__name__, yamlnode_constructor)
# def yamlnode_representer(dumper, data):
# rep = '!Node'
# return dumper.represent_scalar('!Node', '%sd%s' % data)
# yaml.add_representer(cls, yamlnode_representer)
@classmethod
def yaml2tree(cls, yamltree):
"""Class method that creates a tree from YAML.
| # Example yamltree data:
| - !Node &root
| name: "root node"
| parent: null
| data:
| testpara: 111
| - !Node &child1
| name: "child node"
| parent: *root
| - !Node &gc1
| name: "grand-child node"
| parent: *child1
:param yamltree: a string of YAML describing the nodes in the
tree, or the path to a file containing the data.
:type yamltree: str
:returns: the root node of the tree.
:rtype: Node
"""
if not cls.YAML_setup:
cls.setup_yaml()
cls.YAML_setup = True
if os.path.isfile(yamltree):
with open(yamltree) as fh:
yaml_data = fh.read()
else:
yaml_data = yamltree
list_of_nodes = yaml.safe_load(yaml_data)
yamltree_root = list_of_nodes[0]
return yamltree_root
def tree2yaml(self):
# if not self.__class__.YAML_setup:
# self.__class__.setup_yaml()
# self.__class__.YAML_setup = True
def make_anchor(node):
# if node._root is node:
# anchor = "root"
# else:
anchor = "coord"
for _c in node._coord:
anchor += "-" + str(_c)
return anchor
yltree = "# dummy \n"
for _n in self:
_ncopy = _n.copy()
delattr(_ncopy, "childs")
delattr(_ncopy, "parent")
# if _n.parent:
# _ncopy.parent = "*" + make_anchor(_n.parent)
_yl = yaml.dump(_ncopy, default_flow_style=False)
_n_yl = '!'+_n.__class__.__name__ + " &" + make_anchor(_n)
if _n.parent:
_n_yl += "\nparent: " + "*" + make_anchor(_n.parent)
# else:
# _n_yl += "\n"
_n_yl += _yl[_yl.index("\n"):]
yltree += _n_yl
yltree = textwrap.indent(yltree, " ")
yltree = yltree.strip()
yltree = yltree.replace("\n !", "\n- !")
return yltree
|
qwilka/vn-tree
|
vntree/node.py
|
Node._ancestors
|
python
|
def _ancestors(self):
# return list of ancestor nodes starting with self.parent and ending with root
_ancestors=[]
_n = self
while _n.parent:
_n = _n.parent
_ancestors.append(_n)
return _ancestors
|
Attribute referencing the tree ancestors of the node instance.
:returns: list of node ancestors in sequence, first item is
the current node instance (`self`), the last item is root.
:rtype: list of Node references
|
train
|
https://github.com/qwilka/vn-tree/blob/f08106e9c7232d8748d78d1d39b019699a7407dd/vntree/node.py#L309-L322
| null |
class Node:
"""Class for creating vntree nodes.
:param name: node name
:type name: str or None
:param parent: The parent node of this node.
:type parent: Node or None
:param data: Dictionary containing node data.
:type data: dict or None
:param treedict: Dictionary specifying a complete tree.
:type treedict: dict or None
"""
YAML_setup = False
name = NodeAttr()
_vnpkl_fpath = TreeAttr("_vntree_meta")
def __init__(self, name=None, parent=None, data=None,
treedict=None, vnpkl_fpath=None):
if data and isinstance(data, dict):
#self.data = collections.defaultdict(dict, copy.deepcopy(data))
self.data = copy.deepcopy(data)
else:
self.data = {}
if name:
self.name = str(name)
elif not getattr(self, "name", None) and name is None:
self.name = ""
self.childs = []
if parent and issubclass(parent.__class__, Node):
parent.add_child(self)
elif parent is None:
self.parent = None
else:
raise TypeError("{}.__init__: instance «{}» argument «parent» type not valid: {}".format(self.__class__.__name__, name, type(parent)))
if callable(name):
self.name = str(name(self))
if treedict and isinstance(treedict, dict):
self.from_treedict(treedict)
if vnpkl_fpath and isinstance(vnpkl_fpath, str):
self._vnpkl_fpath = vnpkl_fpath
def __str__(self):
return "{} coord={} «{}»".format(self.__class__.__name__, self._coord, self.name)
def __iter__(self):
yield self
for node in itertools.chain(*map(iter, self.childs)):
yield node
def __reversed__(self):
for node in itertools.chain(*map(reversed, self.childs)):
yield node
yield self
def add_child(self, node):
"""Add a child node to the current node instance.
:param node: the child node instance.
:type node: Node
:returns: The new child node instance.
:rtype: Node
"""
if not issubclass(node.__class__, Node):
raise TypeError("{}.add_child: arg «node»=«{}», type {} not valid.".format(self.__class__.__name__, node, type(node)))
self.childs.append(node)
node.parent = self
return node
def copy(self):
"""Return a deep copy of the sub-tree rooted at this node instance.
:returns: Copy of the sub-tree rooted at this node instance.
:rtype: Node
"""
return copy.deepcopy(self)
def remove_child(self, idx=None, *, name=None, node=None):
"""Remove a child node from the current node instance.
:param idx: Index of child node to be removed.
:type idx: int
:param name: The first child node found with «name» will be removed.
:type name: str
:param node: Child node to be removed.
:type node: Node
:returns: The node that has been removed, or False if not successful.
:rtype: Node or False
"""
if (idx and isinstance(idx, int) and
-len(self.childs) <= idx < len(self.childs) ):
return self.childs.pop(idx)
if name and isinstance(name, str):
found_node = None
for _n in self.childs:
if _n.name == name:
found_node = _n
break
if found_node:
self.childs.remove(found_node)
return found_node
if node and node in self.childs:
self.childs.remove(node)
return node
return False
@property
def _path(self):
"""Attribute indicating the absolute node path for this node.
Note that the absolute node path starts with a forward slash
followed by the root node's name: e.g:
`/root.name/child.name/grandchild.name`
Warning: it should be noted that use of _path assumes
that sibling nodes have unique names. If unique node paths
cannot be assured, use node attribute «_coord» instead.
:returns: The absolute node path for this node.
:rtype: str
"""
_path = pathlib.PurePosixPath(self.name)
_node = self
while _node.parent:
_path = _node.parent.name / _path
_node = _node.parent
_path = pathlib.posixpath.sep / _path
return _path.as_posix()
@property
def _coord(self):
"""Attribute indicating the tree coordinates for this node.
The tree coordinates of a node are expressed as a tuple of the
indices of the node and its ancestors, for example:
A grandchild node with node path
`/root.name/root.childs[2].name/root.childs[2].childs[0].name`
would have coordinates `(2,0)`.
The root node _coord is an empty tuple: `()`
:returns: the tree coordinates for this node.
:rtype: tuple
"""
_coord = []
_node = self
while _node.parent:
_idx = _node.parent.childs.index(_node)
_coord.insert(0, _idx)
_node = _node.parent
return tuple(_coord)
@property
def _level(self):
"""Attribute indicating the tree `level` for this node instance.
Note that the root node is defined as level 1.
:returns: the node `level`.
:rtype: int
"""
return len(self._coord) + 1
def get_data(self, *keys):
"""Get a value from the instance `data` dict.
Nested values are accessed by specifying the keys in sequence.
e.g. `node.get_data("country", "city")` would access
`node.data["country"]["city"]`
:param keys: the `data` dict keys referencing the required value.
:type keys: str
:returns: the value accessed by `keys` in `data`.
"""
if not keys:
_val = self.data
_datadict = self.data
for _key in keys:
_val = _datadict.get(_key, None)
if isinstance(_val, dict):
_datadict = _val
else:
break
if isinstance(_val, dict):
_val = copy.deepcopy(_val)
return _val
def set_data(self, *keys, value):
"""Set a value in the instance `data` dict.
:param keys: the `data` dict keys referencing the value in the `data` dict.
:type keys: str
:param value: the value to be set in the `data` dict. Note that
`value` is a keyword-only argument.
:returns: `True` if successful.
"""
_datadict = self.data
for ii, _key in enumerate(keys):
if ii==len(keys)-1:
_datadict[_key] = value
else:
if _key not in _datadict:
_datadict[_key] = {}
_datadict = _datadict[_key]
return True
@property
def _root(self):
"""Attribute referencing the root node of the tree.
:returns: the root node of the tree containing this instance.
:rtype: Node
"""
_n = self
while _n.parent:
_n = _n.parent
return _n
@property
def get_child_by_name(self, childname):
"""Get a child node of the current instance by its name.
:param childname: the name of the required child node.
:type childname: str
:returns: the first child node found with name `childname`.
:rtype: Node or None
"""
_childs = [_child for _child in self.childs if _child.name==childname]
if len(_childs)>1:
logger.warning("%s.get_child_by_name: node:«%s» has more than 1 childnode with name=«%s»." % (self.__class__.__name__, self.name, childname))
if len(_childs)==0:
_childnode = None
else:
_childnode = _childs[0]
return _childnode
def get_node_by_path(self, path):
"""Get a node from a node path.
Warning: use of this method assumes that sibling nodes have unique names,
if this is not assured the `get_node_by_coord` method can be used instead.
| Example with absolute node path:
| `node.get_node_by_path('/root.name/child.name/gchild.name')`
| Example with relative node path:
| `node.get_node_by_path('child.name/gchild.name')`
:param path: the absolute node path, or the node path relative
to the current node instance.
:type path: str
:returns: the node corresponding to `path`.
:rtype: Node or None
"""
if path==".":
return self
elif path.lstrip().startswith((".", "./")) or not isinstance(path, str):
logger.warning("%s.get_node_by_path: arg «path»=«%s», not correctly specified." % (self.__class__.__name__, path))
return None
_pathlist = list(filter(None, path.split("/")) ) # remove blank strings
if path.startswith("/"):
_node = self._root
_pathlist.pop(0) # remove rootnode name
else:
_node = self
for _nodename in _pathlist:
_node = _node.get_child_by_name(_nodename)
if _node is None:
logger.warning("%s.get_node_by_path: node«%s», arg `path`=«%s», cannot find node." % (self.__class__.__name__, self.name, path))
return None
return _node
def get_node_by_coord(self, coord, relative=False):
"""Get a node from a node coord.
:param coord: the coordinates of the required node.
:type coord: tuple or list
:param relative: `True` if coord is relative to the node instance,
`False` for absolute coordinates.
:type relative: bool
:returns: the node corresponding to `coord`.
:rtype: Node or None
"""
if not isinstance(coord, (list, tuple)) or False in list(map(lambda i: type(i)==int, coord)):
logger.warning("%s.get_node_by_coord: node«%s», arg «coord»=«%s», «coord» must be list or tuple of integers." % (self.__class__.__name__, self.name, coord))
return None
if relative:
_node = self
else:
_node = self._root # _node = self.get_rootnode()
for idx in coord:
_node = _node.childs[idx]
if _node is None:
logger.warning("%s.get_node_by_coord: node«%s», arg «coord»=«%s» not valid." % (self.__class__.__name__, self.name, coord))
return None
return _node
def find_one_node(self, *keys, value, decend=True):
"""Find a node on the branch of the instance with a
`keys=data` item in the `data` dict.
Nested values are accessed by specifying the keys in sequence.
e.g. `node.get_data("country", "city")` would access
`node.data["country"]["city"]`
:param keys: the `data` dict key(s) referencing the required value.
:type keys: str
:param value: the value corresponding to `keys`. Note that
`value` is a keyword-only argument.
:param decend: `decend=True` traverse down the branch sub-tree
starting from `self`. `decend=False` traverse up the
branch from `self` towards root.
:type decend: bool
:returns: the first node found with `keys=data` in the `data` dict.
:rtype: Node or None
"""
if decend:
traversal = self
else:
traversal = self._ancestors
for _node in traversal:
_val = _node.get_data(*keys)
if _val == value:
return _node
return None
def to_texttree(self, indent=3, func=True, symbol='ascii'):
"""Method returning a text representation of the (sub-)tree
rooted at the current node instance (`self`).
:param indent: the indentation width for each tree level.
:type indent: int
:param func: function returning a string representation for
each node. e.g. `func=lambda n: str(n._coord)`
would show the node coordinates.
`func=True` node.name displayed for each node.
`func=False` no node representation, just
the tree structure is displayed.
:type func: function or bool
:param symbol: tuple of tree symbol characters.
`None` or 'ascii' gives a preformed ascii tree, equivalent to tuple :code:`(|, +, |, |, |, -, .)`.
'box' preformed with box-drawing characters, equivalent to tuple :code:`(┬, └, ┬, ├, └, ─, ⋅)`.
'unicode' preformed with unicode characters.
:type symbol: tuple or str or None
:returns: a string representation of the tree.
:rtype: str
"""
if indent<2:
indent=2
if func is True: # default func prints node.name
func = lambda n: "{}".format(n.name)
if isinstance(symbol, (list, tuple)):
s_root, s_branch, s_spar, s_fnode = symbol
elif symbol=="unicode":
# ┬ └ ┬ ├ └ ─ ⋅
s_root, s_branch, s_fnode, s_mnode, s_lnode, s_spar, s_level = (
"\u252c", "\u2514", "\u252c", "\u251c", "\u2514", "\u2500", "\u22c5")
elif symbol=="box": # https://en.wikipedia.org/wiki/Box-drawing_character
# ┬ └ ┬ ├ └ ─ ⋅
s_root, s_branch, s_fnode, s_mnode, s_lnode, s_spar, s_level = (
"\u252c", "\u2514", "\u252c", "\u251c", "\u2514", "\u2500", "\u22c5")
else:
s_root, s_branch, s_fnode, s_mnode, s_lnode, s_spar, s_level = (
"|", "+", "|", "|", "|", "-", ".")
_text = ""
#local_root_level = len(self.ancestors)
local_root_level = self._level
for _n in self:
#level = len(node.ancestors) - local_root_level
level = _n._level - local_root_level
if level==0:
_text += s_root
elif _n.parent.childs[0] == _n and len(_n.parent.childs)>1: # first child
#s_spar="f"
_text += ( (s_level + " "*(indent-1))*(level-1)
+ s_branch
+ s_spar*(indent-1)
+ s_fnode)
elif _n.parent.childs[-1] == _n and len(_n.childs)==0: # last child, no children
#s_spar="l"
_text += ( (s_level + " "*(indent-1))*(level)
+ s_lnode )
elif _n.parent.childs[-1] == _n: # last child, has children
#s_spar="l"
_text += ( (s_level + " "*(indent-1))*(level)
+ s_mnode )
#+ s_spar*(indent-1) )
# elif level>0:
# _text += (s_level + " "*(indent-1))*(level-1) + s_branch + s_spar*(indent-1)
else:
#_text += s_fnode
#s_spar="m"
_text += ( (s_level + " "*(indent-1))*(level)
+ s_mnode )
#+ s_spar*(indent-1) )
if func and callable(func):
_text += func(_n)
_text += "\n"
return _text
def from_treedict(self, treedict):
if "data" in treedict:
#self.data = collections.defaultdict(dict, treedict["data"])
self.data = copy.deepcopy(treedict["data"])
for key, val in treedict.items():
if key in ["parent", "childs", "data"]:
continue
setattr(self, key, val)
if "childs" in treedict.keys():
for _childdict in treedict["childs"]:
#self.childs.append( self.__class__(parent=self, treedict=_childdict) )
self.__class__(parent=self, treedict=_childdict)
def to_treedict(self, recursive=True, vntree_meta=True):
# NOTE: replace vars(self) with self.__dict__ ( and self.__class__.__dict__ ?)
_dct = {k:v for k, v in vars(self).items() if k not in ["parent", "childs"]}
if not vntree_meta and "_vntree_meta" in _dct["data"]:
_dct["data"].pop("_vntree_meta")
if recursive and self.childs:
_dct["childs"] = []
for _child in self.childs:
_dct["childs"].append( _child.to_treedict(recursive=recursive) )
return _dct
def to_json(self, filepath, default=str):
pass
# def from_json(self, filepath):
# err = ""
# _treedict = None
# if isinstance(filepath, str) and os.path.isfile(filepath):
# try:
# with open(filepath, 'r') as _fh:
# _treedict = json.load(_fh)
# except Exception as err:
# pass
# if not _treedict:
# logger.warning("%s.from_json: node«%s», cannot open «filepath»=«%s», %s." % (self.__class__.__name__, self.name, filepath, err))
# return False
# else:
# self.from_treedict(treedict=_treedict)
# return True
def tree_compare(self, othertree, vntree_meta=False):
"""Compare the (sub-)tree rooted at `self` with another tree.
`tree_compare` converts the trees being compared into JSON string
representations, and uses `difflib.SequenceMatcher().ratio()` to
calculate a measure of the similarity of the strings.
:param othertree: the other tree for comparison.
:type othertree: Node
:param vntree_meta: include private vntree metadata in comparison.
:type vntree_meta: bool
:returns: similarity of the trees as a number between 0 and 1.
:rtype: float
"""
return SequenceMatcher(None,
json.dumps(self.to_treedict(vntree_meta=vntree_meta), default=str),
json.dumps(othertree.to_treedict(vntree_meta=vntree_meta), default=str)
).ratio()
def savefile(self, filepath=None):
"""Save (dump) the tree in a pickle file.
Note that this method saves the complete tree even when invoked on
a non-root node.
It is recommended to use the extension `.vnpkl` for this type of file.
:param filepath: the file path for the pickle file.
If `filepath=None` use `self._vnpkl_fpath` attribute, if set.
:type filepath: str or None
:returns: `True` if successful.
:rtype: bool
"""
if filepath:
self._vnpkl_fpath = os.path.abspath(filepath)
# if not _pfpath:
# logger.error("%s.save: «%s» file path «%s» not valid." % (self.__class__.__name__, self.name, _pfpath))
# return False
try:
with open(self._vnpkl_fpath, "wb") as pf:
pickle.dump(self._root.to_treedict(), pf)
except Exception as err:
logger.error("%s.savefile: arg `filepath`=«%s» `self._vnpkl_fpath`=«%s» error: %s" % (self.__class__.__name__, filepath, self._vnpkl_fpath, err))
return False
return True
@classmethod
def openfile(cls, filepath):
"""Class method that opens (load) a vntree pickle file.
:param filepath: the file path for the pickle file.
:type filepath: str
:returns: root node of tree or `False` if failure.
:rtype: Node or bool
"""
if not os.path.isfile(filepath):
logger.error("%s.openfile: arg `filepath`=«%s» not valid." % (cls.__name__, filepath))
return False
try:
with open(filepath, "rb") as pf:
pkldata = pickle.load(pf)
rootnode = cls(treedict=pkldata)
rootnode._vnpkl_fpath = os.path.abspath(filepath)
except Exception as err:
logger.error("%s.openfile: data in file «%s» not valid: %s" % (cls.__name__, filepath, err))
return False
return rootnode
@classmethod
def setup_yaml(cls):
def yamlnode_constructor(loader, yamlnode) :
fields = loader.construct_mapping(yamlnode, deep=True)
return cls(**fields)
yaml.SafeLoader.add_constructor('!'+cls.__name__, yamlnode_constructor)
# def yamlnode_representer(dumper, data):
# rep = '!Node'
# return dumper.represent_scalar('!Node', '%sd%s' % data)
# yaml.add_representer(cls, yamlnode_representer)
@classmethod
def yaml2tree(cls, yamltree):
"""Class method that creates a tree from YAML.
| # Example yamltree data:
| - !Node &root
| name: "root node"
| parent: null
| data:
| testpara: 111
| - !Node &child1
| name: "child node"
| parent: *root
| - !Node &gc1
| name: "grand-child node"
| parent: *child1
:param yamltree: a string of YAML describing the nodes in the
tree, or the path to a file containing the data.
:type yamltree: str
:returns: the root node of the tree.
:rtype: Node
"""
if not cls.YAML_setup:
cls.setup_yaml()
cls.YAML_setup = True
if os.path.isfile(yamltree):
with open(yamltree) as fh:
yaml_data = fh.read()
else:
yaml_data = yamltree
list_of_nodes = yaml.safe_load(yaml_data)
yamltree_root = list_of_nodes[0]
return yamltree_root
def tree2yaml(self):
# if not self.__class__.YAML_setup:
# self.__class__.setup_yaml()
# self.__class__.YAML_setup = True
def make_anchor(node):
# if node._root is node:
# anchor = "root"
# else:
anchor = "coord"
for _c in node._coord:
anchor += "-" + str(_c)
return anchor
yltree = "# dummy \n"
for _n in self:
_ncopy = _n.copy()
delattr(_ncopy, "childs")
delattr(_ncopy, "parent")
# if _n.parent:
# _ncopy.parent = "*" + make_anchor(_n.parent)
_yl = yaml.dump(_ncopy, default_flow_style=False)
_n_yl = '!'+_n.__class__.__name__ + " &" + make_anchor(_n)
if _n.parent:
_n_yl += "\nparent: " + "*" + make_anchor(_n.parent)
# else:
# _n_yl += "\n"
_n_yl += _yl[_yl.index("\n"):]
yltree += _n_yl
yltree = textwrap.indent(yltree, " ")
yltree = yltree.strip()
yltree = yltree.replace("\n !", "\n- !")
return yltree
|
qwilka/vn-tree
|
vntree/node.py
|
Node.get_child_by_name
|
python
|
def get_child_by_name(self, childname):
_childs = [_child for _child in self.childs if _child.name==childname]
if len(_childs)>1:
logger.warning("%s.get_child_by_name: node:«%s» has more than 1 childnode with name=«%s»." % (self.__class__.__name__, self.name, childname))
if len(_childs)==0:
_childnode = None
else:
_childnode = _childs[0]
return _childnode
|
Get a child node of the current instance by its name.
:param childname: the name of the required child node.
:type childname: str
:returns: the first child node found with name `childname`.
:rtype: Node or None
|
train
|
https://github.com/qwilka/vn-tree/blob/f08106e9c7232d8748d78d1d39b019699a7407dd/vntree/node.py#L325-L340
| null |
class Node:
"""Class for creating vntree nodes.
:param name: node name
:type name: str or None
:param parent: The parent node of this node.
:type parent: Node or None
:param data: Dictionary containing node data.
:type data: dict or None
:param treedict: Dictionary specifying a complete tree.
:type treedict: dict or None
"""
YAML_setup = False
name = NodeAttr()
_vnpkl_fpath = TreeAttr("_vntree_meta")
def __init__(self, name=None, parent=None, data=None,
treedict=None, vnpkl_fpath=None):
if data and isinstance(data, dict):
#self.data = collections.defaultdict(dict, copy.deepcopy(data))
self.data = copy.deepcopy(data)
else:
self.data = {}
if name:
self.name = str(name)
elif not getattr(self, "name", None) and name is None:
self.name = ""
self.childs = []
if parent and issubclass(parent.__class__, Node):
parent.add_child(self)
elif parent is None:
self.parent = None
else:
raise TypeError("{}.__init__: instance «{}» argument «parent» type not valid: {}".format(self.__class__.__name__, name, type(parent)))
if callable(name):
self.name = str(name(self))
if treedict and isinstance(treedict, dict):
self.from_treedict(treedict)
if vnpkl_fpath and isinstance(vnpkl_fpath, str):
self._vnpkl_fpath = vnpkl_fpath
def __str__(self):
return "{} coord={} «{}»".format(self.__class__.__name__, self._coord, self.name)
def __iter__(self):
yield self
for node in itertools.chain(*map(iter, self.childs)):
yield node
def __reversed__(self):
for node in itertools.chain(*map(reversed, self.childs)):
yield node
yield self
def add_child(self, node):
"""Add a child node to the current node instance.
:param node: the child node instance.
:type node: Node
:returns: The new child node instance.
:rtype: Node
"""
if not issubclass(node.__class__, Node):
raise TypeError("{}.add_child: arg «node»=«{}», type {} not valid.".format(self.__class__.__name__, node, type(node)))
self.childs.append(node)
node.parent = self
return node
def copy(self):
"""Return a deep copy of the sub-tree rooted at this node instance.
:returns: Copy of the sub-tree rooted at this node instance.
:rtype: Node
"""
return copy.deepcopy(self)
def remove_child(self, idx=None, *, name=None, node=None):
"""Remove a child node from the current node instance.
:param idx: Index of child node to be removed.
:type idx: int
:param name: The first child node found with «name» will be removed.
:type name: str
:param node: Child node to be removed.
:type node: Node
:returns: The node that has been removed, or False if not successful.
:rtype: Node or False
"""
if (idx and isinstance(idx, int) and
-len(self.childs) <= idx < len(self.childs) ):
return self.childs.pop(idx)
if name and isinstance(name, str):
found_node = None
for _n in self.childs:
if _n.name == name:
found_node = _n
break
if found_node:
self.childs.remove(found_node)
return found_node
if node and node in self.childs:
self.childs.remove(node)
return node
return False
@property
def _path(self):
"""Attribute indicating the absolute node path for this node.
Note that the absolute node path starts with a forward slash
followed by the root node's name: e.g:
`/root.name/child.name/grandchild.name`
Warning: it should be noted that use of _path assumes
that sibling nodes have unique names. If unique node paths
cannot be assured, use node attribute «_coord» instead.
:returns: The absolute node path for this node.
:rtype: str
"""
_path = pathlib.PurePosixPath(self.name)
_node = self
while _node.parent:
_path = _node.parent.name / _path
_node = _node.parent
_path = pathlib.posixpath.sep / _path
return _path.as_posix()
@property
def _coord(self):
"""Attribute indicating the tree coordinates for this node.
The tree coordinates of a node are expressed as a tuple of the
indices of the node and its ancestors, for example:
A grandchild node with node path
`/root.name/root.childs[2].name/root.childs[2].childs[0].name`
would have coordinates `(2,0)`.
The root node _coord is an empty tuple: `()`
:returns: the tree coordinates for this node.
:rtype: tuple
"""
_coord = []
_node = self
while _node.parent:
_idx = _node.parent.childs.index(_node)
_coord.insert(0, _idx)
_node = _node.parent
return tuple(_coord)
@property
def _level(self):
"""Attribute indicating the tree `level` for this node instance.
Note that the root node is defined as level 1.
:returns: the node `level`.
:rtype: int
"""
return len(self._coord) + 1
def get_data(self, *keys):
"""Get a value from the instance `data` dict.
Nested values are accessed by specifying the keys in sequence.
e.g. `node.get_data("country", "city")` would access
`node.data["country"]["city"]`
:param keys: the `data` dict keys referencing the required value.
:type keys: str
:returns: the value accessed by `keys` in `data`.
"""
if not keys:
_val = self.data
_datadict = self.data
for _key in keys:
_val = _datadict.get(_key, None)
if isinstance(_val, dict):
_datadict = _val
else:
break
if isinstance(_val, dict):
_val = copy.deepcopy(_val)
return _val
def set_data(self, *keys, value):
"""Set a value in the instance `data` dict.
:param keys: the `data` dict keys referencing the value in the `data` dict.
:type keys: str
:param value: the value to be set in the `data` dict. Note that
`value` is a keyword-only argument.
:returns: `True` if successful.
"""
_datadict = self.data
for ii, _key in enumerate(keys):
if ii==len(keys)-1:
_datadict[_key] = value
else:
if _key not in _datadict:
_datadict[_key] = {}
_datadict = _datadict[_key]
return True
@property
def _root(self):
"""Attribute referencing the root node of the tree.
:returns: the root node of the tree containing this instance.
:rtype: Node
"""
_n = self
while _n.parent:
_n = _n.parent
return _n
@property
def _ancestors(self):
"""Attribute referencing the tree ancestors of the node instance.
:returns: list of node ancestors in sequence, first item is
the current node instance (`self`), the last item is root.
:rtype: list of Node references
"""
# return list of ancestor nodes starting with self.parent and ending with root
_ancestors=[]
_n = self
while _n.parent:
_n = _n.parent
_ancestors.append(_n)
return _ancestors
def get_node_by_path(self, path):
    """Get a node from a node path.

    Warning: use of this method assumes that sibling nodes have unique
    names; if this is not assured the `get_node_by_coord` method can be
    used instead.

    | Example with absolute node path:
    | `node.get_node_by_path('/root.name/child.name/gchild.name')`
    | Example with relative node path:
    | `node.get_node_by_path('child.name/gchild.name')`

    :param path: the absolute node path, or the node path relative
        to the current node instance.
    :type path: str
    :returns: the node corresponding to `path`.
    :rtype: Node or None
    """
    # FIX: validate the type first; previously str methods were called
    # on «path» before the isinstance check, so a non-string argument
    # raised AttributeError instead of warning and returning None.
    if not isinstance(path, str):
        logger.warning("%s.get_node_by_path: arg «path»=«%s», not correctly specified." % (self.__class__.__name__, path))
        return None
    if path == ".":
        return self  # "." refers to this node itself
    if path.lstrip().startswith("."):
        # relative forms other than a bare "." (e.g. "./x", "..") are unsupported
        logger.warning("%s.get_node_by_path: arg «path»=«%s», not correctly specified." % (self.__class__.__name__, path))
        return None
    _pathlist = list(filter(None, path.split("/")))  # drop empty segments
    if path.startswith("/"):
        _node = self._root
        _pathlist.pop(0)  # remove the root node's own name
    else:
        _node = self
    for _nodename in _pathlist:
        _node = _node.get_child_by_name(_nodename)
        if _node is None:
            logger.warning("%s.get_node_by_path: node«%s», arg `path`=«%s», cannot find node." % (self.__class__.__name__, self.name, path))
            return None
    return _node
def get_node_by_coord(self, coord, relative=False):
"""Get a node from a node coord.
:param coord: the coordinates of the required node.
:type coord: tuple or list
:param relative: `True` if coord is relative to the node instance,
`False` for absolute coordinates.
:type relative: bool
:returns: the node corresponding to `coord`.
:rtype: Node or None
"""
if not isinstance(coord, (list, tuple)) or False in list(map(lambda i: type(i)==int, coord)):
logger.warning("%s.get_node_by_coord: node«%s», arg «coord»=«%s», «coord» must be list or tuple of integers." % (self.__class__.__name__, self.name, coord))
return None
if relative:
_node = self
else:
_node = self._root # _node = self.get_rootnode()
for idx in coord:
_node = _node.childs[idx]
if _node is None:
logger.warning("%s.get_node_by_coord: node«%s», arg «coord»=«%s» not valid." % (self.__class__.__name__, self.name, coord))
return None
return _node
def find_one_node(self, *keys, value, decend=True):
"""Find a node on the branch of the instance with a
`keys=data` item in the `data` dict.
Nested values are accessed by specifying the keys in sequence.
e.g. `node.get_data("country", "city")` would access
`node.data["country"]["city"]`
:param keys: the `data` dict key(s) referencing the required value.
:type keys: str
:param value: the value corresponding to `keys`. Note that
`value` is a keyword-only argument.
:param decend: `decend=True` traverse down the branch sub-tree
starting from `self`. `decend=False` traverse up the
branch from `self` towards root.
:type decend: bool
:returns: the first node found with `keys=data` in the `data` dict.
:rtype: Node or None
"""
if decend:
traversal = self
else:
traversal = self._ancestors
for _node in traversal:
_val = _node.get_data(*keys)
if _val == value:
return _node
return None
def to_texttree(self, indent=3, func=True, symbol='ascii'):
    """Method returning a text representation of the (sub-)tree
    rooted at the current node instance (`self`).

    :param indent: the indentation width for each tree level (minimum 2).
    :type indent: int
    :param func: function returning a string representation for
        each node, e.g. `func=lambda n: str(n._coord)`
        would show the node coordinates.
        `func=True` node.name displayed for each node.
        `func=False` no node representation, just
        the tree structure is displayed.
    :type func: function or bool
    :param symbol: tuple of SEVEN tree symbol characters
        `(root, branch, first, mid, last, spar, level)`.
        `None` or 'ascii' gives a preformed ascii tree, equivalent to
        tuple :code:`(|, +, |, |, |, -, .)`.
        'box' or 'unicode' preformed with box-drawing characters,
        equivalent to tuple :code:`(┬, └, ┬, ├, └, ─, ⋅)`.
    :type symbol: tuple or str or None
    :returns: a string representation of the tree.
    :rtype: str
    """
    if indent < 2:
        indent = 2
    if func is True:  # default func prints node.name
        func = lambda n: "{}".format(n.name)
    if isinstance(symbol, (list, tuple)):
        # FIX: all seven symbols are consumed below; the previous code
        # unpacked only four names, raising at runtime whenever a
        # custom symbol tuple was supplied.
        s_root, s_branch, s_fnode, s_mnode, s_lnode, s_spar, s_level = symbol
    elif symbol in ("unicode", "box"):
        # ┬ └ ┬ ├ └ ─ ⋅  (https://en.wikipedia.org/wiki/Box-drawing_character)
        s_root, s_branch, s_fnode, s_mnode, s_lnode, s_spar, s_level = (
            "\u252c", "\u2514", "\u252c", "\u251c", "\u2514", "\u2500", "\u22c5")
    else:
        s_root, s_branch, s_fnode, s_mnode, s_lnode, s_spar, s_level = (
            "|", "+", "|", "|", "|", "-", ".")
    _text = ""
    local_root_level = self._level
    for _n in self:
        level = _n._level - local_root_level
        if level == 0:
            _text += s_root
        elif _n.parent.childs[0] == _n and len(_n.parent.childs) > 1:
            # first of several siblings: branch off the parent's spar
            _text += ((s_level + " " * (indent - 1)) * (level - 1)
                      + s_branch
                      + s_spar * (indent - 1)
                      + s_fnode)
        elif _n.parent.childs[-1] == _n and len(_n.childs) == 0:
            # last sibling with no children closes the branch
            _text += (s_level + " " * (indent - 1)) * level + s_lnode
        else:
            # middle sibling, or last sibling that itself has children
            # (the original had two identical branches for these cases)
            _text += (s_level + " " * (indent - 1)) * level + s_mnode
        if func and callable(func):
            _text += func(_n)
        _text += "\n"
    return _text
def from_treedict(self, treedict):
if "data" in treedict:
#self.data = collections.defaultdict(dict, treedict["data"])
self.data = copy.deepcopy(treedict["data"])
for key, val in treedict.items():
if key in ["parent", "childs", "data"]:
continue
setattr(self, key, val)
if "childs" in treedict.keys():
for _childdict in treedict["childs"]:
#self.childs.append( self.__class__(parent=self, treedict=_childdict) )
self.__class__(parent=self, treedict=_childdict)
def to_treedict(self, recursive=True, vntree_meta=True):
# NOTE: replace vars(self) with self.__dict__ ( and self.__class__.__dict__ ?)
_dct = {k:v for k, v in vars(self).items() if k not in ["parent", "childs"]}
if not vntree_meta and "_vntree_meta" in _dct["data"]:
_dct["data"].pop("_vntree_meta")
if recursive and self.childs:
_dct["childs"] = []
for _child in self.childs:
_dct["childs"].append( _child.to_treedict(recursive=recursive) )
return _dct
def to_json(self, filepath, default=str):
pass
# def from_json(self, filepath):
# err = ""
# _treedict = None
# if isinstance(filepath, str) and os.path.isfile(filepath):
# try:
# with open(filepath, 'r') as _fh:
# _treedict = json.load(_fh)
# except Exception as err:
# pass
# if not _treedict:
# logger.warning("%s.from_json: node«%s», cannot open «filepath»=«%s», %s." % (self.__class__.__name__, self.name, filepath, err))
# return False
# else:
# self.from_treedict(treedict=_treedict)
# return True
def tree_compare(self, othertree, vntree_meta=False):
"""Compare the (sub-)tree rooted at `self` with another tree.
`tree_compare` converts the trees being compared into JSON string
representations, and uses `difflib.SequenceMatcher().ratio()` to
calculate a measure of the similarity of the strings.
:param othertree: the other tree for comparison.
:type othertree: Node
:param vntree_meta: include private vntree metadata in comparison.
:type vntree_meta: bool
:returns: similarity of the trees as a number between 0 and 1.
:rtype: float
"""
return SequenceMatcher(None,
json.dumps(self.to_treedict(vntree_meta=vntree_meta), default=str),
json.dumps(othertree.to_treedict(vntree_meta=vntree_meta), default=str)
).ratio()
def savefile(self, filepath=None):
"""Save (dump) the tree in a pickle file.
Note that this method saves the complete tree even when invoked on
a non-root node.
It is recommended to use the extension `.vnpkl` for this type of file.
:param filepath: the file path for the pickle file.
If `filepath=None` use `self._vnpkl_fpath` attribute, if set.
:type filepath: str or None
:returns: `True` if successful.
:rtype: bool
"""
if filepath:
self._vnpkl_fpath = os.path.abspath(filepath)
# if not _pfpath:
# logger.error("%s.save: «%s» file path «%s» not valid." % (self.__class__.__name__, self.name, _pfpath))
# return False
try:
with open(self._vnpkl_fpath, "wb") as pf:
pickle.dump(self._root.to_treedict(), pf)
except Exception as err:
logger.error("%s.savefile: arg `filepath`=«%s» `self._vnpkl_fpath`=«%s» error: %s" % (self.__class__.__name__, filepath, self._vnpkl_fpath, err))
return False
return True
@classmethod
def openfile(cls, filepath):
"""Class method that opens (load) a vntree pickle file.
:param filepath: the file path for the pickle file.
:type filepath: str
:returns: root node of tree or `False` if failure.
:rtype: Node or bool
"""
if not os.path.isfile(filepath):
logger.error("%s.openfile: arg `filepath`=«%s» not valid." % (cls.__name__, filepath))
return False
try:
with open(filepath, "rb") as pf:
pkldata = pickle.load(pf)
rootnode = cls(treedict=pkldata)
rootnode._vnpkl_fpath = os.path.abspath(filepath)
except Exception as err:
logger.error("%s.openfile: data in file «%s» not valid: %s" % (cls.__name__, filepath, err))
return False
return rootnode
@classmethod
def setup_yaml(cls):
def yamlnode_constructor(loader, yamlnode) :
fields = loader.construct_mapping(yamlnode, deep=True)
return cls(**fields)
yaml.SafeLoader.add_constructor('!'+cls.__name__, yamlnode_constructor)
# def yamlnode_representer(dumper, data):
# rep = '!Node'
# return dumper.represent_scalar('!Node', '%sd%s' % data)
# yaml.add_representer(cls, yamlnode_representer)
@classmethod
def yaml2tree(cls, yamltree):
"""Class method that creates a tree from YAML.
| # Example yamltree data:
| - !Node &root
| name: "root node"
| parent: null
| data:
| testpara: 111
| - !Node &child1
| name: "child node"
| parent: *root
| - !Node &gc1
| name: "grand-child node"
| parent: *child1
:param yamltree: a string of YAML describing the nodes in the
tree, or the path to a file containing the data.
:type yamltree: str
:returns: the root node of the tree.
:rtype: Node
"""
if not cls.YAML_setup:
cls.setup_yaml()
cls.YAML_setup = True
if os.path.isfile(yamltree):
with open(yamltree) as fh:
yaml_data = fh.read()
else:
yaml_data = yamltree
list_of_nodes = yaml.safe_load(yaml_data)
yamltree_root = list_of_nodes[0]
return yamltree_root
def tree2yaml(self):
# if not self.__class__.YAML_setup:
# self.__class__.setup_yaml()
# self.__class__.YAML_setup = True
def make_anchor(node):
# if node._root is node:
# anchor = "root"
# else:
anchor = "coord"
for _c in node._coord:
anchor += "-" + str(_c)
return anchor
yltree = "# dummy \n"
for _n in self:
_ncopy = _n.copy()
delattr(_ncopy, "childs")
delattr(_ncopy, "parent")
# if _n.parent:
# _ncopy.parent = "*" + make_anchor(_n.parent)
_yl = yaml.dump(_ncopy, default_flow_style=False)
_n_yl = '!'+_n.__class__.__name__ + " &" + make_anchor(_n)
if _n.parent:
_n_yl += "\nparent: " + "*" + make_anchor(_n.parent)
# else:
# _n_yl += "\n"
_n_yl += _yl[_yl.index("\n"):]
yltree += _n_yl
yltree = textwrap.indent(yltree, " ")
yltree = yltree.strip()
yltree = yltree.replace("\n !", "\n- !")
return yltree
|
qwilka/vn-tree
|
vntree/node.py
|
Node.get_node_by_path
|
python
|
def get_node_by_path(self, path):
if path==".":
return self
elif path.lstrip().startswith((".", "./")) or not isinstance(path, str):
logger.warning("%s.get_node_by_path: arg «path»=«%s», not correctly specified." % (self.__class__.__name__, path))
return None
_pathlist = list(filter(None, path.split("/")) ) # remove blank strings
if path.startswith("/"):
_node = self._root
_pathlist.pop(0) # remove rootnode name
else:
_node = self
for _nodename in _pathlist:
_node = _node.get_child_by_name(_nodename)
if _node is None:
logger.warning("%s.get_node_by_path: node«%s», arg `path`=«%s», cannot find node." % (self.__class__.__name__, self.name, path))
return None
return _node
|
Get a node from a node path.
Warning: use of this method assumes that sibling nodes have unique names,
if this is not assured the `get_node_by_coord` method can be used instead.
| Example with absolute node path:
| `node.get_node_by_path('/root.name/child.name/gchild.name')`
| Example with relative node path:
| `node.get_node_by_path('child.name/gchild.name')`
:param path: the absolute node path, or the node path relative
to the current node instance.
:type path: str
:returns: the node corresponding to `path`.
:rtype: Node or None
|
train
|
https://github.com/qwilka/vn-tree/blob/f08106e9c7232d8748d78d1d39b019699a7407dd/vntree/node.py#L343-L376
| null |
class Node:
"""Class for creating vntree nodes.
:param name: node name
:type name: str or None
:param parent: The parent node of this node.
:type parent: Node or None
:param data: Dictionary containing node data.
:type data: dict or None
:param treedict: Dictionary specifying a complete tree.
:type treedict: dict or None
"""
YAML_setup = False
name = NodeAttr()
_vnpkl_fpath = TreeAttr("_vntree_meta")
def __init__(self, name=None, parent=None, data=None,
treedict=None, vnpkl_fpath=None):
if data and isinstance(data, dict):
#self.data = collections.defaultdict(dict, copy.deepcopy(data))
self.data = copy.deepcopy(data)
else:
self.data = {}
if name:
self.name = str(name)
elif not getattr(self, "name", None) and name is None:
self.name = ""
self.childs = []
if parent and issubclass(parent.__class__, Node):
parent.add_child(self)
elif parent is None:
self.parent = None
else:
raise TypeError("{}.__init__: instance «{}» argument «parent» type not valid: {}".format(self.__class__.__name__, name, type(parent)))
if callable(name):
self.name = str(name(self))
if treedict and isinstance(treedict, dict):
self.from_treedict(treedict)
if vnpkl_fpath and isinstance(vnpkl_fpath, str):
self._vnpkl_fpath = vnpkl_fpath
def __str__(self):
return "{} coord={} «{}»".format(self.__class__.__name__, self._coord, self.name)
def __iter__(self):
yield self
for node in itertools.chain(*map(iter, self.childs)):
yield node
def __reversed__(self):
for node in itertools.chain(*map(reversed, self.childs)):
yield node
yield self
def add_child(self, node):
"""Add a child node to the current node instance.
:param node: the child node instance.
:type node: Node
:returns: The new child node instance.
:rtype: Node
"""
if not issubclass(node.__class__, Node):
raise TypeError("{}.add_child: arg «node»=«{}», type {} not valid.".format(self.__class__.__name__, node, type(node)))
self.childs.append(node)
node.parent = self
return node
def copy(self):
"""Return a deep copy of the sub-tree rooted at this node instance.
:returns: Copy of the sub-tree rooted at this node instance.
:rtype: Node
"""
return copy.deepcopy(self)
def remove_child(self, idx=None, *, name=None, node=None):
    """Remove a child node from the current node instance.

    Only one selector is needed; they are tried in the order
    `idx`, `name`, `node`.

    :param idx: Index of child node to be removed.
    :type idx: int
    :param name: The first child node found with «name» will be removed.
    :type name: str
    :param node: Child node to be removed.
    :type node: Node
    :returns: The node that has been removed, or False if not successful.
    :rtype: Node or False
    """
    # FIX: test «idx is not None» rather than truthiness; the previous
    # «if idx and ...» made idx=0 falsy, so the first child could never
    # be removed by index.
    if (idx is not None and isinstance(idx, int) and
            -len(self.childs) <= idx < len(self.childs)):
        return self.childs.pop(idx)
    if name and isinstance(name, str):
        for _child in self.childs:
            if _child.name == name:
                self.childs.remove(_child)
                return _child
    if node and node in self.childs:
        self.childs.remove(node)
        return node
    return False
@property
def _path(self):
"""Attribute indicating the absolute node path for this node.
Note that the absolute node path starts with a forward slash
followed by the root node's name: e.g:
`/root.name/child.name/grandchild.name`
Warning: it should be noted that use of _path assumes
that sibling nodes have unique names. If unique node paths
cannot be assured, use node attribute «_coord» instead.
:returns: The absolute node path for this node.
:rtype: str
"""
_path = pathlib.PurePosixPath(self.name)
_node = self
while _node.parent:
_path = _node.parent.name / _path
_node = _node.parent
_path = pathlib.posixpath.sep / _path
return _path.as_posix()
@property
def _coord(self):
"""Attribute indicating the tree coordinates for this node.
The tree coordinates of a node are expressed as a tuple of the
indices of the node and its ancestors, for example:
A grandchild node with node path
`/root.name/root.childs[2].name/root.childs[2].childs[0].name`
would have coordinates `(2,0)`.
The root node _coord is an empty tuple: `()`
:returns: the tree coordinates for this node.
:rtype: tuple
"""
_coord = []
_node = self
while _node.parent:
_idx = _node.parent.childs.index(_node)
_coord.insert(0, _idx)
_node = _node.parent
return tuple(_coord)
@property
def _level(self):
"""Attribute indicating the tree `level` for this node instance.
Note that the root node is defined as level 1.
:returns: the node `level`.
:rtype: int
"""
return len(self._coord) + 1
def get_data(self, *keys):
"""Get a value from the instance `data` dict.
Nested values are accessed by specifying the keys in sequence.
e.g. `node.get_data("country", "city")` would access
`node.data["country"]["city"]`
:param keys: the `data` dict keys referencing the required value.
:type keys: str
:returns: the value accessed by `keys` in `data`.
"""
if not keys:
_val = self.data
_datadict = self.data
for _key in keys:
_val = _datadict.get(_key, None)
if isinstance(_val, dict):
_datadict = _val
else:
break
if isinstance(_val, dict):
_val = copy.deepcopy(_val)
return _val
def set_data(self, *keys, value):
"""Set a value in the instance `data` dict.
:param keys: the `data` dict keys referencing the value in the `data` dict.
:type keys: str
:param value: the value to be set in the `data` dict. Note that
`value` is a keyword-only argument.
:returns: `True` if successful.
"""
_datadict = self.data
for ii, _key in enumerate(keys):
if ii==len(keys)-1:
_datadict[_key] = value
else:
if _key not in _datadict:
_datadict[_key] = {}
_datadict = _datadict[_key]
return True
@property
def _root(self):
"""Attribute referencing the root node of the tree.
:returns: the root node of the tree containing this instance.
:rtype: Node
"""
_n = self
while _n.parent:
_n = _n.parent
return _n
@property
def _ancestors(self):
    """Attribute referencing the tree ancestors of the node instance.

    :returns: list of ancestor nodes in sequence, starting with this
        node's parent and ending with the root node. `self` is NOT
        included, so the list is empty for the root node itself.
    :rtype: list of Node references
    """
    # return list of ancestor nodes starting with self.parent and ending with root
    _ancestors=[]
    _n = self
    while _n.parent:
        _n = _n.parent
        _ancestors.append(_n)
    return _ancestors
def get_child_by_name(self, childname):
    """Get a child node of the current instance by its name.

    If several children share the name, a warning is logged and the
    first match is returned.

    :param childname: the name of the required child node.
    :type childname: str
    :returns: the first child node found with name `childname`.
    :rtype: Node or None
    """
    matches = [node for node in self.childs if node.name == childname]
    if len(matches) > 1:
        logger.warning("%s.get_child_by_name: node:«%s» has more than 1 childnode with name=«%s»." % (self.__class__.__name__, self.name, childname))
    return matches[0] if matches else None
def get_node_by_coord(self, coord, relative=False):
    """Get a node from a node coord.

    :param coord: the coordinates of the required node.
    :type coord: tuple or list
    :param relative: `True` if coord is relative to the node instance,
        `False` for absolute coordinates.
    :type relative: bool
    :returns: the node corresponding to `coord`, or `None` if `coord`
        is malformed or does not reference an existing node.
    :rtype: Node or None
    """
    # Validate with a generator rather than materialising a throwaway
    # list; «type(i) is int» deliberately excludes bool (an int subclass),
    # preserving the original «type(i)==int» semantics.
    if not isinstance(coord, (list, tuple)) or not all(type(i) is int for i in coord):
        logger.warning("%s.get_node_by_coord: node«%s», arg «coord»=«%s», «coord» must be list or tuple of integers." % (self.__class__.__name__, self.name, coord))
        return None
    if relative:
        _node = self
    else:
        _node = self._root
    try:
        for idx in coord:
            _node = _node.childs[idx]
    except IndexError:
        # FIX: an out-of-range index previously escaped as IndexError;
        # the documented contract is to warn and return None.
        _node = None
    if _node is None:
        logger.warning("%s.get_node_by_coord: node«%s», arg «coord»=«%s» not valid." % (self.__class__.__name__, self.name, coord))
        return None
    return _node
def find_one_node(self, *keys, value, decend=True):
    """Find the first node on the branch of this instance whose `data`
    dict contains the item `keys` -> `value`.

    Nested values are accessed by specifying the keys in sequence,
    e.g. `node.find_one_node("country", "city", value="London")` tests
    `node.data["country"]["city"] == "London"` on each node visited.

    :param keys: the `data` dict key(s) referencing the required value.
    :type keys: str
    :param value: the value corresponding to `keys`. Note that
        `value` is a keyword-only argument.
    :param decend: `decend=True` traverse down the branch sub-tree
        starting from `self`. `decend=False` traverse up the
        branch from `self` towards root.
    :type decend: bool
    :returns: the first node found with `keys` -> `value` in its `data`
        dict, or `None` if there is no match.
    :rtype: Node or None
    """
    if decend:
        traversal = self
    else:
        traversal = self._ancestors
    for _node in traversal:
        _val = _node.get_data(*keys)
        if _val == value:
            return _node
    return None
def to_texttree(self, indent=3, func=True, symbol='ascii'):
    """Method returning a text representation of the (sub-)tree
    rooted at the current node instance (`self`).

    :param indent: the indentation width for each tree level (minimum 2).
    :type indent: int
    :param func: function returning a string representation for
        each node, e.g. `func=lambda n: str(n._coord)`
        would show the node coordinates.
        `func=True` node.name displayed for each node.
        `func=False` no node representation, just
        the tree structure is displayed.
    :type func: function or bool
    :param symbol: tuple of SEVEN tree symbol characters
        `(root, branch, first, mid, last, spar, level)`.
        `None` or 'ascii' gives a preformed ascii tree, equivalent to
        tuple :code:`(|, +, |, |, |, -, .)`.
        'box' or 'unicode' preformed with box-drawing characters,
        equivalent to tuple :code:`(┬, └, ┬, ├, └, ─, ⋅)`.
    :type symbol: tuple or str or None
    :returns: a string representation of the tree.
    :rtype: str
    """
    if indent < 2:
        indent = 2
    if func is True:  # default func prints node.name
        func = lambda n: "{}".format(n.name)
    if isinstance(symbol, (list, tuple)):
        # FIX: all seven symbols are consumed below; the previous code
        # unpacked only four names, raising at runtime whenever a
        # custom symbol tuple was supplied.
        s_root, s_branch, s_fnode, s_mnode, s_lnode, s_spar, s_level = symbol
    elif symbol in ("unicode", "box"):
        # ┬ └ ┬ ├ └ ─ ⋅  (https://en.wikipedia.org/wiki/Box-drawing_character)
        s_root, s_branch, s_fnode, s_mnode, s_lnode, s_spar, s_level = (
            "\u252c", "\u2514", "\u252c", "\u251c", "\u2514", "\u2500", "\u22c5")
    else:
        s_root, s_branch, s_fnode, s_mnode, s_lnode, s_spar, s_level = (
            "|", "+", "|", "|", "|", "-", ".")
    _text = ""
    local_root_level = self._level
    for _n in self:
        level = _n._level - local_root_level
        if level == 0:
            _text += s_root
        elif _n.parent.childs[0] == _n and len(_n.parent.childs) > 1:
            # first of several siblings: branch off the parent's spar
            _text += ((s_level + " " * (indent - 1)) * (level - 1)
                      + s_branch
                      + s_spar * (indent - 1)
                      + s_fnode)
        elif _n.parent.childs[-1] == _n and len(_n.childs) == 0:
            # last sibling with no children closes the branch
            _text += (s_level + " " * (indent - 1)) * level + s_lnode
        else:
            # middle sibling, or last sibling that itself has children
            # (the original had two identical branches for these cases)
            _text += (s_level + " " * (indent - 1)) * level + s_mnode
        if func and callable(func):
            _text += func(_n)
        _text += "\n"
    return _text
def from_treedict(self, treedict):
if "data" in treedict:
#self.data = collections.defaultdict(dict, treedict["data"])
self.data = copy.deepcopy(treedict["data"])
for key, val in treedict.items():
if key in ["parent", "childs", "data"]:
continue
setattr(self, key, val)
if "childs" in treedict.keys():
for _childdict in treedict["childs"]:
#self.childs.append( self.__class__(parent=self, treedict=_childdict) )
self.__class__(parent=self, treedict=_childdict)
def to_treedict(self, recursive=True, vntree_meta=True):
# NOTE: replace vars(self) with self.__dict__ ( and self.__class__.__dict__ ?)
_dct = {k:v for k, v in vars(self).items() if k not in ["parent", "childs"]}
if not vntree_meta and "_vntree_meta" in _dct["data"]:
_dct["data"].pop("_vntree_meta")
if recursive and self.childs:
_dct["childs"] = []
for _child in self.childs:
_dct["childs"].append( _child.to_treedict(recursive=recursive) )
return _dct
def to_json(self, filepath, default=str):
pass
# def from_json(self, filepath):
# err = ""
# _treedict = None
# if isinstance(filepath, str) and os.path.isfile(filepath):
# try:
# with open(filepath, 'r') as _fh:
# _treedict = json.load(_fh)
# except Exception as err:
# pass
# if not _treedict:
# logger.warning("%s.from_json: node«%s», cannot open «filepath»=«%s», %s." % (self.__class__.__name__, self.name, filepath, err))
# return False
# else:
# self.from_treedict(treedict=_treedict)
# return True
def tree_compare(self, othertree, vntree_meta=False):
    """Compare the (sub-)tree rooted at `self` with another tree.

    Both trees are serialised to JSON strings, and the similarity of
    those strings is measured with `difflib.SequenceMatcher().ratio()`.

    :param othertree: the other tree for comparison.
    :type othertree: Node
    :param vntree_meta: include private vntree metadata in comparison.
    :type vntree_meta: bool
    :returns: similarity of the trees as a number between 0 and 1.
    :rtype: float
    """
    serialised = [
        json.dumps(tree.to_treedict(vntree_meta=vntree_meta), default=str)
        for tree in (self, othertree)
    ]
    return SequenceMatcher(None, serialised[0], serialised[1]).ratio()
def savefile(self, filepath=None):
"""Save (dump) the tree in a pickle file.
Note that this method saves the complete tree even when invoked on
a non-root node.
It is recommended to use the extension `.vnpkl` for this type of file.
:param filepath: the file path for the pickle file.
If `filepath=None` use `self._vnpkl_fpath` attribute, if set.
:type filepath: str or None
:returns: `True` if successful.
:rtype: bool
"""
if filepath:
self._vnpkl_fpath = os.path.abspath(filepath)
# if not _pfpath:
# logger.error("%s.save: «%s» file path «%s» not valid." % (self.__class__.__name__, self.name, _pfpath))
# return False
try:
with open(self._vnpkl_fpath, "wb") as pf:
pickle.dump(self._root.to_treedict(), pf)
except Exception as err:
logger.error("%s.savefile: arg `filepath`=«%s» `self._vnpkl_fpath`=«%s» error: %s" % (self.__class__.__name__, filepath, self._vnpkl_fpath, err))
return False
return True
@classmethod
def openfile(cls, filepath):
    """Class method that opens (load) a vntree pickle file.

    :param filepath: the file path for the pickle file.
    :type filepath: str
    :returns: root node of tree or `False` if failure.
    :rtype: Node or bool
    """
    if not os.path.isfile(filepath):
        logger.error("%s.openfile: arg `filepath`=«%s» not valid." % (cls.__name__, filepath))
        return False
    try:
        with open(filepath, "rb") as _fh:
            treedict = pickle.load(_fh)
        root = cls(treedict=treedict)
        root._vnpkl_fpath = os.path.abspath(filepath)
    except Exception as err:
        logger.error("%s.openfile: data in file «%s» not valid: %s" % (cls.__name__, filepath, err))
        return False
    return root
@classmethod
def setup_yaml(cls):
def yamlnode_constructor(loader, yamlnode) :
fields = loader.construct_mapping(yamlnode, deep=True)
return cls(**fields)
yaml.SafeLoader.add_constructor('!'+cls.__name__, yamlnode_constructor)
# def yamlnode_representer(dumper, data):
# rep = '!Node'
# return dumper.represent_scalar('!Node', '%sd%s' % data)
# yaml.add_representer(cls, yamlnode_representer)
@classmethod
def yaml2tree(cls, yamltree):
"""Class method that creates a tree from YAML.
| # Example yamltree data:
| - !Node &root
| name: "root node"
| parent: null
| data:
| testpara: 111
| - !Node &child1
| name: "child node"
| parent: *root
| - !Node &gc1
| name: "grand-child node"
| parent: *child1
:param yamltree: a string of YAML describing the nodes in the
tree, or the path to a file containing the data.
:type yamltree: str
:returns: the root node of the tree.
:rtype: Node
"""
if not cls.YAML_setup:
cls.setup_yaml()
cls.YAML_setup = True
if os.path.isfile(yamltree):
with open(yamltree) as fh:
yaml_data = fh.read()
else:
yaml_data = yamltree
list_of_nodes = yaml.safe_load(yaml_data)
yamltree_root = list_of_nodes[0]
return yamltree_root
def tree2yaml(self):
# if not self.__class__.YAML_setup:
# self.__class__.setup_yaml()
# self.__class__.YAML_setup = True
def make_anchor(node):
# if node._root is node:
# anchor = "root"
# else:
anchor = "coord"
for _c in node._coord:
anchor += "-" + str(_c)
return anchor
yltree = "# dummy \n"
for _n in self:
_ncopy = _n.copy()
delattr(_ncopy, "childs")
delattr(_ncopy, "parent")
# if _n.parent:
# _ncopy.parent = "*" + make_anchor(_n.parent)
_yl = yaml.dump(_ncopy, default_flow_style=False)
_n_yl = '!'+_n.__class__.__name__ + " &" + make_anchor(_n)
if _n.parent:
_n_yl += "\nparent: " + "*" + make_anchor(_n.parent)
# else:
# _n_yl += "\n"
_n_yl += _yl[_yl.index("\n"):]
yltree += _n_yl
yltree = textwrap.indent(yltree, " ")
yltree = yltree.strip()
yltree = yltree.replace("\n !", "\n- !")
return yltree
|
qwilka/vn-tree
|
vntree/node.py
|
Node.get_node_by_coord
|
python
|
def get_node_by_coord(self, coord, relative=False):
if not isinstance(coord, (list, tuple)) or False in list(map(lambda i: type(i)==int, coord)):
logger.warning("%s.get_node_by_coord: node«%s», arg «coord»=«%s», «coord» must be list or tuple of integers." % (self.__class__.__name__, self.name, coord))
return None
if relative:
_node = self
else:
_node = self._root # _node = self.get_rootnode()
for idx in coord:
_node = _node.childs[idx]
if _node is None:
logger.warning("%s.get_node_by_coord: node«%s», arg «coord»=«%s» not valid." % (self.__class__.__name__, self.name, coord))
return None
return _node
|
Get a node from a node coord.
:param coord: the coordinates of the required node.
:type coord: tuple or list
:param relative: `True` if coord is relative to the node instance,
`False` for absolute coordinates.
:type relative: bool
:returns: the node corresponding to `coord`.
:rtype: Node or None
|
train
|
https://github.com/qwilka/vn-tree/blob/f08106e9c7232d8748d78d1d39b019699a7407dd/vntree/node.py#L379-L402
| null |
class Node:
"""Class for creating vntree nodes.
:param name: node name
:type name: str or None
:param parent: The parent node of this node.
:type parent: Node or None
:param data: Dictionary containing node data.
:type data: dict or None
:param treedict: Dictionary specifying a complete tree.
:type treedict: dict or None
"""
YAML_setup = False
name = NodeAttr()
_vnpkl_fpath = TreeAttr("_vntree_meta")
def __init__(self, name=None, parent=None, data=None,
treedict=None, vnpkl_fpath=None):
if data and isinstance(data, dict):
#self.data = collections.defaultdict(dict, copy.deepcopy(data))
self.data = copy.deepcopy(data)
else:
self.data = {}
if name:
self.name = str(name)
elif not getattr(self, "name", None) and name is None:
self.name = ""
self.childs = []
if parent and issubclass(parent.__class__, Node):
parent.add_child(self)
elif parent is None:
self.parent = None
else:
raise TypeError("{}.__init__: instance «{}» argument «parent» type not valid: {}".format(self.__class__.__name__, name, type(parent)))
if callable(name):
self.name = str(name(self))
if treedict and isinstance(treedict, dict):
self.from_treedict(treedict)
if vnpkl_fpath and isinstance(vnpkl_fpath, str):
self._vnpkl_fpath = vnpkl_fpath
def __str__(self):
return "{} coord={} «{}»".format(self.__class__.__name__, self._coord, self.name)
def __iter__(self):
yield self
for node in itertools.chain(*map(iter, self.childs)):
yield node
def __reversed__(self):
for node in itertools.chain(*map(reversed, self.childs)):
yield node
yield self
def add_child(self, node):
"""Add a child node to the current node instance.
:param node: the child node instance.
:type node: Node
:returns: The new child node instance.
:rtype: Node
"""
if not issubclass(node.__class__, Node):
raise TypeError("{}.add_child: arg «node»=«{}», type {} not valid.".format(self.__class__.__name__, node, type(node)))
self.childs.append(node)
node.parent = self
return node
def copy(self):
"""Return a deep copy of the sub-tree rooted at this node instance.
:returns: Copy of the sub-tree rooted at this node instance.
:rtype: Node
"""
return copy.deepcopy(self)
def remove_child(self, idx=None, *, name=None, node=None):
"""Remove a child node from the current node instance.
:param idx: Index of child node to be removed.
:type idx: int
:param name: The first child node found with «name» will be removed.
:type name: str
:param node: Child node to be removed.
:type node: Node
:returns: The node that has been removed, or False if not successful.
:rtype: Node or False
"""
if (idx and isinstance(idx, int) and
-len(self.childs) <= idx < len(self.childs) ):
return self.childs.pop(idx)
if name and isinstance(name, str):
found_node = None
for _n in self.childs:
if _n.name == name:
found_node = _n
break
if found_node:
self.childs.remove(found_node)
return found_node
if node and node in self.childs:
self.childs.remove(node)
return node
return False
@property
def _path(self):
"""Attribute indicating the absolute node path for this node.
Note that the absolute node path starts with a forward slash
followed by the root node's name: e.g:
`/root.name/child.name/grandchild.name`
Warning: it should be noted that use of _path assumes
that sibling nodes have unique names. If unique node paths
cannot be assured, use node attribute «_coord» instead.
:returns: The absolute node path for this node.
:rtype: str
"""
_path = pathlib.PurePosixPath(self.name)
_node = self
while _node.parent:
_path = _node.parent.name / _path
_node = _node.parent
_path = pathlib.posixpath.sep / _path
return _path.as_posix()
@property
def _coord(self):
"""Attribute indicating the tree coordinates for this node.
The tree coordinates of a node are expressed as a tuple of the
indices of the node and its ancestors, for example:
A grandchild node with node path
`/root.name/root.childs[2].name/root.childs[2].childs[0].name`
would have coordinates `(2,0)`.
The root node _coord is an empty tuple: `()`
:returns: the tree coordinates for this node.
:rtype: tuple
"""
_coord = []
_node = self
while _node.parent:
_idx = _node.parent.childs.index(_node)
_coord.insert(0, _idx)
_node = _node.parent
return tuple(_coord)
@property
def _level(self):
"""Attribute indicating the tree `level` for this node instance.
Note that the root node is defined as level 1.
:returns: the node `level`.
:rtype: int
"""
return len(self._coord) + 1
def get_data(self, *keys):
"""Get a value from the instance `data` dict.
Nested values are accessed by specifying the keys in sequence.
e.g. `node.get_data("country", "city")` would access
`node.data["country"]["city"]`
:param keys: the `data` dict keys referencing the required value.
:type keys: str
:returns: the value accessed by `keys` in `data`.
"""
if not keys:
_val = self.data
_datadict = self.data
for _key in keys:
_val = _datadict.get(_key, None)
if isinstance(_val, dict):
_datadict = _val
else:
break
if isinstance(_val, dict):
_val = copy.deepcopy(_val)
return _val
def set_data(self, *keys, value):
"""Set a value in the instance `data` dict.
:param keys: the `data` dict keys referencing the value in the `data` dict.
:type keys: str
:param value: the value to be set in the `data` dict. Note that
`value` is a keyword-only argument.
:returns: `True` if successful.
"""
_datadict = self.data
for ii, _key in enumerate(keys):
if ii==len(keys)-1:
_datadict[_key] = value
else:
if _key not in _datadict:
_datadict[_key] = {}
_datadict = _datadict[_key]
return True
@property
def _root(self):
"""Attribute referencing the root node of the tree.
:returns: the root node of the tree containing this instance.
:rtype: Node
"""
_n = self
while _n.parent:
_n = _n.parent
return _n
@property
def _ancestors(self):
"""Attribute referencing the tree ancestors of the node instance.
:returns: list of node ancestors in sequence, first item is
the current node instance (`self`), the last item is root.
:rtype: list of Node references
"""
# return list of ancestor nodes starting with self.parent and ending with root
_ancestors=[]
_n = self
while _n.parent:
_n = _n.parent
_ancestors.append(_n)
return _ancestors
def get_child_by_name(self, childname):
"""Get a child node of the current instance by its name.
:param childname: the name of the required child node.
:type childname: str
:returns: the first child node found with name `childname`.
:rtype: Node or None
"""
_childs = [_child for _child in self.childs if _child.name==childname]
if len(_childs)>1:
logger.warning("%s.get_child_by_name: node:«%s» has more than 1 childnode with name=«%s»." % (self.__class__.__name__, self.name, childname))
if len(_childs)==0:
_childnode = None
else:
_childnode = _childs[0]
return _childnode
def get_node_by_path(self, path):
"""Get a node from a node path.
Warning: use of this method assumes that sibling nodes have unique names,
if this is not assured the `get_node_by_coord` method can be used instead.
| Example with absolute node path:
| `node.get_node_by_path('/root.name/child.name/gchild.name')`
| Example with relative node path:
| `node.get_node_by_path('child.name/gchild.name')`
:param path: the absolute node path, or the node path relative
to the current node instance.
:type path: str
:returns: the node corresponding to `path`.
:rtype: Node or None
"""
if path==".":
return self
elif path.lstrip().startswith((".", "./")) or not isinstance(path, str):
logger.warning("%s.get_node_by_path: arg «path»=«%s», not correctly specified." % (self.__class__.__name__, path))
return None
_pathlist = list(filter(None, path.split("/")) ) # remove blank strings
if path.startswith("/"):
_node = self._root
_pathlist.pop(0) # remove rootnode name
else:
_node = self
for _nodename in _pathlist:
_node = _node.get_child_by_name(_nodename)
if _node is None:
logger.warning("%s.get_node_by_path: node«%s», arg `path`=«%s», cannot find node." % (self.__class__.__name__, self.name, path))
return None
return _node
def find_one_node(self, *keys, value, decend=True):
"""Find a node on the branch of the instance with a
`keys=data` item in the `data` dict.
Nested values are accessed by specifying the keys in sequence.
e.g. `node.get_data("country", "city")` would access
`node.data["country"]["city"]`
:param keys: the `data` dict key(s) referencing the required value.
:type keys: str
:param value: the value corresponding to `keys`. Note that
`value` is a keyword-only argument.
:param decend: `decend=True` traverse down the branch sub-tree
starting from `self`. `decend=False` traverse up the
branch from `self` towards root.
:type decend: bool
:returns: the first node found with `keys=data` in the `data` dict.
:rtype: Node or None
"""
if decend:
traversal = self
else:
traversal = self._ancestors
for _node in traversal:
_val = _node.get_data(*keys)
if _val == value:
return _node
return None
def to_texttree(self, indent=3, func=True, symbol='ascii'):
"""Method returning a text representation of the (sub-)tree
rooted at the current node instance (`self`).
:param indent: the indentation width for each tree level.
:type indent: int
:param func: function returning a string representation for
each node. e.g. `func=lambda n: str(n._coord)`
would show the node coordinates.
`func=True` node.name displayed for each node.
`func=False` no node representation, just
the tree structure is displayed.
:type func: function or bool
:param symbol: tuple of tree symbol characters.
`None` or 'ascii' gives a preformed ascii tree, equivalent to tuple :code:`(|, +, |, |, |, -, .)`.
'box' preformed with box-drawing characters, equivalent to tuple :code:`(┬, └, ┬, ├, └, ─, ⋅)`.
'unicode' preformed with unicode characters.
:type symbol: tuple or str or None
:returns: a string representation of the tree.
:rtype: str
"""
if indent<2:
indent=2
if func is True: # default func prints node.name
func = lambda n: "{}".format(n.name)
if isinstance(symbol, (list, tuple)):
s_root, s_branch, s_spar, s_fnode = symbol
elif symbol=="unicode":
# ┬ └ ┬ ├ └ ─ ⋅
s_root, s_branch, s_fnode, s_mnode, s_lnode, s_spar, s_level = (
"\u252c", "\u2514", "\u252c", "\u251c", "\u2514", "\u2500", "\u22c5")
elif symbol=="box": # https://en.wikipedia.org/wiki/Box-drawing_character
# ┬ └ ┬ ├ └ ─ ⋅
s_root, s_branch, s_fnode, s_mnode, s_lnode, s_spar, s_level = (
"\u252c", "\u2514", "\u252c", "\u251c", "\u2514", "\u2500", "\u22c5")
else:
s_root, s_branch, s_fnode, s_mnode, s_lnode, s_spar, s_level = (
"|", "+", "|", "|", "|", "-", ".")
_text = ""
#local_root_level = len(self.ancestors)
local_root_level = self._level
for _n in self:
#level = len(node.ancestors) - local_root_level
level = _n._level - local_root_level
if level==0:
_text += s_root
elif _n.parent.childs[0] == _n and len(_n.parent.childs)>1: # first child
#s_spar="f"
_text += ( (s_level + " "*(indent-1))*(level-1)
+ s_branch
+ s_spar*(indent-1)
+ s_fnode)
elif _n.parent.childs[-1] == _n and len(_n.childs)==0: # last child, no children
#s_spar="l"
_text += ( (s_level + " "*(indent-1))*(level)
+ s_lnode )
elif _n.parent.childs[-1] == _n: # last child, has children
#s_spar="l"
_text += ( (s_level + " "*(indent-1))*(level)
+ s_mnode )
#+ s_spar*(indent-1) )
# elif level>0:
# _text += (s_level + " "*(indent-1))*(level-1) + s_branch + s_spar*(indent-1)
else:
#_text += s_fnode
#s_spar="m"
_text += ( (s_level + " "*(indent-1))*(level)
+ s_mnode )
#+ s_spar*(indent-1) )
if func and callable(func):
_text += func(_n)
_text += "\n"
return _text
def from_treedict(self, treedict):
if "data" in treedict:
#self.data = collections.defaultdict(dict, treedict["data"])
self.data = copy.deepcopy(treedict["data"])
for key, val in treedict.items():
if key in ["parent", "childs", "data"]:
continue
setattr(self, key, val)
if "childs" in treedict.keys():
for _childdict in treedict["childs"]:
#self.childs.append( self.__class__(parent=self, treedict=_childdict) )
self.__class__(parent=self, treedict=_childdict)
def to_treedict(self, recursive=True, vntree_meta=True):
# NOTE: replace vars(self) with self.__dict__ ( and self.__class__.__dict__ ?)
_dct = {k:v for k, v in vars(self).items() if k not in ["parent", "childs"]}
if not vntree_meta and "_vntree_meta" in _dct["data"]:
_dct["data"].pop("_vntree_meta")
if recursive and self.childs:
_dct["childs"] = []
for _child in self.childs:
_dct["childs"].append( _child.to_treedict(recursive=recursive) )
return _dct
def to_json(self, filepath, default=str):
pass
# def from_json(self, filepath):
# err = ""
# _treedict = None
# if isinstance(filepath, str) and os.path.isfile(filepath):
# try:
# with open(filepath, 'r') as _fh:
# _treedict = json.load(_fh)
# except Exception as err:
# pass
# if not _treedict:
# logger.warning("%s.from_json: node«%s», cannot open «filepath»=«%s», %s." % (self.__class__.__name__, self.name, filepath, err))
# return False
# else:
# self.from_treedict(treedict=_treedict)
# return True
def tree_compare(self, othertree, vntree_meta=False):
"""Compare the (sub-)tree rooted at `self` with another tree.
`tree_compare` converts the trees being compared into JSON string
representations, and uses `difflib.SequenceMatcher().ratio()` to
calculate a measure of the similarity of the strings.
:param othertree: the other tree for comparison.
:type othertree: Node
:param vntree_meta: include private vntree metadata in comparison.
:type vntree_meta: bool
:returns: similarity of the trees as a number between 0 and 1.
:rtype: float
"""
return SequenceMatcher(None,
json.dumps(self.to_treedict(vntree_meta=vntree_meta), default=str),
json.dumps(othertree.to_treedict(vntree_meta=vntree_meta), default=str)
).ratio()
def savefile(self, filepath=None):
"""Save (dump) the tree in a pickle file.
Note that this method saves the complete tree even when invoked on
a non-root node.
It is recommended to use the extension `.vnpkl` for this type of file.
:param filepath: the file path for the pickle file.
If `filepath=None` use `self._vnpkl_fpath` attribute, if set.
:type filepath: str or None
:returns: `True` if successful.
:rtype: bool
"""
if filepath:
self._vnpkl_fpath = os.path.abspath(filepath)
# if not _pfpath:
# logger.error("%s.save: «%s» file path «%s» not valid." % (self.__class__.__name__, self.name, _pfpath))
# return False
try:
with open(self._vnpkl_fpath, "wb") as pf:
pickle.dump(self._root.to_treedict(), pf)
except Exception as err:
logger.error("%s.savefile: arg `filepath`=«%s» `self._vnpkl_fpath`=«%s» error: %s" % (self.__class__.__name__, filepath, self._vnpkl_fpath, err))
return False
return True
@classmethod
def openfile(cls, filepath):
"""Class method that opens (load) a vntree pickle file.
:param filepath: the file path for the pickle file.
:type filepath: str
:returns: root node of tree or `False` if failure.
:rtype: Node or bool
"""
if not os.path.isfile(filepath):
logger.error("%s.openfile: arg `filepath`=«%s» not valid." % (cls.__name__, filepath))
return False
try:
with open(filepath, "rb") as pf:
pkldata = pickle.load(pf)
rootnode = cls(treedict=pkldata)
rootnode._vnpkl_fpath = os.path.abspath(filepath)
except Exception as err:
logger.error("%s.openfile: data in file «%s» not valid: %s" % (cls.__name__, filepath, err))
return False
return rootnode
@classmethod
def setup_yaml(cls):
def yamlnode_constructor(loader, yamlnode) :
fields = loader.construct_mapping(yamlnode, deep=True)
return cls(**fields)
yaml.SafeLoader.add_constructor('!'+cls.__name__, yamlnode_constructor)
# def yamlnode_representer(dumper, data):
# rep = '!Node'
# return dumper.represent_scalar('!Node', '%sd%s' % data)
# yaml.add_representer(cls, yamlnode_representer)
@classmethod
def yaml2tree(cls, yamltree):
"""Class method that creates a tree from YAML.
| # Example yamltree data:
| - !Node &root
| name: "root node"
| parent: null
| data:
| testpara: 111
| - !Node &child1
| name: "child node"
| parent: *root
| - !Node &gc1
| name: "grand-child node"
| parent: *child1
:param yamltree: a string of YAML describing the nodes in the
tree, or the path to a file containing the data.
:type yamltree: str
:returns: the root node of the tree.
:rtype: Node
"""
if not cls.YAML_setup:
cls.setup_yaml()
cls.YAML_setup = True
if os.path.isfile(yamltree):
with open(yamltree) as fh:
yaml_data = fh.read()
else:
yaml_data = yamltree
list_of_nodes = yaml.safe_load(yaml_data)
yamltree_root = list_of_nodes[0]
return yamltree_root
def tree2yaml(self):
# if not self.__class__.YAML_setup:
# self.__class__.setup_yaml()
# self.__class__.YAML_setup = True
def make_anchor(node):
# if node._root is node:
# anchor = "root"
# else:
anchor = "coord"
for _c in node._coord:
anchor += "-" + str(_c)
return anchor
yltree = "# dummy \n"
for _n in self:
_ncopy = _n.copy()
delattr(_ncopy, "childs")
delattr(_ncopy, "parent")
# if _n.parent:
# _ncopy.parent = "*" + make_anchor(_n.parent)
_yl = yaml.dump(_ncopy, default_flow_style=False)
_n_yl = '!'+_n.__class__.__name__ + " &" + make_anchor(_n)
if _n.parent:
_n_yl += "\nparent: " + "*" + make_anchor(_n.parent)
# else:
# _n_yl += "\n"
_n_yl += _yl[_yl.index("\n"):]
yltree += _n_yl
yltree = textwrap.indent(yltree, " ")
yltree = yltree.strip()
yltree = yltree.replace("\n !", "\n- !")
return yltree
|
qwilka/vn-tree
|
vntree/node.py
|
Node.find_one_node
|
python
|
def find_one_node(self, *keys, value, decend=True):
if decend:
traversal = self
else:
traversal = self._ancestors
for _node in traversal:
_val = _node.get_data(*keys)
if _val == value:
return _node
return None
|
Find a node on the branch of the instance with a
`keys=data` item in the `data` dict.
Nested values are accessed by specifying the keys in sequence.
e.g. `node.get_data("country", "city")` would access
`node.data["country"]["city"]`
:param keys: the `data` dict key(s) referencing the required value.
:type keys: str
:param value: the value corresponding to `keys`. Note that
`value` is a keyword-only argument.
:param decend: `decend=True` traverse down the branch sub-tree
starting from `self`. `decend=False` traverse up the
branch from `self` towards root.
:type decend: bool
:returns: the first node found with `keys=data` in the `data` dict.
:rtype: Node or None
|
train
|
https://github.com/qwilka/vn-tree/blob/f08106e9c7232d8748d78d1d39b019699a7407dd/vntree/node.py#L405-L432
| null |
class Node:
"""Class for creating vntree nodes.
:param name: node name
:type name: str or None
:param parent: The parent node of this node.
:type parent: Node or None
:param data: Dictionary containing node data.
:type data: dict or None
:param treedict: Dictionary specifying a complete tree.
:type treedict: dict or None
"""
YAML_setup = False
name = NodeAttr()
_vnpkl_fpath = TreeAttr("_vntree_meta")
def __init__(self, name=None, parent=None, data=None,
treedict=None, vnpkl_fpath=None):
if data and isinstance(data, dict):
#self.data = collections.defaultdict(dict, copy.deepcopy(data))
self.data = copy.deepcopy(data)
else:
self.data = {}
if name:
self.name = str(name)
elif not getattr(self, "name", None) and name is None:
self.name = ""
self.childs = []
if parent and issubclass(parent.__class__, Node):
parent.add_child(self)
elif parent is None:
self.parent = None
else:
raise TypeError("{}.__init__: instance «{}» argument «parent» type not valid: {}".format(self.__class__.__name__, name, type(parent)))
if callable(name):
self.name = str(name(self))
if treedict and isinstance(treedict, dict):
self.from_treedict(treedict)
if vnpkl_fpath and isinstance(vnpkl_fpath, str):
self._vnpkl_fpath = vnpkl_fpath
def __str__(self):
return "{} coord={} «{}»".format(self.__class__.__name__, self._coord, self.name)
def __iter__(self):
yield self
for node in itertools.chain(*map(iter, self.childs)):
yield node
def __reversed__(self):
for node in itertools.chain(*map(reversed, self.childs)):
yield node
yield self
def add_child(self, node):
"""Add a child node to the current node instance.
:param node: the child node instance.
:type node: Node
:returns: The new child node instance.
:rtype: Node
"""
if not issubclass(node.__class__, Node):
raise TypeError("{}.add_child: arg «node»=«{}», type {} not valid.".format(self.__class__.__name__, node, type(node)))
self.childs.append(node)
node.parent = self
return node
def copy(self):
"""Return a deep copy of the sub-tree rooted at this node instance.
:returns: Copy of the sub-tree rooted at this node instance.
:rtype: Node
"""
return copy.deepcopy(self)
def remove_child(self, idx=None, *, name=None, node=None):
"""Remove a child node from the current node instance.
:param idx: Index of child node to be removed.
:type idx: int
:param name: The first child node found with «name» will be removed.
:type name: str
:param node: Child node to be removed.
:type node: Node
:returns: The node that has been removed, or False if not successful.
:rtype: Node or False
"""
if (idx and isinstance(idx, int) and
-len(self.childs) <= idx < len(self.childs) ):
return self.childs.pop(idx)
if name and isinstance(name, str):
found_node = None
for _n in self.childs:
if _n.name == name:
found_node = _n
break
if found_node:
self.childs.remove(found_node)
return found_node
if node and node in self.childs:
self.childs.remove(node)
return node
return False
@property
def _path(self):
"""Attribute indicating the absolute node path for this node.
Note that the absolute node path starts with a forward slash
followed by the root node's name: e.g:
`/root.name/child.name/grandchild.name`
Warning: it should be noted that use of _path assumes
that sibling nodes have unique names. If unique node paths
cannot be assured, use node attribute «_coord» instead.
:returns: The absolute node path for this node.
:rtype: str
"""
_path = pathlib.PurePosixPath(self.name)
_node = self
while _node.parent:
_path = _node.parent.name / _path
_node = _node.parent
_path = pathlib.posixpath.sep / _path
return _path.as_posix()
@property
def _coord(self):
"""Attribute indicating the tree coordinates for this node.
The tree coordinates of a node are expressed as a tuple of the
indices of the node and its ancestors, for example:
A grandchild node with node path
`/root.name/root.childs[2].name/root.childs[2].childs[0].name`
would have coordinates `(2,0)`.
The root node _coord is an empty tuple: `()`
:returns: the tree coordinates for this node.
:rtype: tuple
"""
_coord = []
_node = self
while _node.parent:
_idx = _node.parent.childs.index(_node)
_coord.insert(0, _idx)
_node = _node.parent
return tuple(_coord)
@property
def _level(self):
"""Attribute indicating the tree `level` for this node instance.
Note that the root node is defined as level 1.
:returns: the node `level`.
:rtype: int
"""
return len(self._coord) + 1
def get_data(self, *keys):
"""Get a value from the instance `data` dict.
Nested values are accessed by specifying the keys in sequence.
e.g. `node.get_data("country", "city")` would access
`node.data["country"]["city"]`
:param keys: the `data` dict keys referencing the required value.
:type keys: str
:returns: the value accessed by `keys` in `data`.
"""
if not keys:
_val = self.data
_datadict = self.data
for _key in keys:
_val = _datadict.get(_key, None)
if isinstance(_val, dict):
_datadict = _val
else:
break
if isinstance(_val, dict):
_val = copy.deepcopy(_val)
return _val
def set_data(self, *keys, value):
"""Set a value in the instance `data` dict.
:param keys: the `data` dict keys referencing the value in the `data` dict.
:type keys: str
:param value: the value to be set in the `data` dict. Note that
`value` is a keyword-only argument.
:returns: `True` if successful.
"""
_datadict = self.data
for ii, _key in enumerate(keys):
if ii==len(keys)-1:
_datadict[_key] = value
else:
if _key not in _datadict:
_datadict[_key] = {}
_datadict = _datadict[_key]
return True
@property
def _root(self):
"""Attribute referencing the root node of the tree.
:returns: the root node of the tree containing this instance.
:rtype: Node
"""
_n = self
while _n.parent:
_n = _n.parent
return _n
@property
def _ancestors(self):
"""Attribute referencing the tree ancestors of the node instance.
:returns: list of node ancestors in sequence, first item is
the current node instance (`self`), the last item is root.
:rtype: list of Node references
"""
# return list of ancestor nodes starting with self.parent and ending with root
_ancestors=[]
_n = self
while _n.parent:
_n = _n.parent
_ancestors.append(_n)
return _ancestors
def get_child_by_name(self, childname):
"""Get a child node of the current instance by its name.
:param childname: the name of the required child node.
:type childname: str
:returns: the first child node found with name `childname`.
:rtype: Node or None
"""
_childs = [_child for _child in self.childs if _child.name==childname]
if len(_childs)>1:
logger.warning("%s.get_child_by_name: node:«%s» has more than 1 childnode with name=«%s»." % (self.__class__.__name__, self.name, childname))
if len(_childs)==0:
_childnode = None
else:
_childnode = _childs[0]
return _childnode
def get_node_by_path(self, path):
"""Get a node from a node path.
Warning: use of this method assumes that sibling nodes have unique names,
if this is not assured the `get_node_by_coord` method can be used instead.
| Example with absolute node path:
| `node.get_node_by_path('/root.name/child.name/gchild.name')`
| Example with relative node path:
| `node.get_node_by_path('child.name/gchild.name')`
:param path: the absolute node path, or the node path relative
to the current node instance.
:type path: str
:returns: the node corresponding to `path`.
:rtype: Node or None
"""
if path==".":
return self
elif path.lstrip().startswith((".", "./")) or not isinstance(path, str):
logger.warning("%s.get_node_by_path: arg «path»=«%s», not correctly specified." % (self.__class__.__name__, path))
return None
_pathlist = list(filter(None, path.split("/")) ) # remove blank strings
if path.startswith("/"):
_node = self._root
_pathlist.pop(0) # remove rootnode name
else:
_node = self
for _nodename in _pathlist:
_node = _node.get_child_by_name(_nodename)
if _node is None:
logger.warning("%s.get_node_by_path: node«%s», arg `path`=«%s», cannot find node." % (self.__class__.__name__, self.name, path))
return None
return _node
def get_node_by_coord(self, coord, relative=False):
"""Get a node from a node coord.
:param coord: the coordinates of the required node.
:type coord: tuple or list
:param relative: `True` if coord is relative to the node instance,
`False` for absolute coordinates.
:type relative: bool
:returns: the node corresponding to `coord`.
:rtype: Node or None
"""
if not isinstance(coord, (list, tuple)) or False in list(map(lambda i: type(i)==int, coord)):
logger.warning("%s.get_node_by_coord: node«%s», arg «coord»=«%s», «coord» must be list or tuple of integers." % (self.__class__.__name__, self.name, coord))
return None
if relative:
_node = self
else:
_node = self._root # _node = self.get_rootnode()
for idx in coord:
_node = _node.childs[idx]
if _node is None:
logger.warning("%s.get_node_by_coord: node«%s», arg «coord»=«%s» not valid." % (self.__class__.__name__, self.name, coord))
return None
return _node
def to_texttree(self, indent=3, func=True, symbol='ascii'):
"""Method returning a text representation of the (sub-)tree
rooted at the current node instance (`self`).
:param indent: the indentation width for each tree level.
:type indent: int
:param func: function returning a string representation for
each node. e.g. `func=lambda n: str(n._coord)`
would show the node coordinates.
`func=True` node.name displayed for each node.
`func=False` no node representation, just
the tree structure is displayed.
:type func: function or bool
:param symbol: tuple of tree symbol characters.
`None` or 'ascii' gives a preformed ascii tree, equivalent to tuple :code:`(|, +, |, |, |, -, .)`.
'box' preformed with box-drawing characters, equivalent to tuple :code:`(┬, └, ┬, ├, └, ─, ⋅)`.
'unicode' preformed with unicode characters.
:type symbol: tuple or str or None
:returns: a string representation of the tree.
:rtype: str
"""
if indent<2:
indent=2
if func is True: # default func prints node.name
func = lambda n: "{}".format(n.name)
if isinstance(symbol, (list, tuple)):
s_root, s_branch, s_spar, s_fnode = symbol
elif symbol=="unicode":
# ┬ └ ┬ ├ └ ─ ⋅
s_root, s_branch, s_fnode, s_mnode, s_lnode, s_spar, s_level = (
"\u252c", "\u2514", "\u252c", "\u251c", "\u2514", "\u2500", "\u22c5")
elif symbol=="box": # https://en.wikipedia.org/wiki/Box-drawing_character
# ┬ └ ┬ ├ └ ─ ⋅
s_root, s_branch, s_fnode, s_mnode, s_lnode, s_spar, s_level = (
"\u252c", "\u2514", "\u252c", "\u251c", "\u2514", "\u2500", "\u22c5")
else:
s_root, s_branch, s_fnode, s_mnode, s_lnode, s_spar, s_level = (
"|", "+", "|", "|", "|", "-", ".")
_text = ""
#local_root_level = len(self.ancestors)
local_root_level = self._level
for _n in self:
#level = len(node.ancestors) - local_root_level
level = _n._level - local_root_level
if level==0:
_text += s_root
elif _n.parent.childs[0] == _n and len(_n.parent.childs)>1: # first child
#s_spar="f"
_text += ( (s_level + " "*(indent-1))*(level-1)
+ s_branch
+ s_spar*(indent-1)
+ s_fnode)
elif _n.parent.childs[-1] == _n and len(_n.childs)==0: # last child, no children
#s_spar="l"
_text += ( (s_level + " "*(indent-1))*(level)
+ s_lnode )
elif _n.parent.childs[-1] == _n: # last child, has children
#s_spar="l"
_text += ( (s_level + " "*(indent-1))*(level)
+ s_mnode )
#+ s_spar*(indent-1) )
# elif level>0:
# _text += (s_level + " "*(indent-1))*(level-1) + s_branch + s_spar*(indent-1)
else:
#_text += s_fnode
#s_spar="m"
_text += ( (s_level + " "*(indent-1))*(level)
+ s_mnode )
#+ s_spar*(indent-1) )
if func and callable(func):
_text += func(_n)
_text += "\n"
return _text
def from_treedict(self, treedict):
if "data" in treedict:
#self.data = collections.defaultdict(dict, treedict["data"])
self.data = copy.deepcopy(treedict["data"])
for key, val in treedict.items():
if key in ["parent", "childs", "data"]:
continue
setattr(self, key, val)
if "childs" in treedict.keys():
for _childdict in treedict["childs"]:
#self.childs.append( self.__class__(parent=self, treedict=_childdict) )
self.__class__(parent=self, treedict=_childdict)
def to_treedict(self, recursive=True, vntree_meta=True):
# NOTE: replace vars(self) with self.__dict__ ( and self.__class__.__dict__ ?)
_dct = {k:v for k, v in vars(self).items() if k not in ["parent", "childs"]}
if not vntree_meta and "_vntree_meta" in _dct["data"]:
_dct["data"].pop("_vntree_meta")
if recursive and self.childs:
_dct["childs"] = []
for _child in self.childs:
_dct["childs"].append( _child.to_treedict(recursive=recursive) )
return _dct
def to_json(self, filepath, default=str):
pass
# def from_json(self, filepath):
# err = ""
# _treedict = None
# if isinstance(filepath, str) and os.path.isfile(filepath):
# try:
# with open(filepath, 'r') as _fh:
# _treedict = json.load(_fh)
# except Exception as err:
# pass
# if not _treedict:
# logger.warning("%s.from_json: node«%s», cannot open «filepath»=«%s», %s." % (self.__class__.__name__, self.name, filepath, err))
# return False
# else:
# self.from_treedict(treedict=_treedict)
# return True
def tree_compare(self, othertree, vntree_meta=False):
"""Compare the (sub-)tree rooted at `self` with another tree.
`tree_compare` converts the trees being compared into JSON string
representations, and uses `difflib.SequenceMatcher().ratio()` to
calculate a measure of the similarity of the strings.
:param othertree: the other tree for comparison.
:type othertree: Node
:param vntree_meta: include private vntree metadata in comparison.
:type vntree_meta: bool
:returns: similarity of the trees as a number between 0 and 1.
:rtype: float
"""
return SequenceMatcher(None,
json.dumps(self.to_treedict(vntree_meta=vntree_meta), default=str),
json.dumps(othertree.to_treedict(vntree_meta=vntree_meta), default=str)
).ratio()
def savefile(self, filepath=None):
"""Save (dump) the tree in a pickle file.
Note that this method saves the complete tree even when invoked on
a non-root node.
It is recommended to use the extension `.vnpkl` for this type of file.
:param filepath: the file path for the pickle file.
If `filepath=None` use `self._vnpkl_fpath` attribute, if set.
:type filepath: str or None
:returns: `True` if successful.
:rtype: bool
"""
if filepath:
self._vnpkl_fpath = os.path.abspath(filepath)
# if not _pfpath:
# logger.error("%s.save: «%s» file path «%s» not valid." % (self.__class__.__name__, self.name, _pfpath))
# return False
try:
with open(self._vnpkl_fpath, "wb") as pf:
pickle.dump(self._root.to_treedict(), pf)
except Exception as err:
logger.error("%s.savefile: arg `filepath`=«%s» `self._vnpkl_fpath`=«%s» error: %s" % (self.__class__.__name__, filepath, self._vnpkl_fpath, err))
return False
return True
@classmethod
def openfile(cls, filepath):
"""Class method that opens (load) a vntree pickle file.
:param filepath: the file path for the pickle file.
:type filepath: str
:returns: root node of tree or `False` if failure.
:rtype: Node or bool
"""
if not os.path.isfile(filepath):
logger.error("%s.openfile: arg `filepath`=«%s» not valid." % (cls.__name__, filepath))
return False
try:
with open(filepath, "rb") as pf:
pkldata = pickle.load(pf)
rootnode = cls(treedict=pkldata)
rootnode._vnpkl_fpath = os.path.abspath(filepath)
except Exception as err:
logger.error("%s.openfile: data in file «%s» not valid: %s" % (cls.__name__, filepath, err))
return False
return rootnode
@classmethod
def setup_yaml(cls):
def yamlnode_constructor(loader, yamlnode) :
fields = loader.construct_mapping(yamlnode, deep=True)
return cls(**fields)
yaml.SafeLoader.add_constructor('!'+cls.__name__, yamlnode_constructor)
# def yamlnode_representer(dumper, data):
# rep = '!Node'
# return dumper.represent_scalar('!Node', '%sd%s' % data)
# yaml.add_representer(cls, yamlnode_representer)
@classmethod
def yaml2tree(cls, yamltree):
"""Class method that creates a tree from YAML.
| # Example yamltree data:
| - !Node &root
| name: "root node"
| parent: null
| data:
| testpara: 111
| - !Node &child1
| name: "child node"
| parent: *root
| - !Node &gc1
| name: "grand-child node"
| parent: *child1
:param yamltree: a string of YAML describing the nodes in the
tree, or the path to a file containing the data.
:type yamltree: str
:returns: the root node of the tree.
:rtype: Node
"""
if not cls.YAML_setup:
cls.setup_yaml()
cls.YAML_setup = True
if os.path.isfile(yamltree):
with open(yamltree) as fh:
yaml_data = fh.read()
else:
yaml_data = yamltree
list_of_nodes = yaml.safe_load(yaml_data)
yamltree_root = list_of_nodes[0]
return yamltree_root
def tree2yaml(self):
# if not self.__class__.YAML_setup:
# self.__class__.setup_yaml()
# self.__class__.YAML_setup = True
def make_anchor(node):
# if node._root is node:
# anchor = "root"
# else:
anchor = "coord"
for _c in node._coord:
anchor += "-" + str(_c)
return anchor
yltree = "# dummy \n"
for _n in self:
_ncopy = _n.copy()
delattr(_ncopy, "childs")
delattr(_ncopy, "parent")
# if _n.parent:
# _ncopy.parent = "*" + make_anchor(_n.parent)
_yl = yaml.dump(_ncopy, default_flow_style=False)
_n_yl = '!'+_n.__class__.__name__ + " &" + make_anchor(_n)
if _n.parent:
_n_yl += "\nparent: " + "*" + make_anchor(_n.parent)
# else:
# _n_yl += "\n"
_n_yl += _yl[_yl.index("\n"):]
yltree += _n_yl
yltree = textwrap.indent(yltree, " ")
yltree = yltree.strip()
yltree = yltree.replace("\n !", "\n- !")
return yltree
|
qwilka/vn-tree
|
vntree/node.py
|
Node.to_texttree
|
python
|
def to_texttree(self, indent=3, func=True, symbol='ascii'):
if indent<2:
indent=2
if func is True: # default func prints node.name
func = lambda n: "{}".format(n.name)
if isinstance(symbol, (list, tuple)):
s_root, s_branch, s_spar, s_fnode = symbol
elif symbol=="unicode":
# ┬ └ ┬ ├ └ ─ ⋅
s_root, s_branch, s_fnode, s_mnode, s_lnode, s_spar, s_level = (
"\u252c", "\u2514", "\u252c", "\u251c", "\u2514", "\u2500", "\u22c5")
elif symbol=="box": # https://en.wikipedia.org/wiki/Box-drawing_character
# ┬ └ ┬ ├ └ ─ ⋅
s_root, s_branch, s_fnode, s_mnode, s_lnode, s_spar, s_level = (
"\u252c", "\u2514", "\u252c", "\u251c", "\u2514", "\u2500", "\u22c5")
else:
s_root, s_branch, s_fnode, s_mnode, s_lnode, s_spar, s_level = (
"|", "+", "|", "|", "|", "-", ".")
_text = ""
#local_root_level = len(self.ancestors)
local_root_level = self._level
for _n in self:
#level = len(node.ancestors) - local_root_level
level = _n._level - local_root_level
if level==0:
_text += s_root
elif _n.parent.childs[0] == _n and len(_n.parent.childs)>1: # first child
#s_spar="f"
_text += ( (s_level + " "*(indent-1))*(level-1)
+ s_branch
+ s_spar*(indent-1)
+ s_fnode)
elif _n.parent.childs[-1] == _n and len(_n.childs)==0: # last child, no children
#s_spar="l"
_text += ( (s_level + " "*(indent-1))*(level)
+ s_lnode )
elif _n.parent.childs[-1] == _n: # last child, has children
#s_spar="l"
_text += ( (s_level + " "*(indent-1))*(level)
+ s_mnode )
#+ s_spar*(indent-1) )
# elif level>0:
# _text += (s_level + " "*(indent-1))*(level-1) + s_branch + s_spar*(indent-1)
else:
#_text += s_fnode
#s_spar="m"
_text += ( (s_level + " "*(indent-1))*(level)
+ s_mnode )
#+ s_spar*(indent-1) )
if func and callable(func):
_text += func(_n)
_text += "\n"
return _text
|
Method returning a text representation of the (sub-)tree
rooted at the current node instance (`self`).
:param indent: the indentation width for each tree level.
:type indent: int
:param func: function returning a string representation for
each node. e.g. `func=lambda n: str(n._coord)`
would show the node coordinates.
`func=True` node.name displayed for each node.
`func=False` no node representation, just
the tree structure is displayed.
:type func: function or bool
:param symbol: tuple of tree symbol characters.
`None` or 'ascii' gives a preformed ascii tree, equivalent to tuple :code:`(|, +, |, |, |, -, .)`.
'box' preformed with box-drawing characters, equivalent to tuple :code:`(┬, └, ┬, ├, └, ─, ⋅)`.
'unicode' preformed with unicode characters.
:type symbol: tuple or str or None
:returns: a string representation of the tree.
:rtype: str
|
train
|
https://github.com/qwilka/vn-tree/blob/f08106e9c7232d8748d78d1d39b019699a7407dd/vntree/node.py#L435-L507
|
[
"func = lambda n: \"{}\".format(n.name)\n"
] |
class Node:
"""Class for creating vntree nodes.
:param name: node name
:type name: str or None
:param parent: The parent node of this node.
:type parent: Node or None
:param data: Dictionary containing node data.
:type data: dict or None
:param treedict: Dictionary specifying a complete tree.
:type treedict: dict or None
"""
YAML_setup = False
name = NodeAttr()
_vnpkl_fpath = TreeAttr("_vntree_meta")
def __init__(self, name=None, parent=None, data=None,
treedict=None, vnpkl_fpath=None):
if data and isinstance(data, dict):
#self.data = collections.defaultdict(dict, copy.deepcopy(data))
self.data = copy.deepcopy(data)
else:
self.data = {}
if name:
self.name = str(name)
elif not getattr(self, "name", None) and name is None:
self.name = ""
self.childs = []
if parent and issubclass(parent.__class__, Node):
parent.add_child(self)
elif parent is None:
self.parent = None
else:
raise TypeError("{}.__init__: instance «{}» argument «parent» type not valid: {}".format(self.__class__.__name__, name, type(parent)))
if callable(name):
self.name = str(name(self))
if treedict and isinstance(treedict, dict):
self.from_treedict(treedict)
if vnpkl_fpath and isinstance(vnpkl_fpath, str):
self._vnpkl_fpath = vnpkl_fpath
def __str__(self):
return "{} coord={} «{}»".format(self.__class__.__name__, self._coord, self.name)
def __iter__(self):
yield self
for node in itertools.chain(*map(iter, self.childs)):
yield node
def __reversed__(self):
for node in itertools.chain(*map(reversed, self.childs)):
yield node
yield self
def add_child(self, node):
"""Add a child node to the current node instance.
:param node: the child node instance.
:type node: Node
:returns: The new child node instance.
:rtype: Node
"""
if not issubclass(node.__class__, Node):
raise TypeError("{}.add_child: arg «node»=«{}», type {} not valid.".format(self.__class__.__name__, node, type(node)))
self.childs.append(node)
node.parent = self
return node
def copy(self):
"""Return a deep copy of the sub-tree rooted at this node instance.
:returns: Copy of the sub-tree rooted at this node instance.
:rtype: Node
"""
return copy.deepcopy(self)
def remove_child(self, idx=None, *, name=None, node=None):
"""Remove a child node from the current node instance.
:param idx: Index of child node to be removed.
:type idx: int
:param name: The first child node found with «name» will be removed.
:type name: str
:param node: Child node to be removed.
:type node: Node
:returns: The node that has been removed, or False if not successful.
:rtype: Node or False
"""
if (idx and isinstance(idx, int) and
-len(self.childs) <= idx < len(self.childs) ):
return self.childs.pop(idx)
if name and isinstance(name, str):
found_node = None
for _n in self.childs:
if _n.name == name:
found_node = _n
break
if found_node:
self.childs.remove(found_node)
return found_node
if node and node in self.childs:
self.childs.remove(node)
return node
return False
@property
def _path(self):
"""Attribute indicating the absolute node path for this node.
Note that the absolute node path starts with a forward slash
followed by the root node's name: e.g:
`/root.name/child.name/grandchild.name`
Warning: it should be noted that use of _path assumes
that sibling nodes have unique names. If unique node paths
cannot be assured, use node attribute «_coord» instead.
:returns: The absolute node path for this node.
:rtype: str
"""
_path = pathlib.PurePosixPath(self.name)
_node = self
while _node.parent:
_path = _node.parent.name / _path
_node = _node.parent
_path = pathlib.posixpath.sep / _path
return _path.as_posix()
@property
def _coord(self):
"""Attribute indicating the tree coordinates for this node.
The tree coordinates of a node are expressed as a tuple of the
indices of the node and its ancestors, for example:
A grandchild node with node path
`/root.name/root.childs[2].name/root.childs[2].childs[0].name`
would have coordinates `(2,0)`.
The root node _coord is an empty tuple: `()`
:returns: the tree coordinates for this node.
:rtype: tuple
"""
_coord = []
_node = self
while _node.parent:
_idx = _node.parent.childs.index(_node)
_coord.insert(0, _idx)
_node = _node.parent
return tuple(_coord)
@property
def _level(self):
"""Attribute indicating the tree `level` for this node instance.
Note that the root node is defined as level 1.
:returns: the node `level`.
:rtype: int
"""
return len(self._coord) + 1
def get_data(self, *keys):
"""Get a value from the instance `data` dict.
Nested values are accessed by specifying the keys in sequence.
e.g. `node.get_data("country", "city")` would access
`node.data["country"]["city"]`
:param keys: the `data` dict keys referencing the required value.
:type keys: str
:returns: the value accessed by `keys` in `data`.
"""
if not keys:
_val = self.data
_datadict = self.data
for _key in keys:
_val = _datadict.get(_key, None)
if isinstance(_val, dict):
_datadict = _val
else:
break
if isinstance(_val, dict):
_val = copy.deepcopy(_val)
return _val
def set_data(self, *keys, value):
"""Set a value in the instance `data` dict.
:param keys: the `data` dict keys referencing the value in the `data` dict.
:type keys: str
:param value: the value to be set in the `data` dict. Note that
`value` is a keyword-only argument.
:returns: `True` if successful.
"""
_datadict = self.data
for ii, _key in enumerate(keys):
if ii==len(keys)-1:
_datadict[_key] = value
else:
if _key not in _datadict:
_datadict[_key] = {}
_datadict = _datadict[_key]
return True
@property
def _root(self):
"""Attribute referencing the root node of the tree.
:returns: the root node of the tree containing this instance.
:rtype: Node
"""
_n = self
while _n.parent:
_n = _n.parent
return _n
@property
def _ancestors(self):
"""Attribute referencing the tree ancestors of the node instance.
:returns: list of node ancestors in sequence, first item is
the current node instance (`self`), the last item is root.
:rtype: list of Node references
"""
# return list of ancestor nodes starting with self.parent and ending with root
_ancestors=[]
_n = self
while _n.parent:
_n = _n.parent
_ancestors.append(_n)
return _ancestors
def get_child_by_name(self, childname):
"""Get a child node of the current instance by its name.
:param childname: the name of the required child node.
:type childname: str
:returns: the first child node found with name `childname`.
:rtype: Node or None
"""
_childs = [_child for _child in self.childs if _child.name==childname]
if len(_childs)>1:
logger.warning("%s.get_child_by_name: node:«%s» has more than 1 childnode with name=«%s»." % (self.__class__.__name__, self.name, childname))
if len(_childs)==0:
_childnode = None
else:
_childnode = _childs[0]
return _childnode
def get_node_by_path(self, path):
"""Get a node from a node path.
Warning: use of this method assumes that sibling nodes have unique names,
if this is not assured the `get_node_by_coord` method can be used instead.
| Example with absolute node path:
| `node.get_node_by_path('/root.name/child.name/gchild.name')`
| Example with relative node path:
| `node.get_node_by_path('child.name/gchild.name')`
:param path: the absolute node path, or the node path relative
to the current node instance.
:type path: str
:returns: the node corresponding to `path`.
:rtype: Node or None
"""
if path==".":
return self
elif path.lstrip().startswith((".", "./")) or not isinstance(path, str):
logger.warning("%s.get_node_by_path: arg «path»=«%s», not correctly specified." % (self.__class__.__name__, path))
return None
_pathlist = list(filter(None, path.split("/")) ) # remove blank strings
if path.startswith("/"):
_node = self._root
_pathlist.pop(0) # remove rootnode name
else:
_node = self
for _nodename in _pathlist:
_node = _node.get_child_by_name(_nodename)
if _node is None:
logger.warning("%s.get_node_by_path: node«%s», arg `path`=«%s», cannot find node." % (self.__class__.__name__, self.name, path))
return None
return _node
def get_node_by_coord(self, coord, relative=False):
"""Get a node from a node coord.
:param coord: the coordinates of the required node.
:type coord: tuple or list
:param relative: `True` if coord is relative to the node instance,
`False` for absolute coordinates.
:type relative: bool
:returns: the node corresponding to `coord`.
:rtype: Node or None
"""
if not isinstance(coord, (list, tuple)) or False in list(map(lambda i: type(i)==int, coord)):
logger.warning("%s.get_node_by_coord: node«%s», arg «coord»=«%s», «coord» must be list or tuple of integers." % (self.__class__.__name__, self.name, coord))
return None
if relative:
_node = self
else:
_node = self._root # _node = self.get_rootnode()
for idx in coord:
_node = _node.childs[idx]
if _node is None:
logger.warning("%s.get_node_by_coord: node«%s», arg «coord»=«%s» not valid." % (self.__class__.__name__, self.name, coord))
return None
return _node
def find_one_node(self, *keys, value, decend=True):
"""Find a node on the branch of the instance with a
`keys=data` item in the `data` dict.
Nested values are accessed by specifying the keys in sequence.
e.g. `node.get_data("country", "city")` would access
`node.data["country"]["city"]`
:param keys: the `data` dict key(s) referencing the required value.
:type keys: str
:param value: the value corresponding to `keys`. Note that
`value` is a keyword-only argument.
:param decend: `decend=True` traverse down the branch sub-tree
starting from `self`. `decend=False` traverse up the
branch from `self` towards root.
:type decend: bool
:returns: the first node found with `keys=data` in the `data` dict.
:rtype: Node or None
"""
if decend:
traversal = self
else:
traversal = self._ancestors
for _node in traversal:
_val = _node.get_data(*keys)
if _val == value:
return _node
return None
def from_treedict(self, treedict):
if "data" in treedict:
#self.data = collections.defaultdict(dict, treedict["data"])
self.data = copy.deepcopy(treedict["data"])
for key, val in treedict.items():
if key in ["parent", "childs", "data"]:
continue
setattr(self, key, val)
if "childs" in treedict.keys():
for _childdict in treedict["childs"]:
#self.childs.append( self.__class__(parent=self, treedict=_childdict) )
self.__class__(parent=self, treedict=_childdict)
def to_treedict(self, recursive=True, vntree_meta=True):
# NOTE: replace vars(self) with self.__dict__ ( and self.__class__.__dict__ ?)
_dct = {k:v for k, v in vars(self).items() if k not in ["parent", "childs"]}
if not vntree_meta and "_vntree_meta" in _dct["data"]:
_dct["data"].pop("_vntree_meta")
if recursive and self.childs:
_dct["childs"] = []
for _child in self.childs:
_dct["childs"].append( _child.to_treedict(recursive=recursive) )
return _dct
def to_json(self, filepath, default=str):
pass
# def from_json(self, filepath):
# err = ""
# _treedict = None
# if isinstance(filepath, str) and os.path.isfile(filepath):
# try:
# with open(filepath, 'r') as _fh:
# _treedict = json.load(_fh)
# except Exception as err:
# pass
# if not _treedict:
# logger.warning("%s.from_json: node«%s», cannot open «filepath»=«%s», %s." % (self.__class__.__name__, self.name, filepath, err))
# return False
# else:
# self.from_treedict(treedict=_treedict)
# return True
def tree_compare(self, othertree, vntree_meta=False):
"""Compare the (sub-)tree rooted at `self` with another tree.
`tree_compare` converts the trees being compared into JSON string
representations, and uses `difflib.SequenceMatcher().ratio()` to
calculate a measure of the similarity of the strings.
:param othertree: the other tree for comparison.
:type othertree: Node
:param vntree_meta: include private vntree metadata in comparison.
:type vntree_meta: bool
:returns: similarity of the trees as a number between 0 and 1.
:rtype: float
"""
return SequenceMatcher(None,
json.dumps(self.to_treedict(vntree_meta=vntree_meta), default=str),
json.dumps(othertree.to_treedict(vntree_meta=vntree_meta), default=str)
).ratio()
def savefile(self, filepath=None):
"""Save (dump) the tree in a pickle file.
Note that this method saves the complete tree even when invoked on
a non-root node.
It is recommended to use the extension `.vnpkl` for this type of file.
:param filepath: the file path for the pickle file.
If `filepath=None` use `self._vnpkl_fpath` attribute, if set.
:type filepath: str or None
:returns: `True` if successful.
:rtype: bool
"""
if filepath:
self._vnpkl_fpath = os.path.abspath(filepath)
# if not _pfpath:
# logger.error("%s.save: «%s» file path «%s» not valid." % (self.__class__.__name__, self.name, _pfpath))
# return False
try:
with open(self._vnpkl_fpath, "wb") as pf:
pickle.dump(self._root.to_treedict(), pf)
except Exception as err:
logger.error("%s.savefile: arg `filepath`=«%s» `self._vnpkl_fpath`=«%s» error: %s" % (self.__class__.__name__, filepath, self._vnpkl_fpath, err))
return False
return True
@classmethod
def openfile(cls, filepath):
"""Class method that opens (load) a vntree pickle file.
:param filepath: the file path for the pickle file.
:type filepath: str
:returns: root node of tree or `False` if failure.
:rtype: Node or bool
"""
if not os.path.isfile(filepath):
logger.error("%s.openfile: arg `filepath`=«%s» not valid." % (cls.__name__, filepath))
return False
try:
with open(filepath, "rb") as pf:
pkldata = pickle.load(pf)
rootnode = cls(treedict=pkldata)
rootnode._vnpkl_fpath = os.path.abspath(filepath)
except Exception as err:
logger.error("%s.openfile: data in file «%s» not valid: %s" % (cls.__name__, filepath, err))
return False
return rootnode
@classmethod
def setup_yaml(cls):
def yamlnode_constructor(loader, yamlnode) :
fields = loader.construct_mapping(yamlnode, deep=True)
return cls(**fields)
yaml.SafeLoader.add_constructor('!'+cls.__name__, yamlnode_constructor)
# def yamlnode_representer(dumper, data):
# rep = '!Node'
# return dumper.represent_scalar('!Node', '%sd%s' % data)
# yaml.add_representer(cls, yamlnode_representer)
@classmethod
def yaml2tree(cls, yamltree):
"""Class method that creates a tree from YAML.
| # Example yamltree data:
| - !Node &root
| name: "root node"
| parent: null
| data:
| testpara: 111
| - !Node &child1
| name: "child node"
| parent: *root
| - !Node &gc1
| name: "grand-child node"
| parent: *child1
:param yamltree: a string of YAML describing the nodes in the
tree, or the path to a file containing the data.
:type yamltree: str
:returns: the root node of the tree.
:rtype: Node
"""
if not cls.YAML_setup:
cls.setup_yaml()
cls.YAML_setup = True
if os.path.isfile(yamltree):
with open(yamltree) as fh:
yaml_data = fh.read()
else:
yaml_data = yamltree
list_of_nodes = yaml.safe_load(yaml_data)
yamltree_root = list_of_nodes[0]
return yamltree_root
def tree2yaml(self):
# if not self.__class__.YAML_setup:
# self.__class__.setup_yaml()
# self.__class__.YAML_setup = True
def make_anchor(node):
# if node._root is node:
# anchor = "root"
# else:
anchor = "coord"
for _c in node._coord:
anchor += "-" + str(_c)
return anchor
yltree = "# dummy \n"
for _n in self:
_ncopy = _n.copy()
delattr(_ncopy, "childs")
delattr(_ncopy, "parent")
# if _n.parent:
# _ncopy.parent = "*" + make_anchor(_n.parent)
_yl = yaml.dump(_ncopy, default_flow_style=False)
_n_yl = '!'+_n.__class__.__name__ + " &" + make_anchor(_n)
if _n.parent:
_n_yl += "\nparent: " + "*" + make_anchor(_n.parent)
# else:
# _n_yl += "\n"
_n_yl += _yl[_yl.index("\n"):]
yltree += _n_yl
yltree = textwrap.indent(yltree, " ")
yltree = yltree.strip()
yltree = yltree.replace("\n !", "\n- !")
return yltree
|
qwilka/vn-tree
|
vntree/node.py
|
Node.tree_compare
|
python
|
def tree_compare(self, othertree, vntree_meta=False):
return SequenceMatcher(None,
json.dumps(self.to_treedict(vntree_meta=vntree_meta), default=str),
json.dumps(othertree.to_treedict(vntree_meta=vntree_meta), default=str)
).ratio()
|
Compare the (sub-)tree rooted at `self` with another tree.
`tree_compare` converts the trees being compared into JSON string
representations, and uses `difflib.SequenceMatcher().ratio()` to
calculate a measure of the similarity of the strings.
:param othertree: the other tree for comparison.
:type othertree: Node
:param vntree_meta: include private vntree metadata in comparison.
:type vntree_meta: bool
:returns: similarity of the trees as a number between 0 and 1.
:rtype: float
|
train
|
https://github.com/qwilka/vn-tree/blob/f08106e9c7232d8748d78d1d39b019699a7407dd/vntree/node.py#L553-L570
|
[
"def to_treedict(self, recursive=True, vntree_meta=True):\n # NOTE: replace vars(self) with self.__dict__ ( and self.__class__.__dict__ ?)\n _dct = {k:v for k, v in vars(self).items() if k not in [\"parent\", \"childs\"]}\n if not vntree_meta and \"_vntree_meta\" in _dct[\"data\"]:\n _dct[\"data\"].pop(\"_vntree_meta\")\n if recursive and self.childs:\n _dct[\"childs\"] = []\n for _child in self.childs:\n _dct[\"childs\"].append( _child.to_treedict(recursive=recursive) )\n return _dct \n"
] |
class Node:
"""Class for creating vntree nodes.
:param name: node name
:type name: str or None
:param parent: The parent node of this node.
:type parent: Node or None
:param data: Dictionary containing node data.
:type data: dict or None
:param treedict: Dictionary specifying a complete tree.
:type treedict: dict or None
"""
YAML_setup = False
name = NodeAttr()
_vnpkl_fpath = TreeAttr("_vntree_meta")
def __init__(self, name=None, parent=None, data=None,
treedict=None, vnpkl_fpath=None):
if data and isinstance(data, dict):
#self.data = collections.defaultdict(dict, copy.deepcopy(data))
self.data = copy.deepcopy(data)
else:
self.data = {}
if name:
self.name = str(name)
elif not getattr(self, "name", None) and name is None:
self.name = ""
self.childs = []
if parent and issubclass(parent.__class__, Node):
parent.add_child(self)
elif parent is None:
self.parent = None
else:
raise TypeError("{}.__init__: instance «{}» argument «parent» type not valid: {}".format(self.__class__.__name__, name, type(parent)))
if callable(name):
self.name = str(name(self))
if treedict and isinstance(treedict, dict):
self.from_treedict(treedict)
if vnpkl_fpath and isinstance(vnpkl_fpath, str):
self._vnpkl_fpath = vnpkl_fpath
def __str__(self):
return "{} coord={} «{}»".format(self.__class__.__name__, self._coord, self.name)
def __iter__(self):
yield self
for node in itertools.chain(*map(iter, self.childs)):
yield node
def __reversed__(self):
for node in itertools.chain(*map(reversed, self.childs)):
yield node
yield self
def add_child(self, node):
"""Add a child node to the current node instance.
:param node: the child node instance.
:type node: Node
:returns: The new child node instance.
:rtype: Node
"""
if not issubclass(node.__class__, Node):
raise TypeError("{}.add_child: arg «node»=«{}», type {} not valid.".format(self.__class__.__name__, node, type(node)))
self.childs.append(node)
node.parent = self
return node
def copy(self):
"""Return a deep copy of the sub-tree rooted at this node instance.
:returns: Copy of the sub-tree rooted at this node instance.
:rtype: Node
"""
return copy.deepcopy(self)
def remove_child(self, idx=None, *, name=None, node=None):
"""Remove a child node from the current node instance.
:param idx: Index of child node to be removed.
:type idx: int
:param name: The first child node found with «name» will be removed.
:type name: str
:param node: Child node to be removed.
:type node: Node
:returns: The node that has been removed, or False if not successful.
:rtype: Node or False
"""
if (idx and isinstance(idx, int) and
-len(self.childs) <= idx < len(self.childs) ):
return self.childs.pop(idx)
if name and isinstance(name, str):
found_node = None
for _n in self.childs:
if _n.name == name:
found_node = _n
break
if found_node:
self.childs.remove(found_node)
return found_node
if node and node in self.childs:
self.childs.remove(node)
return node
return False
@property
def _path(self):
"""Attribute indicating the absolute node path for this node.
Note that the absolute node path starts with a forward slash
followed by the root node's name: e.g:
`/root.name/child.name/grandchild.name`
Warning: it should be noted that use of _path assumes
that sibling nodes have unique names. If unique node paths
cannot be assured, use node attribute «_coord» instead.
:returns: The absolute node path for this node.
:rtype: str
"""
_path = pathlib.PurePosixPath(self.name)
_node = self
while _node.parent:
_path = _node.parent.name / _path
_node = _node.parent
_path = pathlib.posixpath.sep / _path
return _path.as_posix()
@property
def _coord(self):
"""Attribute indicating the tree coordinates for this node.
The tree coordinates of a node are expressed as a tuple of the
indices of the node and its ancestors, for example:
A grandchild node with node path
`/root.name/root.childs[2].name/root.childs[2].childs[0].name`
would have coordinates `(2,0)`.
The root node _coord is an empty tuple: `()`
:returns: the tree coordinates for this node.
:rtype: tuple
"""
_coord = []
_node = self
while _node.parent:
_idx = _node.parent.childs.index(_node)
_coord.insert(0, _idx)
_node = _node.parent
return tuple(_coord)
@property
def _level(self):
"""Attribute indicating the tree `level` for this node instance.
Note that the root node is defined as level 1.
:returns: the node `level`.
:rtype: int
"""
return len(self._coord) + 1
def get_data(self, *keys):
"""Get a value from the instance `data` dict.
Nested values are accessed by specifying the keys in sequence.
e.g. `node.get_data("country", "city")` would access
`node.data["country"]["city"]`
:param keys: the `data` dict keys referencing the required value.
:type keys: str
:returns: the value accessed by `keys` in `data`.
"""
if not keys:
_val = self.data
_datadict = self.data
for _key in keys:
_val = _datadict.get(_key, None)
if isinstance(_val, dict):
_datadict = _val
else:
break
if isinstance(_val, dict):
_val = copy.deepcopy(_val)
return _val
def set_data(self, *keys, value):
"""Set a value in the instance `data` dict.
:param keys: the `data` dict keys referencing the value in the `data` dict.
:type keys: str
:param value: the value to be set in the `data` dict. Note that
`value` is a keyword-only argument.
:returns: `True` if successful.
"""
_datadict = self.data
for ii, _key in enumerate(keys):
if ii==len(keys)-1:
_datadict[_key] = value
else:
if _key not in _datadict:
_datadict[_key] = {}
_datadict = _datadict[_key]
return True
@property
def _root(self):
"""Attribute referencing the root node of the tree.
:returns: the root node of the tree containing this instance.
:rtype: Node
"""
_n = self
while _n.parent:
_n = _n.parent
return _n
@property
def _ancestors(self):
"""Attribute referencing the tree ancestors of the node instance.
:returns: list of node ancestors in sequence, first item is
the current node instance (`self`), the last item is root.
:rtype: list of Node references
"""
# return list of ancestor nodes starting with self.parent and ending with root
_ancestors=[]
_n = self
while _n.parent:
_n = _n.parent
_ancestors.append(_n)
return _ancestors
def get_child_by_name(self, childname):
"""Get a child node of the current instance by its name.
:param childname: the name of the required child node.
:type childname: str
:returns: the first child node found with name `childname`.
:rtype: Node or None
"""
_childs = [_child for _child in self.childs if _child.name==childname]
if len(_childs)>1:
logger.warning("%s.get_child_by_name: node:«%s» has more than 1 childnode with name=«%s»." % (self.__class__.__name__, self.name, childname))
if len(_childs)==0:
_childnode = None
else:
_childnode = _childs[0]
return _childnode
def get_node_by_path(self, path):
    """Get a node from a node path.

    Warning: this assumes sibling nodes have unique names; when that is
    not assured use `get_node_by_coord` instead.

    | Example with absolute node path:
    | `node.get_node_by_path('/root.name/child.name/gchild.name')`
    | Example with relative node path:
    | `node.get_node_by_path('child.name/gchild.name')`

    :param path: absolute node path, or node path relative to this
        node; "." refers to this node itself.
    :type path: str
    :returns: the node corresponding to `path`, or None when not found.
    :rtype: Node or None
    """
    # BUG FIX: the original evaluated path.lstrip() BEFORE the
    # isinstance guard, so a non-str `path` raised AttributeError
    # instead of being rejected with a warning.  Test the type first.
    if not isinstance(path, str):
        logger.warning("%s.get_node_by_path: arg «path»=«%s», not correctly specified." % (self.__class__.__name__, path))
        return None
    if path == ".":
        return self
    if path.lstrip().startswith("."):
        # relative prefixes such as "./" are not supported (only bare ".")
        logger.warning("%s.get_node_by_path: arg «path»=«%s», not correctly specified." % (self.__class__.__name__, path))
        return None
    _pathlist = list(filter(None, path.split("/")))  # drop empty segments
    if path.startswith("/"):
        _node = self._root
        # BUG FIX: guard against path "/" (empty segment list) which
        # previously raised IndexError on pop(0).
        if _pathlist:
            _pathlist.pop(0)  # remove the root node's own name
    else:
        _node = self
    for _nodename in _pathlist:
        _node = _node.get_child_by_name(_nodename)
        if _node is None:
            logger.warning("%s.get_node_by_path: node«%s», arg `path`=«%s», cannot find node." % (self.__class__.__name__, self.name, path))
            return None
    return _node
def get_node_by_coord(self, coord, relative=False):
    """Get a node from a node coord.

    :param coord: the coordinates (child indices) of the required node.
    :type coord: tuple or list
    :param relative: `True` if coord is relative to this node,
        `False` for absolute (root-based) coordinates.
    :type relative: bool
    :returns: the node corresponding to `coord`, or None when invalid.
    :rtype: Node or None
    """
    # NOTE: type(i)==int deliberately rejects bools as coordinates.
    coord_ok = (isinstance(coord, (list, tuple)) and
                all(type(idx) == int for idx in coord))
    if not coord_ok:
        logger.warning("%s.get_node_by_coord: node«%s», arg «coord»=«%s», «coord» must be list or tuple of integers." % (self.__class__.__name__, self.name, coord))
        return None
    node = self if relative else self._root
    for idx in coord:
        node = node.childs[idx]
        if node is None:
            logger.warning("%s.get_node_by_coord: node«%s», arg «coord»=«%s» not valid." % (self.__class__.__name__, self.name, coord))
            return None
    return node
def find_one_node(self, *keys, value, decend=True):
    """Find a node on this branch whose ``data`` value at *keys*
    equals *value*.

    Nested values are accessed by specifying the keys in sequence,
    e.g. ``node.find_one_node("country", "city", value="Paris")``.

    :param keys: the ``data`` dict key(s) referencing the value.
    :type keys: str
    :param value: the value to match (keyword-only argument).
    :param decend: `True` traverses the sub-tree rooted at this node;
        `False` traverses the ancestors from this node towards root.
        (Spelling "decend" kept for interface compatibility.)
    :type decend: bool
    :returns: the first matching node, or None.
    :rtype: Node or None
    """
    candidates = iter(self) if decend else self._ancestors
    for candidate in candidates:
        if candidate.get_data(*keys) == value:
            return candidate
    return None
def to_texttree(self, indent=3, func=True, symbol='ascii'):
    """Return a text representation of the (sub-)tree rooted at this node.

    :param indent: the indentation width for each tree level (minimum 2).
    :type indent: int
    :param func: function returning a string representation for each
        node, e.g. ``func=lambda n: str(n._coord)`` shows coordinates.
        ``func=True`` displays ``node.name`` for each node;
        ``func=False`` displays the bare tree structure only.
    :type func: function or bool
    :param symbol: tuple of SEVEN tree glyphs
        ``(root, branch, first, middle, last, spar, level)``, or one of
        the presets ``'ascii'`` (default), ``'box'``, ``'unicode'``.
    :type symbol: tuple or str or None
    :returns: a string representation of the tree.
    :rtype: str
    """
    if indent < 2:
        indent = 2
    if func is True:  # default func prints node.name
        func = lambda n: "{}".format(n.name)
    if isinstance(symbol, (list, tuple)):
        # BUG FIX: the original unpacked only 4 names here while the
        # rendering code below uses 7 glyphs (s_mnode, s_lnode, s_level
        # were left undefined, raising NameError for any tuple symbol).
        s_root, s_branch, s_fnode, s_mnode, s_lnode, s_spar, s_level = symbol
    elif symbol == "unicode":
        # ┬ └ ┬ ├ └ ─ ⋅
        s_root, s_branch, s_fnode, s_mnode, s_lnode, s_spar, s_level = (
            "\u252c", "\u2514", "\u252c", "\u251c", "\u2514", "\u2500", "\u22c5")
    elif symbol == "box":  # https://en.wikipedia.org/wiki/Box-drawing_character
        # ┬ └ ┬ ├ └ ─ ⋅
        s_root, s_branch, s_fnode, s_mnode, s_lnode, s_spar, s_level = (
            "\u252c", "\u2514", "\u252c", "\u251c", "\u2514", "\u2500", "\u22c5")
    else:
        s_root, s_branch, s_fnode, s_mnode, s_lnode, s_spar, s_level = (
            "|", "+", "|", "|", "|", "-", ".")
    _text = ""
    # Levels are rendered relative to this node, so a sub-tree prints
    # the same way as a whole tree.
    local_root_level = self._level
    for _n in self:
        level = _n._level - local_root_level
        if level == 0:
            _text += s_root
        elif _n.parent.childs[0] == _n and len(_n.parent.childs) > 1:
            # first of several siblings: branch out from the parent line
            _text += ((s_level + " "*(indent-1))*(level-1)
                      + s_branch
                      + s_spar*(indent-1)
                      + s_fnode)
        elif _n.parent.childs[-1] == _n and len(_n.childs) == 0:
            # last sibling with no children
            _text += ((s_level + " "*(indent-1))*(level)
                      + s_lnode)
        elif _n.parent.childs[-1] == _n:
            # last sibling that itself has children
            _text += ((s_level + " "*(indent-1))*(level)
                      + s_mnode)
        else:
            # middle sibling
            _text += ((s_level + " "*(indent-1))*(level)
                      + s_mnode)
        if func and callable(func):
            _text += func(_n)
        _text += "\n"
    return _text
def from_treedict(self, treedict):
    """Populate this node, and recursively build its descendants,
    from a tree dict (as produced by `to_treedict`).

    ``data`` is deep-copied; every other key except the linkage keys
    ``parent``/``childs`` is set as an instance attribute.
    """
    if "data" in treedict:
        self.data = copy.deepcopy(treedict["data"])
    linkage_keys = ("parent", "childs", "data")
    for attr_name, attr_value in treedict.items():
        if attr_name in linkage_keys:
            continue
        setattr(self, attr_name, attr_value)
    # Children attach themselves via the constructor's `parent` argument.
    for child_dict in treedict.get("childs", []):
        self.__class__(parent=self, treedict=child_dict)
def to_treedict(self, recursive=True, vntree_meta=True):
    """Serialize this node (and optionally its sub-tree) into a dict.

    :param recursive: include descendants under the "childs" key.
    :param vntree_meta: keep the private "_vntree_meta" entry in "data".
    :returns: dict of instance attributes minus the linkage keys.
    """
    # Attribute values are shared by reference, not copied; stripping
    # meta therefore mutates this node's own data dict (as before).
    snapshot = {key: val for key, val in vars(self).items()
                if key not in ("parent", "childs")}
    if not vntree_meta and "_vntree_meta" in snapshot["data"]:
        snapshot["data"].pop("_vntree_meta")
    if recursive and self.childs:
        snapshot["childs"] = [child.to_treedict(recursive=recursive)
                              for child in self.childs]
    return snapshot
def to_json(self, filepath, default=str):
    """Serialize the tree to a JSON file.

    NOTE(review): not implemented — the body is a stub (`pass`), so
    calling this silently does nothing and returns None.  The `default`
    parameter is presumably intended for `json.dump(default=...)`.
    """
    pass
# def from_json(self, filepath):
#     err = ""
#     _treedict = None
#     if isinstance(filepath, str) and os.path.isfile(filepath):
#         try:
#             with open(filepath, 'r') as _fh:
#                 _treedict = json.load(_fh)
#         except Exception as err:
#             pass
#     if not _treedict:
#         logger.warning("%s.from_json: node«%s», cannot open «filepath»=«%s», %s." % (self.__class__.__name__, self.name, filepath, err))
#         return False
#     else:
#         self.from_treedict(treedict=_treedict)
#         return True
def savefile(self, filepath=None):
    """Save (dump) the complete tree into a pickle file.

    The whole tree is saved even when invoked on a non-root node.
    The recommended file extension is ``.vnpkl``.

    :param filepath: the pickle file path; when None, the previously
        recorded ``self._vnpkl_fpath`` is used.
    :type filepath: str or None
    :returns: `True` if successful, `False` on any error.
    :rtype: bool
    """
    if filepath:
        self._vnpkl_fpath = os.path.abspath(filepath)
    try:
        with open(self._vnpkl_fpath, "wb") as handle:
            pickle.dump(self._root.to_treedict(), handle)
    except Exception as err:
        logger.error("%s.savefile: arg `filepath`=«%s» `self._vnpkl_fpath`=«%s» error: %s" % (self.__class__.__name__, filepath, self._vnpkl_fpath, err))
        return False
    return True

@classmethod
def openfile(cls, filepath):
    """Class method that opens (loads) a vntree pickle file.

    :param filepath: the pickle file path.
    :type filepath: str
    :returns: root node of the tree, or `False` on failure.
    :rtype: Node or bool
    """
    if not os.path.isfile(filepath):
        logger.error("%s.openfile: arg `filepath`=«%s» not valid." % (cls.__name__, filepath))
        return False
    try:
        with open(filepath, "rb") as handle:
            payload = pickle.load(handle)
        rootnode = cls(treedict=payload)
        rootnode._vnpkl_fpath = os.path.abspath(filepath)
    except Exception as err:
        logger.error("%s.openfile: data in file «%s» not valid: %s" % (cls.__name__, filepath, err))
        return False
    return rootnode
@classmethod
def setup_yaml(cls):
    """Register a YAML constructor for the ``!<classname>`` tag so
    that `yaml.safe_load` can instantiate nodes of this class."""
    def _construct_node(loader, yamlnode):
        fields = loader.construct_mapping(yamlnode, deep=True)
        return cls(**fields)
    yaml.SafeLoader.add_constructor('!'+cls.__name__, _construct_node)
    # def yamlnode_representer(dumper, data):
    #     rep = '!Node'
    #     return dumper.represent_scalar('!Node', '%sd%s' % data)
    # yaml.add_representer(cls, yamlnode_representer)

@classmethod
def yaml2tree(cls, yamltree):
    """Class method that creates a tree from YAML.

    `yamltree` may be either the YAML text itself or the path of a
    file containing it.  Each node is a ``!<classname>`` tagged
    mapping; parent links are expressed with YAML anchors/aliases:

    | - !Node &root
    |   name: "root node"
    |   parent: null
    | - !Node &child1
    |   name: "child node"
    |   parent: *root

    :param yamltree: YAML describing the tree nodes, or a file path.
    :type yamltree: str
    :returns: the root node of the tree (the first listed node).
    :rtype: Node
    """
    if not cls.YAML_setup:
        cls.setup_yaml()
        cls.YAML_setup = True
    if os.path.isfile(yamltree):
        with open(yamltree) as handle:
            yaml_text = handle.read()
    else:
        yaml_text = yamltree
    nodes = yaml.safe_load(yaml_text)
    return nodes[0]
def tree2yaml(self):
    """Serialize the (sub-)tree rooted at this node as a YAML string.

    Each node is emitted as a ``!<classname>`` tagged mapping with an
    anchor derived from its tree coordinates; ``parent`` references
    are written as YAML aliases to those anchors.

    NOTE(review): presumably the inverse of `yaml2tree` — verify
    round-tripping before relying on it.

    :returns: a YAML representation of the tree.
    :rtype: str
    """
    # if not self.__class__.YAML_setup:
    #     self.__class__.setup_yaml()
    #     self.__class__.YAML_setup = True
    def make_anchor(node):
        # Anchor name is built from the node's tree coordinates,
        # e.g. the node at coord (1, 0) gets anchor "coord-1-0".
        # if node._root is node:
        #     anchor = "root"
        # else:
        anchor = "coord"
        for _c in node._coord:
            anchor += "-" + str(_c)
        return anchor
    yltree = "# dummy \n"
    for _n in self:
        # Dump a linkage-free copy of each node, then re-insert the
        # parent reference as an alias to the parent's anchor.
        _ncopy = _n.copy()
        delattr(_ncopy, "childs")
        delattr(_ncopy, "parent")
        # if _n.parent:
        #     _ncopy.parent = "*" + make_anchor(_n.parent)
        _yl = yaml.dump(_ncopy, default_flow_style=False)
        _n_yl = '!'+_n.__class__.__name__ + " &" + make_anchor(_n)
        if _n.parent:
            _n_yl += "\nparent: " + "*" + make_anchor(_n.parent)
        # else:
        #     _n_yl += "\n"
        _n_yl += _yl[_yl.index("\n"):]
        yltree += _n_yl
    # Re-indent and convert each node entry into a YAML list item.
    yltree = textwrap.indent(yltree, " ")
    yltree = yltree.strip()
    yltree = yltree.replace("\n !", "\n- !")
    return yltree
|
qwilka/vn-tree
|
vntree/node.py
|
Node.savefile
|
python
|
def savefile(self, filepath=None):
if filepath:
self._vnpkl_fpath = os.path.abspath(filepath)
# if not _pfpath:
# logger.error("%s.save: «%s» file path «%s» not valid." % (self.__class__.__name__, self.name, _pfpath))
# return False
try:
with open(self._vnpkl_fpath, "wb") as pf:
pickle.dump(self._root.to_treedict(), pf)
except Exception as err:
logger.error("%s.savefile: arg `filepath`=«%s» `self._vnpkl_fpath`=«%s» error: %s" % (self.__class__.__name__, filepath, self._vnpkl_fpath, err))
return False
return True
|
Save (dump) the tree in a pickle file.
Note that this method saves the complete tree even when invoked on
a non-root node.
It is recommended to use the extension `.vnpkl` for this type of file.
:param filepath: the file path for the pickle file.
If `filepath=None` use `self._vnpkl_fpath` attribute, if set.
:type filepath: str or None
:returns: `True` if successful.
:rtype: bool
|
train
|
https://github.com/qwilka/vn-tree/blob/f08106e9c7232d8748d78d1d39b019699a7407dd/vntree/node.py#L573-L597
| null |
class Node:
"""Class for creating vntree nodes.
:param name: node name
:type name: str or None
:param parent: The parent node of this node.
:type parent: Node or None
:param data: Dictionary containing node data.
:type data: dict or None
:param treedict: Dictionary specifying a complete tree.
:type treedict: dict or None
"""
YAML_setup = False
name = NodeAttr()
_vnpkl_fpath = TreeAttr("_vntree_meta")
def __init__(self, name=None, parent=None, data=None,
        treedict=None, vnpkl_fpath=None):
    """Initialise the node and optionally graft it into a tree.

    :param name: node name, or a callable ``f(node) -> str`` that is
        evaluated after the node has been linked to its parent.
    :param parent: parent node; the new node registers itself as a child.
    :param data: node payload; deep-copied so the caller's dict is not aliased.
    :param treedict: dict describing a whole (sub-)tree to build under this node.
    :param vnpkl_fpath: file path recorded for pickle persistence.
        NOTE(review): `_vnpkl_fpath` is a `TreeAttr` descriptor declared
        on the class — the storage location is defined there; confirm.
    :raises TypeError: if `parent` is neither None nor a Node subclass instance.
    """
    if data and isinstance(data, dict):
        #self.data = collections.defaultdict(dict, copy.deepcopy(data))
        self.data = copy.deepcopy(data)
    else:
        self.data = {}
    if name:
        # A callable `name` is str()-ed here too; it is overwritten by
        # its return value below, once the node has been parented.
        self.name = str(name)
    elif not getattr(self, "name", None) and name is None:
        self.name = ""
    self.childs = []
    if parent and issubclass(parent.__class__, Node):
        parent.add_child(self)
    elif parent is None:
        self.parent = None
    else:
        raise TypeError("{}.__init__: instance «{}» argument «parent» type not valid: {}".format(self.__class__.__name__, name, type(parent)))
    if callable(name):
        self.name = str(name(self))
    if treedict and isinstance(treedict, dict):
        self.from_treedict(treedict)
    if vnpkl_fpath and isinstance(vnpkl_fpath, str):
        self._vnpkl_fpath = vnpkl_fpath
def __str__(self):
return "{} coord={} «{}»".format(self.__class__.__name__, self._coord, self.name)
def __iter__(self):
yield self
for node in itertools.chain(*map(iter, self.childs)):
yield node
def __reversed__(self):
for node in itertools.chain(*map(reversed, self.childs)):
yield node
yield self
def add_child(self, node):
    """Add a child node to the current node instance.

    :param node: the child node instance; must be a Node (subclass) instance.
    :type node: Node
    :returns: the newly attached child node (enables chaining).
    :rtype: Node
    :raises TypeError: if `node` is not a Node subclass instance.
    """
    if not issubclass(node.__class__, Node):
        raise TypeError("{}.add_child: arg «node»=«{}», type {} not valid.".format(self.__class__.__name__, node, type(node)))
    self.childs.append(node)
    # Linking is bidirectional: the child records its new parent.
    node.parent = self
    return node

def copy(self):
    """Return a deep copy of the sub-tree rooted at this node instance.

    Note: `copy.deepcopy` follows the `parent` reference too, so the
    whole connected tree is duplicated; the returned reference is the
    duplicate of this node.

    :returns: copy of the sub-tree rooted at this node instance.
    :rtype: Node
    """
    return copy.deepcopy(self)
def remove_child(self, idx=None, *, name=None, node=None):
    """Remove a child node from the current node instance.

    Exactly one selector should be given: positional index *idx*,
    child *name*, or the child *node* itself.

    :param idx: index of the child node to be removed.
    :type idx: int or None
    :param name: the first child node found with «name» will be removed.
    :type name: str or None
    :param node: child node to be removed.
    :type node: Node or None
    :returns: the node that has been removed, or False if not successful.
    :rtype: Node or False
    """
    # BUG FIX: the original tested `if idx and ...`, so index 0 (the
    # first child) could never be removed; test against None instead.
    if (idx is not None and isinstance(idx, int) and
            -len(self.childs) <= idx < len(self.childs)):
        return self.childs.pop(idx)
    if name and isinstance(name, str):
        for child in self.childs:
            if child.name == name:
                self.childs.remove(child)
                return child
    if node and node in self.childs:
        self.childs.remove(node)
        return node
    return False
@property
def _path(self):
    """The absolute node path for this node.

    The path starts with a forward slash followed by the root node's
    name, e.g. ``/root.name/child.name/grandchild.name``.

    Warning: using `_path` assumes sibling nodes have unique names;
    when that cannot be assured, use `_coord` instead.

    :returns: the absolute node path for this node.
    :rtype: str
    """
    route = pathlib.PurePosixPath(self.name)
    node = self
    while node.parent:
        # str / PurePosixPath prepends the ancestor's name segment.
        route = node.parent.name / route
        node = node.parent
    route = pathlib.posixpath.sep / route
    return route.as_posix()
@property
def _coord(self):
    """Tree coordinates of this node.

    The coordinates are the tuple of child indices leading from the
    root down to this node, e.g. the node at
    ``root.childs[2].childs[0]`` has coordinates ``(2, 0)``.
    The root node's coordinates are the empty tuple ``()``.

    :returns: the tree coordinates for this node.
    :rtype: tuple
    """
    indices = []
    node = self
    while node.parent:
        indices.insert(0, node.parent.childs.index(node))
        node = node.parent
    return tuple(indices)

@property
def _level(self):
    """Tree level of this node; the root node is at level 1.

    :returns: the node level.
    :rtype: int
    """
    return len(self._coord) + 1
def get_data(self, *keys):
    """Get a value from the instance ``data`` dict.

    Nested values are accessed by specifying the keys in sequence,
    e.g. ``node.get_data("country", "city")`` accesses
    ``node.data["country"]["city"]``.  With no keys, the whole data
    dict is returned.  Dict results are returned as deep copies.

    :param keys: the ``data`` dict keys referencing the required value.
    :type keys: str
    :returns: the value accessed by `keys` in ``data`` (None if absent).
    """
    result = self.data
    container = self.data
    for key in keys:
        result = container.get(key, None)
        if isinstance(result, dict):
            container = result
        else:
            # Non-dict value (or missing key): stop descending.
            break
    if isinstance(result, dict):
        result = copy.deepcopy(result)
    return result
def set_data(self, *keys, value):
"""Set a value in the instance `data` dict.
:param keys: the `data` dict keys referencing the value in the `data` dict.
:type keys: str
:param value: the value to be set in the `data` dict. Note that
`value` is a keyword-only argument.
:returns: `True` if successful.
"""
_datadict = self.data
for ii, _key in enumerate(keys):
if ii==len(keys)-1:
_datadict[_key] = value
else:
if _key not in _datadict:
_datadict[_key] = {}
_datadict = _datadict[_key]
return True
@property
def _root(self):
"""Attribute referencing the root node of the tree.
:returns: the root node of the tree containing this instance.
:rtype: Node
"""
_n = self
while _n.parent:
_n = _n.parent
return _n
@property
def _ancestors(self):
"""Attribute referencing the tree ancestors of the node instance.
:returns: list of node ancestors in sequence, first item is
the current node instance (`self`), the last item is root.
:rtype: list of Node references
"""
# return list of ancestor nodes starting with self.parent and ending with root
_ancestors=[]
_n = self
while _n.parent:
_n = _n.parent
_ancestors.append(_n)
return _ancestors
def get_child_by_name(self, childname):
"""Get a child node of the current instance by its name.
:param childname: the name of the required child node.
:type childname: str
:returns: the first child node found with name `childname`.
:rtype: Node or None
"""
_childs = [_child for _child in self.childs if _child.name==childname]
if len(_childs)>1:
logger.warning("%s.get_child_by_name: node:«%s» has more than 1 childnode with name=«%s»." % (self.__class__.__name__, self.name, childname))
if len(_childs)==0:
_childnode = None
else:
_childnode = _childs[0]
return _childnode
def get_node_by_path(self, path):
"""Get a node from a node path.
Warning: use of this method assumes that sibling nodes have unique names,
if this is not assured the `get_node_by_coord` method can be used instead.
| Example with absolute node path:
| `node.get_node_by_path('/root.name/child.name/gchild.name')`
| Example with relative node path:
| `node.get_node_by_path('child.name/gchild.name')`
:param path: the absolute node path, or the node path relative
to the current node instance.
:type path: str
:returns: the node corresponding to `path`.
:rtype: Node or None
"""
if path==".":
return self
elif path.lstrip().startswith((".", "./")) or not isinstance(path, str):
logger.warning("%s.get_node_by_path: arg «path»=«%s», not correctly specified." % (self.__class__.__name__, path))
return None
_pathlist = list(filter(None, path.split("/")) ) # remove blank strings
if path.startswith("/"):
_node = self._root
_pathlist.pop(0) # remove rootnode name
else:
_node = self
for _nodename in _pathlist:
_node = _node.get_child_by_name(_nodename)
if _node is None:
logger.warning("%s.get_node_by_path: node«%s», arg `path`=«%s», cannot find node." % (self.__class__.__name__, self.name, path))
return None
return _node
def get_node_by_coord(self, coord, relative=False):
"""Get a node from a node coord.
:param coord: the coordinates of the required node.
:type coord: tuple or list
:param relative: `True` if coord is relative to the node instance,
`False` for absolute coordinates.
:type relative: bool
:returns: the node corresponding to `coord`.
:rtype: Node or None
"""
if not isinstance(coord, (list, tuple)) or False in list(map(lambda i: type(i)==int, coord)):
logger.warning("%s.get_node_by_coord: node«%s», arg «coord»=«%s», «coord» must be list or tuple of integers." % (self.__class__.__name__, self.name, coord))
return None
if relative:
_node = self
else:
_node = self._root # _node = self.get_rootnode()
for idx in coord:
_node = _node.childs[idx]
if _node is None:
logger.warning("%s.get_node_by_coord: node«%s», arg «coord»=«%s» not valid." % (self.__class__.__name__, self.name, coord))
return None
return _node
def find_one_node(self, *keys, value, decend=True):
"""Find a node on the branch of the instance with a
`keys=data` item in the `data` dict.
Nested values are accessed by specifying the keys in sequence.
e.g. `node.get_data("country", "city")` would access
`node.data["country"]["city"]`
:param keys: the `data` dict key(s) referencing the required value.
:type keys: str
:param value: the value corresponding to `keys`. Note that
`value` is a keyword-only argument.
:param decend: `decend=True` traverse down the branch sub-tree
starting from `self`. `decend=False` traverse up the
branch from `self` towards root.
:type decend: bool
:returns: the first node found with `keys=data` in the `data` dict.
:rtype: Node or None
"""
if decend:
traversal = self
else:
traversal = self._ancestors
for _node in traversal:
_val = _node.get_data(*keys)
if _val == value:
return _node
return None
def to_texttree(self, indent=3, func=True, symbol='ascii'):
"""Method returning a text representation of the (sub-)tree
rooted at the current node instance (`self`).
:param indent: the indentation width for each tree level.
:type indent: int
:param func: function returning a string representation for
each node. e.g. `func=lambda n: str(n._coord)`
would show the node coordinates.
`func=True` node.name displayed for each node.
`func=False` no node representation, just
the tree structure is displayed.
:type func: function or bool
:param symbol: tuple of tree symbol characters.
`None` or 'ascii' gives a preformed ascii tree, equivalent to tuple :code:`(|, +, |, |, |, -, .)`.
'box' preformed with box-drawing characters, equivalent to tuple :code:`(┬, └, ┬, ├, └, ─, ⋅)`.
'unicode' preformed with unicode characters.
:type symbol: tuple or str or None
:returns: a string representation of the tree.
:rtype: str
"""
if indent<2:
indent=2
if func is True: # default func prints node.name
func = lambda n: "{}".format(n.name)
if isinstance(symbol, (list, tuple)):
s_root, s_branch, s_spar, s_fnode = symbol
elif symbol=="unicode":
# ┬ └ ┬ ├ └ ─ ⋅
s_root, s_branch, s_fnode, s_mnode, s_lnode, s_spar, s_level = (
"\u252c", "\u2514", "\u252c", "\u251c", "\u2514", "\u2500", "\u22c5")
elif symbol=="box": # https://en.wikipedia.org/wiki/Box-drawing_character
# ┬ └ ┬ ├ └ ─ ⋅
s_root, s_branch, s_fnode, s_mnode, s_lnode, s_spar, s_level = (
"\u252c", "\u2514", "\u252c", "\u251c", "\u2514", "\u2500", "\u22c5")
else:
s_root, s_branch, s_fnode, s_mnode, s_lnode, s_spar, s_level = (
"|", "+", "|", "|", "|", "-", ".")
_text = ""
#local_root_level = len(self.ancestors)
local_root_level = self._level
for _n in self:
#level = len(node.ancestors) - local_root_level
level = _n._level - local_root_level
if level==0:
_text += s_root
elif _n.parent.childs[0] == _n and len(_n.parent.childs)>1: # first child
#s_spar="f"
_text += ( (s_level + " "*(indent-1))*(level-1)
+ s_branch
+ s_spar*(indent-1)
+ s_fnode)
elif _n.parent.childs[-1] == _n and len(_n.childs)==0: # last child, no children
#s_spar="l"
_text += ( (s_level + " "*(indent-1))*(level)
+ s_lnode )
elif _n.parent.childs[-1] == _n: # last child, has children
#s_spar="l"
_text += ( (s_level + " "*(indent-1))*(level)
+ s_mnode )
#+ s_spar*(indent-1) )
# elif level>0:
# _text += (s_level + " "*(indent-1))*(level-1) + s_branch + s_spar*(indent-1)
else:
#_text += s_fnode
#s_spar="m"
_text += ( (s_level + " "*(indent-1))*(level)
+ s_mnode )
#+ s_spar*(indent-1) )
if func and callable(func):
_text += func(_n)
_text += "\n"
return _text
def from_treedict(self, treedict):
if "data" in treedict:
#self.data = collections.defaultdict(dict, treedict["data"])
self.data = copy.deepcopy(treedict["data"])
for key, val in treedict.items():
if key in ["parent", "childs", "data"]:
continue
setattr(self, key, val)
if "childs" in treedict.keys():
for _childdict in treedict["childs"]:
#self.childs.append( self.__class__(parent=self, treedict=_childdict) )
self.__class__(parent=self, treedict=_childdict)
def to_treedict(self, recursive=True, vntree_meta=True):
# NOTE: replace vars(self) with self.__dict__ ( and self.__class__.__dict__ ?)
_dct = {k:v for k, v in vars(self).items() if k not in ["parent", "childs"]}
if not vntree_meta and "_vntree_meta" in _dct["data"]:
_dct["data"].pop("_vntree_meta")
if recursive and self.childs:
_dct["childs"] = []
for _child in self.childs:
_dct["childs"].append( _child.to_treedict(recursive=recursive) )
return _dct
def to_json(self, filepath, default=str):
pass
# def from_json(self, filepath):
# err = ""
# _treedict = None
# if isinstance(filepath, str) and os.path.isfile(filepath):
# try:
# with open(filepath, 'r') as _fh:
# _treedict = json.load(_fh)
# except Exception as err:
# pass
# if not _treedict:
# logger.warning("%s.from_json: node«%s», cannot open «filepath»=«%s», %s." % (self.__class__.__name__, self.name, filepath, err))
# return False
# else:
# self.from_treedict(treedict=_treedict)
# return True
def tree_compare(self, othertree, vntree_meta=False):
    """Compare the (sub-)tree rooted at this node with another tree.

    Both trees are serialized to JSON strings and compared with
    `difflib.SequenceMatcher.ratio()`.

    :param othertree: the other tree for comparison.
    :type othertree: Node
    :param vntree_meta: include private vntree metadata in the comparison.
    :type vntree_meta: bool
    :returns: similarity of the trees as a number between 0 and 1.
    :rtype: float
    """
    mine = json.dumps(self.to_treedict(vntree_meta=vntree_meta), default=str)
    theirs = json.dumps(othertree.to_treedict(vntree_meta=vntree_meta), default=str)
    return SequenceMatcher(None, mine, theirs).ratio()
@classmethod
def openfile(cls, filepath):
"""Class method that opens (load) a vntree pickle file.
:param filepath: the file path for the pickle file.
:type filepath: str
:returns: root node of tree or `False` if failure.
:rtype: Node or bool
"""
if not os.path.isfile(filepath):
logger.error("%s.openfile: arg `filepath`=«%s» not valid." % (cls.__name__, filepath))
return False
try:
with open(filepath, "rb") as pf:
pkldata = pickle.load(pf)
rootnode = cls(treedict=pkldata)
rootnode._vnpkl_fpath = os.path.abspath(filepath)
except Exception as err:
logger.error("%s.openfile: data in file «%s» not valid: %s" % (cls.__name__, filepath, err))
return False
return rootnode
@classmethod
def setup_yaml(cls):
def yamlnode_constructor(loader, yamlnode) :
fields = loader.construct_mapping(yamlnode, deep=True)
return cls(**fields)
yaml.SafeLoader.add_constructor('!'+cls.__name__, yamlnode_constructor)
# def yamlnode_representer(dumper, data):
# rep = '!Node'
# return dumper.represent_scalar('!Node', '%sd%s' % data)
# yaml.add_representer(cls, yamlnode_representer)
@classmethod
def yaml2tree(cls, yamltree):
"""Class method that creates a tree from YAML.
| # Example yamltree data:
| - !Node &root
| name: "root node"
| parent: null
| data:
| testpara: 111
| - !Node &child1
| name: "child node"
| parent: *root
| - !Node &gc1
| name: "grand-child node"
| parent: *child1
:param yamltree: a string of YAML describing the nodes in the
tree, or the path to a file containing the data.
:type yamltree: str
:returns: the root node of the tree.
:rtype: Node
"""
if not cls.YAML_setup:
cls.setup_yaml()
cls.YAML_setup = True
if os.path.isfile(yamltree):
with open(yamltree) as fh:
yaml_data = fh.read()
else:
yaml_data = yamltree
list_of_nodes = yaml.safe_load(yaml_data)
yamltree_root = list_of_nodes[0]
return yamltree_root
def tree2yaml(self):
# if not self.__class__.YAML_setup:
# self.__class__.setup_yaml()
# self.__class__.YAML_setup = True
def make_anchor(node):
# if node._root is node:
# anchor = "root"
# else:
anchor = "coord"
for _c in node._coord:
anchor += "-" + str(_c)
return anchor
yltree = "# dummy \n"
for _n in self:
_ncopy = _n.copy()
delattr(_ncopy, "childs")
delattr(_ncopy, "parent")
# if _n.parent:
# _ncopy.parent = "*" + make_anchor(_n.parent)
_yl = yaml.dump(_ncopy, default_flow_style=False)
_n_yl = '!'+_n.__class__.__name__ + " &" + make_anchor(_n)
if _n.parent:
_n_yl += "\nparent: " + "*" + make_anchor(_n.parent)
# else:
# _n_yl += "\n"
_n_yl += _yl[_yl.index("\n"):]
yltree += _n_yl
yltree = textwrap.indent(yltree, " ")
yltree = yltree.strip()
yltree = yltree.replace("\n !", "\n- !")
return yltree
|
qwilka/vn-tree
|
vntree/node.py
|
Node.openfile
|
python
|
def openfile(cls, filepath):
if not os.path.isfile(filepath):
logger.error("%s.openfile: arg `filepath`=«%s» not valid." % (cls.__name__, filepath))
return False
try:
with open(filepath, "rb") as pf:
pkldata = pickle.load(pf)
rootnode = cls(treedict=pkldata)
rootnode._vnpkl_fpath = os.path.abspath(filepath)
except Exception as err:
logger.error("%s.openfile: data in file «%s» not valid: %s" % (cls.__name__, filepath, err))
return False
return rootnode
|
Class method that opens (load) a vntree pickle file.
:param filepath: the file path for the pickle file.
:type filepath: str
:returns: root node of tree or `False` if failure.
:rtype: Node or bool
|
train
|
https://github.com/qwilka/vn-tree/blob/f08106e9c7232d8748d78d1d39b019699a7407dd/vntree/node.py#L601-L620
| null |
class Node:
"""Class for creating vntree nodes.
:param name: node name
:type name: str or None
:param parent: The parent node of this node.
:type parent: Node or None
:param data: Dictionary containing node data.
:type data: dict or None
:param treedict: Dictionary specifying a complete tree.
:type treedict: dict or None
"""
YAML_setup = False
name = NodeAttr()
_vnpkl_fpath = TreeAttr("_vntree_meta")
def __init__(self, name=None, parent=None, data=None,
treedict=None, vnpkl_fpath=None):
if data and isinstance(data, dict):
#self.data = collections.defaultdict(dict, copy.deepcopy(data))
self.data = copy.deepcopy(data)
else:
self.data = {}
if name:
self.name = str(name)
elif not getattr(self, "name", None) and name is None:
self.name = ""
self.childs = []
if parent and issubclass(parent.__class__, Node):
parent.add_child(self)
elif parent is None:
self.parent = None
else:
raise TypeError("{}.__init__: instance «{}» argument «parent» type not valid: {}".format(self.__class__.__name__, name, type(parent)))
if callable(name):
self.name = str(name(self))
if treedict and isinstance(treedict, dict):
self.from_treedict(treedict)
if vnpkl_fpath and isinstance(vnpkl_fpath, str):
self._vnpkl_fpath = vnpkl_fpath
def __str__(self):
return "{} coord={} «{}»".format(self.__class__.__name__, self._coord, self.name)
def __iter__(self):
yield self
for node in itertools.chain(*map(iter, self.childs)):
yield node
def __reversed__(self):
for node in itertools.chain(*map(reversed, self.childs)):
yield node
yield self
def add_child(self, node):
"""Add a child node to the current node instance.
:param node: the child node instance.
:type node: Node
:returns: The new child node instance.
:rtype: Node
"""
if not issubclass(node.__class__, Node):
raise TypeError("{}.add_child: arg «node»=«{}», type {} not valid.".format(self.__class__.__name__, node, type(node)))
self.childs.append(node)
node.parent = self
return node
def copy(self):
"""Return a deep copy of the sub-tree rooted at this node instance.
:returns: Copy of the sub-tree rooted at this node instance.
:rtype: Node
"""
return copy.deepcopy(self)
def remove_child(self, idx=None, *, name=None, node=None):
"""Remove a child node from the current node instance.
:param idx: Index of child node to be removed.
:type idx: int
:param name: The first child node found with «name» will be removed.
:type name: str
:param node: Child node to be removed.
:type node: Node
:returns: The node that has been removed, or False if not successful.
:rtype: Node or False
"""
if (idx and isinstance(idx, int) and
-len(self.childs) <= idx < len(self.childs) ):
return self.childs.pop(idx)
if name and isinstance(name, str):
found_node = None
for _n in self.childs:
if _n.name == name:
found_node = _n
break
if found_node:
self.childs.remove(found_node)
return found_node
if node and node in self.childs:
self.childs.remove(node)
return node
return False
@property
def _path(self):
"""Attribute indicating the absolute node path for this node.
Note that the absolute node path starts with a forward slash
followed by the root node's name: e.g:
`/root.name/child.name/grandchild.name`
Warning: it should be noted that use of _path assumes
that sibling nodes have unique names. If unique node paths
cannot be assured, use node attribute «_coord» instead.
:returns: The absolute node path for this node.
:rtype: str
"""
_path = pathlib.PurePosixPath(self.name)
_node = self
while _node.parent:
_path = _node.parent.name / _path
_node = _node.parent
_path = pathlib.posixpath.sep / _path
return _path.as_posix()
@property
def _coord(self):
"""Attribute indicating the tree coordinates for this node.
The tree coordinates of a node are expressed as a tuple of the
indices of the node and its ancestors, for example:
A grandchild node with node path
`/root.name/root.childs[2].name/root.childs[2].childs[0].name`
would have coordinates `(2,0)`.
The root node _coord is an empty tuple: `()`
:returns: the tree coordinates for this node.
:rtype: tuple
"""
_coord = []
_node = self
while _node.parent:
_idx = _node.parent.childs.index(_node)
_coord.insert(0, _idx)
_node = _node.parent
return tuple(_coord)
@property
def _level(self):
"""Attribute indicating the tree `level` for this node instance.
Note that the root node is defined as level 1.
:returns: the node `level`.
:rtype: int
"""
return len(self._coord) + 1
def get_data(self, *keys):
"""Get a value from the instance `data` dict.
Nested values are accessed by specifying the keys in sequence.
e.g. `node.get_data("country", "city")` would access
`node.data["country"]["city"]`
:param keys: the `data` dict keys referencing the required value.
:type keys: str
:returns: the value accessed by `keys` in `data`.
"""
if not keys:
_val = self.data
_datadict = self.data
for _key in keys:
_val = _datadict.get(_key, None)
if isinstance(_val, dict):
_datadict = _val
else:
break
if isinstance(_val, dict):
_val = copy.deepcopy(_val)
return _val
def set_data(self, *keys, value):
"""Set a value in the instance `data` dict.
:param keys: the `data` dict keys referencing the value in the `data` dict.
:type keys: str
:param value: the value to be set in the `data` dict. Note that
`value` is a keyword-only argument.
:returns: `True` if successful.
"""
_datadict = self.data
for ii, _key in enumerate(keys):
if ii==len(keys)-1:
_datadict[_key] = value
else:
if _key not in _datadict:
_datadict[_key] = {}
_datadict = _datadict[_key]
return True
@property
def _root(self):
"""Attribute referencing the root node of the tree.
:returns: the root node of the tree containing this instance.
:rtype: Node
"""
_n = self
while _n.parent:
_n = _n.parent
return _n
@property
def _ancestors(self):
"""Attribute referencing the tree ancestors of the node instance.
:returns: list of node ancestors in sequence, first item is
the current node instance (`self`), the last item is root.
:rtype: list of Node references
"""
# return list of ancestor nodes starting with self.parent and ending with root
_ancestors=[]
_n = self
while _n.parent:
_n = _n.parent
_ancestors.append(_n)
return _ancestors
def get_child_by_name(self, childname):
"""Get a child node of the current instance by its name.
:param childname: the name of the required child node.
:type childname: str
:returns: the first child node found with name `childname`.
:rtype: Node or None
"""
_childs = [_child for _child in self.childs if _child.name==childname]
if len(_childs)>1:
logger.warning("%s.get_child_by_name: node:«%s» has more than 1 childnode with name=«%s»." % (self.__class__.__name__, self.name, childname))
if len(_childs)==0:
_childnode = None
else:
_childnode = _childs[0]
return _childnode
def get_node_by_path(self, path):
"""Get a node from a node path.
Warning: use of this method assumes that sibling nodes have unique names,
if this is not assured the `get_node_by_coord` method can be used instead.
| Example with absolute node path:
| `node.get_node_by_path('/root.name/child.name/gchild.name')`
| Example with relative node path:
| `node.get_node_by_path('child.name/gchild.name')`
:param path: the absolute node path, or the node path relative
to the current node instance.
:type path: str
:returns: the node corresponding to `path`.
:rtype: Node or None
"""
if path==".":
return self
elif path.lstrip().startswith((".", "./")) or not isinstance(path, str):
logger.warning("%s.get_node_by_path: arg «path»=«%s», not correctly specified." % (self.__class__.__name__, path))
return None
_pathlist = list(filter(None, path.split("/")) ) # remove blank strings
if path.startswith("/"):
_node = self._root
_pathlist.pop(0) # remove rootnode name
else:
_node = self
for _nodename in _pathlist:
_node = _node.get_child_by_name(_nodename)
if _node is None:
logger.warning("%s.get_node_by_path: node«%s», arg `path`=«%s», cannot find node." % (self.__class__.__name__, self.name, path))
return None
return _node
def get_node_by_coord(self, coord, relative=False):
"""Get a node from a node coord.
:param coord: the coordinates of the required node.
:type coord: tuple or list
:param relative: `True` if coord is relative to the node instance,
`False` for absolute coordinates.
:type relative: bool
:returns: the node corresponding to `coord`.
:rtype: Node or None
"""
if not isinstance(coord, (list, tuple)) or False in list(map(lambda i: type(i)==int, coord)):
logger.warning("%s.get_node_by_coord: node«%s», arg «coord»=«%s», «coord» must be list or tuple of integers." % (self.__class__.__name__, self.name, coord))
return None
if relative:
_node = self
else:
_node = self._root # _node = self.get_rootnode()
for idx in coord:
_node = _node.childs[idx]
if _node is None:
logger.warning("%s.get_node_by_coord: node«%s», arg «coord»=«%s» not valid." % (self.__class__.__name__, self.name, coord))
return None
return _node
def find_one_node(self, *keys, value, decend=True):
"""Find a node on the branch of the instance with a
`keys=data` item in the `data` dict.
Nested values are accessed by specifying the keys in sequence.
e.g. `node.get_data("country", "city")` would access
`node.data["country"]["city"]`
:param keys: the `data` dict key(s) referencing the required value.
:type keys: str
:param value: the value corresponding to `keys`. Note that
`value` is a keyword-only argument.
:param decend: `decend=True` traverse down the branch sub-tree
starting from `self`. `decend=False` traverse up the
branch from `self` towards root.
:type decend: bool
:returns: the first node found with `keys=data` in the `data` dict.
:rtype: Node or None
"""
if decend:
traversal = self
else:
traversal = self._ancestors
for _node in traversal:
_val = _node.get_data(*keys)
if _val == value:
return _node
return None
def to_texttree(self, indent=3, func=True, symbol='ascii'):
"""Method returning a text representation of the (sub-)tree
rooted at the current node instance (`self`).
:param indent: the indentation width for each tree level.
:type indent: int
:param func: function returning a string representation for
each node. e.g. `func=lambda n: str(n._coord)`
would show the node coordinates.
`func=True` node.name displayed for each node.
`func=False` no node representation, just
the tree structure is displayed.
:type func: function or bool
:param symbol: tuple of tree symbol characters.
`None` or 'ascii' gives a preformed ascii tree, equivalent to tuple :code:`(|, +, |, |, |, -, .)`.
'box' preformed with box-drawing characters, equivalent to tuple :code:`(┬, └, ┬, ├, └, ─, ⋅)`.
'unicode' preformed with unicode characters.
:type symbol: tuple or str or None
:returns: a string representation of the tree.
:rtype: str
"""
if indent<2:
indent=2
if func is True: # default func prints node.name
func = lambda n: "{}".format(n.name)
if isinstance(symbol, (list, tuple)):
s_root, s_branch, s_spar, s_fnode = symbol
elif symbol=="unicode":
# ┬ └ ┬ ├ └ ─ ⋅
s_root, s_branch, s_fnode, s_mnode, s_lnode, s_spar, s_level = (
"\u252c", "\u2514", "\u252c", "\u251c", "\u2514", "\u2500", "\u22c5")
elif symbol=="box": # https://en.wikipedia.org/wiki/Box-drawing_character
# ┬ └ ┬ ├ └ ─ ⋅
s_root, s_branch, s_fnode, s_mnode, s_lnode, s_spar, s_level = (
"\u252c", "\u2514", "\u252c", "\u251c", "\u2514", "\u2500", "\u22c5")
else:
s_root, s_branch, s_fnode, s_mnode, s_lnode, s_spar, s_level = (
"|", "+", "|", "|", "|", "-", ".")
_text = ""
#local_root_level = len(self.ancestors)
local_root_level = self._level
for _n in self:
#level = len(node.ancestors) - local_root_level
level = _n._level - local_root_level
if level==0:
_text += s_root
elif _n.parent.childs[0] == _n and len(_n.parent.childs)>1: # first child
#s_spar="f"
_text += ( (s_level + " "*(indent-1))*(level-1)
+ s_branch
+ s_spar*(indent-1)
+ s_fnode)
elif _n.parent.childs[-1] == _n and len(_n.childs)==0: # last child, no children
#s_spar="l"
_text += ( (s_level + " "*(indent-1))*(level)
+ s_lnode )
elif _n.parent.childs[-1] == _n: # last child, has children
#s_spar="l"
_text += ( (s_level + " "*(indent-1))*(level)
+ s_mnode )
#+ s_spar*(indent-1) )
# elif level>0:
# _text += (s_level + " "*(indent-1))*(level-1) + s_branch + s_spar*(indent-1)
else:
#_text += s_fnode
#s_spar="m"
_text += ( (s_level + " "*(indent-1))*(level)
+ s_mnode )
#+ s_spar*(indent-1) )
if func and callable(func):
_text += func(_n)
_text += "\n"
return _text
def from_treedict(self, treedict):
if "data" in treedict:
#self.data = collections.defaultdict(dict, treedict["data"])
self.data = copy.deepcopy(treedict["data"])
for key, val in treedict.items():
if key in ["parent", "childs", "data"]:
continue
setattr(self, key, val)
if "childs" in treedict.keys():
for _childdict in treedict["childs"]:
#self.childs.append( self.__class__(parent=self, treedict=_childdict) )
self.__class__(parent=self, treedict=_childdict)
def to_treedict(self, recursive=True, vntree_meta=True):
# NOTE: replace vars(self) with self.__dict__ ( and self.__class__.__dict__ ?)
_dct = {k:v for k, v in vars(self).items() if k not in ["parent", "childs"]}
if not vntree_meta and "_vntree_meta" in _dct["data"]:
_dct["data"].pop("_vntree_meta")
if recursive and self.childs:
_dct["childs"] = []
for _child in self.childs:
_dct["childs"].append( _child.to_treedict(recursive=recursive) )
return _dct
def to_json(self, filepath, default=str):
pass
# def from_json(self, filepath):
# err = ""
# _treedict = None
# if isinstance(filepath, str) and os.path.isfile(filepath):
# try:
# with open(filepath, 'r') as _fh:
# _treedict = json.load(_fh)
# except Exception as err:
# pass
# if not _treedict:
# logger.warning("%s.from_json: node«%s», cannot open «filepath»=«%s», %s." % (self.__class__.__name__, self.name, filepath, err))
# return False
# else:
# self.from_treedict(treedict=_treedict)
# return True
def tree_compare(self, othertree, vntree_meta=False):
"""Compare the (sub-)tree rooted at `self` with another tree.
`tree_compare` converts the trees being compared into JSON string
representations, and uses `difflib.SequenceMatcher().ratio()` to
calculate a measure of the similarity of the strings.
:param othertree: the other tree for comparison.
:type othertree: Node
:param vntree_meta: include private vntree metadata in comparison.
:type vntree_meta: bool
:returns: similarity of the trees as a number between 0 and 1.
:rtype: float
"""
return SequenceMatcher(None,
json.dumps(self.to_treedict(vntree_meta=vntree_meta), default=str),
json.dumps(othertree.to_treedict(vntree_meta=vntree_meta), default=str)
).ratio()
def savefile(self, filepath=None):
"""Save (dump) the tree in a pickle file.
Note that this method saves the complete tree even when invoked on
a non-root node.
It is recommended to use the extension `.vnpkl` for this type of file.
:param filepath: the file path for the pickle file.
If `filepath=None` use `self._vnpkl_fpath` attribute, if set.
:type filepath: str or None
:returns: `True` if successful.
:rtype: bool
"""
if filepath:
self._vnpkl_fpath = os.path.abspath(filepath)
# if not _pfpath:
# logger.error("%s.save: «%s» file path «%s» not valid." % (self.__class__.__name__, self.name, _pfpath))
# return False
try:
with open(self._vnpkl_fpath, "wb") as pf:
pickle.dump(self._root.to_treedict(), pf)
except Exception as err:
logger.error("%s.savefile: arg `filepath`=«%s» `self._vnpkl_fpath`=«%s» error: %s" % (self.__class__.__name__, filepath, self._vnpkl_fpath, err))
return False
return True
@classmethod
@classmethod
def setup_yaml(cls):
def yamlnode_constructor(loader, yamlnode) :
fields = loader.construct_mapping(yamlnode, deep=True)
return cls(**fields)
yaml.SafeLoader.add_constructor('!'+cls.__name__, yamlnode_constructor)
# def yamlnode_representer(dumper, data):
# rep = '!Node'
# return dumper.represent_scalar('!Node', '%sd%s' % data)
# yaml.add_representer(cls, yamlnode_representer)
@classmethod
def yaml2tree(cls, yamltree):
"""Class method that creates a tree from YAML.
| # Example yamltree data:
| - !Node &root
| name: "root node"
| parent: null
| data:
| testpara: 111
| - !Node &child1
| name: "child node"
| parent: *root
| - !Node &gc1
| name: "grand-child node"
| parent: *child1
:param yamltree: a string of YAML describing the nodes in the
tree, or the path to a file containing the data.
:type yamltree: str
:returns: the root node of the tree.
:rtype: Node
"""
if not cls.YAML_setup:
cls.setup_yaml()
cls.YAML_setup = True
if os.path.isfile(yamltree):
with open(yamltree) as fh:
yaml_data = fh.read()
else:
yaml_data = yamltree
list_of_nodes = yaml.safe_load(yaml_data)
yamltree_root = list_of_nodes[0]
return yamltree_root
def tree2yaml(self):
# if not self.__class__.YAML_setup:
# self.__class__.setup_yaml()
# self.__class__.YAML_setup = True
def make_anchor(node):
# if node._root is node:
# anchor = "root"
# else:
anchor = "coord"
for _c in node._coord:
anchor += "-" + str(_c)
return anchor
yltree = "# dummy \n"
for _n in self:
_ncopy = _n.copy()
delattr(_ncopy, "childs")
delattr(_ncopy, "parent")
# if _n.parent:
# _ncopy.parent = "*" + make_anchor(_n.parent)
_yl = yaml.dump(_ncopy, default_flow_style=False)
_n_yl = '!'+_n.__class__.__name__ + " &" + make_anchor(_n)
if _n.parent:
_n_yl += "\nparent: " + "*" + make_anchor(_n.parent)
# else:
# _n_yl += "\n"
_n_yl += _yl[_yl.index("\n"):]
yltree += _n_yl
yltree = textwrap.indent(yltree, " ")
yltree = yltree.strip()
yltree = yltree.replace("\n !", "\n- !")
return yltree
|
qwilka/vn-tree
|
vntree/node.py
|
Node.yaml2tree
|
python
|
def yaml2tree(cls, yamltree):
if not cls.YAML_setup:
cls.setup_yaml()
cls.YAML_setup = True
if os.path.isfile(yamltree):
with open(yamltree) as fh:
yaml_data = fh.read()
else:
yaml_data = yamltree
list_of_nodes = yaml.safe_load(yaml_data)
yamltree_root = list_of_nodes[0]
return yamltree_root
|
Class method that creates a tree from YAML.
| # Example yamltree data:
| - !Node &root
| name: "root node"
| parent: null
| data:
| testpara: 111
| - !Node &child1
| name: "child node"
| parent: *root
| - !Node &gc1
| name: "grand-child node"
| parent: *child1
:param yamltree: a string of YAML describing the nodes in the
tree, or the path to a file containing the data.
:type yamltree: str
:returns: the root node of the tree.
:rtype: Node
|
train
|
https://github.com/qwilka/vn-tree/blob/f08106e9c7232d8748d78d1d39b019699a7407dd/vntree/node.py#L636-L668
|
[
"def setup_yaml(cls):\n def yamlnode_constructor(loader, yamlnode) :\n fields = loader.construct_mapping(yamlnode, deep=True)\n return cls(**fields)\n yaml.SafeLoader.add_constructor('!'+cls.__name__, yamlnode_constructor)\n"
] |
class Node:
"""Class for creating vntree nodes.
:param name: node name
:type name: str or None
:param parent: The parent node of this node.
:type parent: Node or None
:param data: Dictionary containing node data.
:type data: dict or None
:param treedict: Dictionary specifying a complete tree.
:type treedict: dict or None
"""
YAML_setup = False
name = NodeAttr()
_vnpkl_fpath = TreeAttr("_vntree_meta")
def __init__(self, name=None, parent=None, data=None,
treedict=None, vnpkl_fpath=None):
if data and isinstance(data, dict):
#self.data = collections.defaultdict(dict, copy.deepcopy(data))
self.data = copy.deepcopy(data)
else:
self.data = {}
if name:
self.name = str(name)
elif not getattr(self, "name", None) and name is None:
self.name = ""
self.childs = []
if parent and issubclass(parent.__class__, Node):
parent.add_child(self)
elif parent is None:
self.parent = None
else:
raise TypeError("{}.__init__: instance «{}» argument «parent» type not valid: {}".format(self.__class__.__name__, name, type(parent)))
if callable(name):
self.name = str(name(self))
if treedict and isinstance(treedict, dict):
self.from_treedict(treedict)
if vnpkl_fpath and isinstance(vnpkl_fpath, str):
self._vnpkl_fpath = vnpkl_fpath
def __str__(self):
return "{} coord={} «{}»".format(self.__class__.__name__, self._coord, self.name)
def __iter__(self):
yield self
for node in itertools.chain(*map(iter, self.childs)):
yield node
def __reversed__(self):
for node in itertools.chain(*map(reversed, self.childs)):
yield node
yield self
def add_child(self, node):
"""Add a child node to the current node instance.
:param node: the child node instance.
:type node: Node
:returns: The new child node instance.
:rtype: Node
"""
if not issubclass(node.__class__, Node):
raise TypeError("{}.add_child: arg «node»=«{}», type {} not valid.".format(self.__class__.__name__, node, type(node)))
self.childs.append(node)
node.parent = self
return node
def copy(self):
"""Return a deep copy of the sub-tree rooted at this node instance.
:returns: Copy of the sub-tree rooted at this node instance.
:rtype: Node
"""
return copy.deepcopy(self)
def remove_child(self, idx=None, *, name=None, node=None):
"""Remove a child node from the current node instance.
:param idx: Index of child node to be removed.
:type idx: int
:param name: The first child node found with «name» will be removed.
:type name: str
:param node: Child node to be removed.
:type node: Node
:returns: The node that has been removed, or False if not successful.
:rtype: Node or False
"""
if (idx and isinstance(idx, int) and
-len(self.childs) <= idx < len(self.childs) ):
return self.childs.pop(idx)
if name and isinstance(name, str):
found_node = None
for _n in self.childs:
if _n.name == name:
found_node = _n
break
if found_node:
self.childs.remove(found_node)
return found_node
if node and node in self.childs:
self.childs.remove(node)
return node
return False
@property
def _path(self):
"""Attribute indicating the absolute node path for this node.
Note that the absolute node path starts with a forward slash
followed by the root node's name: e.g:
`/root.name/child.name/grandchild.name`
Warning: it should be noted that use of _path assumes
that sibling nodes have unique names. If unique node paths
cannot be assured, use node attribute «_coord» instead.
:returns: The absolute node path for this node.
:rtype: str
"""
_path = pathlib.PurePosixPath(self.name)
_node = self
while _node.parent:
_path = _node.parent.name / _path
_node = _node.parent
_path = pathlib.posixpath.sep / _path
return _path.as_posix()
@property
def _coord(self):
"""Attribute indicating the tree coordinates for this node.
The tree coordinates of a node are expressed as a tuple of the
indices of the node and its ancestors, for example:
A grandchild node with node path
`/root.name/root.childs[2].name/root.childs[2].childs[0].name`
would have coordinates `(2,0)`.
The root node _coord is an empty tuple: `()`
:returns: the tree coordinates for this node.
:rtype: tuple
"""
_coord = []
_node = self
while _node.parent:
_idx = _node.parent.childs.index(_node)
_coord.insert(0, _idx)
_node = _node.parent
return tuple(_coord)
@property
def _level(self):
"""Attribute indicating the tree `level` for this node instance.
Note that the root node is defined as level 1.
:returns: the node `level`.
:rtype: int
"""
return len(self._coord) + 1
def get_data(self, *keys):
"""Get a value from the instance `data` dict.
Nested values are accessed by specifying the keys in sequence.
e.g. `node.get_data("country", "city")` would access
`node.data["country"]["city"]`
:param keys: the `data` dict keys referencing the required value.
:type keys: str
:returns: the value accessed by `keys` in `data`.
"""
if not keys:
_val = self.data
_datadict = self.data
for _key in keys:
_val = _datadict.get(_key, None)
if isinstance(_val, dict):
_datadict = _val
else:
break
if isinstance(_val, dict):
_val = copy.deepcopy(_val)
return _val
def set_data(self, *keys, value):
"""Set a value in the instance `data` dict.
:param keys: the `data` dict keys referencing the value in the `data` dict.
:type keys: str
:param value: the value to be set in the `data` dict. Note that
`value` is a keyword-only argument.
:returns: `True` if successful.
"""
_datadict = self.data
for ii, _key in enumerate(keys):
if ii==len(keys)-1:
_datadict[_key] = value
else:
if _key not in _datadict:
_datadict[_key] = {}
_datadict = _datadict[_key]
return True
@property
def _root(self):
"""Attribute referencing the root node of the tree.
:returns: the root node of the tree containing this instance.
:rtype: Node
"""
_n = self
while _n.parent:
_n = _n.parent
return _n
@property
def _ancestors(self):
"""Attribute referencing the tree ancestors of the node instance.
:returns: list of node ancestors in sequence, first item is
the current node instance (`self`), the last item is root.
:rtype: list of Node references
"""
# return list of ancestor nodes starting with self.parent and ending with root
_ancestors=[]
_n = self
while _n.parent:
_n = _n.parent
_ancestors.append(_n)
return _ancestors
def get_child_by_name(self, childname):
"""Get a child node of the current instance by its name.
:param childname: the name of the required child node.
:type childname: str
:returns: the first child node found with name `childname`.
:rtype: Node or None
"""
_childs = [_child for _child in self.childs if _child.name==childname]
if len(_childs)>1:
logger.warning("%s.get_child_by_name: node:«%s» has more than 1 childnode with name=«%s»." % (self.__class__.__name__, self.name, childname))
if len(_childs)==0:
_childnode = None
else:
_childnode = _childs[0]
return _childnode
def get_node_by_path(self, path):
"""Get a node from a node path.
Warning: use of this method assumes that sibling nodes have unique names,
if this is not assured the `get_node_by_coord` method can be used instead.
| Example with absolute node path:
| `node.get_node_by_path('/root.name/child.name/gchild.name')`
| Example with relative node path:
| `node.get_node_by_path('child.name/gchild.name')`
:param path: the absolute node path, or the node path relative
to the current node instance.
:type path: str
:returns: the node corresponding to `path`.
:rtype: Node or None
"""
if path==".":
return self
elif path.lstrip().startswith((".", "./")) or not isinstance(path, str):
logger.warning("%s.get_node_by_path: arg «path»=«%s», not correctly specified." % (self.__class__.__name__, path))
return None
_pathlist = list(filter(None, path.split("/")) ) # remove blank strings
if path.startswith("/"):
_node = self._root
_pathlist.pop(0) # remove rootnode name
else:
_node = self
for _nodename in _pathlist:
_node = _node.get_child_by_name(_nodename)
if _node is None:
logger.warning("%s.get_node_by_path: node«%s», arg `path`=«%s», cannot find node." % (self.__class__.__name__, self.name, path))
return None
return _node
def get_node_by_coord(self, coord, relative=False):
"""Get a node from a node coord.
:param coord: the coordinates of the required node.
:type coord: tuple or list
:param relative: `True` if coord is relative to the node instance,
`False` for absolute coordinates.
:type relative: bool
:returns: the node corresponding to `coord`.
:rtype: Node or None
"""
if not isinstance(coord, (list, tuple)) or False in list(map(lambda i: type(i)==int, coord)):
logger.warning("%s.get_node_by_coord: node«%s», arg «coord»=«%s», «coord» must be list or tuple of integers." % (self.__class__.__name__, self.name, coord))
return None
if relative:
_node = self
else:
_node = self._root # _node = self.get_rootnode()
for idx in coord:
_node = _node.childs[idx]
if _node is None:
logger.warning("%s.get_node_by_coord: node«%s», arg «coord»=«%s» not valid." % (self.__class__.__name__, self.name, coord))
return None
return _node
def find_one_node(self, *keys, value, decend=True):
"""Find a node on the branch of the instance with a
`keys=data` item in the `data` dict.
Nested values are accessed by specifying the keys in sequence.
e.g. `node.get_data("country", "city")` would access
`node.data["country"]["city"]`
:param keys: the `data` dict key(s) referencing the required value.
:type keys: str
:param value: the value corresponding to `keys`. Note that
`value` is a keyword-only argument.
:param decend: `decend=True` traverse down the branch sub-tree
starting from `self`. `decend=False` traverse up the
branch from `self` towards root.
:type decend: bool
:returns: the first node found with `keys=data` in the `data` dict.
:rtype: Node or None
"""
if decend:
traversal = self
else:
traversal = self._ancestors
for _node in traversal:
_val = _node.get_data(*keys)
if _val == value:
return _node
return None
def to_texttree(self, indent=3, func=True, symbol='ascii'):
"""Method returning a text representation of the (sub-)tree
rooted at the current node instance (`self`).
:param indent: the indentation width for each tree level.
:type indent: int
:param func: function returning a string representation for
each node. e.g. `func=lambda n: str(n._coord)`
would show the node coordinates.
`func=True` node.name displayed for each node.
`func=False` no node representation, just
the tree structure is displayed.
:type func: function or bool
:param symbol: tuple of tree symbol characters.
`None` or 'ascii' gives a preformed ascii tree, equivalent to tuple :code:`(|, +, |, |, |, -, .)`.
'box' preformed with box-drawing characters, equivalent to tuple :code:`(┬, └, ┬, ├, └, ─, ⋅)`.
'unicode' preformed with unicode characters.
:type symbol: tuple or str or None
:returns: a string representation of the tree.
:rtype: str
"""
if indent<2:
indent=2
if func is True: # default func prints node.name
func = lambda n: "{}".format(n.name)
if isinstance(symbol, (list, tuple)):
s_root, s_branch, s_spar, s_fnode = symbol
elif symbol=="unicode":
# ┬ └ ┬ ├ └ ─ ⋅
s_root, s_branch, s_fnode, s_mnode, s_lnode, s_spar, s_level = (
"\u252c", "\u2514", "\u252c", "\u251c", "\u2514", "\u2500", "\u22c5")
elif symbol=="box": # https://en.wikipedia.org/wiki/Box-drawing_character
# ┬ └ ┬ ├ └ ─ ⋅
s_root, s_branch, s_fnode, s_mnode, s_lnode, s_spar, s_level = (
"\u252c", "\u2514", "\u252c", "\u251c", "\u2514", "\u2500", "\u22c5")
else:
s_root, s_branch, s_fnode, s_mnode, s_lnode, s_spar, s_level = (
"|", "+", "|", "|", "|", "-", ".")
_text = ""
#local_root_level = len(self.ancestors)
local_root_level = self._level
for _n in self:
#level = len(node.ancestors) - local_root_level
level = _n._level - local_root_level
if level==0:
_text += s_root
elif _n.parent.childs[0] == _n and len(_n.parent.childs)>1: # first child
#s_spar="f"
_text += ( (s_level + " "*(indent-1))*(level-1)
+ s_branch
+ s_spar*(indent-1)
+ s_fnode)
elif _n.parent.childs[-1] == _n and len(_n.childs)==0: # last child, no children
#s_spar="l"
_text += ( (s_level + " "*(indent-1))*(level)
+ s_lnode )
elif _n.parent.childs[-1] == _n: # last child, has children
#s_spar="l"
_text += ( (s_level + " "*(indent-1))*(level)
+ s_mnode )
#+ s_spar*(indent-1) )
# elif level>0:
# _text += (s_level + " "*(indent-1))*(level-1) + s_branch + s_spar*(indent-1)
else:
#_text += s_fnode
#s_spar="m"
_text += ( (s_level + " "*(indent-1))*(level)
+ s_mnode )
#+ s_spar*(indent-1) )
if func and callable(func):
_text += func(_n)
_text += "\n"
return _text
def from_treedict(self, treedict):
if "data" in treedict:
#self.data = collections.defaultdict(dict, treedict["data"])
self.data = copy.deepcopy(treedict["data"])
for key, val in treedict.items():
if key in ["parent", "childs", "data"]:
continue
setattr(self, key, val)
if "childs" in treedict.keys():
for _childdict in treedict["childs"]:
#self.childs.append( self.__class__(parent=self, treedict=_childdict) )
self.__class__(parent=self, treedict=_childdict)
def to_treedict(self, recursive=True, vntree_meta=True):
# NOTE: replace vars(self) with self.__dict__ ( and self.__class__.__dict__ ?)
_dct = {k:v for k, v in vars(self).items() if k not in ["parent", "childs"]}
if not vntree_meta and "_vntree_meta" in _dct["data"]:
_dct["data"].pop("_vntree_meta")
if recursive and self.childs:
_dct["childs"] = []
for _child in self.childs:
_dct["childs"].append( _child.to_treedict(recursive=recursive) )
return _dct
def to_json(self, filepath, default=str):
pass
# def from_json(self, filepath):
# err = ""
# _treedict = None
# if isinstance(filepath, str) and os.path.isfile(filepath):
# try:
# with open(filepath, 'r') as _fh:
# _treedict = json.load(_fh)
# except Exception as err:
# pass
# if not _treedict:
# logger.warning("%s.from_json: node«%s», cannot open «filepath»=«%s», %s." % (self.__class__.__name__, self.name, filepath, err))
# return False
# else:
# self.from_treedict(treedict=_treedict)
# return True
def tree_compare(self, othertree, vntree_meta=False):
"""Compare the (sub-)tree rooted at `self` with another tree.
`tree_compare` converts the trees being compared into JSON string
representations, and uses `difflib.SequenceMatcher().ratio()` to
calculate a measure of the similarity of the strings.
:param othertree: the other tree for comparison.
:type othertree: Node
:param vntree_meta: include private vntree metadata in comparison.
:type vntree_meta: bool
:returns: similarity of the trees as a number between 0 and 1.
:rtype: float
"""
return SequenceMatcher(None,
json.dumps(self.to_treedict(vntree_meta=vntree_meta), default=str),
json.dumps(othertree.to_treedict(vntree_meta=vntree_meta), default=str)
).ratio()
def savefile(self, filepath=None):
"""Save (dump) the tree in a pickle file.
Note that this method saves the complete tree even when invoked on
a non-root node.
It is recommended to use the extension `.vnpkl` for this type of file.
:param filepath: the file path for the pickle file.
If `filepath=None` use `self._vnpkl_fpath` attribute, if set.
:type filepath: str or None
:returns: `True` if successful.
:rtype: bool
"""
if filepath:
self._vnpkl_fpath = os.path.abspath(filepath)
# if not _pfpath:
# logger.error("%s.save: «%s» file path «%s» not valid." % (self.__class__.__name__, self.name, _pfpath))
# return False
try:
with open(self._vnpkl_fpath, "wb") as pf:
pickle.dump(self._root.to_treedict(), pf)
except Exception as err:
logger.error("%s.savefile: arg `filepath`=«%s» `self._vnpkl_fpath`=«%s» error: %s" % (self.__class__.__name__, filepath, self._vnpkl_fpath, err))
return False
return True
@classmethod
def openfile(cls, filepath):
"""Class method that opens (load) a vntree pickle file.
:param filepath: the file path for the pickle file.
:type filepath: str
:returns: root node of tree or `False` if failure.
:rtype: Node or bool
"""
if not os.path.isfile(filepath):
logger.error("%s.openfile: arg `filepath`=«%s» not valid." % (cls.__name__, filepath))
return False
try:
with open(filepath, "rb") as pf:
pkldata = pickle.load(pf)
rootnode = cls(treedict=pkldata)
rootnode._vnpkl_fpath = os.path.abspath(filepath)
except Exception as err:
logger.error("%s.openfile: data in file «%s» not valid: %s" % (cls.__name__, filepath, err))
return False
return rootnode
@classmethod
def setup_yaml(cls):
    """Register a YAML constructor so ``!<ClassName>`` tags load as *cls*.

    After calling this, ``yaml.safe_load`` builds instances of this class
    from mapping nodes tagged ``!<ClassName>``, passing the mapping's
    entries as keyword arguments.
    """
    def _construct_node(loader, node):
        # Deep construction resolves nested mappings and aliases first.
        kwargs = loader.construct_mapping(node, deep=True)
        return cls(**kwargs)
    yaml.SafeLoader.add_constructor('!' + cls.__name__, _construct_node)
def tree2yaml(self):
    """Serialise the tree rooted at this node to a YAML sequence string.

    Each node becomes a ``!<ClassName>`` mapping with a ``&coord-...``
    anchor derived from its tree coordinates; a child's ``parent`` field
    is a YAML alias (``*coord-...``) to its parent's anchor.

    Fix: this method was wrongly decorated with ``@classmethod`` while
    taking ``self`` and iterating the instance, which made instance calls
    bind the class and fail; the decorator has been removed.

    :returns: YAML text representing the tree.
    :rtype: str
    """
    def make_anchor(node):
        # Anchor name encodes the node's coordinate path, e.g. "coord-0-2".
        anchor = "coord"
        for _c in node._coord:
            anchor += "-" + str(_c)
        return anchor
    yltree = "# dummy \n"
    for _n in self:
        # Dump a shallow copy without tree links; links are re-expressed
        # as YAML anchors/aliases below.
        _ncopy = _n.copy()
        delattr(_ncopy, "childs")
        delattr(_ncopy, "parent")
        _yl = yaml.dump(_ncopy, default_flow_style=False)
        _n_yl = '!'+_n.__class__.__name__ + " &" + make_anchor(_n)
        if _n.parent:
            _n_yl += "\nparent: " + "*" + make_anchor(_n.parent)
        # Replace the first line of the dump (the object tag) with our
        # tagged/anchored header, keeping the remaining field lines.
        _n_yl += _yl[_yl.index("\n"):]
        yltree += _n_yl
    yltree = textwrap.indent(yltree, " ")
    yltree = yltree.strip()
    # Turn each indented node entry into a YAML sequence item.
    yltree = yltree.replace("\n !", "\n- !")
    return yltree
|
qwilka/vn-tree
|
examples/simple_tree.py
|
make_file_system_tree
|
python
|
def make_file_system_tree(root_folder, _parent=None):
    """Build a Node tree mirroring the folder/file hierarchy under *root_folder*.

    Every node is given a ``path`` attribute holding its file-system path.
    *_parent* is used internally during recursion and is normally omitted.
    Returns the node representing *root_folder* itself.
    """
    folder_node = Node(os.path.basename(root_folder), _parent)
    folder_node.path = root_folder
    for entry in os.listdir(root_folder):
        entry_path = os.path.join(root_folder, entry)
        if os.path.isdir(entry_path):
            # Recurse into sub-folders; the recursive call attaches itself
            # to folder_node via the _parent argument.
            make_file_system_tree(entry_path, _parent=folder_node)
        elif os.path.isfile(entry_path):
            leaf = Node(os.path.basename(entry), folder_node)
            leaf.path = entry_path
    return folder_node
|
This function makes a tree from folders and files.
|
train
|
https://github.com/qwilka/vn-tree/blob/f08106e9c7232d8748d78d1d39b019699a7407dd/examples/simple_tree.py#L126-L139
| null |
import itertools
import os
import sys
# A «class» provides the specification for an «object».
# An «object» is a data structure with integrated functions (called «methods» in Python).
class Node:
    """A node in a general (n-ary) tree.

    Each node keeps a ``name``, a list of ``children`` and a reference to
    its ``parent`` (``None`` for the root).  Iterating a node yields the
    node itself followed by all descendants in depth-first, top-down
    order; ``reversed()`` yields the same nodes bottom-up.

    Ref: https://en.wikipedia.org/wiki/Tree_(data_structure)
    """

    def __init__(self, name, parent=None):
        self.name = name
        self.children = []
        if parent is None:
            self.parent = None
        elif isinstance(parent, Node):
            # Attaching via the parent also sets self.parent.
            parent.add_child(self)
        else:
            raise TypeError("Node instance «{}» argument «parent» type not valid: {}".format(name, type(parent)))

    def __str__(self):
        depth = len(self.get_ancestors())
        return "{} level={} «{}»".format(self.__class__.__name__, depth, self.name)

    def __iter__(self):
        """Depth-first, pre-order traversal (this node first)."""
        yield self
        for child in self.children:
            yield from child

    def __reversed__(self):
        """Depth-first, post-order traversal (this node last)."""
        for child in self.children:
            yield from reversed(child)
        yield self

    def add_child(self, newnode):
        """Append *newnode* to this node's children and set its parent."""
        self.children.append(newnode)
        newnode.parent = self
        return True

    def get_ancestors(self):
        """Return ancestors ordered nearest-first (parent, grandparent, ...)."""
        chain = []
        node = self
        while node.parent:
            node = node.parent
            chain.append(node)
        return chain

    def to_texttree(self):
        """Render the subtree rooted at this node as an indented text diagram."""
        lines = []
        base_depth = len(self.get_ancestors())
        for node in self:
            depth = len(node.get_ancestors()) - base_depth
            lines.append(("." + " " * 3) * depth + "|---{}\n".format(node.name))
        return "".join(lines)
# This class specifies a "decision tree" through «inheritance» from the
# "Node" class, re-implementing some of the "Node" properties to
# change its behaviour.
class DecisionNode(Node):
    """Tree node for decision-tree (expected value) analysis.

    Each node carries a ``type`` ("decision", "chance" or "outcome"), a
    probability ``p``, a net present value ``npv`` and an expected value
    ``ev = p * npv``.

    Ref: https://en.wikipedia.org/wiki/Decision_tree
    """
    def __init__(self, name, parent=None, type="outcome", p=1, npv=0):
        super().__init__(name, parent)
        self.p = p                    # probability of this branch (1 == certain)
        self.npv = npv                # net present value of this branch
        self.ev = self.p * self.npv   # expected value
        self.type = type              # "decision" | "chance" | "outcome"
        self.decision = ""            # best option name, set by calculate_ev
    def to_texttree(self):
        """Render the decision tree with type symbols, probabilities and EVs."""
        treetext = ""
        root_level = len(self.get_ancestors())
        for node in self:
            if node.type == "chance":
                type_sym = "\u25ef"   # large circle
            elif node.type == "decision":
                type_sym = "\u25a1"   # white square
            elif node.type == "outcome" and node.p==1:
                type_sym = "|"
            elif node.type == "outcome":
                type_sym = "\u25b7"   # white triangle
            else:
                # Fix: an unrecognised node type previously left `type_sym`
                # unbound and raised NameError; mark unknown types instead.
                type_sym = "?"
            level = len(node.get_ancestors()) - root_level
            treetext += ("." + " "*3)*level + "{}---{}".format(type_sym, node)
            if node.type == "decision":
                treetext += "; DECISION: «{}»\n".format(node.decision)
                treetext += " "*4*(level+1) + "|\n"
            elif node.type == "chance":
                treetext += "; EV={}\n".format(node.ev)
            elif node.type == "outcome":
                treetext += "; p={}; NPV={}; EV={}\n".format(node.p, node.npv, node.ev)
            if node.children:
                treetext += " "*4*(level+1) + "|\n"
            else:
                treetext += "\n"
        return treetext
    @staticmethod
    def calculate_ev(node):
        """Roll back one node: recompute its NPV and EV from its children.

        Must be applied bottom-up (e.g. ``map(calculate_ev, reversed(root))``)
        so children are already resolved when a parent is visited.

        :returns: tuple ``(node.name, node.ev)``.
        """
        if node.children and node.type == "chance":
            # Chance node: NPV is the probability-weighted sum of children.
            node.npv = 0
            for child in node.children:
                node.npv += child.p * child.npv
        elif node.type == "decision":
            # Decision node: take the child with the highest NPV.
            best_npv = -sys.float_info.max
            for child in node.children:
                if child.npv > best_npv:
                    best_npv = child.npv
                    best_option = child.name
            node.npv = best_npv
            node.decision = best_option
        elif node.type == "outcome" and node.children:
            # Pass-through outcome: inherit NPV from the single child.
            node.npv = node.children[0].npv
        node.ev = node.p * node.npv
        return node.name, node.ev
if __name__ == '__main__':
    # Toggle which of the three demonstrations below runs.
    SIMPLE_TREE = True
    FILES_FOLDERS_TREE = False
    DECISION_TREE = False
    if SIMPLE_TREE:
        # Build a small hand-made tree and show it rendered as text,
        # then traversed top-down and bottom-up.
        rootnode = Node('ROOT ("top" of the tree)')
        Node("1st child (leaf node)", parent=rootnode)
        child2 = Node("2nd child", rootnode)
        Node("grand-child1 (leaf node)", child2)
        Node("grand-child2 (leaf node)", child2)
        child3 = Node("3rd child", rootnode)
        Node("another child (leaf node)", rootnode)
        grandchild3 = Node(parent=child3, name="grand-child3")
        ggrandchild = Node("great-grandchild", grandchild3)
        Node("great-great-grandchild (leaf node)", ggrandchild)
        Node("great-grandchild2 (leaf node)", grandchild3)
        print()
        print(rootnode.to_texttree())
        print("\nTree iterate top-down:")
        for node in rootnode:
            print(node)
        print("\nTree iterate bottom-up:")
        for node in reversed(rootnode):
            print(node)
    if FILES_FOLDERS_TREE:
        # Specify a folder path on your computer in the variable «root_folder».
        # Choose a folder with only a few files and sub-folders, to avoid creating a large tree.
        # Windows users should note that there are complications when specifying a
        # file path in Python, see this link: https://stackoverflow.com/a/46011113
        root_folder = r"/home/develop/Projects/src/visinum/visinum"
        if not os.path.isdir(root_folder):
            print("ERROR: «{}» is not a valid folder!".format(root_folder))
            sys.exit()
        files_folders_tree = make_file_system_tree(root_folder)
        #dummynode = Node("dummynode", files_folders_tree)
        #dummynode.path = ""
        for item in files_folders_tree:
            print(item.path, end="")
            if os.path.isdir(item.path):
                print(" is a FOLDER")
            elif os.path.isfile(item.path):
                print(" is a FILE")
            else:
                raise OSError("«{}» is not a valid file or folder!".format(item))
        print(files_folders_tree.to_texttree())
    if DECISION_TREE:
        # Worked example from: http://www.maxvalue.com/DA_in_CE_20170329.pdf
        # DECISION ANALYSIS IN COST ENGINEERING, John Schuyler, 2017
        ALD_decision = DecisionNode("Acquire and drill | Do not acquire?", type="decision")
        No_acq = DecisionNode("Do not acquire lease", ALD_decision, npv=0)
        tw1_chance = DecisionNode("Acquire and drill", ALD_decision, type="chance")
        tw1_large = DecisionNode("Large field", tw1_chance, p=0.05, npv=360)
        tw1_marg = DecisionNode("Marginal field", tw1_chance, p=0.05, npv=110)
        tw1_dry = DecisionNode("Dry hole", tw1_chance, p=0.9)
        tw2_decision = DecisionNode("Drill test well #2 | drop?", tw1_dry, type="decision")
        tw2_chance = DecisionNode("Drill test well #2", tw2_decision, type="chance")
        tw2_large = DecisionNode("Large field", tw2_chance, p=0.075, npv=350)
        tw2_marg = DecisionNode("Marginal field", tw2_chance, p=0.075, npv=100)
        tw2_dry = DecisionNode("Dry hole", tw2_chance, p=0.85, npv=-50)
        tw2_drop = DecisionNode("Drop", tw2_decision, npv=-40)
        # for node in acq_lease_drill:
        #     print(node)
        rootnode = ALD_decision
        # Roll back bottom-up so every child is resolved before its parent.
        EVs = list(map(rootnode.calculate_ev, reversed(rootnode)))
        print("EMV for «{}» = ${} million".format(rootnode.decision, rootnode.npv))
        print(rootnode.to_texttree())
|
mediawiki-utilities/python-mwtypes
|
mwtypes/user.py
|
User.initialize
|
python
|
def initialize(self, id=None, text=None):
    """Populate user fields, coercing each value through ``none_or``.

    :param id: contributing user's identifier; coerced to int, or None.
    :param text: username or IP address at edit time; coerced to str, or None.
    """
    # Contributing user's identifier : int | None
    self.id = none_or(id, int)
    self.text = none_or(text, str)
    """
    Username or IP address of the user at the time of the edit : str | None
    """
|
Contributing user's identifier : int | None
|
train
|
https://github.com/mediawiki-utilities/python-mwtypes/blob/d996562e40437a7fff39a1c00fb5544b5708b5ed/mwtypes/user.py#L28-L37
|
[
"def none_or(val, func):\n if val is None:\n return None\n else:\n return func(val)\n"
] |
class User(jsonable.Type):
"""
Contributing user metadata.
:Attributes:
.. autoattribute:: mwtypes.User.id
:annotation: = Contributing user's identifier : int | None
.. autoattribute:: mwtypes.User.text
:annotation: = Username or IP address of the user at the time of
the edit : str | None
"""
__slots__ = ('id', 'text')
|
mediawiki-utilities/python-mwtypes
|
mwtypes/page.py
|
Page.initialize
|
python
|
def initialize(self, id=None, title=None, namespace=None, redirect=None,
restrictions=None):
self.id = none_or(id, int)
self.title = none_or(title, str)
"""
Page title (namespace excluded) : `str`
"""
self.namespace = none_or(namespace, int)
"""
Namespace ID : `int`
"""
self.redirect = none_or(redirect, str)
"""
Page name that the page redirects to : `str` | `None`
"""
self.restrictions = none_or(restrictions, list)
"""
A list of page editing restrictions : list( `str` ) | `None`
"""
|
Page ID : `int`
|
train
|
https://github.com/mediawiki-utilities/python-mwtypes/blob/d996562e40437a7fff39a1c00fb5544b5708b5ed/mwtypes/page.py#L35-L60
|
[
"def none_or(val, func):\n if val is None:\n return None\n else:\n return func(val)\n"
] |
class Page(jsonable.Type):
"""
Page metadata
:Attributes:
.. autoattribute:: mwtypes.Page.id
:annotation: = Page ID : int
.. autoattribute:: mwtypes.Page.title
:annotation: = Page title: str
.. autoattribute:: mwtypes.Page.namespace
:annotation: = Namespace ID: int
.. autoattribute:: mwtypes.Page.redirect
:annotation: = Page name that this page redirects to : str | None
.. autoattribute:: mwtypes.Page.restrictions
:annotation: = A list of page editing restrictions :
list( `str` ) | `None`
"""
__slots__ = ('id', 'title', 'namespace', 'redirect', 'restrictions')
|
mediawiki-utilities/python-mwtypes
|
mwtypes/files/p7z.py
|
reader
|
python
|
def reader(path):
    """
    Turns a path to a dump file into a file-like object of (decompressed)
    XML data assuming that '7z' is installed and will know what to do.

    :Parameters:
        path : `str`
            the path to the dump file to read
    """
    # Fix: stderr was previously redirected to a handle from
    # file_open(os.devnull, "w") that was never closed (file-descriptor
    # leak per call); DEVNULL lets subprocess manage the descriptor.
    p = subprocess.Popen(
        ['7z', 'e', '-so', path],
        stdout=subprocess.PIPE,
        stderr=subprocess.DEVNULL
    )
    # Wrap the raw byte stream so callers read decoded text; undecodable
    # bytes are replaced rather than raising.
    return io.TextIOWrapper(p.stdout, encoding='utf-8',
                            errors='replace')
|
Turns a path to a dump file into a file-like object of (decompressed)
XML data assuming that '7z' is installed and will know what to do.
:Parameters:
path : `str`
the path to the dump file to read
|
train
|
https://github.com/mediawiki-utilities/python-mwtypes/blob/d996562e40437a7fff39a1c00fb5544b5708b5ed/mwtypes/files/p7z.py#L8-L23
| null |
import io
import os
import subprocess
file_open = open
|
mediawiki-utilities/python-mwtypes
|
mwtypes/log_item.py
|
Deleted.initialize
|
python
|
def initialize(self, action=None, comment=None, user=None,
restricted=None):
self.action = none_or(action, bool)
self.comment = none_or(comment, bool)
"""
Is the comment of this revision deleted/suppressed? : `bool`
"""
self.user = none_or(user, bool)
"""
Is the user of this revision deleted/suppressed? : `bool`
"""
self.restricted = none_or(restricted, bool)
"""
Is the revision restricted? : `bool`
"""
|
Is the text of this revision deleted/suppressed? : `bool`
|
train
|
https://github.com/mediawiki-utilities/python-mwtypes/blob/d996562e40437a7fff39a1c00fb5544b5708b5ed/mwtypes/log_item.py#L38-L58
|
[
"def none_or(val, func):\n if val is None:\n return None\n else:\n return func(val)\n"
] |
class Deleted(jsonable.Type):
    """
    Represents information about the deleted/suppressed status of a log item
    and it's associated data.

    :Attributes:
        .. autoattribute:: mwtypes.log_item.Deleted.action
            :annotation: = Is the action of this log item deleted/suppressed? :
                bool | None
        .. autoattribute:: mwtypes.log_item.Deleted.comment
            :annotation: = Is the text of this log item deleted/suppressed? :
                bool | None
        .. autoattribute:: mwtypes.log_item.Deleted.user
            :annotation: = Is the user of this log item deleted/suppressed? :
                bool | None
        .. autoattribute:: mwtypes.log_item.Deleted.restricted
            :annotation: = Is the log item restricted? : bool | None
    """
    __slots__ = ('action', 'comment', 'user', 'restricted')

    @classmethod
    def from_int(cls, integer):
        """
        Constructs a `Deleted` using the `tinyint` value of the `log_deleted`
        column of the `logging` MariaDB table.

        * DELETED_ACTION = 1
        * DELETED_COMMENT = 2
        * DELETED_USER = 4
        * DELETED_RESTRICTED = 8
        """
        # Fix/idiom: test the flag bits directly instead of inspecting the
        # string produced by bin(), which only worked because '0' and 'b'
        # happen never to equal '1'.
        return cls(
            action=bool(integer & 1),
            comment=bool(integer & 2),
            user=bool(integer & 4),
            restricted=bool(integer & 8)
        )
|
mediawiki-utilities/python-mwtypes
|
mwtypes/log_item.py
|
LogItem.initialize
|
python
|
def initialize(self, id, timestamp=None, user=None, page=None,
comment=None, type=None, action=None, text=None,
params=None, deleted=None):
self.id = int(id)
self.timestamp = none_or(timestamp, Timestamp)
"""
log item timestamp : :class:`mwtypes.Timestamp`
"""
self.user = none_or(user, User)
"""
Contributing user metadata : :class:`mwtypes.User`
"""
self.page = none_or(page, self.Page)
"""
Related page : :class:`mwtypes.log_item.Page`
"""
self.comment = none_or(comment, str)
"""
Comment left with log item : `str`
"""
self.type = none_or(type, str)
"""
Type of log item : `str`
"""
self.action = none_or(action, str)
"""
Action of log item : `str`
"""
self.text = none_or(text, str)
"""
Content of text : `str`
"""
self.params = none_or(params, str)
"""
Content of params : `str`
"""
self.deleted = none_or(deleted, self.Deleted)
"""
The deleted/suppressed status of the log item.
"""
|
Log item ID : `int`
|
train
|
https://github.com/mediawiki-utilities/python-mwtypes/blob/d996562e40437a7fff39a1c00fb5544b5708b5ed/mwtypes/log_item.py#L145-L197
|
[
"def none_or(val, func):\n if val is None:\n return None\n else:\n return func(val)\n"
] |
class LogItem(jsonable.Type):
"""
Log item metadata
:Attributes:
.. autoattribute:: mwtypes.LogItem.id
:annotation: = Log item ID : int
.. autoattribute:: mwtypes.LogItem.timestamp
:annotation: = Log item timestamp :
mwtypes.Timestamp | None
.. autoattribute:: mwtypes.LogItem.user
:annotation: = Contributing user metadata :
mwtypes.User | None
.. autoattribute:: mwtypes.LogItem.page
:annotation: = Contributing user metadata :
mwtypes.log_item.Page | None
.. autoattribute:: mwtypes.LogItem.comment
:annotation: = Comment left with log item : str | None
.. autoattribute:: mwtypes.LogItem.type
:annotation: = Content of text : str | None
.. autoattribute:: mwtypes.LogItem.action
:annotation: = Content of text : str | None
.. autoattribute:: mwtypes.LogItem.text
:annotation: = Content of text : str | None
.. autoattribute:: mwtypes.LogItem.params
:annotation: = Content of params : str | None
.. autoattribute:: mwtypes.LogItem.deleted
:annotation: = The deleted/suppressed status of the log item :
mwtypes.log_item.Deleted | None
"""
__slots__ = ('id', 'timestamp', 'user', 'page', 'comment', 'type',
'action', 'text', 'params', 'deleted')
Deleted = Deleted
Page = Page
|
mediawiki-utilities/python-mwtypes
|
mwtypes/timestamp.py
|
Timestamp.strptime
|
python
|
def strptime(cls, string, format):
    """Construct a Timestamp by parsing *string* with an explicit *format*.

    Delegates to ``time.strptime`` and wraps the resulting time struct.
    """
    return cls.from_time_struct(time.strptime(string, format))
|
Constructs a :class:`mw.Timestamp` from an explicitly formatted string.
See `<https://docs.python.org/3/library/time.html#time.strftime>`_ for a
discussion of formats descriptors.
:Parameters:
string : str
A formatted timestamp
format : str
The format description
:Returns:
:class:`mw.Timestamp`
|
train
|
https://github.com/mediawiki-utilities/python-mwtypes/blob/d996562e40437a7fff39a1c00fb5544b5708b5ed/mwtypes/timestamp.py#L140-L155
| null |
class Timestamp(jsonable.Type):
"""
Provides a set of convenience functions for working with MediaWiki
timestamps. This class can interpret and return multiple formats as well as
perform basic mathematical operations.
:Parameters:
time_thing : `time.time_struct` | `datetime.datetime` | `str` | `int`
The timestamp type from which to construct the timestamp class.
You can make use of a lot of different *time things* to initialize a
:class:`mw.Timestamp`.
* If a :py:class:`~time.time_struct` or :py:class:`~datetime.datetime` are
provided, a `Timestamp` will be constructed from their values.
* If an `int` or `float` are provided, they will be assumed to a unix
timestamp in seconds since Jan. 1st, 1970 UTC.
* If a `str` is provided, it will be be checked against known MediaWiki
timestamp formats. E.g., ``'%Y%m%d%H%M%S'`` and ``'%Y-%m-%dT%H:%M:%SZ'``.
For example::
>>> import datetime, time
>>> from mwtypes import Timestamp
>>> Timestamp(1234567890)
Timestamp('2009-02-13T23:31:30Z')
>>> Timestamp(1234567890) == Timestamp("2009-02-13T23:31:30Z")
True
>>> Timestamp(1234567890) == Timestamp("20090213233130")
True
>>> Timestamp(1234567890) == Timestamp(datetime.datetime.utcfromtimestamp(1234567890))
True
>>> Timestamp(1234567890) == Timestamp(time.strptime("2009-02-13T23:31:30Z", "%Y-%m-%dT%H:%M:%SZ"))
True
You can also do math and comparisons of timestamps.::
>>> from mw import Timestamp
>>> t = Timestamp(1234567890)
>>> t
Timestamp('2009-02-13T23:31:30Z')
>>> t2 = t + 10
>>> t2
Timestamp('2009-02-13T23:31:40Z')
>>> t += 1
>>> t
Timestamp('2009-02-13T23:31:31Z')
>>> t2 - t
9
>>> t < t2
True
"""
__slots__ = ('__time',)
def __new__(cls, time_thing):
if isinstance(time_thing, cls):
return time_thing
elif isinstance(time_thing, time.struct_time):
return cls.from_time_struct(time_thing)
elif isinstance(time_thing, datetime.datetime):
return cls.from_datetime(time_thing)
elif type(time_thing) in (int, float):
return cls.from_unix(time_thing)
else:
return cls.from_string(time_thing)
def initialize(self, time_struct):
self.__time = time_struct
def short_format(self):
"""
Constructs a long, ``'%Y%m%d%H%M%S'`` formatted string common to the
database. This method is roughly equivalent to calling
``strftime('%Y%m%d%H%M%S')``.
:Parameters:
format : str
The string format
:Returns:
A formatted string
"""
return self.strftime(SHORT_MW_TIME_STRING)
def long_format(self):
"""
Constructs a long, ``'%Y-%m-%dT%H:%M:%SZ'`` formatted string common to the
API. This method is roughly equivalent to calling
``strftime('%Y-%m-%dT%H:%M:%SZ')``.
:Parameters:
format : str
The string format
:Returns:
A formatted string
"""
return self.strftime(LONG_MW_TIME_STRING)
def strftime(self, format):
"""
Constructs a formatted string.
See `<https://docs.python.org/3/library/time.html#time.strftime>`_ for a
discussion of formats descriptors.
:Parameters:
format : str
The format description
:Returns:
A formatted string
"""
return time.strftime(format, self.__time)
@classmethod
@classmethod
def from_time_struct(cls, time_struct):
"""
Constructs a :class:`mw.Timestamp` from a :class:`time.time_struct`.
:Parameters:
time_struct : :class:`time.time_struct`
A time structure
:Returns:
:class:`mw.Timestamp`
"""
instance = super().__new__(cls, time_struct)
instance.initialize(time_struct)
return instance
@classmethod
def from_datetime(cls, dt):
"""
Constructs a :class:`mw.Timestamp` from a :class:`datetime.datetime`.
:Parameters:
dt : :class:`datetime.datetime``
A datetime.
:Returns:
:class:`mw.Timestamp`
"""
time_struct = dt.timetuple()
return cls.from_time_struct(time_struct)
@classmethod
def from_unix(cls, seconds):
"""
Constructs a :class:`mw.Timestamp` from a unix timestamp (in seconds
since Jan. 1st, 1970 UTC).
:Parameters:
seconds : int
A unix timestamp
:Returns:
:class:`mw.Timestamp`
"""
time_struct = datetime.datetime.utcfromtimestamp(seconds).timetuple()
return cls.from_time_struct(time_struct)
@classmethod
def from_string(cls, string):
"""
Constructs a :class:`mw.Timestamp` from a MediaWiki formatted string.
This method is provides a convenient way to construct from common
MediaWiki timestamp formats. E.g., ``%Y%m%d%H%M%S`` and
``%Y-%m-%dT%H:%M:%SZ``.
:Parameters:
string : str
A formatted timestamp
:Returns:
:class:`mw.Timestamp`
"""
if type(string) == bytes:
string = str(string, 'utf8')
else:
string = str(string)
try:
return cls.strptime(string, SHORT_MW_TIME_STRING)
except ValueError as e:
try:
return cls.strptime(string, LONG_MW_TIME_STRING)
except ValueError as e:
raise ValueError(
"{0} is not a valid Wikipedia date format".format(
repr(string)
)
)
return cls.from_time_struct(time_struct)
def __format__(self, format):
return self.strftime(format)
def __str__(self):
return self.long_format()
def __repr__(self):
return "{0}({1})".format(
self.__class__.__name__,
repr(self.long_format())
)
def to_json(self):
return self.long_format()
@classmethod
def from_json(cls, time_thing):
return cls(time_thing)
def __int__(self):
return self.unix()
def __float__(self):
return float(self.unix())
def unix(self):
"""
:Returns:
the number of seconds since Jan. 1st, 1970 UTC.
"""
return int(calendar.timegm(self.__time))
def __sub__(self, other):
if isinstance(other, Timestamp):
return self.unix() - other.unix()
else:
return self + (other * -1)
def __add__(self, seconds):
return Timestamp(self.unix() + seconds)
def __eq__(self, other):
try:
return self.__time == other.__time
except AttributeError:
return False
def __lt__(self, other):
try:
return self.__time < other.__time
except AttributeError:
return NotImplemented
def __gt__(self, other):
try:
return self.__time > other.__time
except AttributeError:
return NotImplemented
def __le__(self, other):
try:
return self.__time <= other.__time
except AttributeError:
return NotImplemented
def __ge__(self, other):
try:
return self.__time >= other.__time
except AttributeError:
return NotImplemented
def __ne__(self, other):
try:
return not self.__time == other.__time
except AttributeError:
return NotImplemented
def __getstate__(self):
return {'__time': self.__time}
|
mediawiki-utilities/python-mwtypes
|
mwtypes/timestamp.py
|
Timestamp.from_time_struct
|
python
|
def from_time_struct(cls, time_struct):
    """Construct a Timestamp wrapping an existing ``time.struct_time``.

    Bypasses the dispatching ``__new__`` by allocating via the superclass
    and initialising with the given struct directly.
    """
    instance = super().__new__(cls, time_struct)
    instance.initialize(time_struct)
    return instance
|
Constructs a :class:`mw.Timestamp` from a :class:`time.time_struct`.
:Parameters:
time_struct : :class:`time.time_struct`
A time structure
:Returns:
:class:`mw.Timestamp`
|
train
|
https://github.com/mediawiki-utilities/python-mwtypes/blob/d996562e40437a7fff39a1c00fb5544b5708b5ed/mwtypes/timestamp.py#L158-L171
| null |
class Timestamp(jsonable.Type):
"""
Provides a set of convenience functions for working with MediaWiki
timestamps. This class can interpret and return multiple formats as well as
perform basic mathematical operations.
:Parameters:
time_thing : `time.time_struct` | `datetime.datetime` | `str` | `int`
The timestamp type from which to construct the timestamp class.
You can make use of a lot of different *time things* to initialize a
:class:`mw.Timestamp`.
* If a :py:class:`~time.time_struct` or :py:class:`~datetime.datetime` are
provided, a `Timestamp` will be constructed from their values.
* If an `int` or `float` are provided, they will be assumed to a unix
timestamp in seconds since Jan. 1st, 1970 UTC.
* If a `str` is provided, it will be be checked against known MediaWiki
timestamp formats. E.g., ``'%Y%m%d%H%M%S'`` and ``'%Y-%m-%dT%H:%M:%SZ'``.
For example::
>>> import datetime, time
>>> from mwtypes import Timestamp
>>> Timestamp(1234567890)
Timestamp('2009-02-13T23:31:30Z')
>>> Timestamp(1234567890) == Timestamp("2009-02-13T23:31:30Z")
True
>>> Timestamp(1234567890) == Timestamp("20090213233130")
True
>>> Timestamp(1234567890) == Timestamp(datetime.datetime.utcfromtimestamp(1234567890))
True
>>> Timestamp(1234567890) == Timestamp(time.strptime("2009-02-13T23:31:30Z", "%Y-%m-%dT%H:%M:%SZ"))
True
You can also do math and comparisons of timestamps.::
>>> from mw import Timestamp
>>> t = Timestamp(1234567890)
>>> t
Timestamp('2009-02-13T23:31:30Z')
>>> t2 = t + 10
>>> t2
Timestamp('2009-02-13T23:31:40Z')
>>> t += 1
>>> t
Timestamp('2009-02-13T23:31:31Z')
>>> t2 - t
9
>>> t < t2
True
"""
__slots__ = ('__time',)
def __new__(cls, time_thing):
if isinstance(time_thing, cls):
return time_thing
elif isinstance(time_thing, time.struct_time):
return cls.from_time_struct(time_thing)
elif isinstance(time_thing, datetime.datetime):
return cls.from_datetime(time_thing)
elif type(time_thing) in (int, float):
return cls.from_unix(time_thing)
else:
return cls.from_string(time_thing)
def initialize(self, time_struct):
self.__time = time_struct
def short_format(self):
"""
Constructs a long, ``'%Y%m%d%H%M%S'`` formatted string common to the
database. This method is roughly equivalent to calling
``strftime('%Y%m%d%H%M%S')``.
:Parameters:
format : str
The string format
:Returns:
A formatted string
"""
return self.strftime(SHORT_MW_TIME_STRING)
def long_format(self):
"""
Constructs a long, ``'%Y-%m-%dT%H:%M:%SZ'`` formatted string common to the
API. This method is roughly equivalent to calling
``strftime('%Y-%m-%dT%H:%M:%SZ')``.
:Parameters:
format : str
The string format
:Returns:
A formatted string
"""
return self.strftime(LONG_MW_TIME_STRING)
def strftime(self, format):
"""
Constructs a formatted string.
See `<https://docs.python.org/3/library/time.html#time.strftime>`_ for a
discussion of formats descriptors.
:Parameters:
format : str
The format description
:Returns:
A formatted string
"""
return time.strftime(format, self.__time)
@classmethod
def strptime(cls, string, format):
"""
Constructs a :class:`mw.Timestamp` from an explicitly formatted string.
See `<https://docs.python.org/3/library/time.html#time.strftime>`_ for a
discussion of formats descriptors.
:Parameters:
string : str
A formatted timestamp
format : str
The format description
:Returns:
:class:`mw.Timestamp`
"""
return cls.from_time_struct(time.strptime(string, format))
@classmethod
@classmethod
def from_datetime(cls, dt):
"""
Constructs a :class:`mw.Timestamp` from a :class:`datetime.datetime`.
:Parameters:
dt : :class:`datetime.datetime``
A datetime.
:Returns:
:class:`mw.Timestamp`
"""
time_struct = dt.timetuple()
return cls.from_time_struct(time_struct)
@classmethod
def from_unix(cls, seconds):
"""
Constructs a :class:`mw.Timestamp` from a unix timestamp (in seconds
since Jan. 1st, 1970 UTC).
:Parameters:
seconds : int
A unix timestamp
:Returns:
:class:`mw.Timestamp`
"""
time_struct = datetime.datetime.utcfromtimestamp(seconds).timetuple()
return cls.from_time_struct(time_struct)
@classmethod
def from_string(cls, string):
"""
Constructs a :class:`mw.Timestamp` from a MediaWiki formatted string.
This method is provides a convenient way to construct from common
MediaWiki timestamp formats. E.g., ``%Y%m%d%H%M%S`` and
``%Y-%m-%dT%H:%M:%SZ``.
:Parameters:
string : str
A formatted timestamp
:Returns:
:class:`mw.Timestamp`
"""
if type(string) == bytes:
string = str(string, 'utf8')
else:
string = str(string)
try:
return cls.strptime(string, SHORT_MW_TIME_STRING)
except ValueError as e:
try:
return cls.strptime(string, LONG_MW_TIME_STRING)
except ValueError as e:
raise ValueError(
"{0} is not a valid Wikipedia date format".format(
repr(string)
)
)
return cls.from_time_struct(time_struct)
def __format__(self, format):
return self.strftime(format)
def __str__(self):
return self.long_format()
def __repr__(self):
return "{0}({1})".format(
self.__class__.__name__,
repr(self.long_format())
)
def to_json(self):
return self.long_format()
@classmethod
def from_json(cls, time_thing):
return cls(time_thing)
def __int__(self):
return self.unix()
def __float__(self):
return float(self.unix())
def unix(self):
"""
:Returns:
the number of seconds since Jan. 1st, 1970 UTC.
"""
return int(calendar.timegm(self.__time))
def __sub__(self, other):
if isinstance(other, Timestamp):
return self.unix() - other.unix()
else:
return self + (other * -1)
def __add__(self, seconds):
return Timestamp(self.unix() + seconds)
def __eq__(self, other):
try:
return self.__time == other.__time
except AttributeError:
return False
def __lt__(self, other):
try:
return self.__time < other.__time
except AttributeError:
return NotImplemented
def __gt__(self, other):
try:
return self.__time > other.__time
except AttributeError:
return NotImplemented
def __le__(self, other):
try:
return self.__time <= other.__time
except AttributeError:
return NotImplemented
def __ge__(self, other):
try:
return self.__time >= other.__time
except AttributeError:
return NotImplemented
def __ne__(self, other):
try:
return not self.__time == other.__time
except AttributeError:
return NotImplemented
def __getstate__(self):
return {'__time': self.__time}
|
mediawiki-utilities/python-mwtypes
|
mwtypes/timestamp.py
|
Timestamp.from_unix
|
python
|
def from_unix(cls, seconds):
    """Construct a Timestamp from seconds since the Unix epoch (UTC).

    NOTE(review): ``datetime.utcfromtimestamp`` is deprecated since
    Python 3.12; consider ``datetime.fromtimestamp(seconds, tz=UTC)``.
    """
    time_struct = datetime.datetime.utcfromtimestamp(seconds).timetuple()
    return cls.from_time_struct(time_struct)
|
Constructs a :class:`mw.Timestamp` from a unix timestamp (in seconds
since Jan. 1st, 1970 UTC).
:Parameters:
seconds : int
A unix timestamp
:Returns:
:class:`mw.Timestamp`
|
train
|
https://github.com/mediawiki-utilities/python-mwtypes/blob/d996562e40437a7fff39a1c00fb5544b5708b5ed/mwtypes/timestamp.py#L189-L202
| null |
class Timestamp(jsonable.Type):
"""
Provides a set of convenience functions for working with MediaWiki
timestamps. This class can interpret and return multiple formats as well as
perform basic mathematical operations.
:Parameters:
time_thing : `time.time_struct` | `datetime.datetime` | `str` | `int`
The timestamp type from which to construct the timestamp class.
You can make use of a lot of different *time things* to initialize a
:class:`mw.Timestamp`.
* If a :py:class:`~time.time_struct` or :py:class:`~datetime.datetime` are
provided, a `Timestamp` will be constructed from their values.
* If an `int` or `float` are provided, they will be assumed to a unix
timestamp in seconds since Jan. 1st, 1970 UTC.
* If a `str` is provided, it will be be checked against known MediaWiki
timestamp formats. E.g., ``'%Y%m%d%H%M%S'`` and ``'%Y-%m-%dT%H:%M:%SZ'``.
For example::
>>> import datetime, time
>>> from mwtypes import Timestamp
>>> Timestamp(1234567890)
Timestamp('2009-02-13T23:31:30Z')
>>> Timestamp(1234567890) == Timestamp("2009-02-13T23:31:30Z")
True
>>> Timestamp(1234567890) == Timestamp("20090213233130")
True
>>> Timestamp(1234567890) == Timestamp(datetime.datetime.utcfromtimestamp(1234567890))
True
>>> Timestamp(1234567890) == Timestamp(time.strptime("2009-02-13T23:31:30Z", "%Y-%m-%dT%H:%M:%SZ"))
True
You can also do math and comparisons of timestamps.::
>>> from mw import Timestamp
>>> t = Timestamp(1234567890)
>>> t
Timestamp('2009-02-13T23:31:30Z')
>>> t2 = t + 10
>>> t2
Timestamp('2009-02-13T23:31:40Z')
>>> t += 1
>>> t
Timestamp('2009-02-13T23:31:31Z')
>>> t2 - t
9
>>> t < t2
True
"""
__slots__ = ('__time',)
def __new__(cls, time_thing):
if isinstance(time_thing, cls):
return time_thing
elif isinstance(time_thing, time.struct_time):
return cls.from_time_struct(time_thing)
elif isinstance(time_thing, datetime.datetime):
return cls.from_datetime(time_thing)
elif type(time_thing) in (int, float):
return cls.from_unix(time_thing)
else:
return cls.from_string(time_thing)
def initialize(self, time_struct):
self.__time = time_struct
def short_format(self):
"""
Constructs a long, ``'%Y%m%d%H%M%S'`` formatted string common to the
database. This method is roughly equivalent to calling
``strftime('%Y%m%d%H%M%S')``.
:Parameters:
format : str
The string format
:Returns:
A formatted string
"""
return self.strftime(SHORT_MW_TIME_STRING)
def long_format(self):
"""
Constructs a long, ``'%Y-%m-%dT%H:%M:%SZ'`` formatted string common to the
API. This method is roughly equivalent to calling
``strftime('%Y-%m-%dT%H:%M:%SZ')``.
:Parameters:
format : str
The string format
:Returns:
A formatted string
"""
return self.strftime(LONG_MW_TIME_STRING)
def strftime(self, format):
"""
Constructs a formatted string.
See `<https://docs.python.org/3/library/time.html#time.strftime>`_ for a
discussion of formats descriptors.
:Parameters:
format : str
The format description
:Returns:
A formatted string
"""
return time.strftime(format, self.__time)
@classmethod
def strptime(cls, string, format):
"""
Constructs a :class:`mw.Timestamp` from an explicitly formatted string.
See `<https://docs.python.org/3/library/time.html#time.strftime>`_ for a
discussion of formats descriptors.
:Parameters:
string : str
A formatted timestamp
format : str
The format description
:Returns:
:class:`mw.Timestamp`
"""
return cls.from_time_struct(time.strptime(string, format))
@classmethod
def from_time_struct(cls, time_struct):
"""
Constructs a :class:`mw.Timestamp` from a :class:`time.time_struct`.
:Parameters:
time_struct : :class:`time.time_struct`
A time structure
:Returns:
:class:`mw.Timestamp`
"""
instance = super().__new__(cls, time_struct)
instance.initialize(time_struct)
return instance
@classmethod
def from_datetime(cls, dt):
"""
Constructs a :class:`mw.Timestamp` from a :class:`datetime.datetime`.
:Parameters:
dt : :class:`datetime.datetime``
A datetime.
:Returns:
:class:`mw.Timestamp`
"""
time_struct = dt.timetuple()
return cls.from_time_struct(time_struct)
@classmethod
@classmethod
def from_string(cls, string):
"""
Constructs a :class:`mw.Timestamp` from a MediaWiki formatted string.
This method is provides a convenient way to construct from common
MediaWiki timestamp formats. E.g., ``%Y%m%d%H%M%S`` and
``%Y-%m-%dT%H:%M:%SZ``.
:Parameters:
string : str
A formatted timestamp
:Returns:
:class:`mw.Timestamp`
"""
if type(string) == bytes:
string = str(string, 'utf8')
else:
string = str(string)
try:
return cls.strptime(string, SHORT_MW_TIME_STRING)
except ValueError as e:
try:
return cls.strptime(string, LONG_MW_TIME_STRING)
except ValueError as e:
raise ValueError(
"{0} is not a valid Wikipedia date format".format(
repr(string)
)
)
return cls.from_time_struct(time_struct)
def __format__(self, format):
return self.strftime(format)
def __str__(self):
return self.long_format()
def __repr__(self):
return "{0}({1})".format(
self.__class__.__name__,
repr(self.long_format())
)
def to_json(self):
return self.long_format()
@classmethod
def from_json(cls, time_thing):
return cls(time_thing)
def __int__(self):
return self.unix()
def __float__(self):
return float(self.unix())
def unix(self):
"""
:Returns:
the number of seconds since Jan. 1st, 1970 UTC.
"""
return int(calendar.timegm(self.__time))
def __sub__(self, other):
if isinstance(other, Timestamp):
return self.unix() - other.unix()
else:
return self + (other * -1)
def __add__(self, seconds):
return Timestamp(self.unix() + seconds)
def __eq__(self, other):
try:
return self.__time == other.__time
except AttributeError:
return False
def __lt__(self, other):
try:
return self.__time < other.__time
except AttributeError:
return NotImplemented
def __gt__(self, other):
try:
return self.__time > other.__time
except AttributeError:
return NotImplemented
def __le__(self, other):
try:
return self.__time <= other.__time
except AttributeError:
return NotImplemented
def __ge__(self, other):
try:
return self.__time >= other.__time
except AttributeError:
return NotImplemented
def __ne__(self, other):
try:
return not self.__time == other.__time
except AttributeError:
return NotImplemented
def __getstate__(self):
return {'__time': self.__time}
|
mediawiki-utilities/python-mwtypes
|
mwtypes/timestamp.py
|
Timestamp.from_string
|
python
|
def from_string(cls, string):
if type(string) == bytes:
string = str(string, 'utf8')
else:
string = str(string)
try:
return cls.strptime(string, SHORT_MW_TIME_STRING)
except ValueError as e:
try:
return cls.strptime(string, LONG_MW_TIME_STRING)
except ValueError as e:
raise ValueError(
"{0} is not a valid Wikipedia date format".format(
repr(string)
)
)
return cls.from_time_struct(time_struct)
|
Constructs a :class:`mw.Timestamp` from a MediaWiki formatted string.
This method is provides a convenient way to construct from common
MediaWiki timestamp formats. E.g., ``%Y%m%d%H%M%S`` and
``%Y-%m-%dT%H:%M:%SZ``.
:Parameters:
string : str
A formatted timestamp
:Returns:
:class:`mw.Timestamp`
|
train
|
https://github.com/mediawiki-utilities/python-mwtypes/blob/d996562e40437a7fff39a1c00fb5544b5708b5ed/mwtypes/timestamp.py#L205-L236
| null |
class Timestamp(jsonable.Type):
"""
Provides a set of convenience functions for working with MediaWiki
timestamps. This class can interpret and return multiple formats as well as
perform basic mathematical operations.
:Parameters:
time_thing : `time.time_struct` | `datetime.datetime` | `str` | `int`
The timestamp type from which to construct the timestamp class.
You can make use of a lot of different *time things* to initialize a
:class:`mw.Timestamp`.
* If a :py:class:`~time.time_struct` or :py:class:`~datetime.datetime` are
provided, a `Timestamp` will be constructed from their values.
* If an `int` or `float` are provided, they will be assumed to a unix
timestamp in seconds since Jan. 1st, 1970 UTC.
* If a `str` is provided, it will be be checked against known MediaWiki
timestamp formats. E.g., ``'%Y%m%d%H%M%S'`` and ``'%Y-%m-%dT%H:%M:%SZ'``.
For example::
>>> import datetime, time
>>> from mwtypes import Timestamp
>>> Timestamp(1234567890)
Timestamp('2009-02-13T23:31:30Z')
>>> Timestamp(1234567890) == Timestamp("2009-02-13T23:31:30Z")
True
>>> Timestamp(1234567890) == Timestamp("20090213233130")
True
>>> Timestamp(1234567890) == Timestamp(datetime.datetime.utcfromtimestamp(1234567890))
True
>>> Timestamp(1234567890) == Timestamp(time.strptime("2009-02-13T23:31:30Z", "%Y-%m-%dT%H:%M:%SZ"))
True
You can also do math and comparisons of timestamps.::
>>> from mw import Timestamp
>>> t = Timestamp(1234567890)
>>> t
Timestamp('2009-02-13T23:31:30Z')
>>> t2 = t + 10
>>> t2
Timestamp('2009-02-13T23:31:40Z')
>>> t += 1
>>> t
Timestamp('2009-02-13T23:31:31Z')
>>> t2 - t
9
>>> t < t2
True
"""
__slots__ = ('__time',)
def __new__(cls, time_thing):
if isinstance(time_thing, cls):
return time_thing
elif isinstance(time_thing, time.struct_time):
return cls.from_time_struct(time_thing)
elif isinstance(time_thing, datetime.datetime):
return cls.from_datetime(time_thing)
elif type(time_thing) in (int, float):
return cls.from_unix(time_thing)
else:
return cls.from_string(time_thing)
def initialize(self, time_struct):
self.__time = time_struct
def short_format(self):
"""
Constructs a long, ``'%Y%m%d%H%M%S'`` formatted string common to the
database. This method is roughly equivalent to calling
``strftime('%Y%m%d%H%M%S')``.
:Parameters:
format : str
The string format
:Returns:
A formatted string
"""
return self.strftime(SHORT_MW_TIME_STRING)
def long_format(self):
"""
Constructs a long, ``'%Y-%m-%dT%H:%M:%SZ'`` formatted string common to the
API. This method is roughly equivalent to calling
``strftime('%Y-%m-%dT%H:%M:%SZ')``.
:Parameters:
format : str
The string format
:Returns:
A formatted string
"""
return self.strftime(LONG_MW_TIME_STRING)
def strftime(self, format):
"""
Constructs a formatted string.
See `<https://docs.python.org/3/library/time.html#time.strftime>`_ for a
discussion of formats descriptors.
:Parameters:
format : str
The format description
:Returns:
A formatted string
"""
return time.strftime(format, self.__time)
@classmethod
def strptime(cls, string, format):
"""
Constructs a :class:`mw.Timestamp` from an explicitly formatted string.
See `<https://docs.python.org/3/library/time.html#time.strftime>`_ for a
discussion of formats descriptors.
:Parameters:
string : str
A formatted timestamp
format : str
The format description
:Returns:
:class:`mw.Timestamp`
"""
return cls.from_time_struct(time.strptime(string, format))
@classmethod
def from_time_struct(cls, time_struct):
"""
Constructs a :class:`mw.Timestamp` from a :class:`time.time_struct`.
:Parameters:
time_struct : :class:`time.time_struct`
A time structure
:Returns:
:class:`mw.Timestamp`
"""
instance = super().__new__(cls, time_struct)
instance.initialize(time_struct)
return instance
@classmethod
def from_datetime(cls, dt):
"""
Constructs a :class:`mw.Timestamp` from a :class:`datetime.datetime`.
:Parameters:
dt : :class:`datetime.datetime``
A datetime.
:Returns:
:class:`mw.Timestamp`
"""
time_struct = dt.timetuple()
return cls.from_time_struct(time_struct)
@classmethod
def from_unix(cls, seconds):
"""
Constructs a :class:`mw.Timestamp` from a unix timestamp (in seconds
since Jan. 1st, 1970 UTC).
:Parameters:
seconds : int
A unix timestamp
:Returns:
:class:`mw.Timestamp`
"""
time_struct = datetime.datetime.utcfromtimestamp(seconds).timetuple()
return cls.from_time_struct(time_struct)
@classmethod
def __format__(self, format):
return self.strftime(format)
def __str__(self):
return self.long_format()
def __repr__(self):
return "{0}({1})".format(
self.__class__.__name__,
repr(self.long_format())
)
def to_json(self):
return self.long_format()
@classmethod
def from_json(cls, time_thing):
return cls(time_thing)
def __int__(self):
return self.unix()
def __float__(self):
return float(self.unix())
def unix(self):
"""
:Returns:
the number of seconds since Jan. 1st, 1970 UTC.
"""
return int(calendar.timegm(self.__time))
def __sub__(self, other):
if isinstance(other, Timestamp):
return self.unix() - other.unix()
else:
return self + (other * -1)
def __add__(self, seconds):
return Timestamp(self.unix() + seconds)
def __eq__(self, other):
try:
return self.__time == other.__time
except AttributeError:
return False
def __lt__(self, other):
try:
return self.__time < other.__time
except AttributeError:
return NotImplemented
def __gt__(self, other):
try:
return self.__time > other.__time
except AttributeError:
return NotImplemented
def __le__(self, other):
try:
return self.__time <= other.__time
except AttributeError:
return NotImplemented
def __ge__(self, other):
try:
return self.__time >= other.__time
except AttributeError:
return NotImplemented
def __ne__(self, other):
try:
return not self.__time == other.__time
except AttributeError:
return NotImplemented
def __getstate__(self):
return {'__time': self.__time}
|
mediawiki-utilities/python-mwtypes
|
mwtypes/files/functions.py
|
extract_extension
|
python
|
def extract_extension(path):
filename = os.path.basename(path)
parts = filename.split(".")
if len(parts) == 1:
return filename, None
else:
return ".".join(parts[:-1]), parts[-1]
|
Reads a file path and returns the extension or None if the path
contains no extension.
:Parameters:
path : str
A filesystem path
|
train
|
https://github.com/mediawiki-utilities/python-mwtypes/blob/d996562e40437a7fff39a1c00fb5544b5708b5ed/mwtypes/files/functions.py#L32-L46
| null |
import bz2
import gzip
import io
import os
from . import p7z
from ..errors import FileTypeError
FILE_READERS = {
'gz': lambda fn: gzip.open(fn, 'rt', encoding='utf-8', errors='replace'),
'bz2': lambda fn: bz2.open(fn, 'rt', encoding='utf-8', errors='replace'),
'7z': p7z.reader,
'json': lambda fn: open(fn, 'rt', encoding='utf-8', errors='replace'),
'xml': lambda fn: open(fn, 'rt', encoding='utf-8', errors='replace')
}
"""
Maps extensions to the strategy for opening/decompressing a file
"""
FILE_WRITERS = {
'gz': lambda fn: gzip.open(fn, 'wt', encoding='utf-8', errors='replace'),
'bz2': lambda fn: bz2.open(fn, 'wt', encoding='utf-8', errors='replace'),
'plaintext': lambda fn: open(fn, 'wt', encoding='utf-8', errors='replace'),
'json': lambda fn: open(fn, 'wt', encoding='utf-8', errors='replace'),
'xml': lambda fn: open(fn, 'wt', encoding='utf-8', errors='replace')
}
"""
Maps compression types to the strategy for opening/compressing a file
"""
def normalize_path(path_or_f):
"""
Verifies that a file exists at a given path and that the file has a
known extension type.
:Parameters:
path_or_f : `str` | `file`
the path to a dump file or a file handle
"""
if hasattr(path_or_f, "read"):
return path_or_f
else:
path = path_or_f
path = os.path.expanduser(path)
# Check if exists and is a file
if os.path.isdir(path):
raise IsADirectoryError("Is a directory: {0}".format(path))
elif not os.path.isfile(path):
raise FileNotFoundError("No such file: {0}".format(path))
_, extension = extract_extension(path)
if extension not in FILE_READERS:
raise FileTypeError("Extension {0} is not supported."
.format(repr(extension)))
return path
def normalize_dir(path):
if os.path.exists(path) and not os.path.isdir(path):
raise NotADirectoryError("Not a directory: {0}".format(path))
else:
os.makedirs(path, exist_ok=True)
return path
def reader(path_or_f):
"""
Turns a path to a compressed file into a file-like object of (decompressed)
data.
:Parameters:
path : `str`
the path to the dump file to read
"""
if hasattr(path_or_f, "read"):
return path_or_f
else:
path = path_or_f
path = normalize_path(path)
_, extension = extract_extension(path)
reader_func = FILE_READERS[extension]
return reader_func(path)
def output_dir_path(old_path, output_dir, compression):
filename, extension = extract_extension(old_path)
new_filename = filename + "." + compression
return os.path.join(output_dir, new_filename)
def writer(path):
"""
Creates a compressed file writer from for a path with a specified
compression type.
"""
filename, extension = extract_extension(path)
if extension in FILE_WRITERS:
writer_func = FILE_WRITERS[extension]
return writer_func(path)
else:
raise RuntimeError("Output compression {0} not supported. Type {1}"
.format(extension, tuple(FILE_WRITERS.keys())))
class ConcatinatingTextReader(io.TextIOBase):
def __init__(self, *items):
self.items = [io.StringIO(i) if isinstance(i, str) else i
for i in items]
def read(self, size=-1):
return "".join(self._read(size))
def readline(self):
if len(self.items) > 0:
line = self.items[0].readline()
if line == "":
self.items.pop(0)
else:
line = ""
return line
def _read(self, size):
if size > 0:
while len(self.items) > 0:
byte_vals = self.items[0].read(size)
yield byte_vals
if len(byte_vals) < size:
size = size - len(byte_vals) # Decrement bytes
self.items.pop(0)
else:
break
else:
for item in self.items:
yield item.read()
def concat(*stream_items):
"""
Performs a streaming concatenation of `str` or `file`.
:Parameters:
\*stream_items : `str` | `file`
A list of items to concatenate together
"""
return ConcatinatingTextReader(*stream_items)
|
mediawiki-utilities/python-mwtypes
|
mwtypes/files/functions.py
|
normalize_path
|
python
|
def normalize_path(path_or_f):
if hasattr(path_or_f, "read"):
return path_or_f
else:
path = path_or_f
path = os.path.expanduser(path)
# Check if exists and is a file
if os.path.isdir(path):
raise IsADirectoryError("Is a directory: {0}".format(path))
elif not os.path.isfile(path):
raise FileNotFoundError("No such file: {0}".format(path))
_, extension = extract_extension(path)
if extension not in FILE_READERS:
raise FileTypeError("Extension {0} is not supported."
.format(repr(extension)))
return path
|
Verifies that a file exists at a given path and that the file has a
known extension type.
:Parameters:
path_or_f : `str` | `file`
the path to a dump file or a file handle
|
train
|
https://github.com/mediawiki-utilities/python-mwtypes/blob/d996562e40437a7fff39a1c00fb5544b5708b5ed/mwtypes/files/functions.py#L49-L78
| null |
import bz2
import gzip
import io
import os
from . import p7z
from ..errors import FileTypeError
FILE_READERS = {
'gz': lambda fn: gzip.open(fn, 'rt', encoding='utf-8', errors='replace'),
'bz2': lambda fn: bz2.open(fn, 'rt', encoding='utf-8', errors='replace'),
'7z': p7z.reader,
'json': lambda fn: open(fn, 'rt', encoding='utf-8', errors='replace'),
'xml': lambda fn: open(fn, 'rt', encoding='utf-8', errors='replace')
}
"""
Maps extensions to the strategy for opening/decompressing a file
"""
FILE_WRITERS = {
'gz': lambda fn: gzip.open(fn, 'wt', encoding='utf-8', errors='replace'),
'bz2': lambda fn: bz2.open(fn, 'wt', encoding='utf-8', errors='replace'),
'plaintext': lambda fn: open(fn, 'wt', encoding='utf-8', errors='replace'),
'json': lambda fn: open(fn, 'wt', encoding='utf-8', errors='replace'),
'xml': lambda fn: open(fn, 'wt', encoding='utf-8', errors='replace')
}
"""
Maps compression types to the strategy for opening/compressing a file
"""
def extract_extension(path):
"""
Reads a file path and returns the extension or None if the path
contains no extension.
:Parameters:
path : str
A filesystem path
"""
filename = os.path.basename(path)
parts = filename.split(".")
if len(parts) == 1:
return filename, None
else:
return ".".join(parts[:-1]), parts[-1]
def normalize_dir(path):
if os.path.exists(path) and not os.path.isdir(path):
raise NotADirectoryError("Not a directory: {0}".format(path))
else:
os.makedirs(path, exist_ok=True)
return path
def reader(path_or_f):
"""
Turns a path to a compressed file into a file-like object of (decompressed)
data.
:Parameters:
path : `str`
the path to the dump file to read
"""
if hasattr(path_or_f, "read"):
return path_or_f
else:
path = path_or_f
path = normalize_path(path)
_, extension = extract_extension(path)
reader_func = FILE_READERS[extension]
return reader_func(path)
def output_dir_path(old_path, output_dir, compression):
filename, extension = extract_extension(old_path)
new_filename = filename + "." + compression
return os.path.join(output_dir, new_filename)
def writer(path):
"""
Creates a compressed file writer from for a path with a specified
compression type.
"""
filename, extension = extract_extension(path)
if extension in FILE_WRITERS:
writer_func = FILE_WRITERS[extension]
return writer_func(path)
else:
raise RuntimeError("Output compression {0} not supported. Type {1}"
.format(extension, tuple(FILE_WRITERS.keys())))
class ConcatinatingTextReader(io.TextIOBase):
def __init__(self, *items):
self.items = [io.StringIO(i) if isinstance(i, str) else i
for i in items]
def read(self, size=-1):
return "".join(self._read(size))
def readline(self):
if len(self.items) > 0:
line = self.items[0].readline()
if line == "":
self.items.pop(0)
else:
line = ""
return line
def _read(self, size):
if size > 0:
while len(self.items) > 0:
byte_vals = self.items[0].read(size)
yield byte_vals
if len(byte_vals) < size:
size = size - len(byte_vals) # Decrement bytes
self.items.pop(0)
else:
break
else:
for item in self.items:
yield item.read()
def concat(*stream_items):
"""
Performs a streaming concatenation of `str` or `file`.
:Parameters:
\*stream_items : `str` | `file`
A list of items to concatenate together
"""
return ConcatinatingTextReader(*stream_items)
|
mediawiki-utilities/python-mwtypes
|
mwtypes/files/functions.py
|
reader
|
python
|
def reader(path_or_f):
if hasattr(path_or_f, "read"):
return path_or_f
else:
path = path_or_f
path = normalize_path(path)
_, extension = extract_extension(path)
reader_func = FILE_READERS[extension]
return reader_func(path)
|
Turns a path to a compressed file into a file-like object of (decompressed)
data.
:Parameters:
path : `str`
the path to the dump file to read
|
train
|
https://github.com/mediawiki-utilities/python-mwtypes/blob/d996562e40437a7fff39a1c00fb5544b5708b5ed/mwtypes/files/functions.py#L90-L109
|
[
"def normalize_path(path_or_f):\n \"\"\"\n Verifies that a file exists at a given path and that the file has a\n known extension type.\n\n :Parameters:\n path_or_f : `str` | `file`\n the path to a dump file or a file handle\n\n \"\"\"\n if hasattr(path_or_f, \"read\"):\n return path_or_f\n else:\n path = path_or_f\n\n path = os.path.expanduser(path)\n\n # Check if exists and is a file\n if os.path.isdir(path):\n raise IsADirectoryError(\"Is a directory: {0}\".format(path))\n elif not os.path.isfile(path):\n raise FileNotFoundError(\"No such file: {0}\".format(path))\n\n _, extension = extract_extension(path)\n\n if extension not in FILE_READERS:\n raise FileTypeError(\"Extension {0} is not supported.\"\n .format(repr(extension)))\n\n return path\n",
"def extract_extension(path):\n \"\"\"\n Reads a file path and returns the extension or None if the path\n contains no extension.\n\n :Parameters:\n path : str\n A filesystem path\n \"\"\"\n filename = os.path.basename(path)\n parts = filename.split(\".\")\n if len(parts) == 1:\n return filename, None\n else:\n return \".\".join(parts[:-1]), parts[-1]\n"
] |
import bz2
import gzip
import io
import os
from . import p7z
from ..errors import FileTypeError
FILE_READERS = {
'gz': lambda fn: gzip.open(fn, 'rt', encoding='utf-8', errors='replace'),
'bz2': lambda fn: bz2.open(fn, 'rt', encoding='utf-8', errors='replace'),
'7z': p7z.reader,
'json': lambda fn: open(fn, 'rt', encoding='utf-8', errors='replace'),
'xml': lambda fn: open(fn, 'rt', encoding='utf-8', errors='replace')
}
"""
Maps extensions to the strategy for opening/decompressing a file
"""
FILE_WRITERS = {
'gz': lambda fn: gzip.open(fn, 'wt', encoding='utf-8', errors='replace'),
'bz2': lambda fn: bz2.open(fn, 'wt', encoding='utf-8', errors='replace'),
'plaintext': lambda fn: open(fn, 'wt', encoding='utf-8', errors='replace'),
'json': lambda fn: open(fn, 'wt', encoding='utf-8', errors='replace'),
'xml': lambda fn: open(fn, 'wt', encoding='utf-8', errors='replace')
}
"""
Maps compression types to the strategy for opening/compressing a file
"""
def extract_extension(path):
"""
Reads a file path and returns the extension or None if the path
contains no extension.
:Parameters:
path : str
A filesystem path
"""
filename = os.path.basename(path)
parts = filename.split(".")
if len(parts) == 1:
return filename, None
else:
return ".".join(parts[:-1]), parts[-1]
def normalize_path(path_or_f):
"""
Verifies that a file exists at a given path and that the file has a
known extension type.
:Parameters:
path_or_f : `str` | `file`
the path to a dump file or a file handle
"""
if hasattr(path_or_f, "read"):
return path_or_f
else:
path = path_or_f
path = os.path.expanduser(path)
# Check if exists and is a file
if os.path.isdir(path):
raise IsADirectoryError("Is a directory: {0}".format(path))
elif not os.path.isfile(path):
raise FileNotFoundError("No such file: {0}".format(path))
_, extension = extract_extension(path)
if extension not in FILE_READERS:
raise FileTypeError("Extension {0} is not supported."
.format(repr(extension)))
return path
def normalize_dir(path):
if os.path.exists(path) and not os.path.isdir(path):
raise NotADirectoryError("Not a directory: {0}".format(path))
else:
os.makedirs(path, exist_ok=True)
return path
def output_dir_path(old_path, output_dir, compression):
filename, extension = extract_extension(old_path)
new_filename = filename + "." + compression
return os.path.join(output_dir, new_filename)
def writer(path):
"""
Creates a compressed file writer from for a path with a specified
compression type.
"""
filename, extension = extract_extension(path)
if extension in FILE_WRITERS:
writer_func = FILE_WRITERS[extension]
return writer_func(path)
else:
raise RuntimeError("Output compression {0} not supported. Type {1}"
.format(extension, tuple(FILE_WRITERS.keys())))
class ConcatinatingTextReader(io.TextIOBase):
def __init__(self, *items):
self.items = [io.StringIO(i) if isinstance(i, str) else i
for i in items]
def read(self, size=-1):
return "".join(self._read(size))
def readline(self):
if len(self.items) > 0:
line = self.items[0].readline()
if line == "":
self.items.pop(0)
else:
line = ""
return line
def _read(self, size):
if size > 0:
while len(self.items) > 0:
byte_vals = self.items[0].read(size)
yield byte_vals
if len(byte_vals) < size:
size = size - len(byte_vals) # Decrement bytes
self.items.pop(0)
else:
break
else:
for item in self.items:
yield item.read()
def concat(*stream_items):
"""
Performs a streaming concatenation of `str` or `file`.
:Parameters:
\*stream_items : `str` | `file`
A list of items to concatenate together
"""
return ConcatinatingTextReader(*stream_items)
|
mediawiki-utilities/python-mwtypes
|
mwtypes/files/functions.py
|
writer
|
python
|
def writer(path):
filename, extension = extract_extension(path)
if extension in FILE_WRITERS:
writer_func = FILE_WRITERS[extension]
return writer_func(path)
else:
raise RuntimeError("Output compression {0} not supported. Type {1}"
.format(extension, tuple(FILE_WRITERS.keys())))
|
Creates a compressed file writer from for a path with a specified
compression type.
|
train
|
https://github.com/mediawiki-utilities/python-mwtypes/blob/d996562e40437a7fff39a1c00fb5544b5708b5ed/mwtypes/files/functions.py#L118-L129
|
[
"def extract_extension(path):\n \"\"\"\n Reads a file path and returns the extension or None if the path\n contains no extension.\n\n :Parameters:\n path : str\n A filesystem path\n \"\"\"\n filename = os.path.basename(path)\n parts = filename.split(\".\")\n if len(parts) == 1:\n return filename, None\n else:\n return \".\".join(parts[:-1]), parts[-1]\n"
] |
import bz2
import gzip
import io
import os
from . import p7z
from ..errors import FileTypeError
FILE_READERS = {
'gz': lambda fn: gzip.open(fn, 'rt', encoding='utf-8', errors='replace'),
'bz2': lambda fn: bz2.open(fn, 'rt', encoding='utf-8', errors='replace'),
'7z': p7z.reader,
'json': lambda fn: open(fn, 'rt', encoding='utf-8', errors='replace'),
'xml': lambda fn: open(fn, 'rt', encoding='utf-8', errors='replace')
}
"""
Maps extensions to the strategy for opening/decompressing a file
"""
FILE_WRITERS = {
'gz': lambda fn: gzip.open(fn, 'wt', encoding='utf-8', errors='replace'),
'bz2': lambda fn: bz2.open(fn, 'wt', encoding='utf-8', errors='replace'),
'plaintext': lambda fn: open(fn, 'wt', encoding='utf-8', errors='replace'),
'json': lambda fn: open(fn, 'wt', encoding='utf-8', errors='replace'),
'xml': lambda fn: open(fn, 'wt', encoding='utf-8', errors='replace')
}
"""
Maps compression types to the strategy for opening/compressing a file
"""
def extract_extension(path):
"""
Reads a file path and returns the extension or None if the path
contains no extension.
:Parameters:
path : str
A filesystem path
"""
filename = os.path.basename(path)
parts = filename.split(".")
if len(parts) == 1:
return filename, None
else:
return ".".join(parts[:-1]), parts[-1]
def normalize_path(path_or_f):
"""
Verifies that a file exists at a given path and that the file has a
known extension type.
:Parameters:
path_or_f : `str` | `file`
the path to a dump file or a file handle
"""
if hasattr(path_or_f, "read"):
return path_or_f
else:
path = path_or_f
path = os.path.expanduser(path)
# Check if exists and is a file
if os.path.isdir(path):
raise IsADirectoryError("Is a directory: {0}".format(path))
elif not os.path.isfile(path):
raise FileNotFoundError("No such file: {0}".format(path))
_, extension = extract_extension(path)
if extension not in FILE_READERS:
raise FileTypeError("Extension {0} is not supported."
.format(repr(extension)))
return path
def normalize_dir(path):
if os.path.exists(path) and not os.path.isdir(path):
raise NotADirectoryError("Not a directory: {0}".format(path))
else:
os.makedirs(path, exist_ok=True)
return path
def reader(path_or_f):
"""
Turns a path to a compressed file into a file-like object of (decompressed)
data.
:Parameters:
path : `str`
the path to the dump file to read
"""
if hasattr(path_or_f, "read"):
return path_or_f
else:
path = path_or_f
path = normalize_path(path)
_, extension = extract_extension(path)
reader_func = FILE_READERS[extension]
return reader_func(path)
def output_dir_path(old_path, output_dir, compression):
filename, extension = extract_extension(old_path)
new_filename = filename + "." + compression
return os.path.join(output_dir, new_filename)
class ConcatinatingTextReader(io.TextIOBase):
def __init__(self, *items):
self.items = [io.StringIO(i) if isinstance(i, str) else i
for i in items]
def read(self, size=-1):
return "".join(self._read(size))
def readline(self):
if len(self.items) > 0:
line = self.items[0].readline()
if line == "":
self.items.pop(0)
else:
line = ""
return line
def _read(self, size):
if size > 0:
while len(self.items) > 0:
byte_vals = self.items[0].read(size)
yield byte_vals
if len(byte_vals) < size:
size = size - len(byte_vals) # Decrement bytes
self.items.pop(0)
else:
break
else:
for item in self.items:
yield item.read()
def concat(*stream_items):
"""
Performs a streaming concatenation of `str` or `file`.
:Parameters:
\*stream_items : `str` | `file`
A list of items to concatenate together
"""
return ConcatinatingTextReader(*stream_items)
|
mediawiki-utilities/python-mwtypes
|
mwtypes/revision.py
|
Deleted.initialize
|
python
|
def initialize(self, text=None, comment=None, user=None, restricted=None):
self.text = none_or(text, bool)
self.comment = none_or(comment, bool)
"""
Is the comment of this revision deleted/suppressed? : `bool`
"""
self.user = none_or(user, bool)
"""
Is the user of this revision deleted/suppressed? : `bool`
"""
self.restricted = none_or(restricted, bool)
"""
Is the revision restricted? : `bool`
"""
|
Is the text of this revision deleted/suppressed? : `bool`
|
train
|
https://github.com/mediawiki-utilities/python-mwtypes/blob/d996562e40437a7fff39a1c00fb5544b5708b5ed/mwtypes/revision.py#L39-L58
|
[
"def none_or(val, func):\n if val is None:\n return None\n else:\n return func(val)\n"
] |
class Deleted(jsonable.Type):
"""
Represents information about the deleted/suppressed status of a revision
and it's associated data.
:Attributes:
.. autoattribute:: mwtypes.revision.Deleted.text
:annotation: = Is the text of this revision deleted/suppressed? :
bool | None
.. autoattribute:: mwtypes.revision.Deleted.comment
:annotation: = Is the text of this revision deleted/suppressed? :
bool | None
.. autoattribute:: mwtypes.revision.Deleted.user
:annotation: = Is the user of this revision deleted/suppressed? :
bool | None
.. autoattribute:: mwtypes.revision.Deleted.restricted
:annotation: = Is the revision restricted? : bool | None
"""
__slots__ = ('text', 'comment', 'user', 'restricted')
@classmethod
def from_int(cls, integer):
"""
Constructs a `Deleted` using the `tinyint` value of the `rev_deleted`
column of the `revision` MariaDB table.
* DELETED_TEXT = 1
* DELETED_COMMENT = 2
* DELETED_USER = 4
* DELETED_RESTRICTED = 8
"""
bin_string = bin(integer)
return cls(
text=len(bin_string) >= 1 and bin_string[-1] == "1",
comment=len(bin_string) >= 2 and bin_string[-2] == "1",
user=len(bin_string) >= 3 and bin_string[-3] == "1",
restricted=len(bin_string) >= 4 and bin_string[-4] == "1"
)
|
mediawiki-utilities/python-mwtypes
|
mwtypes/revision.py
|
Deleted.from_int
|
python
|
def from_int(cls, integer):
bin_string = bin(integer)
return cls(
text=len(bin_string) >= 1 and bin_string[-1] == "1",
comment=len(bin_string) >= 2 and bin_string[-2] == "1",
user=len(bin_string) >= 3 and bin_string[-3] == "1",
restricted=len(bin_string) >= 4 and bin_string[-4] == "1"
)
|
Constructs a `Deleted` using the `tinyint` value of the `rev_deleted`
column of the `revision` MariaDB table.
* DELETED_TEXT = 1
* DELETED_COMMENT = 2
* DELETED_USER = 4
* DELETED_RESTRICTED = 8
|
train
|
https://github.com/mediawiki-utilities/python-mwtypes/blob/d996562e40437a7fff39a1c00fb5544b5708b5ed/mwtypes/revision.py#L61-L78
| null |
class Deleted(jsonable.Type):
"""
Represents information about the deleted/suppressed status of a revision
and it's associated data.
:Attributes:
.. autoattribute:: mwtypes.revision.Deleted.text
:annotation: = Is the text of this revision deleted/suppressed? :
bool | None
.. autoattribute:: mwtypes.revision.Deleted.comment
:annotation: = Is the text of this revision deleted/suppressed? :
bool | None
.. autoattribute:: mwtypes.revision.Deleted.user
:annotation: = Is the user of this revision deleted/suppressed? :
bool | None
.. autoattribute:: mwtypes.revision.Deleted.restricted
:annotation: = Is the revision restricted? : bool | None
"""
__slots__ = ('text', 'comment', 'user', 'restricted')
def initialize(self, text=None, comment=None, user=None, restricted=None):
self.text = none_or(text, bool)
"""
Is the text of this revision deleted/suppressed? : `bool`
"""
self.comment = none_or(comment, bool)
"""
Is the comment of this revision deleted/suppressed? : `bool`
"""
self.user = none_or(user, bool)
"""
Is the user of this revision deleted/suppressed? : `bool`
"""
self.restricted = none_or(restricted, bool)
"""
Is the revision restricted? : `bool`
"""
@classmethod
|
mediawiki-utilities/python-mwtypes
|
mwtypes/revision.py
|
Revision.initialize
|
python
|
def initialize(self, id, timestamp=None, user=None, page=None, minor=None,
comment=None, text=None, bytes=None, sha1=None,
parent_id=None, model=None, format=None, deleted=None):
self.id = none_or(id, int)
self.timestamp = none_or(timestamp, Timestamp)
"""
Revision timestamp : :class:`mwtypes.Timestamp`
"""
self.user = none_or(user, User)
"""
Contributing user metadata : :class:`~mwtypes.User`
"""
self.page = none_or(page, Page)
"""
Page metadata : :class:`~mwtypes.Page`
"""
self.minor = none_or(minor, bool)
"""
Is revision a minor change? : `bool`
"""
self.comment = none_or(comment, str)
"""
Comment left with revision : `str`
"""
self.text = none_or(text, str)
"""
Content of text : `str`
"""
self.bytes = none_or(bytes, int)
"""
Number of bytes of content : `int`
"""
self.sha1 = none_or(sha1, str)
"""
sha1 hash of the content : `str`
"""
self.parent_id = none_or(parent_id, int)
"""
Revision ID of preceding revision : `int` | `None`
"""
self.model = none_or(model, str)
"""
TODO: ??? : `str`
"""
self.format = none_or(format, str)
"""
TODO: ??? : `str`
"""
self.deleted = none_or(deleted, self.Deleted)
"""
The deleted/suppressed status of the revision.
"""
|
Revision ID : `int`
|
train
|
https://github.com/mediawiki-utilities/python-mwtypes/blob/d996562e40437a7fff39a1c00fb5544b5708b5ed/mwtypes/revision.py#L136-L203
|
[
"def none_or(val, func):\n if val is None:\n return None\n else:\n return func(val)\n"
] |
class Revision(jsonable.Type):
"""
Revision metadata and text
:Attributes:
.. autoattribute:: mwtypes.Revision.id
:annotation: = Revision ID : int
.. autoattribute:: mwtypes.Revision.timestamp
:annotation: = Revision timestamp :
mwtypes.Timestamp | None
.. autoattribute:: mwtypes.Revision.user
:annotation: = Contributing user metadata :
mwtypes.User` | None
.. autoattribute:: mwtypes.Revision.page
:annotation: = Page metadata :
mwtypes.Page | None
.. autoattribute:: mwtypes.Revision.minor
:annotation: = Is revision a minor change? : bool | None
.. autoattribute:: mwtypes.Revision.comment
:annotation: = Comment left with revision : str | None
.. autoattribute:: mwtypes.Revision.text
:annotation: = Content of text : str | None
.. autoattribute:: mwtypes.Revision.bytes
:annotation: = Number of bytes of content : int | None
.. autoattribute:: mwtypes.Revision.sha1
:annotation: = sha1 hash of the content : str | None
.. autoattribute:: mwtypes.Revision.parent_id
:annotation: = Revision ID of preceding revision : int | None
.. autoattribute:: mwtypes.Revision.model
:annotation: = TODO: ??? : str | None
.. autoattribute:: mwtypes.Revision.format
:annotation: = TODO: ??? : str | None
.. autoattribute:: mwtypes.Revision.deleted
:annotation: = The deleted/suppressed status of the revision :
mwtypes.revision.Deleted | None
"""
__slots__ = ('id', 'timestamp', 'user', 'page', 'minor', 'comment',
'text', 'bytes', 'sha1', 'parent_id', 'model', 'format',
'deleted')
User = User
Deleted = Deleted
|
mediawiki-utilities/python-mwtypes
|
mwtypes/upload.py
|
Upload.initialize
|
python
|
def initialize(self, timestamp=None, user=None, comment=None,
filename=None, source=None, size=None):
self.timestamp = none_or(timestamp, Timestamp)
self.user = none_or(user, User)
"""
Contributing user metadata : :class:`~mwtypes.User`
"""
self.comment = none_or(comment, str)
"""
Comment left with upload : str | None
"""
self.filename = none_or(filename, str)
"""
File name without "File:" prefix and "_" instead of spaces : str | None
"""
self.source = none_or(source, str)
"""
A URI : str | None
"""
self.size = none_or(size, int)
"""
Number of bytes of content : int | None
"""
|
Upload timestamp : mwtypes.Timestamp | None
|
train
|
https://github.com/mediawiki-utilities/python-mwtypes/blob/d996562e40437a7fff39a1c00fb5544b5708b5ed/mwtypes/upload.py#L40-L71
|
[
"def none_or(val, func):\n if val is None:\n return None\n else:\n return func(val)\n"
] |
class Upload(jsonable.Type):
"""
Upload event metadata
:Attributes:
.. autoattribute:: mwtypes.Upload.timestamp
:annotation: = Upload timestamp : mwtypes.Timestamp | None
.. autoattribute:: mwtypes.Upload.user
:annotation: = Contributing user metadata : mwtypes.User` | None
.. autoattribute:: mwtypes.Upload.comment
:annotation: = Comment left with upload : str | None
.. autoattribute:: mwtypes.Upload.filename
:annotation: = File name without "File:" prefix and "_"
instead of spaces : str | None
.. autoattribute:: mwtypes.Upload.source
:annotation: = A URI : str | None
.. autoattribute:: mwtypes.Upload.size
:annotation: = Number of bytes of content : int | None
"""
__slots__ = ('timestamp', 'user', 'comment', 'filename', 'source', 'size')
|
hamperbot/hamper
|
hamper/commander.py
|
CommanderProtocol.signedOn
|
python
|
def signedOn(self):
log.info("Signed on as %s.", self.nickname)
if not self.password:
# We aren't wating for auth, join all the channels
self.joinChannels()
else:
self.msg("NickServ", "IDENTIFY %s" % self.password)
|
Called after successfully signing on to the server.
|
train
|
https://github.com/hamperbot/hamper/blob/6f841ec4dcc319fdd7bb3ca1f990e3b7a458771b/hamper/commander.py#L57-L64
|
[
"def joinChannels(self):\n self.dispatch('presence', 'signedOn')\n for c in self.factory.channels:\n self.join(*c)\n"
] |
class CommanderProtocol(irc.IRCClient):
"""Interacts with a single server, and delegates to the plugins."""
# #### Properties #####
@property
def nickname(self):
return self.factory.nickname
@property
def password(self):
return self.factory.password
@property
def db(self):
return self.factory.loader.db
@property
def acl(self):
return self.factory.acl
# #### Twisted events #####
def joinChannels(self):
self.dispatch('presence', 'signedOn')
for c in self.factory.channels:
self.join(*c)
def joined(self, channel):
"""Called after successfully joining a channel."""
log.info("Joined %s.", channel)
# ask for the current list of users in the channel
self.dispatch('presence', 'joined', channel)
def left(self, channel):
"""Called after leaving a channel."""
log.info("Left %s.", channel)
self.dispatch('presence', 'left', channel)
def action(self, raw_user, channel, raw_message):
return self.process_action(raw_user, channel, raw_message)
def privmsg(self, raw_user, channel, raw_message):
return self.process_action(raw_user, channel, raw_message)
def process_action(self, raw_user, channel, raw_message):
"""Called when a message is received from a channel or user."""
log.info("%s %s %s", channel, raw_user, raw_message)
if not raw_user:
# ignore server messages
return
# This monster of a regex extracts msg and target from a message, where
# the target may not be there, and the target is a valid irc name.
# Valid ways to target someone are "<nick>: ..." and "<nick>, ..."
target, message = re.match(
r'^(?:([a-z_\-\[\]\\^{}|`]' # First letter can't be a number
'[a-z0-9_\-\[\]\\^{}|`]*)' # The rest can be many things
'[:,] )? *(.*)$', # The actual message
raw_message, re.I).groups()
pm = channel == self.nickname
if pm:
directed = True
if target:
if target.lower() == self.nickname.lower():
directed = True
else:
directed = False
message = '{0}: {1}'.format(target, message)
else:
directed = False
if message.startswith('!'):
message = message[1:]
directed = True
if directed:
message = message.rstrip()
try:
user, mask = raw_user.split('!', 1)
except ValueError:
user = raw_user
mask = ''
comm = {
'raw_message': raw_message,
'message': message,
'raw_user': raw_user,
'user': user,
'mask': mask,
'target': target,
'channel': channel,
'directed': directed,
'pm': pm,
}
self.dispatch('chat', 'message', comm)
self.factory.history.setdefault(
channel, deque(maxlen=100)).append(comm)
def connectionLost(self, reason):
"""Called when the connection is lost to the server."""
self.factory.loader.db.session.commit()
if reactor.running:
reactor.stop()
def userJoined(self, user, channel):
"""Called when I see another user joining a channel."""
self.dispatch('population', 'userJoined', user, channel)
def userLeft(self, user, channel):
"""Called when I see another user leaving a channel."""
self.dispatch('population', 'userLeft', user, channel)
def userQuit(self, user, quitmessage):
"""Called when I see another user quitting."""
self.dispatch('population', 'userQuit', user, quitmessage)
def userKicked(self, kickee, channel, kicker, message):
"""Called when I see another user get kicked."""
self.dispatch('population', 'userKicked', kickee, channel, kicker,
message)
def irc_RPL_NAMREPLY(self, prefix, params):
"""Called when the server responds to my names request"""
self.dispatch('population', 'namesReply', prefix, params)
def irc_RPL_ENDOFNAMES(self, prefix, params):
"""Called after the names request is finished"""
self.dispatch('population', 'namesEnd', prefix, params)
def noticed(self, user, channel, message):
log.info("NOTICE %s %s %s" % (user, channel, message))
# mozilla's nickserv responds as NickServ!services@mozilla.org
if (self.password and channel == self.nickname and
user.startswith('NickServ')):
if ("Password accepted" in message or
"You are now identified" in message):
self.joinChannels()
elif "Password incorrect" in message:
log.info("NickServ AUTH FAILED!!!!!!!")
reactor.stop()
# #### Hamper specific functions. #####
def dispatch(self, category, func, *args):
"""Dispatch an event to all listening plugins."""
self.factory.loader.runPlugins(category, func, self, *args)
def _hamper_send(self, func, comm, message, encode, tag, vars, kwvars):
if type(message) == str:
log.warning('Warning, passing message as ascii instead of unicode '
'will cause problems. The message is: {0}'
.format(message))
format_kwargs = {}
format_kwargs.update(kwvars)
format_kwargs.update(comm)
try:
message = message.format(*vars, **format_kwargs)
except (ValueError, KeyError, IndexError) as e:
log.error('Could not format message: {e}'.format(e=e))
if encode:
message = message.encode('utf8')
if comm['pm']:
func(comm['user'], message)
else:
func(comm['channel'], message)
(self.factory.sent_messages
.setdefault(comm['channel'], deque(maxlen=100))
.append({
'comm': comm,
'message': message,
'tag': tag,
}))
def reply(self, comm, message, encode=True, tag=None, vars=[], kwvars={}):
self._hamper_send(self.msg, comm, message, encode, tag, vars, kwvars)
def me(self, comm, message, encode=True, tag=None, vars=[], kwvars={}):
self._hamper_send(
self.describe, comm, message, encode, tag, vars, kwvars)
|
hamperbot/hamper
|
hamper/commander.py
|
CommanderProtocol.process_action
|
python
|
def process_action(self, raw_user, channel, raw_message):
log.info("%s %s %s", channel, raw_user, raw_message)
if not raw_user:
# ignore server messages
return
# This monster of a regex extracts msg and target from a message, where
# the target may not be there, and the target is a valid irc name.
# Valid ways to target someone are "<nick>: ..." and "<nick>, ..."
target, message = re.match(
r'^(?:([a-z_\-\[\]\\^{}|`]' # First letter can't be a number
'[a-z0-9_\-\[\]\\^{}|`]*)' # The rest can be many things
'[:,] )? *(.*)$', # The actual message
raw_message, re.I).groups()
pm = channel == self.nickname
if pm:
directed = True
if target:
if target.lower() == self.nickname.lower():
directed = True
else:
directed = False
message = '{0}: {1}'.format(target, message)
else:
directed = False
if message.startswith('!'):
message = message[1:]
directed = True
if directed:
message = message.rstrip()
try:
user, mask = raw_user.split('!', 1)
except ValueError:
user = raw_user
mask = ''
comm = {
'raw_message': raw_message,
'message': message,
'raw_user': raw_user,
'user': user,
'mask': mask,
'target': target,
'channel': channel,
'directed': directed,
'pm': pm,
}
self.dispatch('chat', 'message', comm)
self.factory.history.setdefault(
channel, deque(maxlen=100)).append(comm)
|
Called when a message is received from a channel or user.
|
train
|
https://github.com/hamperbot/hamper/blob/6f841ec4dcc319fdd7bb3ca1f990e3b7a458771b/hamper/commander.py#L88-L144
|
[
"def dispatch(self, category, func, *args):\n \"\"\"Dispatch an event to all listening plugins.\"\"\"\n self.factory.loader.runPlugins(category, func, self, *args)\n"
] |
class CommanderProtocol(irc.IRCClient):
"""Interacts with a single server, and delegates to the plugins."""
# #### Properties #####
@property
def nickname(self):
return self.factory.nickname
@property
def password(self):
return self.factory.password
@property
def db(self):
return self.factory.loader.db
@property
def acl(self):
return self.factory.acl
# #### Twisted events #####
def signedOn(self):
"""Called after successfully signing on to the server."""
log.info("Signed on as %s.", self.nickname)
if not self.password:
# We aren't wating for auth, join all the channels
self.joinChannels()
else:
self.msg("NickServ", "IDENTIFY %s" % self.password)
def joinChannels(self):
self.dispatch('presence', 'signedOn')
for c in self.factory.channels:
self.join(*c)
def joined(self, channel):
"""Called after successfully joining a channel."""
log.info("Joined %s.", channel)
# ask for the current list of users in the channel
self.dispatch('presence', 'joined', channel)
def left(self, channel):
"""Called after leaving a channel."""
log.info("Left %s.", channel)
self.dispatch('presence', 'left', channel)
def action(self, raw_user, channel, raw_message):
return self.process_action(raw_user, channel, raw_message)
def privmsg(self, raw_user, channel, raw_message):
return self.process_action(raw_user, channel, raw_message)
def connectionLost(self, reason):
"""Called when the connection is lost to the server."""
self.factory.loader.db.session.commit()
if reactor.running:
reactor.stop()
def userJoined(self, user, channel):
"""Called when I see another user joining a channel."""
self.dispatch('population', 'userJoined', user, channel)
def userLeft(self, user, channel):
"""Called when I see another user leaving a channel."""
self.dispatch('population', 'userLeft', user, channel)
def userQuit(self, user, quitmessage):
"""Called when I see another user quitting."""
self.dispatch('population', 'userQuit', user, quitmessage)
def userKicked(self, kickee, channel, kicker, message):
"""Called when I see another user get kicked."""
self.dispatch('population', 'userKicked', kickee, channel, kicker,
message)
def irc_RPL_NAMREPLY(self, prefix, params):
"""Called when the server responds to my names request"""
self.dispatch('population', 'namesReply', prefix, params)
def irc_RPL_ENDOFNAMES(self, prefix, params):
"""Called after the names request is finished"""
self.dispatch('population', 'namesEnd', prefix, params)
def noticed(self, user, channel, message):
log.info("NOTICE %s %s %s" % (user, channel, message))
# mozilla's nickserv responds as NickServ!services@mozilla.org
if (self.password and channel == self.nickname and
user.startswith('NickServ')):
if ("Password accepted" in message or
"You are now identified" in message):
self.joinChannels()
elif "Password incorrect" in message:
log.info("NickServ AUTH FAILED!!!!!!!")
reactor.stop()
# #### Hamper specific functions. #####
def dispatch(self, category, func, *args):
"""Dispatch an event to all listening plugins."""
self.factory.loader.runPlugins(category, func, self, *args)
def _hamper_send(self, func, comm, message, encode, tag, vars, kwvars):
if type(message) == str:
log.warning('Warning, passing message as ascii instead of unicode '
'will cause problems. The message is: {0}'
.format(message))
format_kwargs = {}
format_kwargs.update(kwvars)
format_kwargs.update(comm)
try:
message = message.format(*vars, **format_kwargs)
except (ValueError, KeyError, IndexError) as e:
log.error('Could not format message: {e}'.format(e=e))
if encode:
message = message.encode('utf8')
if comm['pm']:
func(comm['user'], message)
else:
func(comm['channel'], message)
(self.factory.sent_messages
.setdefault(comm['channel'], deque(maxlen=100))
.append({
'comm': comm,
'message': message,
'tag': tag,
}))
def reply(self, comm, message, encode=True, tag=None, vars=[], kwvars={}):
self._hamper_send(self.msg, comm, message, encode, tag, vars, kwvars)
def me(self, comm, message, encode=True, tag=None, vars=[], kwvars={}):
self._hamper_send(
self.describe, comm, message, encode, tag, vars, kwvars)
|
hamperbot/hamper
|
hamper/commander.py
|
CommanderProtocol.connectionLost
|
python
|
def connectionLost(self, reason):
self.factory.loader.db.session.commit()
if reactor.running:
reactor.stop()
|
Called when the connection is lost to the server.
|
train
|
https://github.com/hamperbot/hamper/blob/6f841ec4dcc319fdd7bb3ca1f990e3b7a458771b/hamper/commander.py#L146-L150
| null |
class CommanderProtocol(irc.IRCClient):
"""Interacts with a single server, and delegates to the plugins."""
# #### Properties #####
@property
def nickname(self):
return self.factory.nickname
@property
def password(self):
return self.factory.password
@property
def db(self):
return self.factory.loader.db
@property
def acl(self):
return self.factory.acl
# #### Twisted events #####
def signedOn(self):
"""Called after successfully signing on to the server."""
log.info("Signed on as %s.", self.nickname)
if not self.password:
# We aren't wating for auth, join all the channels
self.joinChannels()
else:
self.msg("NickServ", "IDENTIFY %s" % self.password)
def joinChannels(self):
self.dispatch('presence', 'signedOn')
for c in self.factory.channels:
self.join(*c)
def joined(self, channel):
"""Called after successfully joining a channel."""
log.info("Joined %s.", channel)
# ask for the current list of users in the channel
self.dispatch('presence', 'joined', channel)
def left(self, channel):
"""Called after leaving a channel."""
log.info("Left %s.", channel)
self.dispatch('presence', 'left', channel)
def action(self, raw_user, channel, raw_message):
return self.process_action(raw_user, channel, raw_message)
def privmsg(self, raw_user, channel, raw_message):
return self.process_action(raw_user, channel, raw_message)
def process_action(self, raw_user, channel, raw_message):
"""Called when a message is received from a channel or user."""
log.info("%s %s %s", channel, raw_user, raw_message)
if not raw_user:
# ignore server messages
return
# This monster of a regex extracts msg and target from a message, where
# the target may not be there, and the target is a valid irc name.
# Valid ways to target someone are "<nick>: ..." and "<nick>, ..."
target, message = re.match(
r'^(?:([a-z_\-\[\]\\^{}|`]' # First letter can't be a number
'[a-z0-9_\-\[\]\\^{}|`]*)' # The rest can be many things
'[:,] )? *(.*)$', # The actual message
raw_message, re.I).groups()
pm = channel == self.nickname
if pm:
directed = True
if target:
if target.lower() == self.nickname.lower():
directed = True
else:
directed = False
message = '{0}: {1}'.format(target, message)
else:
directed = False
if message.startswith('!'):
message = message[1:]
directed = True
if directed:
message = message.rstrip()
try:
user, mask = raw_user.split('!', 1)
except ValueError:
user = raw_user
mask = ''
comm = {
'raw_message': raw_message,
'message': message,
'raw_user': raw_user,
'user': user,
'mask': mask,
'target': target,
'channel': channel,
'directed': directed,
'pm': pm,
}
self.dispatch('chat', 'message', comm)
self.factory.history.setdefault(
channel, deque(maxlen=100)).append(comm)
def userJoined(self, user, channel):
"""Called when I see another user joining a channel."""
self.dispatch('population', 'userJoined', user, channel)
def userLeft(self, user, channel):
"""Called when I see another user leaving a channel."""
self.dispatch('population', 'userLeft', user, channel)
def userQuit(self, user, quitmessage):
"""Called when I see another user quitting."""
self.dispatch('population', 'userQuit', user, quitmessage)
def userKicked(self, kickee, channel, kicker, message):
"""Called when I see another user get kicked."""
self.dispatch('population', 'userKicked', kickee, channel, kicker,
message)
def irc_RPL_NAMREPLY(self, prefix, params):
"""Called when the server responds to my names request"""
self.dispatch('population', 'namesReply', prefix, params)
def irc_RPL_ENDOFNAMES(self, prefix, params):
"""Called after the names request is finished"""
self.dispatch('population', 'namesEnd', prefix, params)
def noticed(self, user, channel, message):
log.info("NOTICE %s %s %s" % (user, channel, message))
# mozilla's nickserv responds as NickServ!services@mozilla.org
if (self.password and channel == self.nickname and
user.startswith('NickServ')):
if ("Password accepted" in message or
"You are now identified" in message):
self.joinChannels()
elif "Password incorrect" in message:
log.info("NickServ AUTH FAILED!!!!!!!")
reactor.stop()
# #### Hamper specific functions. #####
def dispatch(self, category, func, *args):
"""Dispatch an event to all listening plugins."""
self.factory.loader.runPlugins(category, func, self, *args)
def _hamper_send(self, func, comm, message, encode, tag, vars, kwvars):
if type(message) == str:
log.warning('Warning, passing message as ascii instead of unicode '
'will cause problems. The message is: {0}'
.format(message))
format_kwargs = {}
format_kwargs.update(kwvars)
format_kwargs.update(comm)
try:
message = message.format(*vars, **format_kwargs)
except (ValueError, KeyError, IndexError) as e:
log.error('Could not format message: {e}'.format(e=e))
if encode:
message = message.encode('utf8')
if comm['pm']:
func(comm['user'], message)
else:
func(comm['channel'], message)
(self.factory.sent_messages
.setdefault(comm['channel'], deque(maxlen=100))
.append({
'comm': comm,
'message': message,
'tag': tag,
}))
def reply(self, comm, message, encode=True, tag=None, vars=[], kwvars={}):
self._hamper_send(self.msg, comm, message, encode, tag, vars, kwvars)
def me(self, comm, message, encode=True, tag=None, vars=[], kwvars={}):
self._hamper_send(
self.describe, comm, message, encode, tag, vars, kwvars)
|
hamperbot/hamper
|
hamper/commander.py
|
CommanderProtocol.userKicked
|
python
|
def userKicked(self, kickee, channel, kicker, message):
self.dispatch('population', 'userKicked', kickee, channel, kicker,
message)
|
Called when I see another user get kicked.
|
train
|
https://github.com/hamperbot/hamper/blob/6f841ec4dcc319fdd7bb3ca1f990e3b7a458771b/hamper/commander.py#L164-L167
|
[
"def dispatch(self, category, func, *args):\n \"\"\"Dispatch an event to all listening plugins.\"\"\"\n self.factory.loader.runPlugins(category, func, self, *args)\n"
] |
class CommanderProtocol(irc.IRCClient):
"""Interacts with a single server, and delegates to the plugins."""
# #### Properties #####
@property
def nickname(self):
return self.factory.nickname
@property
def password(self):
return self.factory.password
@property
def db(self):
return self.factory.loader.db
@property
def acl(self):
return self.factory.acl
# #### Twisted events #####
def signedOn(self):
"""Called after successfully signing on to the server."""
log.info("Signed on as %s.", self.nickname)
if not self.password:
# We aren't wating for auth, join all the channels
self.joinChannels()
else:
self.msg("NickServ", "IDENTIFY %s" % self.password)
def joinChannels(self):
self.dispatch('presence', 'signedOn')
for c in self.factory.channels:
self.join(*c)
def joined(self, channel):
"""Called after successfully joining a channel."""
log.info("Joined %s.", channel)
# ask for the current list of users in the channel
self.dispatch('presence', 'joined', channel)
def left(self, channel):
"""Called after leaving a channel."""
log.info("Left %s.", channel)
self.dispatch('presence', 'left', channel)
def action(self, raw_user, channel, raw_message):
return self.process_action(raw_user, channel, raw_message)
def privmsg(self, raw_user, channel, raw_message):
return self.process_action(raw_user, channel, raw_message)
def process_action(self, raw_user, channel, raw_message):
"""Called when a message is received from a channel or user."""
log.info("%s %s %s", channel, raw_user, raw_message)
if not raw_user:
# ignore server messages
return
# This monster of a regex extracts msg and target from a message, where
# the target may not be there, and the target is a valid irc name.
# Valid ways to target someone are "<nick>: ..." and "<nick>, ..."
target, message = re.match(
r'^(?:([a-z_\-\[\]\\^{}|`]' # First letter can't be a number
'[a-z0-9_\-\[\]\\^{}|`]*)' # The rest can be many things
'[:,] )? *(.*)$', # The actual message
raw_message, re.I).groups()
pm = channel == self.nickname
if pm:
directed = True
if target:
if target.lower() == self.nickname.lower():
directed = True
else:
directed = False
message = '{0}: {1}'.format(target, message)
else:
directed = False
if message.startswith('!'):
message = message[1:]
directed = True
if directed:
message = message.rstrip()
try:
user, mask = raw_user.split('!', 1)
except ValueError:
user = raw_user
mask = ''
comm = {
'raw_message': raw_message,
'message': message,
'raw_user': raw_user,
'user': user,
'mask': mask,
'target': target,
'channel': channel,
'directed': directed,
'pm': pm,
}
self.dispatch('chat', 'message', comm)
self.factory.history.setdefault(
channel, deque(maxlen=100)).append(comm)
def connectionLost(self, reason):
"""Called when the connection is lost to the server."""
self.factory.loader.db.session.commit()
if reactor.running:
reactor.stop()
def userJoined(self, user, channel):
"""Called when I see another user joining a channel."""
self.dispatch('population', 'userJoined', user, channel)
def userLeft(self, user, channel):
"""Called when I see another user leaving a channel."""
self.dispatch('population', 'userLeft', user, channel)
def userQuit(self, user, quitmessage):
"""Called when I see another user quitting."""
self.dispatch('population', 'userQuit', user, quitmessage)
def irc_RPL_NAMREPLY(self, prefix, params):
"""Called when the server responds to my names request"""
self.dispatch('population', 'namesReply', prefix, params)
def irc_RPL_ENDOFNAMES(self, prefix, params):
"""Called after the names request is finished"""
self.dispatch('population', 'namesEnd', prefix, params)
def noticed(self, user, channel, message):
log.info("NOTICE %s %s %s" % (user, channel, message))
# mozilla's nickserv responds as NickServ!services@mozilla.org
if (self.password and channel == self.nickname and
user.startswith('NickServ')):
if ("Password accepted" in message or
"You are now identified" in message):
self.joinChannels()
elif "Password incorrect" in message:
log.info("NickServ AUTH FAILED!!!!!!!")
reactor.stop()
# #### Hamper specific functions. #####
def dispatch(self, category, func, *args):
"""Dispatch an event to all listening plugins."""
self.factory.loader.runPlugins(category, func, self, *args)
def _hamper_send(self, func, comm, message, encode, tag, vars, kwvars):
if type(message) == str:
log.warning('Warning, passing message as ascii instead of unicode '
'will cause problems. The message is: {0}'
.format(message))
format_kwargs = {}
format_kwargs.update(kwvars)
format_kwargs.update(comm)
try:
message = message.format(*vars, **format_kwargs)
except (ValueError, KeyError, IndexError) as e:
log.error('Could not format message: {e}'.format(e=e))
if encode:
message = message.encode('utf8')
if comm['pm']:
func(comm['user'], message)
else:
func(comm['channel'], message)
(self.factory.sent_messages
.setdefault(comm['channel'], deque(maxlen=100))
.append({
'comm': comm,
'message': message,
'tag': tag,
}))
def reply(self, comm, message, encode=True, tag=None, vars=[], kwvars={}):
self._hamper_send(self.msg, comm, message, encode, tag, vars, kwvars)
def me(self, comm, message, encode=True, tag=None, vars=[], kwvars={}):
self._hamper_send(
self.describe, comm, message, encode, tag, vars, kwvars)
|
hamperbot/hamper
|
hamper/commander.py
|
CommanderProtocol.dispatch
|
python
|
def dispatch(self, category, func, *args):
self.factory.loader.runPlugins(category, func, self, *args)
|
Dispatch an event to all listening plugins.
|
train
|
https://github.com/hamperbot/hamper/blob/6f841ec4dcc319fdd7bb3ca1f990e3b7a458771b/hamper/commander.py#L191-L193
| null |
class CommanderProtocol(irc.IRCClient):
"""Interacts with a single server, and delegates to the plugins."""
# #### Properties #####
@property
def nickname(self):
return self.factory.nickname
@property
def password(self):
return self.factory.password
@property
def db(self):
return self.factory.loader.db
@property
def acl(self):
return self.factory.acl
# #### Twisted events #####
def signedOn(self):
"""Called after successfully signing on to the server."""
log.info("Signed on as %s.", self.nickname)
if not self.password:
# We aren't wating for auth, join all the channels
self.joinChannels()
else:
self.msg("NickServ", "IDENTIFY %s" % self.password)
def joinChannels(self):
self.dispatch('presence', 'signedOn')
for c in self.factory.channels:
self.join(*c)
def joined(self, channel):
"""Called after successfully joining a channel."""
log.info("Joined %s.", channel)
# ask for the current list of users in the channel
self.dispatch('presence', 'joined', channel)
def left(self, channel):
"""Called after leaving a channel."""
log.info("Left %s.", channel)
self.dispatch('presence', 'left', channel)
def action(self, raw_user, channel, raw_message):
return self.process_action(raw_user, channel, raw_message)
def privmsg(self, raw_user, channel, raw_message):
return self.process_action(raw_user, channel, raw_message)
def process_action(self, raw_user, channel, raw_message):
"""Called when a message is received from a channel or user."""
log.info("%s %s %s", channel, raw_user, raw_message)
if not raw_user:
# ignore server messages
return
# This monster of a regex extracts msg and target from a message, where
# the target may not be there, and the target is a valid irc name.
# Valid ways to target someone are "<nick>: ..." and "<nick>, ..."
target, message = re.match(
r'^(?:([a-z_\-\[\]\\^{}|`]' # First letter can't be a number
'[a-z0-9_\-\[\]\\^{}|`]*)' # The rest can be many things
'[:,] )? *(.*)$', # The actual message
raw_message, re.I).groups()
pm = channel == self.nickname
if pm:
directed = True
if target:
if target.lower() == self.nickname.lower():
directed = True
else:
directed = False
message = '{0}: {1}'.format(target, message)
else:
directed = False
if message.startswith('!'):
message = message[1:]
directed = True
if directed:
message = message.rstrip()
try:
user, mask = raw_user.split('!', 1)
except ValueError:
user = raw_user
mask = ''
comm = {
'raw_message': raw_message,
'message': message,
'raw_user': raw_user,
'user': user,
'mask': mask,
'target': target,
'channel': channel,
'directed': directed,
'pm': pm,
}
self.dispatch('chat', 'message', comm)
self.factory.history.setdefault(
channel, deque(maxlen=100)).append(comm)
def connectionLost(self, reason):
"""Called when the connection is lost to the server."""
self.factory.loader.db.session.commit()
if reactor.running:
reactor.stop()
def userJoined(self, user, channel):
"""Called when I see another user joining a channel."""
self.dispatch('population', 'userJoined', user, channel)
def userLeft(self, user, channel):
"""Called when I see another user leaving a channel."""
self.dispatch('population', 'userLeft', user, channel)
def userQuit(self, user, quitmessage):
"""Called when I see another user quitting."""
self.dispatch('population', 'userQuit', user, quitmessage)
def userKicked(self, kickee, channel, kicker, message):
"""Called when I see another user get kicked."""
self.dispatch('population', 'userKicked', kickee, channel, kicker,
message)
def irc_RPL_NAMREPLY(self, prefix, params):
"""Called when the server responds to my names request"""
self.dispatch('population', 'namesReply', prefix, params)
def irc_RPL_ENDOFNAMES(self, prefix, params):
"""Called after the names request is finished"""
self.dispatch('population', 'namesEnd', prefix, params)
def noticed(self, user, channel, message):
log.info("NOTICE %s %s %s" % (user, channel, message))
# mozilla's nickserv responds as NickServ!services@mozilla.org
if (self.password and channel == self.nickname and
user.startswith('NickServ')):
if ("Password accepted" in message or
"You are now identified" in message):
self.joinChannels()
elif "Password incorrect" in message:
log.info("NickServ AUTH FAILED!!!!!!!")
reactor.stop()
# #### Hamper specific functions. #####
def _hamper_send(self, func, comm, message, encode, tag, vars, kwvars):
if type(message) == str:
log.warning('Warning, passing message as ascii instead of unicode '
'will cause problems. The message is: {0}'
.format(message))
format_kwargs = {}
format_kwargs.update(kwvars)
format_kwargs.update(comm)
try:
message = message.format(*vars, **format_kwargs)
except (ValueError, KeyError, IndexError) as e:
log.error('Could not format message: {e}'.format(e=e))
if encode:
message = message.encode('utf8')
if comm['pm']:
func(comm['user'], message)
else:
func(comm['channel'], message)
(self.factory.sent_messages
.setdefault(comm['channel'], deque(maxlen=100))
.append({
'comm': comm,
'message': message,
'tag': tag,
}))
def reply(self, comm, message, encode=True, tag=None, vars=[], kwvars={}):
self._hamper_send(self.msg, comm, message, encode, tag, vars, kwvars)
def me(self, comm, message, encode=True, tag=None, vars=[], kwvars={}):
self._hamper_send(
self.describe, comm, message, encode, tag, vars, kwvars)
|
hamperbot/hamper
|
hamper/commander.py
|
PluginLoader.dependencies_satisfied
|
python
|
def dependencies_satisfied(self, plugin):
for depends in plugin.dependencies:
if depends not in self.config['plugins']:
log.error("{0} depends on {1}, but {1} wasn't in the "
"config file. To use {0}, install {1} and add "
"it to the config.".format(plugin.name, depends))
return False
return True
|
Checks whether a plugin's dependencies are satisfied.
Logs an error if there is an unsatisfied dependencies
Returns: Bool
|
train
|
https://github.com/hamperbot/hamper/blob/6f841ec4dcc319fdd7bb3ca1f990e3b7a458771b/hamper/commander.py#L333-L346
| null |
class PluginLoader(object):
"""
I am a repository for plugins.
I understand how to load plugins and how to enumerate the plugins I've
loaded. Additionally, I can store configuration data for plugins.
Think of me as the piece of code that isolates plugin state from the
details of the network.
"""
def __init__(self, config):
self.config = config
self.plugins = []
def loadAll(self):
plugins_to_load = set()
# Gather plugins
for plugin in iter_entry_points(group='hamperbot.plugins', name=None):
if plugin.name in self.config['plugins']:
plugins_to_load.add(plugin.load())
# Sort by priority, highest first
plugins_to_load = sorted(plugins_to_load, key=lambda p: -p.priority)
# Check dependencies and load plugins.
for plugin_class in plugins_to_load:
plugin_obj = plugin_class()
if not self.dependencies_satisfied(plugin_obj):
log.warning('Dependency not satisfied for {0}. Not loading.'
.format(plugin_class.__name__))
continue
log.info('Loading plugin {0}.'.format(plugin_class.__name__))
plugin_obj.setup(self)
self.plugins.append(plugin_obj)
# Check for missing plugins
plugin_names = {x.name for x in self.plugins}
# Don't allow karma and karma_adv to be loaded at once
if ('karma' in self.config['plugins'] and
'karma_adv' in self.config['plugins']):
quit(
"Unable to load both karma and karma_adv at the same time")
for pattern in self.config['plugins']:
if pattern not in plugin_names:
log.warning('Sorry, I couldn\'t find a plugin named "%s"',
pattern)
def runPlugins(self, category, func, protocol, *args):
"""
Run the specified set of plugins against a given protocol.
"""
# Plugins are already sorted by priority
for plugin in self.plugins:
# If a plugin throws an exception, we should catch it gracefully.
try:
event_listener = getattr(plugin, func)
except AttributeError:
# If the plugin doesn't implement the event, do nothing
pass
else:
try:
stop = event_listener(protocol, *args)
if stop:
break
except Exception:
# A plugin should not be able to crash the bot.
# Catch and log all errors.
traceback.print_exc()
|
hamperbot/hamper
|
hamper/commander.py
|
PluginLoader.runPlugins
|
python
|
def runPlugins(self, category, func, protocol, *args):
# Plugins are already sorted by priority
for plugin in self.plugins:
# If a plugin throws an exception, we should catch it gracefully.
try:
event_listener = getattr(plugin, func)
except AttributeError:
# If the plugin doesn't implement the event, do nothing
pass
else:
try:
stop = event_listener(protocol, *args)
if stop:
break
except Exception:
# A plugin should not be able to crash the bot.
# Catch and log all errors.
traceback.print_exc()
|
Run the specified set of plugins against a given protocol.
|
train
|
https://github.com/hamperbot/hamper/blob/6f841ec4dcc319fdd7bb3ca1f990e3b7a458771b/hamper/commander.py#L348-L368
| null |
class PluginLoader(object):
"""
I am a repository for plugins.
I understand how to load plugins and how to enumerate the plugins I've
loaded. Additionally, I can store configuration data for plugins.
Think of me as the piece of code that isolates plugin state from the
details of the network.
"""
def __init__(self, config):
self.config = config
self.plugins = []
def loadAll(self):
plugins_to_load = set()
# Gather plugins
for plugin in iter_entry_points(group='hamperbot.plugins', name=None):
if plugin.name in self.config['plugins']:
plugins_to_load.add(plugin.load())
# Sort by priority, highest first
plugins_to_load = sorted(plugins_to_load, key=lambda p: -p.priority)
# Check dependencies and load plugins.
for plugin_class in plugins_to_load:
plugin_obj = plugin_class()
if not self.dependencies_satisfied(plugin_obj):
log.warning('Dependency not satisfied for {0}. Not loading.'
.format(plugin_class.__name__))
continue
log.info('Loading plugin {0}.'.format(plugin_class.__name__))
plugin_obj.setup(self)
self.plugins.append(plugin_obj)
# Check for missing plugins
plugin_names = {x.name for x in self.plugins}
# Don't allow karma and karma_adv to be loaded at once
if ('karma' in self.config['plugins'] and
'karma_adv' in self.config['plugins']):
quit(
"Unable to load both karma and karma_adv at the same time")
for pattern in self.config['plugins']:
if pattern not in plugin_names:
log.warning('Sorry, I couldn\'t find a plugin named "%s"',
pattern)
def dependencies_satisfied(self, plugin):
"""
Checks whether a plugin's dependencies are satisfied.
Logs an error if there is an unsatisfied dependencies
Returns: Bool
"""
for depends in plugin.dependencies:
if depends not in self.config['plugins']:
log.error("{0} depends on {1}, but {1} wasn't in the "
"config file. To use {0}, install {1} and add "
"it to the config.".format(plugin.name, depends))
return False
return True
|
hamperbot/hamper
|
hamper/plugins/karma_adv.py
|
KarmaAdv.message
|
python
|
def message(self, bot, comm):
super(KarmaAdv, self).message(bot, comm)
# No directed karma giving or taking
if not comm['directed'] and not comm['pm']:
msg = comm['message'].strip().lower()
# use the magic above
words = self.regstr.findall(msg)
# Do things to people
karmas = self.modify_karma(words)
# Notify the users they can't modify their own karma
if comm['user'] in karmas.keys():
if karmas[comm['user']] <= 0:
bot.reply(comm, "Don't be so hard on yourself.")
else:
bot.reply(comm, "Tisk, tisk, no up'ing your own karma.")
# Commit karma changes to the db
self.update_db(comm["user"], karmas)
|
Check for strings ending with 2 or more '-' or '+'
|
train
|
https://github.com/hamperbot/hamper/blob/6f841ec4dcc319fdd7bb3ca1f990e3b7a458771b/hamper/plugins/karma_adv.py#L79-L99
|
[
"def message(self, bot, comm):\n super(ChatCommandPlugin, self).message(bot, comm)\n for cmd in self.commands:\n stop = cmd.message(bot, comm)\n if stop:\n return stop\n",
"def modify_karma(self, words):\n \"\"\"\n Given a regex object, look through the groups and modify karma\n as necessary\n \"\"\"\n\n # 'user': karma\n k = defaultdict(int)\n\n if words:\n # For loop through all of the group members\n for word_tuple in words:\n word = word_tuple[0]\n ending = word[-1]\n # This will either end with a - or +, if it's a - subract 1\n # kara, if it ends with a +, add 1 karma\n change = -1 if ending == '-' else 1\n # Now strip the ++ or -- from the end\n if '-' in ending:\n word = word.rstrip('-')\n elif '+' in ending:\n word = word.rstrip('+')\n # Check if surrounded by parens, if so, remove them\n if word.startswith('(') and word.endswith(')'):\n word = word[1:-1]\n # Finally strip whitespace\n word = word.strip()\n # Add the user to the dict\n if word:\n k[word] += change\n return k\n",
"def update_db(self, giver, receiverkarma):\n \"\"\"\n Record a the giver of karma, the receiver of karma, and the karma\n amount. Typically the count will be 1, but it can be any positive or\n negative integer.\n \"\"\"\n\n for receiver in receiverkarma:\n if receiver != giver:\n urow = KarmaStatsTable(\n ude(giver), ude(receiver), receiverkarma[receiver])\n self.db.session.add(urow)\n self.db.session.commit()\n"
] |
class KarmaAdv(ChatCommandPlugin):
'''Give, take, and scoreboard Internet Points'''
"""
Hamper will look for lines that end in ++ or -- and modify that user's
karma value accordingly as well as track a few other stats about users
NOTE: The user is just a string, this really could be anything...like
potatoes or the infamous cookie clicker....
"""
name = 'karma_adv'
priority = -2
short_desc = ("karma - Give positive or negative karma. Where you see"
" !karma, !score will work as well")
long_desc = ("username++ - Give karma\n"
"username-- - Take karma\n"
"!karma --top - Show the top 5 karma earners\n"
"!karma --bottom - Show the bottom 5 karma earners\n"
"!karma --giver or --taker - Show who's given the most"
" positive or negative karma\n"
"!karma --when-positive or --when-negative "
" - Show when people are the most positive or negative\n"
"!karma username - Show the user's karma count\n")
gotta_catch_em_all = r"""# 3 or statement
(
# Starting with a (, look for anything within
# parens that end with 2 or more + or -
(?=\()[^\)]+\)(\+\++|--+) |
# Looking from the start of the line until 2 or
# more - or + are found. No whitespace in this
# grouping
^[^\s]+(\+\++|--+) |
# Finally group any non-whitespace groupings
# that end with 2 or more + or -
[^\s]+?(\+\++|--+)((?=\s)|(?=$))
)
"""
regstr = re.compile(gotta_catch_em_all, re.X)
def setup(self, loader):
super(KarmaAdv, self).setup(loader)
self.db = loader.db
SQLAlchemyBase.metadata.create_all(self.db.engine)
# Config
config = loader.config.get("karma_adv", {})
self.timezone = config.get('timezone', 'UTC')
try:
self.tzinfo = timezone(self.timezone)
except UnknownTimeZoneError:
self.tzinfo = timezone('UTC')
self.timezone = 'UTC'
def modify_karma(self, words):
"""
Given a regex object, look through the groups and modify karma
as necessary
"""
# 'user': karma
k = defaultdict(int)
if words:
# For loop through all of the group members
for word_tuple in words:
word = word_tuple[0]
ending = word[-1]
# This will either end with a - or +, if it's a - subract 1
# kara, if it ends with a +, add 1 karma
change = -1 if ending == '-' else 1
# Now strip the ++ or -- from the end
if '-' in ending:
word = word.rstrip('-')
elif '+' in ending:
word = word.rstrip('+')
# Check if surrounded by parens, if so, remove them
if word.startswith('(') and word.endswith(')'):
word = word[1:-1]
# Finally strip whitespace
word = word.strip()
# Add the user to the dict
if word:
k[word] += change
return k
def update_db(self, giver, receiverkarma):
"""
Record a the giver of karma, the receiver of karma, and the karma
amount. Typically the count will be 1, but it can be any positive or
negative integer.
"""
for receiver in receiverkarma:
if receiver != giver:
urow = KarmaStatsTable(
ude(giver), ude(receiver), receiverkarma[receiver])
self.db.session.add(urow)
self.db.session.commit()
class KarmaList(Command):
"""
Return the highest or lowest 5 receivers of karma
"""
regex = r'^(?:score|karma) --(top|bottom)$'
LIST_MAX = 5
def command(self, bot, comm, groups):
# Let the database restrict the amount of rows we get back.
# We can then just deal with a few rows later on
session = bot.factory.loader.db.session
kcount = func.sum(KarmaStatsTable.kcount).label('kcount')
kts = session.query(KarmaStatsTable.receiver, kcount) \
.group_by(KarmaStatsTable.receiver)
# For legacy support
classic = session.query(KarmaStatsTable)
# Counter for sorting and updating data
counter = Counter()
if kts.count() or classic.count():
# We should limit the list of users to at most self.LIST_MAX
if groups[0] == 'top':
classic_q = classic.order_by(
KarmaStatsTable.kcount.desc()).limit(
self.LIST_MAX).all()
query = kts.order_by(kcount.desc())\
.limit(self.LIST_MAX).all()
counter.update(dict(classic_q))
counter.update(dict(query))
snippet = counter.most_common(self.LIST_MAX)
elif groups[0] == 'bottom':
classic_q = classic.order_by(KarmaStatsTable.kcount)\
.limit(self.LIST_MAX).all()
query = kts.order_by(kcount)\
.limit(self.LIST_MAX).all()
counter.update(dict(classic_q))
counter.update(dict(query))
snippet = reversed(counter.most_common(self.LIST_MAX))
else:
bot.reply(
comm, r'Something went wrong with karma\'s regex'
)
return
for rec in snippet:
bot.reply(
comm, '%s\x0f: %d' % (uen(rec[0]), rec[1]),
encode=False
)
else:
bot.reply(comm, 'No one has any karma yet :-(')
class UserKarma(Command):
"""
Retrieve karma for a given user
"""
# !karma <username>
regex = r'^(?:score|karma)(?:\s+([^-].*))?$'
def command(self, bot, comm, groups):
# The receiver (or in old terms, user) of the karma being tallied
receiver = groups[0]
if receiver is None:
reciever = comm['user']
receiver = ude(reciever.strip().lower())
# Manage both tables
sesh = bot.factory.loader.db.session
# Old Table
kt = sesh.query(KarmaStatsTable)
user = kt.filter(KarmaStatsTable.user == receiver).first()
# New Table
kst = sesh.query(KarmaStatsTable)
kst_list = kst.filter(KarmaStatsTable.receiver == receiver).all()
# The total amount of karma from both tables
total = 0
# Add karma from the old table
if user:
total += user.kcount
# Add karma from the new table
if kst_list:
for row in kst_list:
total += row.kcount
# Pluralization
points = "points"
if total == 1 or total == -1:
points = "point"
# Send the message
bot.reply(
comm, '%s has %d %s' % (uen(receiver), total, points),
encode=False
)
class KarmaGiver(Command):
"""
Identifies the person who gives the most karma
"""
regex = r'^(?:score|karma) --(giver|taker)$'
def command(self, bot, comm, groups):
kt = bot.factory.loader.db.session.query(KarmaStatsTable)
counter = Counter()
if groups[0] == 'giver':
positive_karma = kt.filter(KarmaStatsTable.kcount > 0)
for row in positive_karma:
counter[row.giver] += row.kcount
m = counter.most_common(1)
most = m[0] if m else None
if most:
bot.reply(
comm,
'%s has given the most karma (%d)' %
(uen(most[0]), most[1])
)
else:
bot.reply(
comm,
'No positive karma has been given yet :-('
)
elif groups[0] == 'taker':
negative_karma = kt.filter(KarmaStatsTable.kcount < 0)
for row in negative_karma:
counter[row.giver] += row.kcount
m = counter.most_common()
most = m[-1] if m else None
if most:
bot.reply(
comm,
'%s has given the most negative karma (%d)' %
(uen(most[0]), most[1])
)
else:
bot.reply(
comm,
'No negative karma has been given yet'
)
class MostActive(Command):
"""
Least/Most active hours of karma giving/taking
This will now look in the config for a timezone to use when displaying
the hour.
Example
Karma:
timezone: America/Los_Angeles
If no timezone is given, or it's invalid, time will be reported in UTC
"""
regex = r'^(?:score|karma)\s+--when-(positive|negative)'
def command(self, bot, comm, groups):
kt = bot.factory.loader.db.session.query(KarmaStatsTable)
counter = Counter()
if groups[0] == "positive":
karma = kt.filter(KarmaStatsTable.kcount > 0)
elif groups[0] == "negative":
karma = kt.filter(KarmaStatsTable.kcount < 0)
for row in karma:
hour = row.datetime.hour
counter[hour] += row.kcount
common_hour = (counter.most_common(1)[0][0]
if counter.most_common(1) else None)
# Title case for when
title_case = groups[0][0].upper() + groups[0][1:]
if common_hour:
# Create a datetime object
current_time = datetime.now(pytz.utc)
# Give it the common_hour
current_time = current_time.replace(hour=int(common_hour))
# Get the localized common hour
hour = self.plugin.tzinfo.normalize(
current_time.astimezone(self.plugin.tzinfo)).hour
# Report to the channel
bot.reply(
comm,
'%s karma is usually given during the %d:00 hour (%s)' %
(title_case, hour, self.plugin.timezone)
)
else:
# Inform that no karma of that type has been awarded yet
bot.reply(
comm,
'%s karma has been given yet' % title_case
)
|
hamperbot/hamper
|
hamper/plugins/karma_adv.py
|
KarmaAdv.modify_karma
|
python
|
def modify_karma(self, words):
# 'user': karma
k = defaultdict(int)
if words:
# For loop through all of the group members
for word_tuple in words:
word = word_tuple[0]
ending = word[-1]
# This will either end with a - or +, if it's a - subract 1
# kara, if it ends with a +, add 1 karma
change = -1 if ending == '-' else 1
# Now strip the ++ or -- from the end
if '-' in ending:
word = word.rstrip('-')
elif '+' in ending:
word = word.rstrip('+')
# Check if surrounded by parens, if so, remove them
if word.startswith('(') and word.endswith(')'):
word = word[1:-1]
# Finally strip whitespace
word = word.strip()
# Add the user to the dict
if word:
k[word] += change
return k
|
Given a regex object, look through the groups and modify karma
as necessary
|
train
|
https://github.com/hamperbot/hamper/blob/6f841ec4dcc319fdd7bb3ca1f990e3b7a458771b/hamper/plugins/karma_adv.py#L101-L131
| null |
class KarmaAdv(ChatCommandPlugin):
'''Give, take, and scoreboard Internet Points'''
"""
Hamper will look for lines that end in ++ or -- and modify that user's
karma value accordingly as well as track a few other stats about users
NOTE: The user is just a string, this really could be anything...like
potatoes or the infamous cookie clicker....
"""
name = 'karma_adv'
priority = -2
short_desc = ("karma - Give positive or negative karma. Where you see"
" !karma, !score will work as well")
long_desc = ("username++ - Give karma\n"
"username-- - Take karma\n"
"!karma --top - Show the top 5 karma earners\n"
"!karma --bottom - Show the bottom 5 karma earners\n"
"!karma --giver or --taker - Show who's given the most"
" positive or negative karma\n"
"!karma --when-positive or --when-negative "
" - Show when people are the most positive or negative\n"
"!karma username - Show the user's karma count\n")
gotta_catch_em_all = r"""# 3 or statement
(
# Starting with a (, look for anything within
# parens that end with 2 or more + or -
(?=\()[^\)]+\)(\+\++|--+) |
# Looking from the start of the line until 2 or
# more - or + are found. No whitespace in this
# grouping
^[^\s]+(\+\++|--+) |
# Finally group any non-whitespace groupings
# that end with 2 or more + or -
[^\s]+?(\+\++|--+)((?=\s)|(?=$))
)
"""
regstr = re.compile(gotta_catch_em_all, re.X)
def setup(self, loader):
super(KarmaAdv, self).setup(loader)
self.db = loader.db
SQLAlchemyBase.metadata.create_all(self.db.engine)
# Config
config = loader.config.get("karma_adv", {})
self.timezone = config.get('timezone', 'UTC')
try:
self.tzinfo = timezone(self.timezone)
except UnknownTimeZoneError:
self.tzinfo = timezone('UTC')
self.timezone = 'UTC'
def message(self, bot, comm):
"""
Check for strings ending with 2 or more '-' or '+'
"""
super(KarmaAdv, self).message(bot, comm)
# No directed karma giving or taking
if not comm['directed'] and not comm['pm']:
msg = comm['message'].strip().lower()
# use the magic above
words = self.regstr.findall(msg)
# Do things to people
karmas = self.modify_karma(words)
# Notify the users they can't modify their own karma
if comm['user'] in karmas.keys():
if karmas[comm['user']] <= 0:
bot.reply(comm, "Don't be so hard on yourself.")
else:
bot.reply(comm, "Tisk, tisk, no up'ing your own karma.")
# Commit karma changes to the db
self.update_db(comm["user"], karmas)
def update_db(self, giver, receiverkarma):
"""
Record a the giver of karma, the receiver of karma, and the karma
amount. Typically the count will be 1, but it can be any positive or
negative integer.
"""
for receiver in receiverkarma:
if receiver != giver:
urow = KarmaStatsTable(
ude(giver), ude(receiver), receiverkarma[receiver])
self.db.session.add(urow)
self.db.session.commit()
class KarmaList(Command):
"""
Return the highest or lowest 5 receivers of karma
"""
regex = r'^(?:score|karma) --(top|bottom)$'
LIST_MAX = 5
def command(self, bot, comm, groups):
# Let the database restrict the amount of rows we get back.
# We can then just deal with a few rows later on
session = bot.factory.loader.db.session
kcount = func.sum(KarmaStatsTable.kcount).label('kcount')
kts = session.query(KarmaStatsTable.receiver, kcount) \
.group_by(KarmaStatsTable.receiver)
# For legacy support
classic = session.query(KarmaStatsTable)
# Counter for sorting and updating data
counter = Counter()
if kts.count() or classic.count():
# We should limit the list of users to at most self.LIST_MAX
if groups[0] == 'top':
classic_q = classic.order_by(
KarmaStatsTable.kcount.desc()).limit(
self.LIST_MAX).all()
query = kts.order_by(kcount.desc())\
.limit(self.LIST_MAX).all()
counter.update(dict(classic_q))
counter.update(dict(query))
snippet = counter.most_common(self.LIST_MAX)
elif groups[0] == 'bottom':
classic_q = classic.order_by(KarmaStatsTable.kcount)\
.limit(self.LIST_MAX).all()
query = kts.order_by(kcount)\
.limit(self.LIST_MAX).all()
counter.update(dict(classic_q))
counter.update(dict(query))
snippet = reversed(counter.most_common(self.LIST_MAX))
else:
bot.reply(
comm, r'Something went wrong with karma\'s regex'
)
return
for rec in snippet:
bot.reply(
comm, '%s\x0f: %d' % (uen(rec[0]), rec[1]),
encode=False
)
else:
bot.reply(comm, 'No one has any karma yet :-(')
class UserKarma(Command):
"""
Retrieve karma for a given user
"""
# !karma <username>
regex = r'^(?:score|karma)(?:\s+([^-].*))?$'
def command(self, bot, comm, groups):
# The receiver (or in old terms, user) of the karma being tallied
receiver = groups[0]
if receiver is None:
reciever = comm['user']
receiver = ude(reciever.strip().lower())
# Manage both tables
sesh = bot.factory.loader.db.session
# Old Table
kt = sesh.query(KarmaStatsTable)
user = kt.filter(KarmaStatsTable.user == receiver).first()
# New Table
kst = sesh.query(KarmaStatsTable)
kst_list = kst.filter(KarmaStatsTable.receiver == receiver).all()
# The total amount of karma from both tables
total = 0
# Add karma from the old table
if user:
total += user.kcount
# Add karma from the new table
if kst_list:
for row in kst_list:
total += row.kcount
# Pluralization
points = "points"
if total == 1 or total == -1:
points = "point"
# Send the message
bot.reply(
comm, '%s has %d %s' % (uen(receiver), total, points),
encode=False
)
class KarmaGiver(Command):
"""
Identifies the person who gives the most karma
"""
regex = r'^(?:score|karma) --(giver|taker)$'
def command(self, bot, comm, groups):
kt = bot.factory.loader.db.session.query(KarmaStatsTable)
counter = Counter()
if groups[0] == 'giver':
positive_karma = kt.filter(KarmaStatsTable.kcount > 0)
for row in positive_karma:
counter[row.giver] += row.kcount
m = counter.most_common(1)
most = m[0] if m else None
if most:
bot.reply(
comm,
'%s has given the most karma (%d)' %
(uen(most[0]), most[1])
)
else:
bot.reply(
comm,
'No positive karma has been given yet :-('
)
elif groups[0] == 'taker':
negative_karma = kt.filter(KarmaStatsTable.kcount < 0)
for row in negative_karma:
counter[row.giver] += row.kcount
m = counter.most_common()
most = m[-1] if m else None
if most:
bot.reply(
comm,
'%s has given the most negative karma (%d)' %
(uen(most[0]), most[1])
)
else:
bot.reply(
comm,
'No negative karma has been given yet'
)
class MostActive(Command):
"""
Least/Most active hours of karma giving/taking
This will now look in the config for a timezone to use when displaying
the hour.
Example
Karma:
timezone: America/Los_Angeles
If no timezone is given, or it's invalid, time will be reported in UTC
"""
regex = r'^(?:score|karma)\s+--when-(positive|negative)'
def command(self, bot, comm, groups):
kt = bot.factory.loader.db.session.query(KarmaStatsTable)
counter = Counter()
if groups[0] == "positive":
karma = kt.filter(KarmaStatsTable.kcount > 0)
elif groups[0] == "negative":
karma = kt.filter(KarmaStatsTable.kcount < 0)
for row in karma:
hour = row.datetime.hour
counter[hour] += row.kcount
common_hour = (counter.most_common(1)[0][0]
if counter.most_common(1) else None)
# Title case for when
title_case = groups[0][0].upper() + groups[0][1:]
if common_hour:
# Create a datetime object
current_time = datetime.now(pytz.utc)
# Give it the common_hour
current_time = current_time.replace(hour=int(common_hour))
# Get the localized common hour
hour = self.plugin.tzinfo.normalize(
current_time.astimezone(self.plugin.tzinfo)).hour
# Report to the channel
bot.reply(
comm,
'%s karma is usually given during the %d:00 hour (%s)' %
(title_case, hour, self.plugin.timezone)
)
else:
# Inform that no karma of that type has been awarded yet
bot.reply(
comm,
'%s karma has been given yet' % title_case
)
|
hamperbot/hamper
|
hamper/plugins/karma_adv.py
|
KarmaAdv.update_db
|
python
|
def update_db(self, giver, receiverkarma):
for receiver in receiverkarma:
if receiver != giver:
urow = KarmaStatsTable(
ude(giver), ude(receiver), receiverkarma[receiver])
self.db.session.add(urow)
self.db.session.commit()
|
Record a the giver of karma, the receiver of karma, and the karma
amount. Typically the count will be 1, but it can be any positive or
negative integer.
|
train
|
https://github.com/hamperbot/hamper/blob/6f841ec4dcc319fdd7bb3ca1f990e3b7a458771b/hamper/plugins/karma_adv.py#L133-L145
|
[
"def ude(s):\n return s.decode('utf-8')\n"
] |
class KarmaAdv(ChatCommandPlugin):
'''Give, take, and scoreboard Internet Points'''
"""
Hamper will look for lines that end in ++ or -- and modify that user's
karma value accordingly as well as track a few other stats about users
NOTE: The user is just a string, this really could be anything...like
potatoes or the infamous cookie clicker....
"""
name = 'karma_adv'
priority = -2
short_desc = ("karma - Give positive or negative karma. Where you see"
" !karma, !score will work as well")
long_desc = ("username++ - Give karma\n"
"username-- - Take karma\n"
"!karma --top - Show the top 5 karma earners\n"
"!karma --bottom - Show the bottom 5 karma earners\n"
"!karma --giver or --taker - Show who's given the most"
" positive or negative karma\n"
"!karma --when-positive or --when-negative "
" - Show when people are the most positive or negative\n"
"!karma username - Show the user's karma count\n")
gotta_catch_em_all = r"""# 3 or statement
(
# Starting with a (, look for anything within
# parens that end with 2 or more + or -
(?=\()[^\)]+\)(\+\++|--+) |
# Looking from the start of the line until 2 or
# more - or + are found. No whitespace in this
# grouping
^[^\s]+(\+\++|--+) |
# Finally group any non-whitespace groupings
# that end with 2 or more + or -
[^\s]+?(\+\++|--+)((?=\s)|(?=$))
)
"""
regstr = re.compile(gotta_catch_em_all, re.X)
def setup(self, loader):
super(KarmaAdv, self).setup(loader)
self.db = loader.db
SQLAlchemyBase.metadata.create_all(self.db.engine)
# Config
config = loader.config.get("karma_adv", {})
self.timezone = config.get('timezone', 'UTC')
try:
self.tzinfo = timezone(self.timezone)
except UnknownTimeZoneError:
self.tzinfo = timezone('UTC')
self.timezone = 'UTC'
def message(self, bot, comm):
"""
Check for strings ending with 2 or more '-' or '+'
"""
super(KarmaAdv, self).message(bot, comm)
# No directed karma giving or taking
if not comm['directed'] and not comm['pm']:
msg = comm['message'].strip().lower()
# use the magic above
words = self.regstr.findall(msg)
# Do things to people
karmas = self.modify_karma(words)
# Notify the users they can't modify their own karma
if comm['user'] in karmas.keys():
if karmas[comm['user']] <= 0:
bot.reply(comm, "Don't be so hard on yourself.")
else:
bot.reply(comm, "Tisk, tisk, no up'ing your own karma.")
# Commit karma changes to the db
self.update_db(comm["user"], karmas)
def modify_karma(self, words):
"""
Given a regex object, look through the groups and modify karma
as necessary
"""
# 'user': karma
k = defaultdict(int)
if words:
# For loop through all of the group members
for word_tuple in words:
word = word_tuple[0]
ending = word[-1]
# This will either end with a - or +, if it's a - subract 1
# kara, if it ends with a +, add 1 karma
change = -1 if ending == '-' else 1
# Now strip the ++ or -- from the end
if '-' in ending:
word = word.rstrip('-')
elif '+' in ending:
word = word.rstrip('+')
# Check if surrounded by parens, if so, remove them
if word.startswith('(') and word.endswith(')'):
word = word[1:-1]
# Finally strip whitespace
word = word.strip()
# Add the user to the dict
if word:
k[word] += change
return k
class KarmaList(Command):
"""
Return the highest or lowest 5 receivers of karma
"""
regex = r'^(?:score|karma) --(top|bottom)$'
LIST_MAX = 5
def command(self, bot, comm, groups):
# Let the database restrict the amount of rows we get back.
# We can then just deal with a few rows later on
session = bot.factory.loader.db.session
kcount = func.sum(KarmaStatsTable.kcount).label('kcount')
kts = session.query(KarmaStatsTable.receiver, kcount) \
.group_by(KarmaStatsTable.receiver)
# For legacy support
classic = session.query(KarmaStatsTable)
# Counter for sorting and updating data
counter = Counter()
if kts.count() or classic.count():
# We should limit the list of users to at most self.LIST_MAX
if groups[0] == 'top':
classic_q = classic.order_by(
KarmaStatsTable.kcount.desc()).limit(
self.LIST_MAX).all()
query = kts.order_by(kcount.desc())\
.limit(self.LIST_MAX).all()
counter.update(dict(classic_q))
counter.update(dict(query))
snippet = counter.most_common(self.LIST_MAX)
elif groups[0] == 'bottom':
classic_q = classic.order_by(KarmaStatsTable.kcount)\
.limit(self.LIST_MAX).all()
query = kts.order_by(kcount)\
.limit(self.LIST_MAX).all()
counter.update(dict(classic_q))
counter.update(dict(query))
snippet = reversed(counter.most_common(self.LIST_MAX))
else:
bot.reply(
comm, r'Something went wrong with karma\'s regex'
)
return
for rec in snippet:
bot.reply(
comm, '%s\x0f: %d' % (uen(rec[0]), rec[1]),
encode=False
)
else:
bot.reply(comm, 'No one has any karma yet :-(')
class UserKarma(Command):
"""
Retrieve karma for a given user
"""
# !karma <username>
regex = r'^(?:score|karma)(?:\s+([^-].*))?$'
def command(self, bot, comm, groups):
# The receiver (or in old terms, user) of the karma being tallied
receiver = groups[0]
if receiver is None:
reciever = comm['user']
receiver = ude(reciever.strip().lower())
# Manage both tables
sesh = bot.factory.loader.db.session
# Old Table
kt = sesh.query(KarmaStatsTable)
user = kt.filter(KarmaStatsTable.user == receiver).first()
# New Table
kst = sesh.query(KarmaStatsTable)
kst_list = kst.filter(KarmaStatsTable.receiver == receiver).all()
# The total amount of karma from both tables
total = 0
# Add karma from the old table
if user:
total += user.kcount
# Add karma from the new table
if kst_list:
for row in kst_list:
total += row.kcount
# Pluralization
points = "points"
if total == 1 or total == -1:
points = "point"
# Send the message
bot.reply(
comm, '%s has %d %s' % (uen(receiver), total, points),
encode=False
)
class KarmaGiver(Command):
"""
Identifies the person who gives the most karma
"""
regex = r'^(?:score|karma) --(giver|taker)$'
def command(self, bot, comm, groups):
kt = bot.factory.loader.db.session.query(KarmaStatsTable)
counter = Counter()
if groups[0] == 'giver':
positive_karma = kt.filter(KarmaStatsTable.kcount > 0)
for row in positive_karma:
counter[row.giver] += row.kcount
m = counter.most_common(1)
most = m[0] if m else None
if most:
bot.reply(
comm,
'%s has given the most karma (%d)' %
(uen(most[0]), most[1])
)
else:
bot.reply(
comm,
'No positive karma has been given yet :-('
)
elif groups[0] == 'taker':
negative_karma = kt.filter(KarmaStatsTable.kcount < 0)
for row in negative_karma:
counter[row.giver] += row.kcount
m = counter.most_common()
most = m[-1] if m else None
if most:
bot.reply(
comm,
'%s has given the most negative karma (%d)' %
(uen(most[0]), most[1])
)
else:
bot.reply(
comm,
'No negative karma has been given yet'
)
class MostActive(Command):
"""
Least/Most active hours of karma giving/taking
This will now look in the config for a timezone to use when displaying
the hour.
Example
Karma:
timezone: America/Los_Angeles
If no timezone is given, or it's invalid, time will be reported in UTC
"""
regex = r'^(?:score|karma)\s+--when-(positive|negative)'
def command(self, bot, comm, groups):
kt = bot.factory.loader.db.session.query(KarmaStatsTable)
counter = Counter()
if groups[0] == "positive":
karma = kt.filter(KarmaStatsTable.kcount > 0)
elif groups[0] == "negative":
karma = kt.filter(KarmaStatsTable.kcount < 0)
for row in karma:
hour = row.datetime.hour
counter[hour] += row.kcount
common_hour = (counter.most_common(1)[0][0]
if counter.most_common(1) else None)
# Title case for when
title_case = groups[0][0].upper() + groups[0][1:]
if common_hour:
# Create a datetime object
current_time = datetime.now(pytz.utc)
# Give it the common_hour
current_time = current_time.replace(hour=int(common_hour))
# Get the localized common hour
hour = self.plugin.tzinfo.normalize(
current_time.astimezone(self.plugin.tzinfo)).hour
# Report to the channel
bot.reply(
comm,
'%s karma is usually given during the %d:00 hour (%s)' %
(title_case, hour, self.plugin.timezone)
)
else:
# Inform that no karma of that type has been awarded yet
bot.reply(
comm,
'%s karma has been given yet' % title_case
)
|
hamperbot/hamper
|
hamper/plugins/karma.py
|
Karma.update_db
|
python
|
def update_db(self, userkarma, username):
kt = self.db.session.query(KarmaTable)
for user in userkarma:
if user != username:
# Modify the db accourdingly
urow = kt.filter(KarmaTable.user == ude(user)).first()
# If the user doesn't exist, create it
if not urow:
urow = KarmaTable(ude(user))
urow.kcount += userkarma[user]
self.db.session.add(urow)
self.db.session.commit()
|
Change the users karma by the karma amount (either 1 or -1)
|
train
|
https://github.com/hamperbot/hamper/blob/6f841ec4dcc319fdd7bb3ca1f990e3b7a458771b/hamper/plugins/karma.py#L142-L157
|
[
"def ude(s):\n return s.decode('utf-8')\n"
] |
class Karma(ChatCommandPlugin):
'''Give, take, and scoreboard Internet Points'''
"""
Hamper will look for lines that end in ++ or -- and modify that user's
karma value accordingly
!karma --top: shows (at most) the top 5
!karma --bottom: shows (at most) the bottom 5
!karma <username>: displays the karma for a given user
NOTE: The user is just a string, this really could be anything...like
potatoes or the infamous cookie clicker....
"""
name = 'karma'
priority = -2
short_desc = 'karma/score - Give or take karma from someone'
long_desc = ('username++ - Give karma\n'
'username-- - Take karma\n'
'!karma --top - Show the top 5 karma earners\n'
'!karma --bottom - Show the bottom 5 karma earners\n'
'!karma username - Show the user\'s karma count\n')
gotta_catch_em_all = r"""# 3 or statement
(
# Starting with a (, look for anything within
# parens that end with 2 or more + or -
(?=\()[^\)]+\)(\+\++|--+) |
# Looking from the start of the line until 2 or
# more - or + are found. No whitespace in this
# grouping
^[^\s]+(\+\++|--+) |
# Finally group any non-whitespace groupings
# that end with 2 or more + or -
[^\s]+?(\+\++|--+)((?=\s)|(?=$))
)
"""
regstr = re.compile(gotta_catch_em_all, re.X)
def setup(self, loader):
super(Karma, self).setup(loader)
self.db = loader.db
SQLAlchemyBase.metadata.create_all(self.db.engine)
def message(self, bot, comm):
"""
Check for strings ending with 2 or more '-' or '+'
"""
super(Karma, self).message(bot, comm)
# No directed karma giving or taking
if not comm['directed'] and not comm['pm']:
msg = comm['message'].strip().lower()
# use the magic above
words = self.regstr.findall(msg)
# Do things to people
karmas = self.modify_karma(words)
# Notify the users they can't modify their own karma
if comm['user'] in karmas.keys():
bot.reply(comm, "Nice try, no modifying your own karma")
# Maybe have an opinion
self.opine(bot, comm, karmas)
# Commit karma changes to the db
self.update_db(karmas, comm['user'])
def opine(self, bot, comm, karmas):
if len(karmas) == 0:
return False
resp = ' and '.join(karmas)
# Let's have an opinion!
if random.random() < .7:
resp = random.choice(positives) + resp + "!"
else:
resp = random.choice(negatives) + resp + "?"
if random.random() < .3:
bot.reply(comm, resp)
def modify_karma(self, words):
"""
Given a regex object, look through the groups and modify karma
as necessary
"""
# 'user': karma
k = defaultdict(int)
if words:
# For loop through all of the group members
for word_tuple in words:
word = word_tuple[0]
ending = word[-1]
# This will either end with a - or +, if it's a - subract 1
# kara, if it ends with a +, add 1 karma
change = -1 if ending == '-' else 1
# Now strip the ++ or -- from the end
if '-' in ending:
word = word.rstrip('-')
elif '+' in ending:
word = word.rstrip('+')
# Check if surrounded by parens, if so, remove them
if word.startswith('(') and word.endswith(')'):
word = word[1:-1]
# Finally strip whitespace
word = word.strip()
# Add the user to the dict
if word:
k[word] += change
return k
class KarmaList(Command):
"""
Return the top or bottom 5
"""
regex = r'^(?:score|karma) --(top|bottom)$'
LIST_MAX = 5
def command(self, bot, comm, groups):
users = bot.factory.loader.db.session.query(KarmaTable)
user_count = users.count()
top = self.LIST_MAX if user_count >= self.LIST_MAX else user_count
if top:
show = (KarmaTable.kcount.desc() if groups[0] == 'top'
else KarmaTable.kcount)
for user in users.order_by(show)[0:top]:
bot.reply(
comm, str('%s\x0f: %d' % (user.user, user.kcount))
)
else:
bot.reply(comm, r'No one has any karma yet :-(')
class UserKarma(Command):
"""
Retrieve karma for a given user
"""
# !karma <username>
regex = r'^(?:score|karma)\s+([^-].*)$'
def command(self, bot, comm, groups):
# Play nice when the user isn't in the db
kt = bot.factory.loader.db.session.query(KarmaTable)
thing = ude(groups[0].strip().lower())
user = kt.filter(KarmaTable.user == thing).first()
if user:
bot.reply(
comm, '%s has %d points' % (uen(user.user), user.kcount),
encode=False
)
else:
bot.reply(
comm, 'No karma for %s ' % uen(thing), encode=False
)
|
hamperbot/hamper
|
hamper/plugins/commands.py
|
Dice.roll
|
python
|
def roll(cls, num, sides, add):
rolls = []
for i in range(num):
rolls.append(random.randint(1, sides))
rolls.append(add)
return rolls
|
Rolls a die of sides sides, num times, sums them, and adds add
|
train
|
https://github.com/hamperbot/hamper/blob/6f841ec4dcc319fdd7bb3ca1f990e3b7a458771b/hamper/plugins/commands.py#L173-L179
| null |
class Dice(ChatCommandPlugin):
"""Random dice rolls!"""
name = 'dice'
priority = 5
def setup(self, *args, **kwargs):
super(Dice, self).setup(*args, **kwargs)
log.info('dice setup')
@classmethod
class DiceCommand(Command):
name = 'dice'
regex = '^(\d*)d(?:ice)?(\d*)\+?(\d*)$'
onlyDirected = True
short_desc = 'Dice - Roll dice by saying !XdY+Z.'
long_desc = ('Use like XdY+Z to roll X Y sided dice and add Z. Any '
'number may be left off.\n'
'Example: "!1d20+5" to roll a single twenty sided die '
'and add 5 to the result. You don\'t have to direct '
'this to the bot.')
def command(self, bot, com, groups):
num, sides, add = groups
if not num:
num = 1
else:
num = int(num)
if not sides:
sides = 6
else:
sides = int(sides)
if not add:
add = 0
else:
add = int(add)
result = Dice.roll(num, sides, add)
output = '%s: You rolled %sd%s+%s and got ' % (com['user'], num,
sides, add)
if len(result) < 11:
# the last one is the constant to add
for die in result[:-1]:
output += "%s, " % die
else:
output += "a lot of dice "
output += "for a total of %s" % sum(result)
bot.say(com['channel'], output)
|
hamperbot/hamper
|
hamper/plugins/foods.py
|
FoodsPlugin.describe_ingredient
|
python
|
def describe_ingredient(self):
resp = random.choice(ingredients)
if random.random() < .2:
resp = random.choice(foodqualities) + " " + resp
if random.random() < .2:
resp += " with " + self.describe_additive()
return resp
|
apple. tart apple with vinegar.
|
train
|
https://github.com/hamperbot/hamper/blob/6f841ec4dcc319fdd7bb3ca1f990e3b7a458771b/hamper/plugins/foods.py#L279-L286
| null |
class FoodsPlugin(ChatPlugin):
"""Even robots can get peckish"""
name = 'foods'
priority = 0
def setup(self, *args):
pass
def articleize(self, noun):
if random.random() < .3:
noun = random.choice(foodunits) + " of " + noun
if noun[0] in ['a', 'e', 'i', 'o', 'u', 'y']:
return "an " + noun
return "a " + noun
def discusses_food(self, msg):
for d in discussors:
if d in msg:
return d.strip() + "? "
return False
def describe_additive(self):
""" vinegar. spicy vinegar. a spicy vinegar. """
resp = random.choice(additives)
if random.random() < .2:
resp = random.choice(foodqualities) + ' ' + resp
if random.random() < .01:
resp = self.articleize(resp)
return resp
def describe_dish(self):
"""a burrito. a lettuce burrito with ketchup and raspberry."""
resp = random.choice(foodpreparations)
if random.random() < .85:
resp = self.describe_ingredient() + ' ' + resp
if random.random() < .2:
resp = self.describe_ingredient() + ' and ' + resp
if random.random() < .2:
resp = self.describe_ingredient() + ', ' + resp
if random.random() < .5:
resp += " with " + self.describe_additive()
elif random.random() < .5:
resp += " with " + self.describe_ingredient()
return self.articleize(resp)
def describe_meal(self):
resp = self.describe_dish()
if random.random() < .1:
resp += ", and " + self.describe_meal()
return resp
def suggest(self):
resp = self.describe_meal()
if random.random() < .7:
resp = random.choice(foodverbs) + ' ' + resp
if random.random() < .5:
resp = random.choice(suggestions) + ' ' + resp
if random.random() < .3:
resp += random.choice([' made with ', ' on ', ' using '])
resp += self.articleize(random.choice(foodtools))
return resp
def foodyreply(self, bot, comm, prefix = ""):
resp = prefix + self.suggest()
bot.reply(comm, '{0}: {1}'.format(comm['user'], resp))
def message(self, bot, comm):
msg = ude(comm['message'].strip())
prefix = self.discusses_food(msg)
if prefix:
if comm['directed']:
# always reply on question or comment to self about food
self.foodyreply(bot, comm)
elif random.random() < .7:
# often interject anyways
self.foodyreply(bot, comm, prefix)
return True
return False
|
hamperbot/hamper
|
hamper/plugins/foods.py
|
FoodsPlugin.describe_additive
|
python
|
def describe_additive(self):
resp = random.choice(additives)
if random.random() < .2:
resp = random.choice(foodqualities) + ' ' + resp
if random.random() < .01:
resp = self.articleize(resp)
return resp
|
vinegar. spicy vinegar. a spicy vinegar.
|
train
|
https://github.com/hamperbot/hamper/blob/6f841ec4dcc319fdd7bb3ca1f990e3b7a458771b/hamper/plugins/foods.py#L288-L295
| null |
class FoodsPlugin(ChatPlugin):
"""Even robots can get peckish"""
name = 'foods'
priority = 0
def setup(self, *args):
pass
def articleize(self, noun):
if random.random() < .3:
noun = random.choice(foodunits) + " of " + noun
if noun[0] in ['a', 'e', 'i', 'o', 'u', 'y']:
return "an " + noun
return "a " + noun
def discusses_food(self, msg):
for d in discussors:
if d in msg:
return d.strip() + "? "
return False
def describe_ingredient(self):
""" apple. tart apple with vinegar. """
resp = random.choice(ingredients)
if random.random() < .2:
resp = random.choice(foodqualities) + " " + resp
if random.random() < .2:
resp += " with " + self.describe_additive()
return resp
def describe_dish(self):
"""a burrito. a lettuce burrito with ketchup and raspberry."""
resp = random.choice(foodpreparations)
if random.random() < .85:
resp = self.describe_ingredient() + ' ' + resp
if random.random() < .2:
resp = self.describe_ingredient() + ' and ' + resp
if random.random() < .2:
resp = self.describe_ingredient() + ', ' + resp
if random.random() < .5:
resp += " with " + self.describe_additive()
elif random.random() < .5:
resp += " with " + self.describe_ingredient()
return self.articleize(resp)
def describe_meal(self):
resp = self.describe_dish()
if random.random() < .1:
resp += ", and " + self.describe_meal()
return resp
def suggest(self):
resp = self.describe_meal()
if random.random() < .7:
resp = random.choice(foodverbs) + ' ' + resp
if random.random() < .5:
resp = random.choice(suggestions) + ' ' + resp
if random.random() < .3:
resp += random.choice([' made with ', ' on ', ' using '])
resp += self.articleize(random.choice(foodtools))
return resp
def foodyreply(self, bot, comm, prefix = ""):
resp = prefix + self.suggest()
bot.reply(comm, '{0}: {1}'.format(comm['user'], resp))
def message(self, bot, comm):
msg = ude(comm['message'].strip())
prefix = self.discusses_food(msg)
if prefix:
if comm['directed']:
# always reply on question or comment to self about food
self.foodyreply(bot, comm)
elif random.random() < .7:
# often interject anyways
self.foodyreply(bot, comm, prefix)
return True
return False
|
hamperbot/hamper
|
hamper/plugins/foods.py
|
FoodsPlugin.describe_dish
|
python
|
def describe_dish(self):
resp = random.choice(foodpreparations)
if random.random() < .85:
resp = self.describe_ingredient() + ' ' + resp
if random.random() < .2:
resp = self.describe_ingredient() + ' and ' + resp
if random.random() < .2:
resp = self.describe_ingredient() + ', ' + resp
if random.random() < .5:
resp += " with " + self.describe_additive()
elif random.random() < .5:
resp += " with " + self.describe_ingredient()
return self.articleize(resp)
|
a burrito. a lettuce burrito with ketchup and raspberry.
|
train
|
https://github.com/hamperbot/hamper/blob/6f841ec4dcc319fdd7bb3ca1f990e3b7a458771b/hamper/plugins/foods.py#L297-L310
|
[
"def describe_ingredient(self):\n \"\"\" apple. tart apple with vinegar. \"\"\"\n resp = random.choice(ingredients)\n if random.random() < .2:\n resp = random.choice(foodqualities) + \" \" + resp\n if random.random() < .2:\n resp += \" with \" + self.describe_additive()\n return resp\n"
] |
class FoodsPlugin(ChatPlugin):
"""Even robots can get peckish"""
name = 'foods'
priority = 0
def setup(self, *args):
pass
def articleize(self, noun):
if random.random() < .3:
noun = random.choice(foodunits) + " of " + noun
if noun[0] in ['a', 'e', 'i', 'o', 'u', 'y']:
return "an " + noun
return "a " + noun
def discusses_food(self, msg):
for d in discussors:
if d in msg:
return d.strip() + "? "
return False
def describe_ingredient(self):
""" apple. tart apple with vinegar. """
resp = random.choice(ingredients)
if random.random() < .2:
resp = random.choice(foodqualities) + " " + resp
if random.random() < .2:
resp += " with " + self.describe_additive()
return resp
def describe_additive(self):
""" vinegar. spicy vinegar. a spicy vinegar. """
resp = random.choice(additives)
if random.random() < .2:
resp = random.choice(foodqualities) + ' ' + resp
if random.random() < .01:
resp = self.articleize(resp)
return resp
def describe_meal(self):
resp = self.describe_dish()
if random.random() < .1:
resp += ", and " + self.describe_meal()
return resp
def suggest(self):
resp = self.describe_meal()
if random.random() < .7:
resp = random.choice(foodverbs) + ' ' + resp
if random.random() < .5:
resp = random.choice(suggestions) + ' ' + resp
if random.random() < .3:
resp += random.choice([' made with ', ' on ', ' using '])
resp += self.articleize(random.choice(foodtools))
return resp
def foodyreply(self, bot, comm, prefix = ""):
resp = prefix + self.suggest()
bot.reply(comm, '{0}: {1}'.format(comm['user'], resp))
def message(self, bot, comm):
msg = ude(comm['message'].strip())
prefix = self.discusses_food(msg)
if prefix:
if comm['directed']:
# always reply on question or comment to self about food
self.foodyreply(bot, comm)
elif random.random() < .7:
# often interject anyways
self.foodyreply(bot, comm, prefix)
return True
return False
|
hamperbot/hamper
|
hamper/config.py
|
replace_env_vars
|
python
|
def replace_env_vars(conf):
d = deepcopy(conf)
for key, value in d.items():
if type(value) == dict:
d[key] = replace_env_vars(value)
elif type(value) == str:
if value[0] == '$':
var_name = value[1:]
d[key] = os.environ[var_name]
return d
|
Fill `conf` with environment variables, where appropriate.
Any value of the from $VAR will be replaced with the environment variable
VAR. If there are sub dictionaries, this function will recurse.
This will preserve the original dictionary, and return a copy.
|
train
|
https://github.com/hamperbot/hamper/blob/6f841ec4dcc319fdd7bb3ca1f990e3b7a458771b/hamper/config.py#L36-L53
|
[
"def replace_env_vars(conf):\n \"\"\"Fill `conf` with environment variables, where appropriate.\n\n Any value of the from $VAR will be replaced with the environment variable\n VAR. If there are sub dictionaries, this function will recurse.\n\n This will preserve the original dictionary, and return a copy.\n \"\"\"\n d = deepcopy(conf)\n for key, value in d.items():\n if type(value) == dict:\n d[key] = replace_env_vars(value)\n elif type(value) == str:\n if value[0] == '$':\n var_name = value[1:]\n d[key] = os.environ[var_name]\n\n return d\n"
] |
import os
import sys
from copy import deepcopy
import yaml
def load():
try:
with open('hamper.conf') as config_file:
config = yaml.load(config_file)
except IOError:
config = {}
config = replace_env_vars(config)
# Fill in data from the env:
for k, v in os.environ.items():
try:
config[k] = yaml.load(v)
except yaml.error.YAMLError:
config[k] = v
# Special case: database
if 'DATABASE_URL' in os.environ:
config['db'] = os.environ['DATABASE_URL']
for key in ['server', 'port', 'nickname', 'channels']:
if (key not in config) or (not config[key]):
print('You need to define {0} in the config file.'.format(key))
sys.exit()
return config
|
hamperbot/hamper
|
hamper/plugins/questions.py
|
YesNoPlugin.setup
|
python
|
def setup(self, *args):
responses = [
('Yes.', 'eq'),
('How should I know?', 'eq'),
('Try asking a human', 'eq'),
('Eww.', 'eq'),
('You\'d just do the opposite of whatever I tell you', 'eq'),
('No.', 'eq'),
('Nope.', 'eq'),
('Maybe.', 'eq'),
('Possibly.', 'eq'),
('It could be.', 'eq'),
("No. No, I don't think so.", 'eq/2'),
('Without a doubt.', 'eq/2'),
('I think... Yes.', 'eq/2'),
('Heck yes!', 'eq/2'),
('Maybe. Possibly. It could be.', 'eq/2'),
('Ask again later.', 'eq/3'),
("I don't know.", 'eq/3'),
("I'm sorry, I was thinking of bananas", 'eq/100'),
]
self.advices = [(x, 1) for x in obliques]
total_prob = 0
real_resp = []
evens = []
for resp, prob in responses:
if isinstance(prob, str):
if prob.startswith('eq'):
sp = prob.split('/')
if len(sp) == 1:
evens.append((resp, 1))
else:
div = int(sp[1])
evens.append((resp, 1.0 / div))
else:
real_resp.append((resp, prob))
total_prob += prob
# Share is the probability of a "eq" probability. Share/2 would be the
# probability of a "eq/2" probability.
share = (1 - total_prob) / sum(div for _, div in evens)
for resp, divisor in evens:
real_resp.append((resp, share * divisor))
self.responses = real_resp
self.is_question = re.compile('.*\?(\?|!)*$')
|
Set up the list of responses, with weights. If the weight of a response
is 'eq', it will be assigned a value that splits what is left after
everything that has a number is assigned. If it's weight is some
fraction of 'eq' (ie: 'eq/2' or 'eq/3'), then it will be assigned
1/2, 1/3, etc of the 'eq' weight. All probabilities will add up to
1.0 (plus or minus any rounding errors).
|
train
|
https://github.com/hamperbot/hamper/blob/6f841ec4dcc319fdd7bb3ca1f990e3b7a458771b/hamper/plugins/questions.py#L549-L605
| null |
class YesNoPlugin(ChatPlugin):
name = 'yesno'
priority = -1
def shouldq(self, bot, comm):
resp = random.choice(obliques)
bot.reply(comm, '{0}: {1}'.format(comm['user'], resp))
return True
def articleize(self, noun):
if random.random() < .3:
noun = random.choice(adjs) + ' ' + noun
if noun[0] in ['a', 'e', 'i', 'o', 'u', 'y']:
return "an " + noun
return "a " + noun
def canq(self, bot, comm):
resp = random.choice(canstarts)
resp += self.articleize(random.choice(nouns))
if random.random() < .5:
resp += " and " + self.articleize(random.choice(nouns))
if random.random() < .1:
resp += " and " + self.articleize(random.choice(nouns))
resp += random.choice(['...', '.', '?'])
bot.reply(comm, '{0}: {1}'.format(comm['user'], resp))
return True
def xtoy(self):
x = str(int(random.random()*10))
y = str(int(random.random()*10))
return x + " to " + y
def manything(self, msg):
parts = msg.split()
quantifiers = ['many', 'much']
for q in quantifiers:
if q in msg:
idx = parts.index(q)
for i in range(idx, len(parts)):
if len(parts[i]) > 4:
# Let's pretend to plural.
return parts[i].rstrip('s') + 's'
return None
def howmany(self, bot, comm, msg):
thing = self.manything(msg)
resp = random.randint(-5, 100)
if resp > 80:
resp = random.randint(80, 1000)
resp = str(resp)
if resp == '0':
if thing:
resp = "No " + thing + " at all."
else:
resp = "None at all."
if thing:
if resp == '1':
resp = "Just a single " + thing.rstrip('s')
else:
resp += " " + thing
if random.random() < .05:
if thing:
resp = "All the " + thing + "!"
else:
resp = "All of them!"
bot.reply(comm, resp)
return True
def betting(self, bot, comm):
resp = random.choice(bettings)
if random.random() < .7:
resp = random.choice(idcall)
resp += random.choice(foragainst)
resp += random.choice(['it ','that ','such nonsense ', 'such a thing '])
resp += self.xtoy()
bot.reply(comm, resp)
return True
def hamperesque(self, bot, comm, msg):
whatsay = ""
if "n't" in msg:
whatsay = random.choice(negatories)
for n in negatories:
if n in msg:
whatsay = random.choice(negatories)
for a in affirmatives:
if a in msg:
whatsay = random.choice(affirmatives)
if "n't" in msg:
whatsay = random.choice(negatories)
if whatsay != "":
bot.reply(comm, '{0}: {1}'.format(comm['user'], whatsay))
else:
r = random.random()
replies = self.responses
for resp, prob in replies:
r -= prob
if r < 0:
bot.reply(comm, '{0}: {1}'.format(comm['user'], resp))
return True
def sortq(self, bot, comm, msg):
if "should " in msg:
return self.shouldq(bot, comm)
for b in betwords:
if b in msg:
return self.betting(bot, comm)
if "can " in msg or "could" in msg:
return self.canq(bot, comm)
if "many" in msg or "much" in msg:
# TODO handle "much" with units
return self.howmany(bot, comm, msg)
return self.hamperesque(bot, comm, msg)
def message(self, bot, comm):
msg = ude(comm['message'].strip()).lower()
if self.is_question.search(msg):
if comm['directed']:
self.sortq(bot, comm, msg)
elif random.random() < .1:
self.sortq(bot, comm, msg)
return False
|
aleontiev/dj
|
dj/application.py
|
Application.parse_application_name
|
python
|
def parse_application_name(setup_filename):
with open(setup_filename, 'rt') as setup_file:
fst = RedBaron(setup_file.read())
for node in fst:
if (
node.type == 'atomtrailers' and
str(node.name) == 'setup'
):
for call in node.call:
if str(call.name) == 'name':
value = call.value
if hasattr(value, 'to_python'):
value = value.to_python()
name = str(value)
break
if name:
break
return name
|
Parse a setup.py file for the name.
Returns:
name, or None
|
train
|
https://github.com/aleontiev/dj/blob/0612d442fdd8d472aea56466568b9857556ecb51/dj/application.py#L67-L89
| null |
class Application(object):
def __init__(
self,
stdout=None,
directory=None
):
self.stdout = stdout or _stdout
current = os.getcwd()
nearest_setup_file = find_nearest(current, 'setup.py')
self.directory = directory or (
os.path.dirname(
nearest_setup_file
) if nearest_setup_file else current
)
self.config = Config(self.directory)
self.setup_file = os.path.join(
self.directory,
'setup.py'
)
self.requirements_file = os.path.join(
self.directory,
self.config.get('requirements')
)
self.dev_requirements_file = os.path.join(
self.directory,
self.config.get('devRequirements'),
)
self.local_requirements_file = os.path.join(
self.directory,
self.config.get('localRequirements'),
)
self.runtime = Runtime(self.config.get('runtime'))
def __unicode__(self):
return '%s (%s)' % (self.name, self.directory)
@property
def exists(self):
if not hasattr(self, '_exists'):
self._exists = os.path.exists(self.setup_file)
return self._exists
@staticmethod
def _get_name(self):
name = self.config.get('name')
if name:
return name
if self.exists:
try:
name = Application.parse_application_name(self.setup_file)
except Exception:
name = 'unknown'
self.config.set('name', name)
self.config.save()
return name
@property
def name(self):
if not hasattr(self, '_name'):
self._name = self._get_name()
return self._name
@property
def addons(self):
if not hasattr(self, '_addons'):
self._addons = {
a.name: a for a in self.get_addons()
}
return self._addons
def get_addons(self):
self.build()
addons = []
for directory in get_directories(
self.environment.package_directory,
filter=lambda x: x.endswith('/blueprints')
):
parent_directory = '/'.join(directory.split('/')[0:-1])
name = os.path.basename(parent_directory)
addons.append(Addon(name, parent_directory))
return addons
def refresh(self):
if hasattr(self, '_name'):
del self._name
if hasattr(self, '_blueprints'):
del self._blueprints
if hasattr(self, '_addons'):
del self._addons
if hasattr(self, '_exists'):
del self._exists
@property
def blueprints(self):
if not hasattr(self, '_blueprints'):
self._blueprints = {}
for b in self.get_blueprints():
# add by full name, e.g. dj.model
self._blueprints[b.full_name] = b
if not b.addon or b.name not in self._blueprints:
# for blueprints other that init or core,
# add them to the global namespace
self._blueprints[b.name] = b
return self._blueprints
def get_blueprints(self):
addons = self.addons.values()
blueprints = [a.blueprints.values() for a in addons]
return get_core_blueprints() + [x for s in blueprints for x in s]
@property
def requirements_last_modified(self):
return get_last_touched(self.requirements_file)
@property
def dev_requirements_last_modified(self):
return get_last_touched(self.dev_requirements_file)
@property
def local_requirements_last_modified(self):
return get_last_touched(self.local_requirements_file)
@property
def setup_last_modified(self):
# timestamp of last setup.py change
return get_last_touched(self.setup_file)
@property
def environment(self):
if not hasattr(self, '_environment'):
self._environment = self.runtime.create_environment(
self.config.environment_path
)
return self._environment
def _get_build_token(self, key):
return os.path.join(
self.environment.virtual_directory, 'build.%s' % key
)
def _build(self, key, last_modified, cmd, verbose=True):
token = self._get_build_token(key)
last_built = get_last_touched(token)
if not last_built or last_built < last_modified:
self.stdout.write(style.format_command('Building', key))
result = self.execute(cmd, verbose=False, capture=True)
if 'pip' in cmd:
deps = []
for line in result.split('\n'):
splits = line.split(' ')
if line.startswith('Successfully installed'):
dep = splits[2]
dep = '=='.join(dep.rsplit('-', 1))
dep = Dependency(dep)
deps.append((dep, style.green('+ ')))
elif line.startswith('Requirement already satisfied: '):
dep = splits[3]
dep = Dependency(dep)
deps.append((dep, style.yellow('. ')))
elif 'Uninstalling' in line:
index = line.index('Uninstalling')
dep = line[index:].split(' ')[1]
dep = ''.join(dep[0:len(dep) - 1])
dep = '=='.join(dep.rsplit('-', 1))
dep = Dependency(dep)
deps.append((dep, style.red('- ')))
for dep, prefix in sorted(
deps,
key=lambda x: str(x[0])
):
self.stdout.write(prefix + dep.to_stdout())
touch(token)
def build(self):
"""Builds the app in the app's environment.
Only builds if the build is out-of-date and is non-empty.
Builds in 3 stages: requirements, dev requirements, and app.
pip is used to install requirements, and setup.py is used to
install the app itself.
Raises:
ValidationError if the app fails to build.
"""
if self.exists:
self._build(
'requirements',
self.requirements_last_modified,
'pip install -U -r %s' % self.requirements_file
)
try:
self._build(
'requirements (dev)',
self.dev_requirements_last_modified,
'pip install -U -r %s' % self.dev_requirements_file
)
except Exception as e:
if 'No such file' not in str(e):
raise e
self.stdout.write(
style.yellow('Could not find dev requirements')
)
try:
self._build(
'requirements (local)',
self.local_requirements_last_modified,
'pip install -U -r %s' % self.local_requirements_file
)
except Exception as e:
if 'No such file' not in str(e):
raise e
self.stdout.write(
style.yellow('Could not find local requirements')
)
self._build(
'application',
self.setup_last_modified,
'python %s develop' % self.setup_file
)
def execute(self, command, **kwargs):
return self.environment.execute(command, **kwargs)
def run(self, command, **kwargs):
self.build()
self.stdout.write(style.format_command('Running', command))
return self.execute(command, **kwargs)
def generate(self, blueprint, context, interactive=True):
"""Generate a blueprint within this application."""
if not isinstance(blueprint, Blueprint):
bp = self.blueprints.get(blueprint)
if not bp:
raise ValueError('%s is not a valid blueprint' % blueprint)
blueprint = bp
self.stdout.write(
style.format_command(
'Generating',
blueprint.full_name
)
)
generator = Generator(
self,
blueprint,
context,
interactive=interactive
)
result = generator.generate()
if blueprint.name == 'init':
# try re-setting the name
self.refresh()
return result
def get_dependency_manager(self, dev=False):
return DependencyManager(
os.path.join(
self.directory,
self.dev_requirements_file if dev else self.requirements_file
)
)
def add(self, addon, dev=False, interactive=True):
"""Add a new dependency and install it."""
dependencies = self.get_dependency_manager(dev=dev)
other_dependencies = self.get_dependency_manager(dev=not dev)
existing = dependencies.get(addon)
self.stdout.write(style.format_command('Adding', addon))
dependencies.add(addon)
try:
# try running the build
self.build()
self.refresh()
# remove version of this in other requirements file
other_dependencies.remove(addon, warn=False)
# run new addon constructor
constructor_name = '%s.init' % Dependency(addon).module_name
constructor = self.blueprints.get(constructor_name)
if constructor:
context = constructor.load_context().main(
[], standalone_mode=False
)
self.generate(constructor, context, interactive=interactive)
except Exception as e:
# restore original settings
self.stdout.write(style.red(str(e)))
self.stdout.write(
style.yellow('Could not find %s' % addon)
)
dependencies.remove(addon)
if existing:
dependencies.add(existing)
return
def remove(self, addon, dev=False):
"""Remove a dependency and uninstall it."""
dependencies = self.get_dependency_manager(dev=dev)
other_dependencies = self.get_dependency_manager(dev=not dev)
self.stdout.write(style.format_command('Removing', addon))
removed = dependencies.remove(addon, warn=False)
if not removed:
removed = other_dependencies.remove(addon, warn=False)
if removed:
self.build()
else:
exception = '%s is not installed.' % Dependency(addon).to_stdout()
self.stdout.write(style.red(exception))
def info(self):
output = []
dev_requirements = self.get_dependency_manager(dev=True).dependencies
requirements = self.get_dependency_manager(dev=False).dependencies
app = self.to_stdout()
if self.exists:
output.append(style.blue('Application:\n %s' % app))
if requirements:
output.append(style.blue('Requirements:'))
for _, dep in sorted(
requirements.items(),
key=lambda x: x[0].lower()):
output.append(' ' + dep.to_stdout())
if dev_requirements:
output.append(style.blue('Requirements (dev):'))
for _, dep in sorted(
dev_requirements.items(),
key=lambda x: x[0].lower()
):
output.append(' ' + dep.to_stdout())
else:
output.append(
style.yellow(
'%s, try running %s.' % (
app, style.white('dj init')
)
)
)
return '\n'.join(output)
def to_stdout(self):
return '%s %s %s' % (
style.white(self.name),
style.gray('@'),
style.green(self.runtime.version)
) if self.name else style.yellow('No application')
|
aleontiev/dj
|
dj/application.py
|
Application.build
|
python
|
def build(self):
if self.exists:
self._build(
'requirements',
self.requirements_last_modified,
'pip install -U -r %s' % self.requirements_file
)
try:
self._build(
'requirements (dev)',
self.dev_requirements_last_modified,
'pip install -U -r %s' % self.dev_requirements_file
)
except Exception as e:
if 'No such file' not in str(e):
raise e
self.stdout.write(
style.yellow('Could not find dev requirements')
)
try:
self._build(
'requirements (local)',
self.local_requirements_last_modified,
'pip install -U -r %s' % self.local_requirements_file
)
except Exception as e:
if 'No such file' not in str(e):
raise e
self.stdout.write(
style.yellow('Could not find local requirements')
)
self._build(
'application',
self.setup_last_modified,
'python %s develop' % self.setup_file
)
|
Builds the app in the app's environment.
Only builds if the build is out-of-date and is non-empty.
Builds in 3 stages: requirements, dev requirements, and app.
pip is used to install requirements, and setup.py is used to
install the app itself.
Raises:
ValidationError if the app fails to build.
|
train
|
https://github.com/aleontiev/dj/blob/0612d442fdd8d472aea56466568b9857556ecb51/dj/application.py#L224-L270
|
[
"def yellow(message):\n return click.style(message, fg='yellow', bold=True)\n",
"def _build(self, key, last_modified, cmd, verbose=True):\n token = self._get_build_token(key)\n last_built = get_last_touched(token)\n if not last_built or last_built < last_modified:\n self.stdout.write(style.format_command('Building', key))\n result = self.execute(cmd, verbose=False, capture=True)\n if 'pip' in cmd:\n deps = []\n for line in result.split('\\n'):\n splits = line.split(' ')\n if line.startswith('Successfully installed'):\n dep = splits[2]\n dep = '=='.join(dep.rsplit('-', 1))\n dep = Dependency(dep)\n deps.append((dep, style.green('+ ')))\n elif line.startswith('Requirement already satisfied: '):\n dep = splits[3]\n dep = Dependency(dep)\n deps.append((dep, style.yellow('. ')))\n elif 'Uninstalling' in line:\n index = line.index('Uninstalling')\n dep = line[index:].split(' ')[1]\n dep = ''.join(dep[0:len(dep) - 1])\n dep = '=='.join(dep.rsplit('-', 1))\n dep = Dependency(dep)\n deps.append((dep, style.red('- ')))\n\n for dep, prefix in sorted(\n deps,\n key=lambda x: str(x[0])\n ):\n self.stdout.write(prefix + dep.to_stdout())\n touch(token)\n"
] |
class Application(object):
def __init__(
self,
stdout=None,
directory=None
):
self.stdout = stdout or _stdout
current = os.getcwd()
nearest_setup_file = find_nearest(current, 'setup.py')
self.directory = directory or (
os.path.dirname(
nearest_setup_file
) if nearest_setup_file else current
)
self.config = Config(self.directory)
self.setup_file = os.path.join(
self.directory,
'setup.py'
)
self.requirements_file = os.path.join(
self.directory,
self.config.get('requirements')
)
self.dev_requirements_file = os.path.join(
self.directory,
self.config.get('devRequirements'),
)
self.local_requirements_file = os.path.join(
self.directory,
self.config.get('localRequirements'),
)
self.runtime = Runtime(self.config.get('runtime'))
def __unicode__(self):
return '%s (%s)' % (self.name, self.directory)
@property
def exists(self):
if not hasattr(self, '_exists'):
self._exists = os.path.exists(self.setup_file)
return self._exists
@staticmethod
def parse_application_name(setup_filename):
"""Parse a setup.py file for the name.
Returns:
name, or None
"""
with open(setup_filename, 'rt') as setup_file:
fst = RedBaron(setup_file.read())
for node in fst:
if (
node.type == 'atomtrailers' and
str(node.name) == 'setup'
):
for call in node.call:
if str(call.name) == 'name':
value = call.value
if hasattr(value, 'to_python'):
value = value.to_python()
name = str(value)
break
if name:
break
return name
def _get_name(self):
name = self.config.get('name')
if name:
return name
if self.exists:
try:
name = Application.parse_application_name(self.setup_file)
except Exception:
name = 'unknown'
self.config.set('name', name)
self.config.save()
return name
@property
def name(self):
if not hasattr(self, '_name'):
self._name = self._get_name()
return self._name
@property
def addons(self):
if not hasattr(self, '_addons'):
self._addons = {
a.name: a for a in self.get_addons()
}
return self._addons
def get_addons(self):
self.build()
addons = []
for directory in get_directories(
self.environment.package_directory,
filter=lambda x: x.endswith('/blueprints')
):
parent_directory = '/'.join(directory.split('/')[0:-1])
name = os.path.basename(parent_directory)
addons.append(Addon(name, parent_directory))
return addons
def refresh(self):
if hasattr(self, '_name'):
del self._name
if hasattr(self, '_blueprints'):
del self._blueprints
if hasattr(self, '_addons'):
del self._addons
if hasattr(self, '_exists'):
del self._exists
@property
def blueprints(self):
if not hasattr(self, '_blueprints'):
self._blueprints = {}
for b in self.get_blueprints():
# add by full name, e.g. dj.model
self._blueprints[b.full_name] = b
if not b.addon or b.name not in self._blueprints:
# for blueprints other that init or core,
# add them to the global namespace
self._blueprints[b.name] = b
return self._blueprints
def get_blueprints(self):
addons = self.addons.values()
blueprints = [a.blueprints.values() for a in addons]
return get_core_blueprints() + [x for s in blueprints for x in s]
@property
def requirements_last_modified(self):
return get_last_touched(self.requirements_file)
@property
def dev_requirements_last_modified(self):
return get_last_touched(self.dev_requirements_file)
@property
def local_requirements_last_modified(self):
return get_last_touched(self.local_requirements_file)
@property
def setup_last_modified(self):
# timestamp of last setup.py change
return get_last_touched(self.setup_file)
@property
def environment(self):
if not hasattr(self, '_environment'):
self._environment = self.runtime.create_environment(
self.config.environment_path
)
return self._environment
def _get_build_token(self, key):
return os.path.join(
self.environment.virtual_directory, 'build.%s' % key
)
def _build(self, key, last_modified, cmd, verbose=True):
token = self._get_build_token(key)
last_built = get_last_touched(token)
if not last_built or last_built < last_modified:
self.stdout.write(style.format_command('Building', key))
result = self.execute(cmd, verbose=False, capture=True)
if 'pip' in cmd:
deps = []
for line in result.split('\n'):
splits = line.split(' ')
if line.startswith('Successfully installed'):
dep = splits[2]
dep = '=='.join(dep.rsplit('-', 1))
dep = Dependency(dep)
deps.append((dep, style.green('+ ')))
elif line.startswith('Requirement already satisfied: '):
dep = splits[3]
dep = Dependency(dep)
deps.append((dep, style.yellow('. ')))
elif 'Uninstalling' in line:
index = line.index('Uninstalling')
dep = line[index:].split(' ')[1]
dep = ''.join(dep[0:len(dep) - 1])
dep = '=='.join(dep.rsplit('-', 1))
dep = Dependency(dep)
deps.append((dep, style.red('- ')))
for dep, prefix in sorted(
deps,
key=lambda x: str(x[0])
):
self.stdout.write(prefix + dep.to_stdout())
touch(token)
def execute(self, command, **kwargs):
return self.environment.execute(command, **kwargs)
def run(self, command, **kwargs):
self.build()
self.stdout.write(style.format_command('Running', command))
return self.execute(command, **kwargs)
def generate(self, blueprint, context, interactive=True):
"""Generate a blueprint within this application."""
if not isinstance(blueprint, Blueprint):
bp = self.blueprints.get(blueprint)
if not bp:
raise ValueError('%s is not a valid blueprint' % blueprint)
blueprint = bp
self.stdout.write(
style.format_command(
'Generating',
blueprint.full_name
)
)
generator = Generator(
self,
blueprint,
context,
interactive=interactive
)
result = generator.generate()
if blueprint.name == 'init':
# try re-setting the name
self.refresh()
return result
def get_dependency_manager(self, dev=False):
return DependencyManager(
os.path.join(
self.directory,
self.dev_requirements_file if dev else self.requirements_file
)
)
def add(self, addon, dev=False, interactive=True):
"""Add a new dependency and install it."""
dependencies = self.get_dependency_manager(dev=dev)
other_dependencies = self.get_dependency_manager(dev=not dev)
existing = dependencies.get(addon)
self.stdout.write(style.format_command('Adding', addon))
dependencies.add(addon)
try:
# try running the build
self.build()
self.refresh()
# remove version of this in other requirements file
other_dependencies.remove(addon, warn=False)
# run new addon constructor
constructor_name = '%s.init' % Dependency(addon).module_name
constructor = self.blueprints.get(constructor_name)
if constructor:
context = constructor.load_context().main(
[], standalone_mode=False
)
self.generate(constructor, context, interactive=interactive)
except Exception as e:
# restore original settings
self.stdout.write(style.red(str(e)))
self.stdout.write(
style.yellow('Could not find %s' % addon)
)
dependencies.remove(addon)
if existing:
dependencies.add(existing)
return
def remove(self, addon, dev=False):
"""Remove a dependency and uninstall it."""
dependencies = self.get_dependency_manager(dev=dev)
other_dependencies = self.get_dependency_manager(dev=not dev)
self.stdout.write(style.format_command('Removing', addon))
removed = dependencies.remove(addon, warn=False)
if not removed:
removed = other_dependencies.remove(addon, warn=False)
if removed:
self.build()
else:
exception = '%s is not installed.' % Dependency(addon).to_stdout()
self.stdout.write(style.red(exception))
def info(self):
output = []
dev_requirements = self.get_dependency_manager(dev=True).dependencies
requirements = self.get_dependency_manager(dev=False).dependencies
app = self.to_stdout()
if self.exists:
output.append(style.blue('Application:\n %s' % app))
if requirements:
output.append(style.blue('Requirements:'))
for _, dep in sorted(
requirements.items(),
key=lambda x: x[0].lower()):
output.append(' ' + dep.to_stdout())
if dev_requirements:
output.append(style.blue('Requirements (dev):'))
for _, dep in sorted(
dev_requirements.items(),
key=lambda x: x[0].lower()
):
output.append(' ' + dep.to_stdout())
else:
output.append(
style.yellow(
'%s, try running %s.' % (
app, style.white('dj init')
)
)
)
return '\n'.join(output)
def to_stdout(self):
return '%s %s %s' % (
style.white(self.name),
style.gray('@'),
style.green(self.runtime.version)
) if self.name else style.yellow('No application')
|
aleontiev/dj
|
dj/application.py
|
Application.generate
|
python
|
def generate(self, blueprint, context, interactive=True):
if not isinstance(blueprint, Blueprint):
bp = self.blueprints.get(blueprint)
if not bp:
raise ValueError('%s is not a valid blueprint' % blueprint)
blueprint = bp
self.stdout.write(
style.format_command(
'Generating',
blueprint.full_name
)
)
generator = Generator(
self,
blueprint,
context,
interactive=interactive
)
result = generator.generate()
if blueprint.name == 'init':
# try re-setting the name
self.refresh()
return result
|
Generate a blueprint within this application.
|
train
|
https://github.com/aleontiev/dj/blob/0612d442fdd8d472aea56466568b9857556ecb51/dj/application.py#L280-L304
|
[
"def format_command(a, b='', prefix=''):\n return (\n white(prefix) +\n blue('%s: ' % a) +\n white(b)\n )\n",
"def refresh(self):\n if hasattr(self, '_name'):\n del self._name\n if hasattr(self, '_blueprints'):\n del self._blueprints\n if hasattr(self, '_addons'):\n del self._addons\n if hasattr(self, '_exists'):\n del self._exists\n",
"def generate(self):\n \"\"\"Generate the blueprint.\"\"\"\n self.render()\n self.merge()\n"
] |
class Application(object):
def __init__(
self,
stdout=None,
directory=None
):
self.stdout = stdout or _stdout
current = os.getcwd()
nearest_setup_file = find_nearest(current, 'setup.py')
self.directory = directory or (
os.path.dirname(
nearest_setup_file
) if nearest_setup_file else current
)
self.config = Config(self.directory)
self.setup_file = os.path.join(
self.directory,
'setup.py'
)
self.requirements_file = os.path.join(
self.directory,
self.config.get('requirements')
)
self.dev_requirements_file = os.path.join(
self.directory,
self.config.get('devRequirements'),
)
self.local_requirements_file = os.path.join(
self.directory,
self.config.get('localRequirements'),
)
self.runtime = Runtime(self.config.get('runtime'))
def __unicode__(self):
return '%s (%s)' % (self.name, self.directory)
@property
def exists(self):
if not hasattr(self, '_exists'):
self._exists = os.path.exists(self.setup_file)
return self._exists
@staticmethod
def parse_application_name(setup_filename):
"""Parse a setup.py file for the name.
Returns:
name, or None
"""
with open(setup_filename, 'rt') as setup_file:
fst = RedBaron(setup_file.read())
for node in fst:
if (
node.type == 'atomtrailers' and
str(node.name) == 'setup'
):
for call in node.call:
if str(call.name) == 'name':
value = call.value
if hasattr(value, 'to_python'):
value = value.to_python()
name = str(value)
break
if name:
break
return name
def _get_name(self):
name = self.config.get('name')
if name:
return name
if self.exists:
try:
name = Application.parse_application_name(self.setup_file)
except Exception:
name = 'unknown'
self.config.set('name', name)
self.config.save()
return name
@property
def name(self):
if not hasattr(self, '_name'):
self._name = self._get_name()
return self._name
@property
def addons(self):
if not hasattr(self, '_addons'):
self._addons = {
a.name: a for a in self.get_addons()
}
return self._addons
def get_addons(self):
self.build()
addons = []
for directory in get_directories(
self.environment.package_directory,
filter=lambda x: x.endswith('/blueprints')
):
parent_directory = '/'.join(directory.split('/')[0:-1])
name = os.path.basename(parent_directory)
addons.append(Addon(name, parent_directory))
return addons
def refresh(self):
if hasattr(self, '_name'):
del self._name
if hasattr(self, '_blueprints'):
del self._blueprints
if hasattr(self, '_addons'):
del self._addons
if hasattr(self, '_exists'):
del self._exists
@property
def blueprints(self):
if not hasattr(self, '_blueprints'):
self._blueprints = {}
for b in self.get_blueprints():
# add by full name, e.g. dj.model
self._blueprints[b.full_name] = b
if not b.addon or b.name not in self._blueprints:
# for blueprints other that init or core,
# add them to the global namespace
self._blueprints[b.name] = b
return self._blueprints
def get_blueprints(self):
addons = self.addons.values()
blueprints = [a.blueprints.values() for a in addons]
return get_core_blueprints() + [x for s in blueprints for x in s]
@property
def requirements_last_modified(self):
return get_last_touched(self.requirements_file)
@property
def dev_requirements_last_modified(self):
return get_last_touched(self.dev_requirements_file)
@property
def local_requirements_last_modified(self):
return get_last_touched(self.local_requirements_file)
@property
def setup_last_modified(self):
# timestamp of last setup.py change
return get_last_touched(self.setup_file)
@property
def environment(self):
if not hasattr(self, '_environment'):
self._environment = self.runtime.create_environment(
self.config.environment_path
)
return self._environment
def _get_build_token(self, key):
return os.path.join(
self.environment.virtual_directory, 'build.%s' % key
)
def _build(self, key, last_modified, cmd, verbose=True):
token = self._get_build_token(key)
last_built = get_last_touched(token)
if not last_built or last_built < last_modified:
self.stdout.write(style.format_command('Building', key))
result = self.execute(cmd, verbose=False, capture=True)
if 'pip' in cmd:
deps = []
for line in result.split('\n'):
splits = line.split(' ')
if line.startswith('Successfully installed'):
dep = splits[2]
dep = '=='.join(dep.rsplit('-', 1))
dep = Dependency(dep)
deps.append((dep, style.green('+ ')))
elif line.startswith('Requirement already satisfied: '):
dep = splits[3]
dep = Dependency(dep)
deps.append((dep, style.yellow('. ')))
elif 'Uninstalling' in line:
index = line.index('Uninstalling')
dep = line[index:].split(' ')[1]
dep = ''.join(dep[0:len(dep) - 1])
dep = '=='.join(dep.rsplit('-', 1))
dep = Dependency(dep)
deps.append((dep, style.red('- ')))
for dep, prefix in sorted(
deps,
key=lambda x: str(x[0])
):
self.stdout.write(prefix + dep.to_stdout())
touch(token)
def build(self):
"""Builds the app in the app's environment.
Only builds if the build is out-of-date and is non-empty.
Builds in 3 stages: requirements, dev requirements, and app.
pip is used to install requirements, and setup.py is used to
install the app itself.
Raises:
ValidationError if the app fails to build.
"""
if self.exists:
self._build(
'requirements',
self.requirements_last_modified,
'pip install -U -r %s' % self.requirements_file
)
try:
self._build(
'requirements (dev)',
self.dev_requirements_last_modified,
'pip install -U -r %s' % self.dev_requirements_file
)
except Exception as e:
if 'No such file' not in str(e):
raise e
self.stdout.write(
style.yellow('Could not find dev requirements')
)
try:
self._build(
'requirements (local)',
self.local_requirements_last_modified,
'pip install -U -r %s' % self.local_requirements_file
)
except Exception as e:
if 'No such file' not in str(e):
raise e
self.stdout.write(
style.yellow('Could not find local requirements')
)
self._build(
'application',
self.setup_last_modified,
'python %s develop' % self.setup_file
)
def execute(self, command, **kwargs):
return self.environment.execute(command, **kwargs)
def run(self, command, **kwargs):
self.build()
self.stdout.write(style.format_command('Running', command))
return self.execute(command, **kwargs)
def get_dependency_manager(self, dev=False):
return DependencyManager(
os.path.join(
self.directory,
self.dev_requirements_file if dev else self.requirements_file
)
)
def add(self, addon, dev=False, interactive=True):
"""Add a new dependency and install it."""
dependencies = self.get_dependency_manager(dev=dev)
other_dependencies = self.get_dependency_manager(dev=not dev)
existing = dependencies.get(addon)
self.stdout.write(style.format_command('Adding', addon))
dependencies.add(addon)
try:
# try running the build
self.build()
self.refresh()
# remove version of this in other requirements file
other_dependencies.remove(addon, warn=False)
# run new addon constructor
constructor_name = '%s.init' % Dependency(addon).module_name
constructor = self.blueprints.get(constructor_name)
if constructor:
context = constructor.load_context().main(
[], standalone_mode=False
)
self.generate(constructor, context, interactive=interactive)
except Exception as e:
# restore original settings
self.stdout.write(style.red(str(e)))
self.stdout.write(
style.yellow('Could not find %s' % addon)
)
dependencies.remove(addon)
if existing:
dependencies.add(existing)
return
def remove(self, addon, dev=False):
"""Remove a dependency and uninstall it."""
dependencies = self.get_dependency_manager(dev=dev)
other_dependencies = self.get_dependency_manager(dev=not dev)
self.stdout.write(style.format_command('Removing', addon))
removed = dependencies.remove(addon, warn=False)
if not removed:
removed = other_dependencies.remove(addon, warn=False)
if removed:
self.build()
else:
exception = '%s is not installed.' % Dependency(addon).to_stdout()
self.stdout.write(style.red(exception))
def info(self):
output = []
dev_requirements = self.get_dependency_manager(dev=True).dependencies
requirements = self.get_dependency_manager(dev=False).dependencies
app = self.to_stdout()
if self.exists:
output.append(style.blue('Application:\n %s' % app))
if requirements:
output.append(style.blue('Requirements:'))
for _, dep in sorted(
requirements.items(),
key=lambda x: x[0].lower()):
output.append(' ' + dep.to_stdout())
if dev_requirements:
output.append(style.blue('Requirements (dev):'))
for _, dep in sorted(
dev_requirements.items(),
key=lambda x: x[0].lower()
):
output.append(' ' + dep.to_stdout())
else:
output.append(
style.yellow(
'%s, try running %s.' % (
app, style.white('dj init')
)
)
)
return '\n'.join(output)
def to_stdout(self):
return '%s %s %s' % (
style.white(self.name),
style.gray('@'),
style.green(self.runtime.version)
) if self.name else style.yellow('No application')
|
aleontiev/dj
|
dj/application.py
|
Application.add
|
python
|
def add(self, addon, dev=False, interactive=True):
dependencies = self.get_dependency_manager(dev=dev)
other_dependencies = self.get_dependency_manager(dev=not dev)
existing = dependencies.get(addon)
self.stdout.write(style.format_command('Adding', addon))
dependencies.add(addon)
try:
# try running the build
self.build()
self.refresh()
# remove version of this in other requirements file
other_dependencies.remove(addon, warn=False)
# run new addon constructor
constructor_name = '%s.init' % Dependency(addon).module_name
constructor = self.blueprints.get(constructor_name)
if constructor:
context = constructor.load_context().main(
[], standalone_mode=False
)
self.generate(constructor, context, interactive=interactive)
except Exception as e:
# restore original settings
self.stdout.write(style.red(str(e)))
self.stdout.write(
style.yellow('Could not find %s' % addon)
)
dependencies.remove(addon)
if existing:
dependencies.add(existing)
return
|
Add a new dependency and install it.
|
train
|
https://github.com/aleontiev/dj/blob/0612d442fdd8d472aea56466568b9857556ecb51/dj/application.py#L314-L347
|
[
"def format_command(a, b='', prefix=''):\n return (\n white(prefix) +\n blue('%s: ' % a) +\n white(b)\n )\n",
"def yellow(message):\n return click.style(message, fg='yellow', bold=True)\n",
"def red(message):\n return click.style(message, fg='red', bold=True)\n",
"def refresh(self):\n if hasattr(self, '_name'):\n del self._name\n if hasattr(self, '_blueprints'):\n del self._blueprints\n if hasattr(self, '_addons'):\n del self._addons\n if hasattr(self, '_exists'):\n del self._exists\n",
"def build(self):\n \"\"\"Builds the app in the app's environment.\n\n Only builds if the build is out-of-date and is non-empty.\n Builds in 3 stages: requirements, dev requirements, and app.\n pip is used to install requirements, and setup.py is used to\n install the app itself.\n\n Raises:\n ValidationError if the app fails to build.\n \"\"\"\n\n if self.exists:\n self._build(\n 'requirements',\n self.requirements_last_modified,\n 'pip install -U -r %s' % self.requirements_file\n )\n try:\n self._build(\n 'requirements (dev)',\n self.dev_requirements_last_modified,\n 'pip install -U -r %s' % self.dev_requirements_file\n )\n except Exception as e:\n if 'No such file' not in str(e):\n raise e\n self.stdout.write(\n style.yellow('Could not find dev requirements')\n )\n try:\n self._build(\n 'requirements (local)',\n self.local_requirements_last_modified,\n 'pip install -U -r %s' % self.local_requirements_file\n )\n except Exception as e:\n if 'No such file' not in str(e):\n raise e\n self.stdout.write(\n style.yellow('Could not find local requirements')\n )\n self._build(\n 'application',\n self.setup_last_modified,\n 'python %s develop' % self.setup_file\n )\n",
"def generate(self, blueprint, context, interactive=True):\n \"\"\"Generate a blueprint within this application.\"\"\"\n if not isinstance(blueprint, Blueprint):\n bp = self.blueprints.get(blueprint)\n if not bp:\n raise ValueError('%s is not a valid blueprint' % blueprint)\n blueprint = bp\n\n self.stdout.write(\n style.format_command(\n 'Generating',\n blueprint.full_name\n )\n )\n generator = Generator(\n self,\n blueprint,\n context,\n interactive=interactive\n )\n result = generator.generate()\n if blueprint.name == 'init':\n # try re-setting the name\n self.refresh()\n return result\n",
"def get_dependency_manager(self, dev=False):\n return DependencyManager(\n os.path.join(\n self.directory,\n self.dev_requirements_file if dev else self.requirements_file\n )\n )\n"
] |
class Application(object):
def __init__(
self,
stdout=None,
directory=None
):
self.stdout = stdout or _stdout
current = os.getcwd()
nearest_setup_file = find_nearest(current, 'setup.py')
self.directory = directory or (
os.path.dirname(
nearest_setup_file
) if nearest_setup_file else current
)
self.config = Config(self.directory)
self.setup_file = os.path.join(
self.directory,
'setup.py'
)
self.requirements_file = os.path.join(
self.directory,
self.config.get('requirements')
)
self.dev_requirements_file = os.path.join(
self.directory,
self.config.get('devRequirements'),
)
self.local_requirements_file = os.path.join(
self.directory,
self.config.get('localRequirements'),
)
self.runtime = Runtime(self.config.get('runtime'))
def __unicode__(self):
return '%s (%s)' % (self.name, self.directory)
@property
def exists(self):
if not hasattr(self, '_exists'):
self._exists = os.path.exists(self.setup_file)
return self._exists
@staticmethod
def parse_application_name(setup_filename):
"""Parse a setup.py file for the name.
Returns:
name, or None
"""
with open(setup_filename, 'rt') as setup_file:
fst = RedBaron(setup_file.read())
for node in fst:
if (
node.type == 'atomtrailers' and
str(node.name) == 'setup'
):
for call in node.call:
if str(call.name) == 'name':
value = call.value
if hasattr(value, 'to_python'):
value = value.to_python()
name = str(value)
break
if name:
break
return name
def _get_name(self):
name = self.config.get('name')
if name:
return name
if self.exists:
try:
name = Application.parse_application_name(self.setup_file)
except Exception:
name = 'unknown'
self.config.set('name', name)
self.config.save()
return name
@property
def name(self):
if not hasattr(self, '_name'):
self._name = self._get_name()
return self._name
@property
def addons(self):
if not hasattr(self, '_addons'):
self._addons = {
a.name: a for a in self.get_addons()
}
return self._addons
def get_addons(self):
self.build()
addons = []
for directory in get_directories(
self.environment.package_directory,
filter=lambda x: x.endswith('/blueprints')
):
parent_directory = '/'.join(directory.split('/')[0:-1])
name = os.path.basename(parent_directory)
addons.append(Addon(name, parent_directory))
return addons
def refresh(self):
if hasattr(self, '_name'):
del self._name
if hasattr(self, '_blueprints'):
del self._blueprints
if hasattr(self, '_addons'):
del self._addons
if hasattr(self, '_exists'):
del self._exists
@property
def blueprints(self):
if not hasattr(self, '_blueprints'):
self._blueprints = {}
for b in self.get_blueprints():
# add by full name, e.g. dj.model
self._blueprints[b.full_name] = b
if not b.addon or b.name not in self._blueprints:
# for blueprints other that init or core,
# add them to the global namespace
self._blueprints[b.name] = b
return self._blueprints
def get_blueprints(self):
addons = self.addons.values()
blueprints = [a.blueprints.values() for a in addons]
return get_core_blueprints() + [x for s in blueprints for x in s]
@property
def requirements_last_modified(self):
return get_last_touched(self.requirements_file)
@property
def dev_requirements_last_modified(self):
return get_last_touched(self.dev_requirements_file)
@property
def local_requirements_last_modified(self):
return get_last_touched(self.local_requirements_file)
@property
def setup_last_modified(self):
# timestamp of last setup.py change
return get_last_touched(self.setup_file)
@property
def environment(self):
if not hasattr(self, '_environment'):
self._environment = self.runtime.create_environment(
self.config.environment_path
)
return self._environment
def _get_build_token(self, key):
return os.path.join(
self.environment.virtual_directory, 'build.%s' % key
)
def _build(self, key, last_modified, cmd, verbose=True):
token = self._get_build_token(key)
last_built = get_last_touched(token)
if not last_built or last_built < last_modified:
self.stdout.write(style.format_command('Building', key))
result = self.execute(cmd, verbose=False, capture=True)
if 'pip' in cmd:
deps = []
for line in result.split('\n'):
splits = line.split(' ')
if line.startswith('Successfully installed'):
dep = splits[2]
dep = '=='.join(dep.rsplit('-', 1))
dep = Dependency(dep)
deps.append((dep, style.green('+ ')))
elif line.startswith('Requirement already satisfied: '):
dep = splits[3]
dep = Dependency(dep)
deps.append((dep, style.yellow('. ')))
elif 'Uninstalling' in line:
index = line.index('Uninstalling')
dep = line[index:].split(' ')[1]
dep = ''.join(dep[0:len(dep) - 1])
dep = '=='.join(dep.rsplit('-', 1))
dep = Dependency(dep)
deps.append((dep, style.red('- ')))
for dep, prefix in sorted(
deps,
key=lambda x: str(x[0])
):
self.stdout.write(prefix + dep.to_stdout())
touch(token)
def build(self):
"""Builds the app in the app's environment.
Only builds if the build is out-of-date and is non-empty.
Builds in 3 stages: requirements, dev requirements, and app.
pip is used to install requirements, and setup.py is used to
install the app itself.
Raises:
ValidationError if the app fails to build.
"""
if self.exists:
self._build(
'requirements',
self.requirements_last_modified,
'pip install -U -r %s' % self.requirements_file
)
try:
self._build(
'requirements (dev)',
self.dev_requirements_last_modified,
'pip install -U -r %s' % self.dev_requirements_file
)
except Exception as e:
if 'No such file' not in str(e):
raise e
self.stdout.write(
style.yellow('Could not find dev requirements')
)
try:
self._build(
'requirements (local)',
self.local_requirements_last_modified,
'pip install -U -r %s' % self.local_requirements_file
)
except Exception as e:
if 'No such file' not in str(e):
raise e
self.stdout.write(
style.yellow('Could not find local requirements')
)
self._build(
'application',
self.setup_last_modified,
'python %s develop' % self.setup_file
)
def execute(self, command, **kwargs):
return self.environment.execute(command, **kwargs)
def run(self, command, **kwargs):
self.build()
self.stdout.write(style.format_command('Running', command))
return self.execute(command, **kwargs)
def generate(self, blueprint, context, interactive=True):
"""Generate a blueprint within this application."""
if not isinstance(blueprint, Blueprint):
bp = self.blueprints.get(blueprint)
if not bp:
raise ValueError('%s is not a valid blueprint' % blueprint)
blueprint = bp
self.stdout.write(
style.format_command(
'Generating',
blueprint.full_name
)
)
generator = Generator(
self,
blueprint,
context,
interactive=interactive
)
result = generator.generate()
if blueprint.name == 'init':
# try re-setting the name
self.refresh()
return result
def get_dependency_manager(self, dev=False):
return DependencyManager(
os.path.join(
self.directory,
self.dev_requirements_file if dev else self.requirements_file
)
)
def remove(self, addon, dev=False):
"""Remove a dependency and uninstall it."""
dependencies = self.get_dependency_manager(dev=dev)
other_dependencies = self.get_dependency_manager(dev=not dev)
self.stdout.write(style.format_command('Removing', addon))
removed = dependencies.remove(addon, warn=False)
if not removed:
removed = other_dependencies.remove(addon, warn=False)
if removed:
self.build()
else:
exception = '%s is not installed.' % Dependency(addon).to_stdout()
self.stdout.write(style.red(exception))
def info(self):
output = []
dev_requirements = self.get_dependency_manager(dev=True).dependencies
requirements = self.get_dependency_manager(dev=False).dependencies
app = self.to_stdout()
if self.exists:
output.append(style.blue('Application:\n %s' % app))
if requirements:
output.append(style.blue('Requirements:'))
for _, dep in sorted(
requirements.items(),
key=lambda x: x[0].lower()):
output.append(' ' + dep.to_stdout())
if dev_requirements:
output.append(style.blue('Requirements (dev):'))
for _, dep in sorted(
dev_requirements.items(),
key=lambda x: x[0].lower()
):
output.append(' ' + dep.to_stdout())
else:
output.append(
style.yellow(
'%s, try running %s.' % (
app, style.white('dj init')
)
)
)
return '\n'.join(output)
def to_stdout(self):
return '%s %s %s' % (
style.white(self.name),
style.gray('@'),
style.green(self.runtime.version)
) if self.name else style.yellow('No application')
|
aleontiev/dj
|
dj/application.py
|
Application.remove
|
python
|
def remove(self, addon, dev=False):
dependencies = self.get_dependency_manager(dev=dev)
other_dependencies = self.get_dependency_manager(dev=not dev)
self.stdout.write(style.format_command('Removing', addon))
removed = dependencies.remove(addon, warn=False)
if not removed:
removed = other_dependencies.remove(addon, warn=False)
if removed:
self.build()
else:
exception = '%s is not installed.' % Dependency(addon).to_stdout()
self.stdout.write(style.red(exception))
|
Remove a dependency and uninstall it.
|
train
|
https://github.com/aleontiev/dj/blob/0612d442fdd8d472aea56466568b9857556ecb51/dj/application.py#L349-L362
|
[
"def format_command(a, b='', prefix=''):\n return (\n white(prefix) +\n blue('%s: ' % a) +\n white(b)\n )\n",
"def red(message):\n return click.style(message, fg='red', bold=True)\n",
"def build(self):\n \"\"\"Builds the app in the app's environment.\n\n Only builds if the build is out-of-date and is non-empty.\n Builds in 3 stages: requirements, dev requirements, and app.\n pip is used to install requirements, and setup.py is used to\n install the app itself.\n\n Raises:\n ValidationError if the app fails to build.\n \"\"\"\n\n if self.exists:\n self._build(\n 'requirements',\n self.requirements_last_modified,\n 'pip install -U -r %s' % self.requirements_file\n )\n try:\n self._build(\n 'requirements (dev)',\n self.dev_requirements_last_modified,\n 'pip install -U -r %s' % self.dev_requirements_file\n )\n except Exception as e:\n if 'No such file' not in str(e):\n raise e\n self.stdout.write(\n style.yellow('Could not find dev requirements')\n )\n try:\n self._build(\n 'requirements (local)',\n self.local_requirements_last_modified,\n 'pip install -U -r %s' % self.local_requirements_file\n )\n except Exception as e:\n if 'No such file' not in str(e):\n raise e\n self.stdout.write(\n style.yellow('Could not find local requirements')\n )\n self._build(\n 'application',\n self.setup_last_modified,\n 'python %s develop' % self.setup_file\n )\n",
"def get_dependency_manager(self, dev=False):\n return DependencyManager(\n os.path.join(\n self.directory,\n self.dev_requirements_file if dev else self.requirements_file\n )\n )\n",
"def to_stdout(self):\n return '%s %s %s' % (\n white(self.name),\n gray(self.operator),\n green(self.version)\n ) if self.operator else white(self.name)\n"
] |
class Application(object):
def __init__(
self,
stdout=None,
directory=None
):
self.stdout = stdout or _stdout
current = os.getcwd()
nearest_setup_file = find_nearest(current, 'setup.py')
self.directory = directory or (
os.path.dirname(
nearest_setup_file
) if nearest_setup_file else current
)
self.config = Config(self.directory)
self.setup_file = os.path.join(
self.directory,
'setup.py'
)
self.requirements_file = os.path.join(
self.directory,
self.config.get('requirements')
)
self.dev_requirements_file = os.path.join(
self.directory,
self.config.get('devRequirements'),
)
self.local_requirements_file = os.path.join(
self.directory,
self.config.get('localRequirements'),
)
self.runtime = Runtime(self.config.get('runtime'))
def __unicode__(self):
return '%s (%s)' % (self.name, self.directory)
@property
def exists(self):
if not hasattr(self, '_exists'):
self._exists = os.path.exists(self.setup_file)
return self._exists
@staticmethod
def parse_application_name(setup_filename):
"""Parse a setup.py file for the name.
Returns:
name, or None
"""
with open(setup_filename, 'rt') as setup_file:
fst = RedBaron(setup_file.read())
for node in fst:
if (
node.type == 'atomtrailers' and
str(node.name) == 'setup'
):
for call in node.call:
if str(call.name) == 'name':
value = call.value
if hasattr(value, 'to_python'):
value = value.to_python()
name = str(value)
break
if name:
break
return name
def _get_name(self):
name = self.config.get('name')
if name:
return name
if self.exists:
try:
name = Application.parse_application_name(self.setup_file)
except Exception:
name = 'unknown'
self.config.set('name', name)
self.config.save()
return name
@property
def name(self):
if not hasattr(self, '_name'):
self._name = self._get_name()
return self._name
@property
def addons(self):
if not hasattr(self, '_addons'):
self._addons = {
a.name: a for a in self.get_addons()
}
return self._addons
def get_addons(self):
self.build()
addons = []
for directory in get_directories(
self.environment.package_directory,
filter=lambda x: x.endswith('/blueprints')
):
parent_directory = '/'.join(directory.split('/')[0:-1])
name = os.path.basename(parent_directory)
addons.append(Addon(name, parent_directory))
return addons
def refresh(self):
if hasattr(self, '_name'):
del self._name
if hasattr(self, '_blueprints'):
del self._blueprints
if hasattr(self, '_addons'):
del self._addons
if hasattr(self, '_exists'):
del self._exists
@property
def blueprints(self):
if not hasattr(self, '_blueprints'):
self._blueprints = {}
for b in self.get_blueprints():
# add by full name, e.g. dj.model
self._blueprints[b.full_name] = b
if not b.addon or b.name not in self._blueprints:
# for blueprints other that init or core,
# add them to the global namespace
self._blueprints[b.name] = b
return self._blueprints
def get_blueprints(self):
addons = self.addons.values()
blueprints = [a.blueprints.values() for a in addons]
return get_core_blueprints() + [x for s in blueprints for x in s]
@property
def requirements_last_modified(self):
return get_last_touched(self.requirements_file)
@property
def dev_requirements_last_modified(self):
return get_last_touched(self.dev_requirements_file)
@property
def local_requirements_last_modified(self):
return get_last_touched(self.local_requirements_file)
@property
def setup_last_modified(self):
# timestamp of last setup.py change
return get_last_touched(self.setup_file)
@property
def environment(self):
if not hasattr(self, '_environment'):
self._environment = self.runtime.create_environment(
self.config.environment_path
)
return self._environment
def _get_build_token(self, key):
return os.path.join(
self.environment.virtual_directory, 'build.%s' % key
)
def _build(self, key, last_modified, cmd, verbose=True):
token = self._get_build_token(key)
last_built = get_last_touched(token)
if not last_built or last_built < last_modified:
self.stdout.write(style.format_command('Building', key))
result = self.execute(cmd, verbose=False, capture=True)
if 'pip' in cmd:
deps = []
for line in result.split('\n'):
splits = line.split(' ')
if line.startswith('Successfully installed'):
dep = splits[2]
dep = '=='.join(dep.rsplit('-', 1))
dep = Dependency(dep)
deps.append((dep, style.green('+ ')))
elif line.startswith('Requirement already satisfied: '):
dep = splits[3]
dep = Dependency(dep)
deps.append((dep, style.yellow('. ')))
elif 'Uninstalling' in line:
index = line.index('Uninstalling')
dep = line[index:].split(' ')[1]
dep = ''.join(dep[0:len(dep) - 1])
dep = '=='.join(dep.rsplit('-', 1))
dep = Dependency(dep)
deps.append((dep, style.red('- ')))
for dep, prefix in sorted(
deps,
key=lambda x: str(x[0])
):
self.stdout.write(prefix + dep.to_stdout())
touch(token)
def build(self):
"""Builds the app in the app's environment.
Only builds if the build is out-of-date and is non-empty.
Builds in 3 stages: requirements, dev requirements, and app.
pip is used to install requirements, and setup.py is used to
install the app itself.
Raises:
ValidationError if the app fails to build.
"""
if self.exists:
self._build(
'requirements',
self.requirements_last_modified,
'pip install -U -r %s' % self.requirements_file
)
try:
self._build(
'requirements (dev)',
self.dev_requirements_last_modified,
'pip install -U -r %s' % self.dev_requirements_file
)
except Exception as e:
if 'No such file' not in str(e):
raise e
self.stdout.write(
style.yellow('Could not find dev requirements')
)
try:
self._build(
'requirements (local)',
self.local_requirements_last_modified,
'pip install -U -r %s' % self.local_requirements_file
)
except Exception as e:
if 'No such file' not in str(e):
raise e
self.stdout.write(
style.yellow('Could not find local requirements')
)
self._build(
'application',
self.setup_last_modified,
'python %s develop' % self.setup_file
)
def execute(self, command, **kwargs):
return self.environment.execute(command, **kwargs)
def run(self, command, **kwargs):
self.build()
self.stdout.write(style.format_command('Running', command))
return self.execute(command, **kwargs)
def generate(self, blueprint, context, interactive=True):
"""Generate a blueprint within this application."""
if not isinstance(blueprint, Blueprint):
bp = self.blueprints.get(blueprint)
if not bp:
raise ValueError('%s is not a valid blueprint' % blueprint)
blueprint = bp
self.stdout.write(
style.format_command(
'Generating',
blueprint.full_name
)
)
generator = Generator(
self,
blueprint,
context,
interactive=interactive
)
result = generator.generate()
if blueprint.name == 'init':
# try re-setting the name
self.refresh()
return result
def get_dependency_manager(self, dev=False):
return DependencyManager(
os.path.join(
self.directory,
self.dev_requirements_file if dev else self.requirements_file
)
)
def add(self, addon, dev=False, interactive=True):
"""Add a new dependency and install it."""
dependencies = self.get_dependency_manager(dev=dev)
other_dependencies = self.get_dependency_manager(dev=not dev)
existing = dependencies.get(addon)
self.stdout.write(style.format_command('Adding', addon))
dependencies.add(addon)
try:
# try running the build
self.build()
self.refresh()
# remove version of this in other requirements file
other_dependencies.remove(addon, warn=False)
# run new addon constructor
constructor_name = '%s.init' % Dependency(addon).module_name
constructor = self.blueprints.get(constructor_name)
if constructor:
context = constructor.load_context().main(
[], standalone_mode=False
)
self.generate(constructor, context, interactive=interactive)
except Exception as e:
# restore original settings
self.stdout.write(style.red(str(e)))
self.stdout.write(
style.yellow('Could not find %s' % addon)
)
dependencies.remove(addon)
if existing:
dependencies.add(existing)
return
def info(self):
output = []
dev_requirements = self.get_dependency_manager(dev=True).dependencies
requirements = self.get_dependency_manager(dev=False).dependencies
app = self.to_stdout()
if self.exists:
output.append(style.blue('Application:\n %s' % app))
if requirements:
output.append(style.blue('Requirements:'))
for _, dep in sorted(
requirements.items(),
key=lambda x: x[0].lower()):
output.append(' ' + dep.to_stdout())
if dev_requirements:
output.append(style.blue('Requirements (dev):'))
for _, dep in sorted(
dev_requirements.items(),
key=lambda x: x[0].lower()
):
output.append(' ' + dep.to_stdout())
else:
output.append(
style.yellow(
'%s, try running %s.' % (
app, style.white('dj init')
)
)
)
return '\n'.join(output)
def to_stdout(self):
return '%s %s %s' % (
style.white(self.name),
style.gray('@'),
style.green(self.runtime.version)
) if self.name else style.yellow('No application')
|
aleontiev/dj
|
dj/commands/lint.py
|
lint
|
python
|
def lint(args):
application = get_current_application()
if not args:
args = [application.name, 'tests']
args = ['flake8'] + list(args)
run.main(args, standalone_mode=False)
|
Run lint checks using flake8.
|
train
|
https://github.com/aleontiev/dj/blob/0612d442fdd8d472aea56466568b9857556ecb51/dj/commands/lint.py#L13-L19
|
[
"def get_current_application():\n global current_application\n if not current_application:\n current_application = Application()\n return current_application\n"
] |
from __future__ import absolute_import
import click
from dj.application import get_current_application
from .run import run
@click.argument('args', nargs=-1, type=click.UNPROCESSED)
@click.command(
context_settings={
'ignore_unknown_options': True
}
)
|
aleontiev/dj
|
dj/utils/imports.py
|
load_module
|
python
|
def load_module(filename):
path, name = os.path.split(filename)
name, ext = os.path.splitext(name)
(file, filename, desc) = imp.find_module(name, [path])
try:
return imp.load_module(name, file, filename, desc)
finally:
if file:
file.close()
|
Loads a module from anywhere in the system.
Does not depend on or modify sys.path.
|
train
|
https://github.com/aleontiev/dj/blob/0612d442fdd8d472aea56466568b9857556ecb51/dj/utils/imports.py#L5-L18
| null |
import imp
import os
|
aleontiev/dj
|
dj/commands/info.py
|
info
|
python
|
def info():
application = get_current_application()
info = application.info()
stdout.write(info)
return info
|
Display app info.
Examples:
$ dj info
No application, try running dj init.
$ dj info
Application:
foo @ 2.7.9
Requirements:
Django == 1.10
|
train
|
https://github.com/aleontiev/dj/blob/0612d442fdd8d472aea56466568b9857556ecb51/dj/commands/info.py#L8-L26
|
[
"def get_current_application():\n global current_application\n if not current_application:\n current_application = Application()\n return current_application\n",
"def info(self):\n output = []\n dev_requirements = self.get_dependency_manager(dev=True).dependencies\n requirements = self.get_dependency_manager(dev=False).dependencies\n app = self.to_stdout()\n if self.exists:\n output.append(style.blue('Application:\\n %s' % app))\n if requirements:\n output.append(style.blue('Requirements:'))\n for _, dep in sorted(\n requirements.items(),\n key=lambda x: x[0].lower()):\n output.append(' ' + dep.to_stdout())\n if dev_requirements:\n output.append(style.blue('Requirements (dev):'))\n for _, dep in sorted(\n dev_requirements.items(),\n key=lambda x: x[0].lower()\n ):\n output.append(' ' + dep.to_stdout())\n else:\n output.append(\n style.yellow(\n '%s, try running %s.' % (\n app, style.white('dj init')\n )\n )\n )\n\n return '\\n'.join(output)\n",
"def write(self, message, **kwargs):\n copy = self.kwargs.copy()\n copy.update(kwargs)\n click.echo(click.style(message, **copy))\n"
] |
from __future__ import absolute_import
import click
from dj.application import get_current_application
from dj.utils.system import stdout
@click.command()
|
aleontiev/dj
|
dj/generator.py
|
Generator.render
|
python
|
def render(self):
context = self.context
if 'app' not in context:
context['app'] = self.application.name
temp_dir = self.temp_dir
templates_root = self.blueprint.templates_directory
for root, dirs, files in os.walk(templates_root):
for directory in dirs:
directory = os.path.join(root, directory)
directory = render_from_string(directory, context)
directory = directory.replace(templates_root, temp_dir, 1)
os.mkdir(directory)
for file in files:
full_file = os.path.join(root, file)
stat = os.stat(full_file)
content = render_from_file(full_file, context)
full_file = strip_extension(
render_from_string(full_file, context))
full_file = full_file.replace(templates_root, temp_dir, 1)
with open(full_file, 'w') as f:
f.write(content)
os.chmod(full_file, stat.st_mode)
|
Render the blueprint into a temp directory using the context.
|
train
|
https://github.com/aleontiev/dj/blob/0612d442fdd8d472aea56466568b9857556ecb51/dj/generator.py#L44-L66
|
[
"def strip_extension(string):\n if string.endswith('.j2'):\n string = string[:-3]\n return string\n",
"def render_from_string(string, context):\n environment = Environment(undefined=StrictUndefined)\n return environment.from_string(string).render(**context)\n",
"def render_from_file(file, context):\n with open(file, 'r') as f:\n string = f.read()\n return render_from_string(string, context)\n"
] |
class Generator(object):
def __init__(
self,
application,
blueprint,
context,
interactive=True,
stdout=None
):
self.stdout = stdout or _stdout
self.application = application
self.interactive = interactive
self.blueprint = blueprint
self.context = context
self.temp_dir = tempfile.mkdtemp()
def __del__(self):
try:
shutil.rmtree(self.temp_dir)
finally:
self.temp_dir = None
def generate(self):
"""Generate the blueprint."""
self.render()
self.merge()
def merge(self):
"""Merges the rendered blueprint into the application."""
temp_dir = self.temp_dir
app_dir = self.application.directory
for root, dirs, files in os.walk(temp_dir):
for directory in dirs:
directory = os.path.join(root, directory)
directory = directory.replace(temp_dir, app_dir, 1)
try:
os.mkdir(directory)
except OSError:
pass
for file in files:
source = os.path.join(root, file)
target = source.replace(temp_dir, app_dir, 1)
relative_target = target.replace(app_dir, '.')
action = 'r'
if (
os.path.exists(target)
and not filecmp.cmp(source, target, shallow=False)
and os.stat(target).st_size > 0
):
# target exists, is not empty, and does not
# match source
if target.endswith('__init__.py'):
# default merge __init__.py files
# if non-empty, these should only
# contain imports from submoduiles
action = 'm'
elif target.endswith('base.py'):
# default skip base.py files
# these should be extended by the developer
action = 's'
else:
default = 'm'
action = click.prompt(
style.prompt(
'%s already exists, '
'[r]eplace, [s]kip, or [m]erge?' % (
relative_target
),
),
default=style.default(default)
) if self.interactive else default
action = click.unstyle(action).lower()
if action not in {'r', 'm', 's'}:
action = default
if action == 's':
self.stdout.write(
'? %s' % style.white(relative_target),
fg='yellow'
)
continue
if action == 'r':
with open(source, 'r') as source_file:
with open(target, 'w') as target_file:
target_file.write(source_file.read())
self.stdout.write(
style.green(
'+ %s' % style.white(relative_target)
)
)
if action == 'm':
with open(target, 'r') as target_file:
with open(source, 'r') as source_file:
merged = merge(
target_file.read(),
source_file.read()
)
with open(target, 'w') as target_file:
target_file.write(merged)
self.stdout.write(
style.yellow('> %s' % style.white(relative_target))
)
|
aleontiev/dj
|
dj/generator.py
|
Generator.merge
|
python
|
def merge(self):
temp_dir = self.temp_dir
app_dir = self.application.directory
for root, dirs, files in os.walk(temp_dir):
for directory in dirs:
directory = os.path.join(root, directory)
directory = directory.replace(temp_dir, app_dir, 1)
try:
os.mkdir(directory)
except OSError:
pass
for file in files:
source = os.path.join(root, file)
target = source.replace(temp_dir, app_dir, 1)
relative_target = target.replace(app_dir, '.')
action = 'r'
if (
os.path.exists(target)
and not filecmp.cmp(source, target, shallow=False)
and os.stat(target).st_size > 0
):
# target exists, is not empty, and does not
# match source
if target.endswith('__init__.py'):
# default merge __init__.py files
# if non-empty, these should only
# contain imports from submoduiles
action = 'm'
elif target.endswith('base.py'):
# default skip base.py files
# these should be extended by the developer
action = 's'
else:
default = 'm'
action = click.prompt(
style.prompt(
'%s already exists, '
'[r]eplace, [s]kip, or [m]erge?' % (
relative_target
),
),
default=style.default(default)
) if self.interactive else default
action = click.unstyle(action).lower()
if action not in {'r', 'm', 's'}:
action = default
if action == 's':
self.stdout.write(
'? %s' % style.white(relative_target),
fg='yellow'
)
continue
if action == 'r':
with open(source, 'r') as source_file:
with open(target, 'w') as target_file:
target_file.write(source_file.read())
self.stdout.write(
style.green(
'+ %s' % style.white(relative_target)
)
)
if action == 'm':
with open(target, 'r') as target_file:
with open(source, 'r') as source_file:
merged = merge(
target_file.read(),
source_file.read()
)
with open(target, 'w') as target_file:
target_file.write(merged)
self.stdout.write(
style.yellow('> %s' % style.white(relative_target))
)
|
Merges the rendered blueprint into the application.
|
train
|
https://github.com/aleontiev/dj/blob/0612d442fdd8d472aea56466568b9857556ecb51/dj/generator.py#L68-L143
|
[
"def green(message):\n return click.style(message, fg='green', bold=True)\n",
"def white(message):\n return click.style(message, fg='white', bold=True)\n"
] |
class Generator(object):
def __init__(
self,
application,
blueprint,
context,
interactive=True,
stdout=None
):
self.stdout = stdout or _stdout
self.application = application
self.interactive = interactive
self.blueprint = blueprint
self.context = context
self.temp_dir = tempfile.mkdtemp()
def __del__(self):
try:
shutil.rmtree(self.temp_dir)
finally:
self.temp_dir = None
def generate(self):
"""Generate the blueprint."""
self.render()
self.merge()
def render(self):
"""Render the blueprint into a temp directory using the context."""
context = self.context
if 'app' not in context:
context['app'] = self.application.name
temp_dir = self.temp_dir
templates_root = self.blueprint.templates_directory
for root, dirs, files in os.walk(templates_root):
for directory in dirs:
directory = os.path.join(root, directory)
directory = render_from_string(directory, context)
directory = directory.replace(templates_root, temp_dir, 1)
os.mkdir(directory)
for file in files:
full_file = os.path.join(root, file)
stat = os.stat(full_file)
content = render_from_file(full_file, context)
full_file = strip_extension(
render_from_string(full_file, context))
full_file = full_file.replace(templates_root, temp_dir, 1)
with open(full_file, 'w') as f:
f.write(content)
os.chmod(full_file, stat.st_mode)
|
aleontiev/dj
|
dj/commands/run.py
|
run
|
python
|
def run(quiet, args):
if not args:
raise ClickException('pass a command to run')
cmd = ' '.join(args)
application = get_current_application()
name = application.name
settings = os.environ.get('DJANGO_SETTINGS_MODULE', '%s.settings' % name)
return application.run(
cmd,
verbose=not quiet,
abort=False,
capture=True,
env={
'DJANGO_SETTINGS_MODULE': settings
}
)
|
Run a local command.
Examples:
$ django run manage.py runserver
...
|
train
|
https://github.com/aleontiev/dj/blob/0612d442fdd8d472aea56466568b9857556ecb51/dj/commands/run.py#L17-L41
|
[
"def get_current_application():\n global current_application\n if not current_application:\n current_application = Application()\n return current_application\n",
"def run(self, command, **kwargs):\n self.build()\n self.stdout.write(style.format_command('Running', command))\n return self.execute(command, **kwargs)\n"
] |
from __future__ import absolute_import
import click
import os
from click.exceptions import ClickException
from dj.application import get_current_application
from dj.utils import style
from .base import stdout
@click.option('--quiet', is_flag=True, default=False)
@click.argument('args', nargs=-1, type=click.UNPROCESSED)
@click.command(
context_settings={
'ignore_unknown_options': True
}
)
|
aleontiev/dj
|
dj/commands/server.py
|
server
|
python
|
def server(port):
args = ['python', 'manage.py', 'runserver']
if port:
args.append(port)
run.main(args)
|
Start the Django dev server.
|
train
|
https://github.com/aleontiev/dj/blob/0612d442fdd8d472aea56466568b9857556ecb51/dj/commands/server.py#L8-L13
| null |
from __future__ import absolute_import
import click
from .run import run
@click.command()
@click.argument('port', required=False)
|
aleontiev/dj
|
dj/commands/remove.py
|
remove
|
python
|
def remove(addon, dev):
application = get_current_application()
application.remove(addon, dev=dev)
|
Remove a dependency.
Examples:
$ django remove dynamic-rest
- dynamic-rest == 1.5.0
|
train
|
https://github.com/aleontiev/dj/blob/0612d442fdd8d472aea56466568b9857556ecb51/dj/commands/remove.py#L9-L19
|
[
"def get_current_application():\n global current_application\n if not current_application:\n current_application = Application()\n return current_application\n",
"def remove(self, addon, dev=False):\n \"\"\"Remove a dependency and uninstall it.\"\"\"\n dependencies = self.get_dependency_manager(dev=dev)\n other_dependencies = self.get_dependency_manager(dev=not dev)\n self.stdout.write(style.format_command('Removing', addon))\n removed = dependencies.remove(addon, warn=False)\n if not removed:\n removed = other_dependencies.remove(addon, warn=False)\n\n if removed:\n self.build()\n else:\n exception = '%s is not installed.' % Dependency(addon).to_stdout()\n self.stdout.write(style.red(exception))\n"
] |
from __future__ import absolute_import
import click
from dj.application import get_current_application
@click.argument('addon')
@click.option('--dev', is_flag=True)
@click.command()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.